   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3   Copyright (C) 2000-2001 Qualcomm Incorporated
   4
   5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
   6
   7   This program is free software; you can redistribute it and/or modify
   8   it under the terms of the GNU General Public License version 2 as
   9   published by the Free Software Foundation;
  10
  11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19
  20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  22   SOFTWARE IS DISCLAIMED.
  23*/
  24
  25/* Bluetooth HCI sockets. */
  26#include <linux/compat.h>
  27#include <linux/export.h>
  28#include <linux/utsname.h>
  29#include <linux/sched.h>
  30#include <linux/unaligned.h>
  31
  32#include <net/bluetooth/bluetooth.h>
  33#include <net/bluetooth/hci_core.h>
  34#include <net/bluetooth/hci_mon.h>
  35#include <net/bluetooth/mgmt.h>
  36
  37#include "mgmt_util.h"
  38
  39static LIST_HEAD(mgmt_chan_list);
  40static DEFINE_MUTEX(mgmt_chan_list_lock);
  41
  42static DEFINE_IDA(sock_cookie_ida);
  43
  44static atomic_t monitor_promisc = ATOMIC_INIT(0);
  45
  46/* ----- HCI socket interface ----- */
  47
  48/* Socket info */
  49#define hci_pi(sk) ((struct hci_pinfo *) sk)
  50
  51struct hci_pinfo {
  52	struct bt_sock    bt;
  53	struct hci_dev    *hdev;
  54	struct hci_filter filter;
  55	__u8              cmsg_mask;
  56	unsigned short    channel;
  57	unsigned long     flags;
  58	__u32             cookie;
  59	char              comm[TASK_COMM_LEN];
  60	__u16             mtu;
  61};
  62
  63static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
  64{
  65	struct hci_dev *hdev = hci_pi(sk)->hdev;
  66
  67	if (!hdev)
  68		return ERR_PTR(-EBADFD);
  69	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
  70		return ERR_PTR(-EPIPE);
  71	return hdev;
  72}
  73
  74void hci_sock_set_flag(struct sock *sk, int nr)
  75{
  76	set_bit(nr, &hci_pi(sk)->flags);
  77}
  78
  79void hci_sock_clear_flag(struct sock *sk, int nr)
  80{
  81	clear_bit(nr, &hci_pi(sk)->flags);
  82}
  83
  84int hci_sock_test_flag(struct sock *sk, int nr)
  85{
  86	return test_bit(nr, &hci_pi(sk)->flags);
  87}
  88
  89unsigned short hci_sock_get_channel(struct sock *sk)
  90{
  91	return hci_pi(sk)->channel;
  92}
  93
  94u32 hci_sock_get_cookie(struct sock *sk)
  95{
  96	return hci_pi(sk)->cookie;
  97}
  98
  99static bool hci_sock_gen_cookie(struct sock *sk)
 100{
 101	int id = hci_pi(sk)->cookie;
 102
 103	if (!id) {
 104		id = ida_alloc_min(&sock_cookie_ida, 1, GFP_KERNEL);
 105		if (id < 0)
 106			id = 0xffffffff;
 107
 108		hci_pi(sk)->cookie = id;
 109		get_task_comm(hci_pi(sk)->comm, current);
 110		return true;
 111	}
 112
 113	return false;
 114}
 115
 116static void hci_sock_free_cookie(struct sock *sk)
 117{
 118	int id = hci_pi(sk)->cookie;
 119
 120	if (id) {
 121		hci_pi(sk)->cookie = 0xffffffff;
 122		ida_free(&sock_cookie_ida, id);
 123	}
 124}
 125
 126static inline int hci_test_bit(int nr, const void *addr)
 127{
 128	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
 129}
 130
 131/* Security filter */
 132#define HCI_SFLT_MAX_OGF  5
 133
 134struct hci_sec_filter {
 135	__u32 type_mask;
 136	__u32 event_mask[2];
 137	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
 138};
 139
 140static const struct hci_sec_filter hci_sec_filter = {
 141	/* Packet types */
 142	0x10,
 143	/* Events */
 144	{ 0x1000d9fe, 0x0000b00c },
 145	/* Commands */
 146	{
 147		{ 0x0 },
 148		/* OGF_LINK_CTL */
 149		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
 150		/* OGF_LINK_POLICY */
 151		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
 152		/* OGF_HOST_CTL */
 153		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
 154		/* OGF_INFO_PARAM */
 155		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
 156		/* OGF_STATUS_PARAM */
 157		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
 158	}
 159};
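
/*
 * Editor's note (not part of the upstream file): hci_sock_sendmsg() below
 * consults this table via hci_test_bit() to decide which commands an
 * unprivileged raw socket may send. A hypothetical standalone sketch of
 * that check, using the same bit layout as hci_test_bit():
 */
#if 0 /* illustration only */
static bool unprivileged_cmd_allowed(__u16 opcode)
{
	__u16 ogf = opcode >> 10;	/* hci_opcode_ogf() */
	__u16 ocf = opcode & 0x03ff;	/* hci_opcode_ocf() */

	if (ogf > HCI_SFLT_MAX_OGF)
		return false;

	/* word = (ocf & 63) >> 5, bit = ocf & 31, as in hci_test_bit() */
	return hci_sec_filter.ocf_mask[ogf][(ocf & 63) >> 5] &
	       ((__u32)1 << (ocf & 31));
}

/* Example: Read_Local_Version_Information (opcode 0x1001) hits bit 1 of
 * ocf_mask[OGF_INFO_PARAM][0] = 0x000002be, which is set -> allowed.
 * HCI_Reset (0x0c03) hits bit 3 of ocf_mask[OGF_HOST_CTL][0] = 0xaab00200,
 * which is clear -> CAP_NET_RAW required.
 */
#endif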
 160
 161static struct bt_sock_list hci_sk_list = {
 162	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
 163};
 164
 165static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
 166{
 167	struct hci_filter *flt;
 168	int flt_type, flt_event;
 169
 170	/* Apply filter */
 171	flt = &hci_pi(sk)->filter;
 172
 173	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
 174
 175	if (!test_bit(flt_type, &flt->type_mask))
 176		return true;
 177
 178	/* Extra filter for event packets only */
 179	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
 180		return false;
 181
 182	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
 183
 184	if (!hci_test_bit(flt_event, &flt->event_mask))
 185		return true;
 186
 187	/* Check filter only when opcode is set */
 188	if (!flt->opcode)
 189		return false;
 190
 191	if (flt_event == HCI_EV_CMD_COMPLETE &&
 192	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
 193		return true;
 194
 195	if (flt_event == HCI_EV_CMD_STATUS &&
 196	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
 197		return true;
 198
 199	return false;
 200}
 201
 202/* Send frame to RAW socket */
 203void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 204{
 205	struct sock *sk;
 206	struct sk_buff *skb_copy = NULL;
 207
 208	BT_DBG("hdev %p len %d", hdev, skb->len);
 209
 210	read_lock(&hci_sk_list.lock);
 211
 212	sk_for_each(sk, &hci_sk_list.head) {
 213		struct sk_buff *nskb;
 214
 215		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
 216			continue;
 217
 218		/* Don't send frame to the socket it came from */
 219		if (skb->sk == sk)
 220			continue;
 221
 222		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
 223			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
 224			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 225			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 226			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
 227			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
 228				continue;
 229			if (is_filtered_packet(sk, skb))
 230				continue;
 231		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 232			if (!bt_cb(skb)->incoming)
 233				continue;
 234			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 235			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 236			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
 237			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
 238				continue;
 239		} else {
 240			/* Don't send frame to other channel types */
 241			continue;
 242		}
 243
 244		if (!skb_copy) {
 245			/* Create a private copy with headroom */
 246			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
 247			if (!skb_copy)
 248				continue;
 249
 250			/* Put type byte before the data */
 251			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
 252		}
 253
 254		nskb = skb_clone(skb_copy, GFP_ATOMIC);
 255		if (!nskb)
 256			continue;
 257
 258		if (sock_queue_rcv_skb(sk, nskb))
 259			kfree_skb(nskb);
 260	}
 261
 262	read_unlock(&hci_sk_list.lock);
 263
 264	kfree_skb(skb_copy);
 265}
 266
 267static void hci_sock_copy_creds(struct sock *sk, struct sk_buff *skb)
 268{
 269	struct scm_creds *creds;
 270
 271	if (!sk || WARN_ON(!skb))
 272		return;
 273
 274	creds = &bt_cb(skb)->creds;
 275
  276	/* Check if peer credentials are set */
  277	if (!sk->sk_peer_pid) {
  278		/* Check if parent peer credentials are set */
 279		if (bt_sk(sk)->parent && bt_sk(sk)->parent->sk_peer_pid)
 280			sk = bt_sk(sk)->parent;
 281		else
 282			return;
 283	}
 284
 285	/* Check if scm_creds already set */
 286	if (creds->pid == pid_vnr(sk->sk_peer_pid))
 287		return;
 288
 289	memset(creds, 0, sizeof(*creds));
 290
 291	creds->pid = pid_vnr(sk->sk_peer_pid);
 292	if (sk->sk_peer_cred) {
 293		creds->uid = sk->sk_peer_cred->uid;
 294		creds->gid = sk->sk_peer_cred->gid;
 295	}
 296}
 297
 298static struct sk_buff *hci_skb_clone(struct sk_buff *skb)
 299{
 300	struct sk_buff *nskb;
 301
 302	if (!skb)
 303		return NULL;
 304
 305	nskb = skb_clone(skb, GFP_ATOMIC);
 306	if (!nskb)
 307		return NULL;
 308
 309	hci_sock_copy_creds(skb->sk, nskb);
 310
 311	return nskb;
 312}
 313
 314/* Send frame to sockets with specific channel */
 315static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 316				  int flag, struct sock *skip_sk)
 317{
 318	struct sock *sk;
 319
 320	BT_DBG("channel %u len %d", channel, skb->len);
 321
 322	sk_for_each(sk, &hci_sk_list.head) {
 323		struct sk_buff *nskb;
 324
 325		/* Ignore socket without the flag set */
 326		if (!hci_sock_test_flag(sk, flag))
 327			continue;
 328
 329		/* Skip the original socket */
 330		if (sk == skip_sk)
 331			continue;
 332
 333		if (sk->sk_state != BT_BOUND)
 334			continue;
 335
 336		if (hci_pi(sk)->channel != channel)
 337			continue;
 338
 339		nskb = hci_skb_clone(skb);
 340		if (!nskb)
 341			continue;
 342
 343		if (sock_queue_rcv_skb(sk, nskb))
 344			kfree_skb(nskb);
 345	}
 346
 347}
 348
 349void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 350			 int flag, struct sock *skip_sk)
 351{
 352	read_lock(&hci_sk_list.lock);
 353	__hci_send_to_channel(channel, skb, flag, skip_sk);
 354	read_unlock(&hci_sk_list.lock);
 355}
 356
 357/* Send frame to monitor socket */
 358void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 359{
 360	struct sk_buff *skb_copy = NULL;
 361	struct hci_mon_hdr *hdr;
 362	__le16 opcode;
 363
 364	if (!atomic_read(&monitor_promisc))
 365		return;
 366
 367	BT_DBG("hdev %p len %d", hdev, skb->len);
 368
 369	switch (hci_skb_pkt_type(skb)) {
 370	case HCI_COMMAND_PKT:
 371		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
 372		break;
 373	case HCI_EVENT_PKT:
 374		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
 375		break;
 376	case HCI_ACLDATA_PKT:
 377		if (bt_cb(skb)->incoming)
 378			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
 379		else
 380			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
 381		break;
 382	case HCI_SCODATA_PKT:
 383		if (bt_cb(skb)->incoming)
 384			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
 385		else
 386			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
 387		break;
 388	case HCI_ISODATA_PKT:
 389		if (bt_cb(skb)->incoming)
 390			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
 391		else
 392			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
 393		break;
 394	case HCI_DIAG_PKT:
 395		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
 396		break;
 397	default:
 398		return;
 399	}
 400
 401	/* Create a private copy with headroom */
 402	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
 403	if (!skb_copy)
 404		return;
 405
 406	hci_sock_copy_creds(skb->sk, skb_copy);
 407
 408	/* Put header before the data */
 409	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
 410	hdr->opcode = opcode;
 411	hdr->index = cpu_to_le16(hdev->id);
 412	hdr->len = cpu_to_le16(skb->len);
 413
 414	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
 415			    HCI_SOCK_TRUSTED, NULL);
 416	kfree_skb(skb_copy);
 417}
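
/*
 * Editor's sketch (not part of the upstream file): every frame delivered
 * on HCI_CHANNEL_MONITOR starts with the 6-byte little-endian hci_mon_hdr
 * built above (opcode, index, len). A minimal userspace reader, assuming
 * the uapi values AF_BLUETOOTH=31, BTPROTO_HCI=1, HCI_CHANNEL_MONITOR=2
 * and HCI_DEV_NONE=0xffff:
 */
#if 0 /* userspace illustration only */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>

struct sockaddr_hci_u {			/* mirrors struct sockaddr_hci */
	sa_family_t    hci_family;
	unsigned short hci_dev;
	unsigned short hci_channel;
};

int main(void)
{
	struct sockaddr_hci_u a = {
		.hci_family  = 31,	/* AF_BLUETOOTH */
		.hci_dev     = 0xffff,	/* HCI_DEV_NONE */
		.hci_channel = 2,	/* HCI_CHANNEL_MONITOR */
	};
	uint8_t buf[2048];
	int fd = socket(31, SOCK_RAW | SOCK_CLOEXEC, 1 /* BTPROTO_HCI */);

	if (fd < 0 || bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0)
		return 1;		/* binding requires CAP_NET_RAW */

	for (;;) {			/* one read() per monitor frame */
		ssize_t n = read(fd, buf, sizeof(buf));

		if (n < 6)
			break;
		printf("opcode 0x%04x index %u len %u\n",
		       buf[0] | (buf[1] << 8), buf[2] | (buf[3] << 8),
		       buf[4] | (buf[5] << 8));
	}
	return 0;
}
#endif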
 418
 419void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
 420				 void *data, u16 data_len, ktime_t tstamp,
 421				 int flag, struct sock *skip_sk)
 422{
 423	struct sock *sk;
 424	__le16 index;
 425
 426	if (hdev)
 427		index = cpu_to_le16(hdev->id);
 428	else
 429		index = cpu_to_le16(MGMT_INDEX_NONE);
 430
 431	read_lock(&hci_sk_list.lock);
 432
 433	sk_for_each(sk, &hci_sk_list.head) {
 434		struct hci_mon_hdr *hdr;
 435		struct sk_buff *skb;
 436
 437		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
 438			continue;
 439
 440		/* Ignore socket without the flag set */
 441		if (!hci_sock_test_flag(sk, flag))
 442			continue;
 443
 444		/* Skip the original socket */
 445		if (sk == skip_sk)
 446			continue;
 447
 448		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
 449		if (!skb)
 450			continue;
 451
 452		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 453		put_unaligned_le16(event, skb_put(skb, 2));
 454
 455		if (data)
 456			skb_put_data(skb, data, data_len);
 457
 458		skb->tstamp = tstamp;
 459
 460		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 461		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
 462		hdr->index = index;
 463		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 464
 465		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 466				      HCI_SOCK_TRUSTED, NULL);
 467		kfree_skb(skb);
 468	}
 469
 470	read_unlock(&hci_sk_list.lock);
 471}
 472
 473static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
 474{
 475	struct hci_mon_hdr *hdr;
 476	struct hci_mon_new_index *ni;
 477	struct hci_mon_index_info *ii;
 478	struct sk_buff *skb;
 479	__le16 opcode;
 480
 481	switch (event) {
 482	case HCI_DEV_REG:
 483		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
 484		if (!skb)
 485			return NULL;
 486
 487		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
 488		ni->type = 0x00; /* Old hdev->dev_type */
 489		ni->bus = hdev->bus;
 490		bacpy(&ni->bdaddr, &hdev->bdaddr);
 491		memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
 492			       strnlen(hdev->name, sizeof(ni->name)), '\0');
 493
 494		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
 495		break;
 496
 497	case HCI_DEV_UNREG:
 498		skb = bt_skb_alloc(0, GFP_ATOMIC);
 499		if (!skb)
 500			return NULL;
 501
 502		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
 503		break;
 504
 505	case HCI_DEV_SETUP:
 506		if (hdev->manufacturer == 0xffff)
 507			return NULL;
 508		fallthrough;
 509
 510	case HCI_DEV_UP:
 511		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
 512		if (!skb)
 513			return NULL;
 514
 515		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
 516		bacpy(&ii->bdaddr, &hdev->bdaddr);
 517		ii->manufacturer = cpu_to_le16(hdev->manufacturer);
 518
 519		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
 520		break;
 521
 522	case HCI_DEV_OPEN:
 523		skb = bt_skb_alloc(0, GFP_ATOMIC);
 524		if (!skb)
 525			return NULL;
 526
 527		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
 528		break;
 529
 530	case HCI_DEV_CLOSE:
 531		skb = bt_skb_alloc(0, GFP_ATOMIC);
 532		if (!skb)
 533			return NULL;
 534
 535		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
 536		break;
 537
 538	default:
 539		return NULL;
 540	}
 541
 542	__net_timestamp(skb);
 543
 544	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 545	hdr->opcode = opcode;
 546	hdr->index = cpu_to_le16(hdev->id);
 547	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 548
 549	return skb;
 550}
 551
 552static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
 553{
 554	struct hci_mon_hdr *hdr;
 555	struct sk_buff *skb;
 556	u16 format;
 557	u8 ver[3];
 558	u32 flags;
 559
 560	/* No message needed when cookie is not present */
 561	if (!hci_pi(sk)->cookie)
 562		return NULL;
 563
 564	switch (hci_pi(sk)->channel) {
 565	case HCI_CHANNEL_RAW:
 566		format = 0x0000;
 567		ver[0] = BT_SUBSYS_VERSION;
 568		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
 569		break;
 570	case HCI_CHANNEL_USER:
 571		format = 0x0001;
 572		ver[0] = BT_SUBSYS_VERSION;
 573		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
 574		break;
 575	case HCI_CHANNEL_CONTROL:
 576		format = 0x0002;
 577		mgmt_fill_version_info(ver);
 578		break;
 579	default:
 580		/* No message for unsupported format */
 581		return NULL;
 582	}
 583
 584	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
 585	if (!skb)
 586		return NULL;
 587
 588	hci_sock_copy_creds(sk, skb);
 589
 590	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
 591
 592	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 593	put_unaligned_le16(format, skb_put(skb, 2));
 594	skb_put_data(skb, ver, sizeof(ver));
 595	put_unaligned_le32(flags, skb_put(skb, 4));
 596	skb_put_u8(skb, TASK_COMM_LEN);
 597	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);
 598
 599	__net_timestamp(skb);
 600
 601	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 602	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
 603	if (hci_pi(sk)->hdev)
 604		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
 605	else
 606		hdr->index = cpu_to_le16(HCI_DEV_NONE);
 607	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 608
 609	return skb;
 610}
 611
 612static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
 613{
 614	struct hci_mon_hdr *hdr;
 615	struct sk_buff *skb;
 616
 617	/* No message needed when cookie is not present */
 618	if (!hci_pi(sk)->cookie)
 619		return NULL;
 620
 621	switch (hci_pi(sk)->channel) {
 622	case HCI_CHANNEL_RAW:
 623	case HCI_CHANNEL_USER:
 624	case HCI_CHANNEL_CONTROL:
 625		break;
 626	default:
 627		/* No message for unsupported format */
 628		return NULL;
 629	}
 630
 631	skb = bt_skb_alloc(4, GFP_ATOMIC);
 632	if (!skb)
 633		return NULL;
 634
 635	hci_sock_copy_creds(sk, skb);
 636
 637	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 638
 639	__net_timestamp(skb);
 640
 641	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 642	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
 643	if (hci_pi(sk)->hdev)
 644		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
 645	else
 646		hdr->index = cpu_to_le16(HCI_DEV_NONE);
 647	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 648
 649	return skb;
 650}
 651
 652static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
 653						   u16 opcode, u16 len,
 654						   const void *buf)
 655{
 656	struct hci_mon_hdr *hdr;
 657	struct sk_buff *skb;
 658
 659	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
 660	if (!skb)
 661		return NULL;
 662
 663	hci_sock_copy_creds(sk, skb);
 664
 665	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 666	put_unaligned_le16(opcode, skb_put(skb, 2));
 667
 668	if (buf)
 669		skb_put_data(skb, buf, len);
 670
 671	__net_timestamp(skb);
 672
 673	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 674	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
 675	hdr->index = cpu_to_le16(index);
 676	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 677
 678	return skb;
 679}
 680
 681static void __printf(2, 3)
 682send_monitor_note(struct sock *sk, const char *fmt, ...)
 683{
 684	size_t len;
 685	struct hci_mon_hdr *hdr;
 686	struct sk_buff *skb;
 687	va_list args;
 688
 689	va_start(args, fmt);
 690	len = vsnprintf(NULL, 0, fmt, args);
 691	va_end(args);
 692
 693	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
 694	if (!skb)
 695		return;
 696
 697	hci_sock_copy_creds(sk, skb);
 698
 699	va_start(args, fmt);
 700	vsprintf(skb_put(skb, len), fmt, args);
 701	*(u8 *)skb_put(skb, 1) = 0;
 702	va_end(args);
 703
 704	__net_timestamp(skb);
 705
 706	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 707	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
 708	hdr->index = cpu_to_le16(HCI_DEV_NONE);
 709	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 710
 711	if (sock_queue_rcv_skb(sk, skb))
 712		kfree_skb(skb);
 713}
 714
 715static void send_monitor_replay(struct sock *sk)
 716{
 717	struct hci_dev *hdev;
 718
 719	read_lock(&hci_dev_list_lock);
 720
 721	list_for_each_entry(hdev, &hci_dev_list, list) {
 722		struct sk_buff *skb;
 723
 724		skb = create_monitor_event(hdev, HCI_DEV_REG);
 725		if (!skb)
 726			continue;
 727
 728		if (sock_queue_rcv_skb(sk, skb))
 729			kfree_skb(skb);
 730
 731		if (!test_bit(HCI_RUNNING, &hdev->flags))
 732			continue;
 733
 734		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
 735		if (!skb)
 736			continue;
 737
 738		if (sock_queue_rcv_skb(sk, skb))
 739			kfree_skb(skb);
 740
 741		if (test_bit(HCI_UP, &hdev->flags))
 742			skb = create_monitor_event(hdev, HCI_DEV_UP);
 743		else if (hci_dev_test_flag(hdev, HCI_SETUP))
 744			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
 745		else
 746			skb = NULL;
 747
 748		if (skb) {
 749			if (sock_queue_rcv_skb(sk, skb))
 750				kfree_skb(skb);
 751		}
 752	}
 753
 754	read_unlock(&hci_dev_list_lock);
 755}
 756
 757static void send_monitor_control_replay(struct sock *mon_sk)
 758{
 759	struct sock *sk;
 760
 761	read_lock(&hci_sk_list.lock);
 762
 763	sk_for_each(sk, &hci_sk_list.head) {
 764		struct sk_buff *skb;
 765
 766		skb = create_monitor_ctrl_open(sk);
 767		if (!skb)
 768			continue;
 769
 770		if (sock_queue_rcv_skb(mon_sk, skb))
 771			kfree_skb(skb);
 772	}
 773
 774	read_unlock(&hci_sk_list.lock);
 775}
 776
 777/* Generate internal stack event */
 778static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
 779{
 780	struct hci_event_hdr *hdr;
 781	struct hci_ev_stack_internal *ev;
 782	struct sk_buff *skb;
 783
 784	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
 785	if (!skb)
 786		return;
 787
 788	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
 789	hdr->evt  = HCI_EV_STACK_INTERNAL;
 790	hdr->plen = sizeof(*ev) + dlen;
 791
 792	ev = skb_put(skb, sizeof(*ev) + dlen);
 793	ev->type = type;
 794	memcpy(ev->data, data, dlen);
 795
 796	bt_cb(skb)->incoming = 1;
 797	__net_timestamp(skb);
 798
 799	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 800	hci_send_to_sock(hdev, skb);
 801	kfree_skb(skb);
 802}
 803
 804void hci_sock_dev_event(struct hci_dev *hdev, int event)
 805{
 806	BT_DBG("hdev %s event %d", hdev->name, event);
 807
 808	if (atomic_read(&monitor_promisc)) {
 809		struct sk_buff *skb;
 810
 811		/* Send event to monitor */
 812		skb = create_monitor_event(hdev, event);
 813		if (skb) {
 814			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 815					    HCI_SOCK_TRUSTED, NULL);
 816			kfree_skb(skb);
 817		}
 818	}
 819
 820	if (event <= HCI_DEV_DOWN) {
 821		struct hci_ev_si_device ev;
 822
 823		/* Send event to sockets */
 824		ev.event  = event;
 825		ev.dev_id = hdev->id;
 826		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
 827	}
 828
 829	if (event == HCI_DEV_UNREG) {
 830		struct sock *sk;
 831
 832		/* Wake up sockets using this dead device */
 833		read_lock(&hci_sk_list.lock);
 834		sk_for_each(sk, &hci_sk_list.head) {
 835			if (hci_pi(sk)->hdev == hdev) {
 836				sk->sk_err = EPIPE;
 837				sk->sk_state_change(sk);
 838			}
 839		}
 840		read_unlock(&hci_sk_list.lock);
 841	}
 842}
 843
 844static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
 845{
 846	struct hci_mgmt_chan *c;
 847
 848	list_for_each_entry(c, &mgmt_chan_list, list) {
 849		if (c->channel == channel)
 850			return c;
 851	}
 852
 853	return NULL;
 854}
 855
 856static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
 857{
 858	struct hci_mgmt_chan *c;
 859
 860	mutex_lock(&mgmt_chan_list_lock);
 861	c = __hci_mgmt_chan_find(channel);
 862	mutex_unlock(&mgmt_chan_list_lock);
 863
 864	return c;
 865}
 866
 867int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
 868{
 869	if (c->channel < HCI_CHANNEL_CONTROL)
 870		return -EINVAL;
 871
 872	mutex_lock(&mgmt_chan_list_lock);
 873	if (__hci_mgmt_chan_find(c->channel)) {
 874		mutex_unlock(&mgmt_chan_list_lock);
 875		return -EALREADY;
 876	}
 877
 878	list_add_tail(&c->list, &mgmt_chan_list);
 879
 880	mutex_unlock(&mgmt_chan_list_lock);
 881
 882	return 0;
 883}
 884EXPORT_SYMBOL(hci_mgmt_chan_register);
 885
 886void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
 887{
 888	mutex_lock(&mgmt_chan_list_lock);
 889	list_del(&c->list);
 890	mutex_unlock(&mgmt_chan_list_lock);
 891}
 892EXPORT_SYMBOL(hci_mgmt_chan_unregister);
 893
 894static int hci_sock_release(struct socket *sock)
 895{
 896	struct sock *sk = sock->sk;
 897	struct hci_dev *hdev;
 898	struct sk_buff *skb;
 899
 900	BT_DBG("sock %p sk %p", sock, sk);
 901
 902	if (!sk)
 903		return 0;
 904
 905	lock_sock(sk);
 906
 907	switch (hci_pi(sk)->channel) {
 908	case HCI_CHANNEL_MONITOR:
 909		atomic_dec(&monitor_promisc);
 910		break;
 911	case HCI_CHANNEL_RAW:
 912	case HCI_CHANNEL_USER:
 913	case HCI_CHANNEL_CONTROL:
 914		/* Send event to monitor */
 915		skb = create_monitor_ctrl_close(sk);
 916		if (skb) {
 917			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 918					    HCI_SOCK_TRUSTED, NULL);
 919			kfree_skb(skb);
 920		}
 921
 922		hci_sock_free_cookie(sk);
 923		break;
 924	}
 925
 926	bt_sock_unlink(&hci_sk_list, sk);
 927
 928	hdev = hci_pi(sk)->hdev;
 929	if (hdev) {
 930		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
 931		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
 932			/* When releasing a user channel exclusive access,
 933			 * call hci_dev_do_close directly instead of calling
 934			 * hci_dev_close to ensure the exclusive access will
 935			 * be released and the controller brought back down.
 936			 *
 937			 * The checking of HCI_AUTO_OFF is not needed in this
 938			 * case since it will have been cleared already when
 939			 * opening the user channel.
 940			 *
 941			 * Make sure to also check that we haven't already
 942			 * unregistered since all the cleanup will have already
  943			 * been completed and hdev will get released when we put
 944			 * below.
 945			 */
 946			hci_dev_do_close(hdev);
 947			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 948			mgmt_index_added(hdev);
 949		}
 950
 951		atomic_dec(&hdev->promisc);
 952		hci_dev_put(hdev);
 953	}
 954
 955	sock_orphan(sk);
 956	release_sock(sk);
 957	sock_put(sk);
 958	return 0;
 959}
 960
 961static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
 962{
 963	bdaddr_t bdaddr;
 964	int err;
 965
 966	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 967		return -EFAULT;
 968
 969	hci_dev_lock(hdev);
 970
 971	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
 972
 973	hci_dev_unlock(hdev);
 974
 975	return err;
 976}
 977
 978static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
 979{
 980	bdaddr_t bdaddr;
 981	int err;
 982
 983	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 984		return -EFAULT;
 985
 986	hci_dev_lock(hdev);
 987
 988	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
 989
 990	hci_dev_unlock(hdev);
 991
 992	return err;
 993}
 994
 995/* Ioctls that require bound socket */
 996static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
 997				unsigned long arg)
 998{
 999	struct hci_dev *hdev = hci_hdev_from_sock(sk);
1000
1001	if (IS_ERR(hdev))
1002		return PTR_ERR(hdev);
1003
1004	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1005		return -EBUSY;
1006
1007	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1008		return -EOPNOTSUPP;
1009
1010	switch (cmd) {
1011	case HCISETRAW:
1012		if (!capable(CAP_NET_ADMIN))
1013			return -EPERM;
1014		return -EOPNOTSUPP;
1015
1016	case HCIGETCONNINFO:
1017		return hci_get_conn_info(hdev, (void __user *)arg);
1018
1019	case HCIGETAUTHINFO:
1020		return hci_get_auth_info(hdev, (void __user *)arg);
1021
1022	case HCIBLOCKADDR:
1023		if (!capable(CAP_NET_ADMIN))
1024			return -EPERM;
1025		return hci_sock_reject_list_add(hdev, (void __user *)arg);
1026
1027	case HCIUNBLOCKADDR:
1028		if (!capable(CAP_NET_ADMIN))
1029			return -EPERM;
1030		return hci_sock_reject_list_del(hdev, (void __user *)arg);
1031	}
1032
1033	return -ENOIOCTLCMD;
1034}
1035
1036static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
1037			  unsigned long arg)
1038{
1039	void __user *argp = (void __user *)arg;
1040	struct sock *sk = sock->sk;
1041	int err;
1042
1043	BT_DBG("cmd %x arg %lx", cmd, arg);
1044
1045	/* Make sure the cmd is valid before doing anything */
1046	switch (cmd) {
1047	case HCIGETDEVLIST:
1048	case HCIGETDEVINFO:
1049	case HCIGETCONNLIST:
1050	case HCIDEVUP:
1051	case HCIDEVDOWN:
1052	case HCIDEVRESET:
1053	case HCIDEVRESTAT:
1054	case HCISETSCAN:
1055	case HCISETAUTH:
1056	case HCISETENCRYPT:
1057	case HCISETPTYPE:
1058	case HCISETLINKPOL:
1059	case HCISETLINKMODE:
1060	case HCISETACLMTU:
1061	case HCISETSCOMTU:
1062	case HCIINQUIRY:
1063	case HCISETRAW:
1064	case HCIGETCONNINFO:
1065	case HCIGETAUTHINFO:
1066	case HCIBLOCKADDR:
1067	case HCIUNBLOCKADDR:
1068		break;
1069	default:
1070		return -ENOIOCTLCMD;
1071	}
1072
1073	lock_sock(sk);
1074
1075	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1076		err = -EBADFD;
1077		goto done;
1078	}
1079
 1080	/* When calling an ioctl on an unbound raw socket, ensure that
 1081	 * the monitor gets informed. Ensure that the resulting event is
 1082	 * only sent once by checking whether the cookie exists. The
 1083	 * socket cookie will only ever be generated once for the lifetime
1084	 * of a given socket.
1085	 */
1086	if (hci_sock_gen_cookie(sk)) {
1087		struct sk_buff *skb;
1088
1089		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
1090		 * flag. Make sure that not only the current task but also
1091		 * the socket opener has the required capability, since
1092		 * privileged programs can be tricked into making ioctl calls
1093		 * on HCI sockets, and the socket should not be marked as
1094		 * trusted simply because the ioctl caller is privileged.
1095		 */
1096		if (sk_capable(sk, CAP_NET_ADMIN))
1097			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1098
1099		/* Send event to monitor */
1100		skb = create_monitor_ctrl_open(sk);
1101		if (skb) {
1102			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1103					    HCI_SOCK_TRUSTED, NULL);
1104			kfree_skb(skb);
1105		}
1106	}
1107
1108	release_sock(sk);
1109
1110	switch (cmd) {
1111	case HCIGETDEVLIST:
1112		return hci_get_dev_list(argp);
1113
1114	case HCIGETDEVINFO:
1115		return hci_get_dev_info(argp);
1116
1117	case HCIGETCONNLIST:
1118		return hci_get_conn_list(argp);
1119
1120	case HCIDEVUP:
1121		if (!capable(CAP_NET_ADMIN))
1122			return -EPERM;
1123		return hci_dev_open(arg);
1124
1125	case HCIDEVDOWN:
1126		if (!capable(CAP_NET_ADMIN))
1127			return -EPERM;
1128		return hci_dev_close(arg);
1129
1130	case HCIDEVRESET:
1131		if (!capable(CAP_NET_ADMIN))
1132			return -EPERM;
1133		return hci_dev_reset(arg);
1134
1135	case HCIDEVRESTAT:
1136		if (!capable(CAP_NET_ADMIN))
1137			return -EPERM;
1138		return hci_dev_reset_stat(arg);
1139
1140	case HCISETSCAN:
1141	case HCISETAUTH:
1142	case HCISETENCRYPT:
1143	case HCISETPTYPE:
1144	case HCISETLINKPOL:
1145	case HCISETLINKMODE:
1146	case HCISETACLMTU:
1147	case HCISETSCOMTU:
1148		if (!capable(CAP_NET_ADMIN))
1149			return -EPERM;
1150		return hci_dev_cmd(cmd, argp);
1151
1152	case HCIINQUIRY:
1153		return hci_inquiry(argp);
1154	}
1155
1156	lock_sock(sk);
1157
1158	err = hci_sock_bound_ioctl(sk, cmd, arg);
1159
1160done:
1161	release_sock(sk);
1162	return err;
1163}
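
/*
 * Editor's sketch (not part of the upstream file): the device ioctls take
 * the device id as the argument itself, not a pointer. Assuming the uapi
 * encodings HCIDEVUP = _IOW('H', 201, int) and HCIDEVDOWN = _IOW('H', 202,
 * int), bringing hci0 up from userspace looks like:
 */
#if 0 /* userspace illustration only */
	if (ioctl(fd, HCIDEVUP, 0) < 0)
		perror("HCIDEVUP");	/* needs CAP_NET_ADMIN */
#endif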
1164
1165#ifdef CONFIG_COMPAT
1166static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
1167				 unsigned long arg)
1168{
1169	switch (cmd) {
1170	case HCIDEVUP:
1171	case HCIDEVDOWN:
1172	case HCIDEVRESET:
1173	case HCIDEVRESTAT:
1174		return hci_sock_ioctl(sock, cmd, arg);
1175	}
1176
1177	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
1178}
1179#endif
1180
1181static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
1182			 int addr_len)
1183{
1184	struct sockaddr_hci haddr;
1185	struct sock *sk = sock->sk;
1186	struct hci_dev *hdev = NULL;
1187	struct sk_buff *skb;
1188	int len, err = 0;
1189
1190	BT_DBG("sock %p sk %p", sock, sk);
1191
1192	if (!addr)
1193		return -EINVAL;
1194
1195	memset(&haddr, 0, sizeof(haddr));
1196	len = min_t(unsigned int, sizeof(haddr), addr_len);
1197	memcpy(&haddr, addr, len);
1198
1199	if (haddr.hci_family != AF_BLUETOOTH)
1200		return -EINVAL;
1201
1202	lock_sock(sk);
1203
1204	/* Allow detaching from dead device and attaching to alive device, if
1205	 * the caller wants to re-bind (instead of close) this socket in
1206	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
1207	 */
1208	hdev = hci_pi(sk)->hdev;
1209	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1210		hci_pi(sk)->hdev = NULL;
1211		sk->sk_state = BT_OPEN;
1212		hci_dev_put(hdev);
1213	}
1214	hdev = NULL;
1215
1216	if (sk->sk_state == BT_BOUND) {
1217		err = -EALREADY;
1218		goto done;
1219	}
1220
1221	switch (haddr.hci_channel) {
1222	case HCI_CHANNEL_RAW:
1223		if (hci_pi(sk)->hdev) {
1224			err = -EALREADY;
1225			goto done;
1226		}
1227
1228		if (haddr.hci_dev != HCI_DEV_NONE) {
1229			hdev = hci_dev_get(haddr.hci_dev);
1230			if (!hdev) {
1231				err = -ENODEV;
1232				goto done;
1233			}
1234
1235			atomic_inc(&hdev->promisc);
1236		}
1237
1238		hci_pi(sk)->channel = haddr.hci_channel;
1239
1240		if (!hci_sock_gen_cookie(sk)) {
 1241			/* If a cookie has already been assigned, an ioctl has
 1242			 * already been issued against an unbound socket and has
 1243			 * triggered an open notification. Send a close
 1244			 * notification first to allow the state transition to
 1245			 * bound.
 1246			 */
1247			skb = create_monitor_ctrl_close(sk);
1248			if (skb) {
1249				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1250						    HCI_SOCK_TRUSTED, NULL);
1251				kfree_skb(skb);
1252			}
1253		}
1254
1255		if (capable(CAP_NET_ADMIN))
1256			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1257
1258		hci_pi(sk)->hdev = hdev;
1259
1260		/* Send event to monitor */
1261		skb = create_monitor_ctrl_open(sk);
1262		if (skb) {
1263			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1264					    HCI_SOCK_TRUSTED, NULL);
1265			kfree_skb(skb);
1266		}
1267		break;
1268
1269	case HCI_CHANNEL_USER:
1270		if (hci_pi(sk)->hdev) {
1271			err = -EALREADY;
1272			goto done;
1273		}
1274
1275		if (haddr.hci_dev == HCI_DEV_NONE) {
1276			err = -EINVAL;
1277			goto done;
1278		}
1279
1280		if (!capable(CAP_NET_ADMIN)) {
1281			err = -EPERM;
1282			goto done;
1283		}
1284
1285		hdev = hci_dev_get(haddr.hci_dev);
1286		if (!hdev) {
1287			err = -ENODEV;
1288			goto done;
1289		}
1290
1291		if (test_bit(HCI_INIT, &hdev->flags) ||
1292		    hci_dev_test_flag(hdev, HCI_SETUP) ||
1293		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1294		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1295		     test_bit(HCI_UP, &hdev->flags))) {
1296			err = -EBUSY;
1297			hci_dev_put(hdev);
1298			goto done;
1299		}
1300
1301		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
1302			err = -EUSERS;
1303			hci_dev_put(hdev);
1304			goto done;
1305		}
1306
1307		mgmt_index_removed(hdev);
1308
1309		err = hci_dev_open(hdev->id);
1310		if (err) {
1311			if (err == -EALREADY) {
1312				/* In case the transport is already up and
1313				 * running, clear the error here.
1314				 *
1315				 * This can happen when opening a user
1316				 * channel and HCI_AUTO_OFF grace period
1317				 * is still active.
1318				 */
1319				err = 0;
1320			} else {
1321				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
1322				mgmt_index_added(hdev);
1323				hci_dev_put(hdev);
1324				goto done;
1325			}
1326		}
1327
1328		hci_pi(sk)->channel = haddr.hci_channel;
1329
1330		if (!hci_sock_gen_cookie(sk)) {
1331			/* In the case when a cookie has already been assigned,
1332			 * this socket will transition from a raw socket into
1333			 * a user channel socket. For a clean transition, send
1334			 * the close notification first.
1335			 */
1336			skb = create_monitor_ctrl_close(sk);
1337			if (skb) {
1338				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1339						    HCI_SOCK_TRUSTED, NULL);
1340				kfree_skb(skb);
1341			}
1342		}
1343
1344		/* The user channel is restricted to CAP_NET_ADMIN
 1345		 * capabilities and is therefore implicitly trusted.
1346		 */
1347		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1348
1349		hci_pi(sk)->hdev = hdev;
1350
1351		/* Send event to monitor */
1352		skb = create_monitor_ctrl_open(sk);
1353		if (skb) {
1354			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1355					    HCI_SOCK_TRUSTED, NULL);
1356			kfree_skb(skb);
1357		}
1358
1359		atomic_inc(&hdev->promisc);
1360		break;
1361
1362	case HCI_CHANNEL_MONITOR:
1363		if (haddr.hci_dev != HCI_DEV_NONE) {
1364			err = -EINVAL;
1365			goto done;
1366		}
1367
1368		if (!capable(CAP_NET_RAW)) {
1369			err = -EPERM;
1370			goto done;
1371		}
1372
1373		hci_pi(sk)->channel = haddr.hci_channel;
1374
1375		/* The monitor interface is restricted to CAP_NET_RAW
 1376		 * capabilities and is therefore implicitly trusted.
1377		 */
1378		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1379
1380		send_monitor_note(sk, "Linux version %s (%s)",
1381				  init_utsname()->release,
1382				  init_utsname()->machine);
1383		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
1384				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
1385		send_monitor_replay(sk);
1386		send_monitor_control_replay(sk);
1387
1388		atomic_inc(&monitor_promisc);
1389		break;
1390
1391	case HCI_CHANNEL_LOGGING:
1392		if (haddr.hci_dev != HCI_DEV_NONE) {
1393			err = -EINVAL;
1394			goto done;
1395		}
1396
1397		if (!capable(CAP_NET_ADMIN)) {
1398			err = -EPERM;
1399			goto done;
1400		}
1401
1402		hci_pi(sk)->channel = haddr.hci_channel;
1403		break;
1404
1405	default:
1406		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
1407			err = -EINVAL;
1408			goto done;
1409		}
1410
1411		if (haddr.hci_dev != HCI_DEV_NONE) {
1412			err = -EINVAL;
1413			goto done;
1414		}
1415
1416		/* Users with CAP_NET_ADMIN capabilities are allowed
1417		 * access to all management commands and events. For
1418		 * untrusted users the interface is restricted and
1419		 * also only untrusted events are sent.
1420		 */
1421		if (capable(CAP_NET_ADMIN))
1422			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1423
1424		hci_pi(sk)->channel = haddr.hci_channel;
1425
1426		/* At the moment the index and unconfigured index events
1427		 * are enabled unconditionally. Setting them on each
 1428		 * socket when binding keeps this functionality. However,
 1429		 * they might be cleared later, and sending of these events
 1430		 * will then be disabled, but that is intentional.
1431		 *
1432		 * This also enables generic events that are safe to be
1433		 * received by untrusted users. Example for such events
1434		 * are changes to settings, class of device, name etc.
1435		 */
1436		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
1437			if (!hci_sock_gen_cookie(sk)) {
1438				/* In the case when a cookie has already been
1439				 * assigned, this socket will transition from
1440				 * a raw socket into a control socket. To
1441				 * allow for a clean transition, send the
1442				 * close notification first.
1443				 */
1444				skb = create_monitor_ctrl_close(sk);
1445				if (skb) {
1446					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1447							    HCI_SOCK_TRUSTED, NULL);
1448					kfree_skb(skb);
1449				}
1450			}
1451
1452			/* Send event to monitor */
1453			skb = create_monitor_ctrl_open(sk);
1454			if (skb) {
1455				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1456						    HCI_SOCK_TRUSTED, NULL);
1457				kfree_skb(skb);
1458			}
1459
1460			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
1461			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
1462			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
1463			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
1464			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1465			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1466		}
1467		break;
1468	}
1469
1470	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
1471	if (!hci_pi(sk)->mtu)
1472		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;
1473
1474	sk->sk_state = BT_BOUND;
1475
1476done:
1477	release_sock(sk);
1478	return err;
1479}
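
/*
 * Editor's sketch (not part of the upstream file): binding the raw channel
 * to hci0 from userspace. The user channel (HCI_CHANNEL_USER=1) is bound
 * the same way but requires CAP_NET_ADMIN and a concrete device index.
 * Assumed uapi values: AF_BLUETOOTH=31, BTPROTO_HCI=1, HCI_CHANNEL_RAW=0.
 */
#if 0 /* userspace illustration only */
	struct sockaddr_hci_u {		/* mirrors struct sockaddr_hci */
		sa_family_t    hci_family;
		unsigned short hci_dev;
		unsigned short hci_channel;
	} a = { .hci_family = 31, .hci_dev = 0, .hci_channel = 0 };

	int fd = socket(31, SOCK_RAW | SOCK_CLOEXEC, 1 /* BTPROTO_HCI */);

	if (fd < 0 || bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0)
		/* handle error */;
#endif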
1480
1481static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1482			    int peer)
1483{
1484	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1485	struct sock *sk = sock->sk;
1486	struct hci_dev *hdev;
1487	int err = 0;
1488
1489	BT_DBG("sock %p sk %p", sock, sk);
1490
1491	if (peer)
1492		return -EOPNOTSUPP;
1493
1494	lock_sock(sk);
1495
1496	hdev = hci_hdev_from_sock(sk);
1497	if (IS_ERR(hdev)) {
1498		err = PTR_ERR(hdev);
1499		goto done;
1500	}
1501
1502	haddr->hci_family = AF_BLUETOOTH;
1503	haddr->hci_dev    = hdev->id;
 1504	haddr->hci_channel = hci_pi(sk)->channel;
1505	err = sizeof(*haddr);
1506
1507done:
1508	release_sock(sk);
1509	return err;
1510}
1511
1512static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
1513			  struct sk_buff *skb)
1514{
1515	__u8 mask = hci_pi(sk)->cmsg_mask;
1516
1517	if (mask & HCI_CMSG_DIR) {
1518		int incoming = bt_cb(skb)->incoming;
1519		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
1520			 &incoming);
1521	}
1522
1523	if (mask & HCI_CMSG_TSTAMP) {
1524#ifdef CONFIG_COMPAT
1525		struct old_timeval32 ctv;
1526#endif
1527		struct __kernel_old_timeval tv;
1528		void *data;
1529		int len;
1530
1531		skb_get_timestamp(skb, &tv);
1532
1533		data = &tv;
1534		len = sizeof(tv);
1535#ifdef CONFIG_COMPAT
1536		if (!COMPAT_USE_64BIT_TIME &&
1537		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
1538			ctv.tv_sec = tv.tv_sec;
1539			ctv.tv_usec = tv.tv_usec;
1540			data = &ctv;
1541			len = sizeof(ctv);
1542		}
1543#endif
1544
1545		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
1546	}
1547}
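
/*
 * Editor's sketch (not part of the upstream file): the cmsg_mask consumed
 * above is set through the HCI_DATA_DIR and HCI_TIME_STAMP socket options.
 * Assumed constants from BlueZ's hci.h: SOL_HCI=0, HCI_DATA_DIR=1,
 * HCI_TIME_STAMP=3, HCI_CMSG_DIR=0x0001, HCI_CMSG_TSTAMP=0x0002.
 */
#if 0 /* userspace illustration only */
	int one = 1;
	struct msghdr msg;
	struct cmsghdr *c;

	setsockopt(fd, 0 /* SOL_HCI */, 1 /* HCI_DATA_DIR */,
		   &one, sizeof(one));
	setsockopt(fd, 0 /* SOL_HCI */, 3 /* HCI_TIME_STAMP */,
		   &one, sizeof(one));

	/* ... after recvmsg(fd, &msg, 0) ... */
	for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c)) {
		if (c->cmsg_level != 0 /* SOL_HCI */)
			continue;
		if (c->cmsg_type == 0x0001)	 /* HCI_CMSG_DIR: int */
			/* 1 = incoming, 0 = outgoing */;
		else if (c->cmsg_type == 0x0002) /* HCI_CMSG_TSTAMP: timeval */
			/* arrival timestamp */;
	}
#endif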
1548
1549static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1550			    size_t len, int flags)
1551{
1552	struct scm_cookie scm;
1553	struct sock *sk = sock->sk;
1554	struct sk_buff *skb;
1555	int copied, err;
1556	unsigned int skblen;
1557
1558	BT_DBG("sock %p, sk %p", sock, sk);
1559
1560	if (flags & MSG_OOB)
1561		return -EOPNOTSUPP;
1562
1563	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1564		return -EOPNOTSUPP;
1565
1566	if (sk->sk_state == BT_CLOSED)
1567		return 0;
1568
1569	skb = skb_recv_datagram(sk, flags, &err);
1570	if (!skb)
1571		return err;
1572
1573	skblen = skb->len;
1574	copied = skb->len;
1575	if (len < copied) {
1576		msg->msg_flags |= MSG_TRUNC;
1577		copied = len;
1578	}
1579
1580	skb_reset_transport_header(skb);
1581	err = skb_copy_datagram_msg(skb, 0, msg, copied);
1582
1583	switch (hci_pi(sk)->channel) {
1584	case HCI_CHANNEL_RAW:
1585		hci_sock_cmsg(sk, msg, skb);
1586		break;
1587	case HCI_CHANNEL_USER:
1588	case HCI_CHANNEL_MONITOR:
1589		sock_recv_timestamp(msg, sk, skb);
1590		break;
1591	default:
1592		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1593			sock_recv_timestamp(msg, sk, skb);
1594		break;
1595	}
1596
1597	memset(&scm, 0, sizeof(scm));
1598	scm.creds = bt_cb(skb)->creds;
1599
1600	skb_free_datagram(sk, skb);
1601
1602	if (flags & MSG_TRUNC)
1603		copied = skblen;
1604
1605	scm_recv(sock, msg, &scm, flags);
1606
1607	return err ? : copied;
1608}
1609
1610static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1611			struct sk_buff *skb)
1612{
1613	u8 *cp;
1614	struct mgmt_hdr *hdr;
1615	u16 opcode, index, len;
1616	struct hci_dev *hdev = NULL;
1617	const struct hci_mgmt_handler *handler;
1618	bool var_len, no_hdev;
1619	int err;
1620
1621	BT_DBG("got %d bytes", skb->len);
1622
1623	if (skb->len < sizeof(*hdr))
1624		return -EINVAL;
1625
1626	hdr = (void *)skb->data;
1627	opcode = __le16_to_cpu(hdr->opcode);
1628	index = __le16_to_cpu(hdr->index);
1629	len = __le16_to_cpu(hdr->len);
1630
1631	if (len != skb->len - sizeof(*hdr)) {
1632		err = -EINVAL;
1633		goto done;
1634	}
1635
1636	if (chan->channel == HCI_CHANNEL_CONTROL) {
1637		struct sk_buff *cmd;
1638
1639		/* Send event to monitor */
1640		cmd = create_monitor_ctrl_command(sk, index, opcode, len,
1641						  skb->data + sizeof(*hdr));
1642		if (cmd) {
1643			hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
1644					    HCI_SOCK_TRUSTED, NULL);
1645			kfree_skb(cmd);
1646		}
1647	}
1648
1649	if (opcode >= chan->handler_count ||
1650	    chan->handlers[opcode].func == NULL) {
1651		BT_DBG("Unknown op %u", opcode);
1652		err = mgmt_cmd_status(sk, index, opcode,
1653				      MGMT_STATUS_UNKNOWN_COMMAND);
1654		goto done;
1655	}
1656
1657	handler = &chan->handlers[opcode];
1658
1659	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1660	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1661		err = mgmt_cmd_status(sk, index, opcode,
1662				      MGMT_STATUS_PERMISSION_DENIED);
1663		goto done;
1664	}
1665
1666	if (index != MGMT_INDEX_NONE) {
1667		hdev = hci_dev_get(index);
1668		if (!hdev) {
1669			err = mgmt_cmd_status(sk, index, opcode,
1670					      MGMT_STATUS_INVALID_INDEX);
1671			goto done;
1672		}
1673
1674		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1675		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1676		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1677			err = mgmt_cmd_status(sk, index, opcode,
1678					      MGMT_STATUS_INVALID_INDEX);
1679			goto done;
1680		}
1681
1682		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1683		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1684			err = mgmt_cmd_status(sk, index, opcode,
1685					      MGMT_STATUS_INVALID_INDEX);
1686			goto done;
1687		}
1688	}
1689
1690	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
1691		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1692		if (no_hdev != !hdev) {
1693			err = mgmt_cmd_status(sk, index, opcode,
1694					      MGMT_STATUS_INVALID_INDEX);
1695			goto done;
1696		}
1697	}
1698
1699	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1700	if ((var_len && len < handler->data_len) ||
1701	    (!var_len && len != handler->data_len)) {
1702		err = mgmt_cmd_status(sk, index, opcode,
1703				      MGMT_STATUS_INVALID_PARAMS);
1704		goto done;
1705	}
1706
1707	if (hdev && chan->hdev_init)
1708		chan->hdev_init(sk, hdev);
1709
1710	cp = skb->data + sizeof(*hdr);
1711
1712	err = handler->func(sk, hdev, cp, len);
1713	if (err < 0)
1714		goto done;
1715
1716	err = skb->len;
1717
1718done:
1719	if (hdev)
1720		hci_dev_put(hdev);
1721
1722	return err;
1723}
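
/*
 * Editor's sketch (not part of the upstream file): a management command is
 * a little-endian mgmt_hdr (opcode, index, len) followed by parameters.
 * MGMT_OP_READ_VERSION (0x0001) takes none and accepts MGMT_INDEX_NONE,
 * so the whole command written to a HCI_CHANNEL_CONTROL socket is:
 */
#if 0 /* userspace illustration only */
	static const uint8_t read_version[] = {
		0x01, 0x00,	/* opcode: MGMT_OP_READ_VERSION */
		0xff, 0xff,	/* index:  MGMT_INDEX_NONE */
		0x00, 0x00,	/* len:    no parameters */
	};
	/* write(fd, read_version, sizeof(read_version)); the reply comes
	 * back through read() as a command-complete management event.
	 */
#endif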
1724
1725static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
1726			     unsigned int flags)
1727{
1728	struct hci_mon_hdr *hdr;
1729	struct hci_dev *hdev;
1730	u16 index;
1731	int err;
1732
1733	/* The logging frame consists at minimum of the standard header,
1734	 * the priority byte, the ident length byte and at least one string
 1735	 * terminator NUL byte. Anything shorter is an invalid packet.
1736	 */
1737	if (skb->len < sizeof(*hdr) + 3)
1738		return -EINVAL;
1739
1740	hdr = (void *)skb->data;
1741
1742	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
1743		return -EINVAL;
1744
1745	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
1746		__u8 priority = skb->data[sizeof(*hdr)];
1747		__u8 ident_len = skb->data[sizeof(*hdr) + 1];
1748
 1749		/* Only the priorities 0-7 are valid; any other
1750		 * value results in an invalid packet.
1751		 *
1752		 * The priority byte is followed by an ident length byte and
1753		 * the NUL terminated ident string. Check that the ident
1754		 * length is not overflowing the packet and also that the
1755		 * ident string itself is NUL terminated. In case the ident
 1756		 * length is zero, the length byte itself doubles as the
 1757		 * NUL terminator.
1758		 *
1759		 * The message follows the ident string (if present) and
1760		 * must be NUL terminated. Otherwise it is not a valid packet.
1761		 */
1762		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
1763		    ident_len > skb->len - sizeof(*hdr) - 3 ||
1764		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
1765			return -EINVAL;
1766	} else {
1767		return -EINVAL;
1768	}
1769
1770	index = __le16_to_cpu(hdr->index);
1771
1772	if (index != MGMT_INDEX_NONE) {
1773		hdev = hci_dev_get(index);
1774		if (!hdev)
1775			return -ENODEV;
1776	} else {
1777		hdev = NULL;
1778	}
1779
1780	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
1781
1782	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
1783	err = skb->len;
1784
1785	if (hdev)
1786		hci_dev_put(hdev);
1787
1788	return err;
1789}
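
/*
 * Editor's sketch (not part of the upstream file): a frame that passes the
 * validation above, written to a socket bound to HCI_CHANNEL_LOGGING
 * (channel 4, CAP_NET_ADMIN required). Note that ident_len counts the
 * ident's NUL terminator:
 */
#if 0 /* userspace illustration only */
	static const uint8_t log_frame[] = {
		0x00, 0x00,		/* opcode 0x0000, rewritten above to
					 * HCI_MON_USER_LOGGING */
		0xff, 0xff,		/* index: MGMT_INDEX_NONE */
		0x09, 0x00,		/* len of the payload below */
		0x06,			/* priority (0-7) */
		0x04,			/* ident_len, including NUL */
		't', 'a', 'g', 0x00,	/* ident */
		'h', 'i', 0x00,		/* NUL-terminated message */
	};
	/* write(fd, log_frame, sizeof(log_frame)); the frame is forwarded
	 * to all monitor-channel readers.
	 */
#endif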
1790
1791static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1792			    size_t len)
1793{
1794	struct sock *sk = sock->sk;
1795	struct hci_mgmt_chan *chan;
1796	struct hci_dev *hdev;
1797	struct sk_buff *skb;
1798	int err;
1799	const unsigned int flags = msg->msg_flags;
1800
1801	BT_DBG("sock %p sk %p", sock, sk);
1802
1803	if (flags & MSG_OOB)
1804		return -EOPNOTSUPP;
1805
1806	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
1807		return -EINVAL;
1808
1809	if (len < 4 || len > hci_pi(sk)->mtu)
1810		return -EINVAL;
1811
1812	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
1813	if (IS_ERR(skb))
1814		return PTR_ERR(skb);
1815
1816	lock_sock(sk);
1817
1818	switch (hci_pi(sk)->channel) {
1819	case HCI_CHANNEL_RAW:
1820	case HCI_CHANNEL_USER:
1821		break;
1822	case HCI_CHANNEL_MONITOR:
1823		err = -EOPNOTSUPP;
1824		goto drop;
1825	case HCI_CHANNEL_LOGGING:
1826		err = hci_logging_frame(sk, skb, flags);
1827		goto drop;
1828	default:
1829		mutex_lock(&mgmt_chan_list_lock);
1830		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1831		if (chan)
1832			err = hci_mgmt_cmd(chan, sk, skb);
1833		else
1834			err = -EINVAL;
1835
1836		mutex_unlock(&mgmt_chan_list_lock);
1837		goto drop;
1838	}
1839
1840	hdev = hci_hdev_from_sock(sk);
1841	if (IS_ERR(hdev)) {
1842		err = PTR_ERR(hdev);
1843		goto drop;
1844	}
1845
1846	if (!test_bit(HCI_UP, &hdev->flags)) {
1847		err = -ENETDOWN;
1848		goto drop;
1849	}
1850
1851	hci_skb_pkt_type(skb) = skb->data[0];
1852	skb_pull(skb, 1);
1853
1854	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1855		/* No permission check is needed for user channel
1856		 * since that gets enforced when binding the socket.
1857		 *
1858		 * However check that the packet type is valid.
1859		 */
1860		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1861		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1862		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
1863		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
1864			err = -EINVAL;
1865			goto drop;
1866		}
1867
1868		skb_queue_tail(&hdev->raw_q, skb);
1869		queue_work(hdev->workqueue, &hdev->tx_work);
1870	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
1871		u16 opcode = get_unaligned_le16(skb->data);
1872		u16 ogf = hci_opcode_ogf(opcode);
1873		u16 ocf = hci_opcode_ocf(opcode);
1874
1875		if (((ogf > HCI_SFLT_MAX_OGF) ||
1876		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1877				   &hci_sec_filter.ocf_mask[ogf])) &&
1878		    !capable(CAP_NET_RAW)) {
1879			err = -EPERM;
1880			goto drop;
1881		}
1882
1883		/* Since the opcode has already been extracted here, store
1884		 * a copy of the value for later use by the drivers.
1885		 */
1886		hci_skb_opcode(skb) = opcode;
1887
1888		if (ogf == 0x3f) {
1889			skb_queue_tail(&hdev->raw_q, skb);
1890			queue_work(hdev->workqueue, &hdev->tx_work);
1891		} else {
1892			/* Stand-alone HCI commands must be flagged as
1893			 * single-command requests.
1894			 */
1895			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
1896
1897			skb_queue_tail(&hdev->cmd_q, skb);
1898			queue_work(hdev->workqueue, &hdev->cmd_work);
1899		}
1900	} else {
1901		if (!capable(CAP_NET_RAW)) {
1902			err = -EPERM;
1903			goto drop;
1904		}
1905
1906		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1907		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
1908		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
1909			err = -EINVAL;
1910			goto drop;
1911		}
1912
1913		skb_queue_tail(&hdev->raw_q, skb);
1914		queue_work(hdev->workqueue, &hdev->tx_work);
1915	}
1916
1917	err = len;
1918
1919done:
1920	release_sock(sk);
1921	return err;
1922
1923drop:
1924	kfree_skb(skb);
1925	goto done;
1926}
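
/*
 * Editor's sketch (not part of the upstream file): a raw-channel write is
 * <packet type><payload>; skb->data[0] above becomes hci_skb_pkt_type().
 * Read_Local_Version_Information (OGF 0x04, OCF 0x0001 -> opcode 0x1001,
 * no parameters) is whitelisted by hci_sec_filter, so an unprivileged
 * bound raw socket may send it:
 */
#if 0 /* userspace illustration only */
	static const uint8_t cmd[] = {
		0x01,		/* HCI_COMMAND_PKT */
		0x01, 0x10,	/* opcode 0x1001, little-endian */
		0x00,		/* parameter length */
	};
	/* write(fd, cmd, sizeof(cmd)); the Command Complete event is read()
	 * back on the same socket, subject to the socket filter.
	 */
#endif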
1927
1928static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
1929				   sockptr_t optval, unsigned int optlen)
1930{
1931	struct hci_ufilter uf = { .opcode = 0 };
1932	struct sock *sk = sock->sk;
1933	int err = 0, opt = 0;
1934
1935	BT_DBG("sk %p, opt %d", sk, optname);
1936
1937	lock_sock(sk);
1938
1939	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1940		err = -EBADFD;
1941		goto done;
1942	}
1943
1944	switch (optname) {
1945	case HCI_DATA_DIR:
1946		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
1947		if (err)
1948			break;
1949
1950		if (opt)
1951			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1952		else
1953			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1954		break;
1955
1956	case HCI_TIME_STAMP:
1957		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
1958		if (err)
1959			break;
1960
1961		if (opt)
1962			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1963		else
1964			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1965		break;
1966
1967	case HCI_FILTER:
1968		{
1969			struct hci_filter *f = &hci_pi(sk)->filter;
1970
1971			uf.type_mask = f->type_mask;
1972			uf.opcode    = f->opcode;
1973			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1974			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1975		}
1976
1977		err = copy_safe_from_sockptr(&uf, sizeof(uf), optval, optlen);
1978		if (err)
1979			break;
1980
1981		if (!capable(CAP_NET_RAW)) {
1982			uf.type_mask &= hci_sec_filter.type_mask;
1983			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1984			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1985		}
1986
1987		{
1988			struct hci_filter *f = &hci_pi(sk)->filter;
1989
1990			f->type_mask = uf.type_mask;
1991			f->opcode    = uf.opcode;
1992			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
1993			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
1994		}
1995		break;
1996
1997	default:
1998		err = -ENOPROTOOPT;
1999		break;
2000	}
2001
2002done:
2003	release_sock(sk);
2004	return err;
2005}
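
/*
 * Editor's sketch (not part of the upstream file): programming the raw
 * socket filter from userspace with a struct laid out like hci_ufilter.
 * Assumed constants: SOL_HCI=0, HCI_FILTER=2, HCI_EVENT_PKT=0x04 and
 * HCI_EV_CMD_COMPLETE=0x0e. This passes only Command Complete events:
 */
#if 0 /* userspace illustration only */
	struct hci_ufilter_u {			/* mirrors struct hci_ufilter */
		uint32_t type_mask;
		uint32_t event_mask[2];
		uint16_t opcode;
	} f = {
		.type_mask  = 1U << 0x04,	 /* HCI_EVENT_PKT */
		.event_mask = { 1U << 0x0e, 0 }, /* HCI_EV_CMD_COMPLETE */
		.opcode     = 0,		 /* 0 = match any opcode */
	};

	setsockopt(fd, 0 /* SOL_HCI */, 2 /* HCI_FILTER */, &f, sizeof(f));
#endif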
2006
2007static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
2008			       sockptr_t optval, unsigned int optlen)
2009{
2010	struct sock *sk = sock->sk;
2011	int err = 0;
2012	u16 opt;
2013
2014	BT_DBG("sk %p, opt %d", sk, optname);
2015
2016	if (level == SOL_HCI)
2017		return hci_sock_setsockopt_old(sock, level, optname, optval,
2018					       optlen);
2019
2020	if (level != SOL_BLUETOOTH)
2021		return -ENOPROTOOPT;
2022
2023	lock_sock(sk);
2024
2025	switch (optname) {
2026	case BT_SNDMTU:
2027	case BT_RCVMTU:
2028		switch (hci_pi(sk)->channel) {
2029		/* Don't allow changing MTU for channels that are meant for HCI
2030		 * traffic only.
2031		 */
2032		case HCI_CHANNEL_RAW:
2033		case HCI_CHANNEL_USER:
2034			err = -ENOPROTOOPT;
2035			goto done;
2036		}
2037
2038		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
2039		if (err)
2040			break;
2041
2042		hci_pi(sk)->mtu = opt;
2043		break;
2044
2045	default:
2046		err = -ENOPROTOOPT;
2047		break;
2048	}
2049
2050done:
2051	release_sock(sk);
2052	return err;
2053}
2054
2055static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
2056				   char __user *optval, int __user *optlen)
2057{
2058	struct hci_ufilter uf;
2059	struct sock *sk = sock->sk;
2060	int len, opt, err = 0;
2061
2062	BT_DBG("sk %p, opt %d", sk, optname);
2063
2064	if (get_user(len, optlen))
2065		return -EFAULT;
2066
2067	lock_sock(sk);
2068
2069	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
2070		err = -EBADFD;
2071		goto done;
2072	}
2073
2074	switch (optname) {
2075	case HCI_DATA_DIR:
2076		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
2077			opt = 1;
2078		else
2079			opt = 0;
2080
2081		if (put_user(opt, optval))
2082			err = -EFAULT;
2083		break;
2084
2085	case HCI_TIME_STAMP:
2086		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
2087			opt = 1;
2088		else
2089			opt = 0;
2090
2091		if (put_user(opt, optval))
2092			err = -EFAULT;
2093		break;
2094
2095	case HCI_FILTER:
2096		{
2097			struct hci_filter *f = &hci_pi(sk)->filter;
2098
2099			memset(&uf, 0, sizeof(uf));
2100			uf.type_mask = f->type_mask;
2101			uf.opcode    = f->opcode;
2102			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
2103			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
2104		}
2105
2106		len = min_t(unsigned int, len, sizeof(uf));
2107		if (copy_to_user(optval, &uf, len))
2108			err = -EFAULT;
2109		break;
2110
2111	default:
2112		err = -ENOPROTOOPT;
2113		break;
2114	}
2115
2116done:
2117	release_sock(sk);
2118	return err;
2119}
2120
2121static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
2122			       char __user *optval, int __user *optlen)
2123{
2124	struct sock *sk = sock->sk;
2125	int err = 0;
2126
2127	BT_DBG("sk %p, opt %d", sk, optname);
2128
2129	if (level == SOL_HCI)
2130		return hci_sock_getsockopt_old(sock, level, optname, optval,
2131					       optlen);
2132
2133	if (level != SOL_BLUETOOTH)
2134		return -ENOPROTOOPT;
2135
2136	lock_sock(sk);
2137
2138	switch (optname) {
2139	case BT_SNDMTU:
2140	case BT_RCVMTU:
2141		if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
2142			err = -EFAULT;
2143		break;
2144
2145	default:
2146		err = -ENOPROTOOPT;
2147		break;
2148	}
2149
2150	release_sock(sk);
2151	return err;
2152}
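
/*
 * Editor's sketch (not part of the upstream file): the per-socket MTU is
 * exposed through SOL_BLUETOOTH with BT_SNDMTU/BT_RCVMTU and defaults to
 * HCI_MAX_FRAME_SIZE after bind(); it can only be changed on channels
 * other than raw and user. Assumed uapi values: SOL_BLUETOOTH=274,
 * BT_SNDMTU=12, BT_RCVMTU=13; the option value is a u16.
 */
#if 0 /* userspace illustration only */
	uint16_t mtu;
	socklen_t optlen = sizeof(mtu);

	getsockopt(fd, 274 /* SOL_BLUETOOTH */, 13 /* BT_RCVMTU */,
		   &mtu, &optlen);
#endif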
2153
2154static void hci_sock_destruct(struct sock *sk)
2155{
2156	mgmt_cleanup(sk);
2157	skb_queue_purge(&sk->sk_receive_queue);
2158	skb_queue_purge(&sk->sk_write_queue);
2159}
2160
2161static const struct proto_ops hci_sock_ops = {
2162	.family		= PF_BLUETOOTH,
2163	.owner		= THIS_MODULE,
2164	.release	= hci_sock_release,
2165	.bind		= hci_sock_bind,
2166	.getname	= hci_sock_getname,
2167	.sendmsg	= hci_sock_sendmsg,
2168	.recvmsg	= hci_sock_recvmsg,
2169	.ioctl		= hci_sock_ioctl,
2170#ifdef CONFIG_COMPAT
2171	.compat_ioctl	= hci_sock_compat_ioctl,
2172#endif
2173	.poll		= datagram_poll,
2174	.listen		= sock_no_listen,
2175	.shutdown	= sock_no_shutdown,
2176	.setsockopt	= hci_sock_setsockopt,
2177	.getsockopt	= hci_sock_getsockopt,
2178	.connect	= sock_no_connect,
2179	.socketpair	= sock_no_socketpair,
2180	.accept		= sock_no_accept,
2181	.mmap		= sock_no_mmap
2182};
2183
2184static struct proto hci_sk_proto = {
2185	.name		= "HCI",
2186	.owner		= THIS_MODULE,
2187	.obj_size	= sizeof(struct hci_pinfo)
2188};
2189
2190static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
2191			   int kern)
2192{
2193	struct sock *sk;
2194
2195	BT_DBG("sock %p", sock);
2196
2197	if (sock->type != SOCK_RAW)
2198		return -ESOCKTNOSUPPORT;
2199
2200	sock->ops = &hci_sock_ops;
2201
2202	sk = bt_sock_alloc(net, sock, &hci_sk_proto, protocol, GFP_ATOMIC,
2203			   kern);
2204	if (!sk)
2205		return -ENOMEM;
2206
2207	sock->state = SS_UNCONNECTED;
2208	sk->sk_destruct = hci_sock_destruct;
2209
2210	bt_sock_link(&hci_sk_list, sk);
2211	return 0;
2212}
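/* Illustrative user-space sketch (not part of this file): only SOCK_RAW
 * sockets reach this point, e.g.
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *
 * followed by bind() with a struct sockaddr_hci selecting the device and
 * channel.
 */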
2213
2214static const struct net_proto_family hci_sock_family_ops = {
2215	.family	= PF_BLUETOOTH,
2216	.owner	= THIS_MODULE,
2217	.create	= hci_sock_create,
2218};
2219
2220int __init hci_sock_init(void)
2221{
2222	int err;
2223
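	/* Compile-time guard: the HCI address is handed around as a generic
	 * struct sockaddr, so it must never outgrow it.
	 */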
2224	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
2225
2226	err = proto_register(&hci_sk_proto, 0);
2227	if (err < 0)
2228		return err;
2229
2230	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
2231	if (err < 0) {
2232		BT_ERR("HCI socket registration failed");
2233		goto error;
2234	}
2235
2236	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
2237	if (err < 0) {
2238		BT_ERR("Failed to create HCI proc file");
2239		bt_sock_unregister(BTPROTO_HCI);
2240		goto error;
2241	}
2242
2243	BT_INFO("HCI socket layer initialized");
2244
2245	return 0;
2246
2247error:
2248	proto_unregister(&hci_sk_proto);
2249	return err;
2250}
2251
2252void hci_sock_cleanup(void)
2253{
2254	bt_procfs_cleanup(&init_net, "hci");
2255	bt_sock_unregister(BTPROTO_HCI);
2256	proto_unregister(&hci_sk_proto);
2257}