   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3   Copyright (C) 2000-2001 Qualcomm Incorporated
   4
   5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
   6
   7   This program is free software; you can redistribute it and/or modify
   8   it under the terms of the GNU General Public License version 2 as
   9   published by the Free Software Foundation;
  10
  11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19
  20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  22   SOFTWARE IS DISCLAIMED.
  23*/
  24
  25/* Bluetooth HCI sockets. */
  26#include <linux/compat.h>
  27#include <linux/export.h>
  28#include <linux/utsname.h>
  29#include <linux/sched.h>
  30#include <linux/unaligned.h>
  31
  32#include <net/bluetooth/bluetooth.h>
  33#include <net/bluetooth/hci_core.h>
  34#include <net/bluetooth/hci_mon.h>
  35#include <net/bluetooth/mgmt.h>
  36
  37#include "mgmt_util.h"
  38
  39static LIST_HEAD(mgmt_chan_list);
  40static DEFINE_MUTEX(mgmt_chan_list_lock);
  41
  42static DEFINE_IDA(sock_cookie_ida);
  43
  44static atomic_t monitor_promisc = ATOMIC_INIT(0);
  45
  46/* ----- HCI socket interface ----- */
  47
  48/* Socket info */
  49#define hci_pi(sk) ((struct hci_pinfo *) sk)
  50
  51struct hci_pinfo {
  52	struct bt_sock    bt;
  53	struct hci_dev    *hdev;
  54	struct hci_filter filter;
  55	__u8              cmsg_mask;
  56	unsigned short    channel;
  57	unsigned long     flags;
  58	__u32             cookie;
  59	char              comm[TASK_COMM_LEN];
  60	__u16             mtu;
  61};
  62
  63static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
  64{
  65	struct hci_dev *hdev = hci_pi(sk)->hdev;
  66
  67	if (!hdev)
  68		return ERR_PTR(-EBADFD);
  69	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
  70		return ERR_PTR(-EPIPE);
  71	return hdev;
  72}
  73
  74void hci_sock_set_flag(struct sock *sk, int nr)
  75{
  76	set_bit(nr, &hci_pi(sk)->flags);
  77}
  78
  79void hci_sock_clear_flag(struct sock *sk, int nr)
  80{
  81	clear_bit(nr, &hci_pi(sk)->flags);
  82}
  83
  84int hci_sock_test_flag(struct sock *sk, int nr)
  85{
  86	return test_bit(nr, &hci_pi(sk)->flags);
  87}
  88
  89unsigned short hci_sock_get_channel(struct sock *sk)
  90{
  91	return hci_pi(sk)->channel;
  92}
  93
  94u32 hci_sock_get_cookie(struct sock *sk)
  95{
  96	return hci_pi(sk)->cookie;
  97}
  98
  99static bool hci_sock_gen_cookie(struct sock *sk)
 100{
 101	int id = hci_pi(sk)->cookie;
 102
 103	if (!id) {
 104		id = ida_alloc_min(&sock_cookie_ida, 1, GFP_KERNEL);
 105		if (id < 0)
 106			id = 0xffffffff;
 107
 108		hci_pi(sk)->cookie = id;
 109		get_task_comm(hci_pi(sk)->comm, current);
 110		return true;
 111	}
 112
 113	return false;
 114}
 115
 116static void hci_sock_free_cookie(struct sock *sk)
 117{
 118	int id = hci_pi(sk)->cookie;
 119
 120	if (id) {
 121		hci_pi(sk)->cookie = 0xffffffff;
 122		ida_free(&sock_cookie_ida, id);
 123	}
 124}
 125
 126static inline int hci_test_bit(int nr, const void *addr)
 127{
 128	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
 129}
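/* A worked example of the indexing above: hci_test_bit() treats @addr
 * as an array of 32-bit words, "nr >> 5" picks the word and "nr & 31"
 * the bit within it, so nr == 35 tests bit 3 of the second word. An
 * ordinary load is used instead of test_bit() because the filter masks
 * it operates on are plain __u32 arrays, not unsigned long bitmaps.
 */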
 130
 131/* Security filter */
 132#define HCI_SFLT_MAX_OGF  5
 133
 134struct hci_sec_filter {
 135	__u32 type_mask;
 136	__u32 event_mask[2];
 137	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
 138};
 139
 140static const struct hci_sec_filter hci_sec_filter = {
 141	/* Packet types */
 142	0x10,
 143	/* Events */
 144	{ 0x1000d9fe, 0x0000b00c },
 145	/* Commands */
 146	{
 147		{ 0x0 },
 148		/* OGF_LINK_CTL */
 149		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
 150		/* OGF_LINK_POLICY */
 151		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
 152		/* OGF_HOST_CTL */
 153		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
 154		/* OGF_INFO_PARAM */
 155		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
 156		/* OGF_STATUS_PARAM */
 157		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
 158	}
 159};
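/* How this table is consumed (see is_filtered_packet() and
 * hci_sock_sendmsg() below): type_mask is indexed by packet type,
 * event_mask by event code, and each ocf_mask[ogf] row is a 128-bit
 * bitmap indexed by OCF. Worked example: HCI_OP_INQUIRY is opcode
 * 0x0401, so OGF = 0x0401 >> 10 = 0x01 (OGF_LINK_CTL) and OCF = 0x001;
 * bit 1 of 0xbe000006 is set, hence Inquiry is permitted on raw
 * sockets without CAP_NET_RAW, while commands whose bit is clear are
 * rejected with -EPERM for unprivileged callers.
 */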
 160
 161static struct bt_sock_list hci_sk_list = {
 162	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
 163};
 164
 165static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
 166{
 167	struct hci_filter *flt;
 168	int flt_type, flt_event;
 169
 170	/* Apply filter */
 171	flt = &hci_pi(sk)->filter;
 172
 173	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
 174
 175	if (!test_bit(flt_type, &flt->type_mask))
 176		return true;
 177
 178	/* Extra filter for event packets only */
 179	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
 180		return false;
 181
 182	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
 183
 184	if (!hci_test_bit(flt_event, &flt->event_mask))
 185		return true;
 186
 187	/* Check filter only when opcode is set */
 188	if (!flt->opcode)
 189		return false;
 190
 191	if (flt_event == HCI_EV_CMD_COMPLETE &&
 192	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
 193		return true;
 194
 195	if (flt_event == HCI_EV_CMD_STATUS &&
 196	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
 197		return true;
 198
 199	return false;
 200}
 201
 202/* Send frame to RAW socket */
 203void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 204{
 205	struct sock *sk;
 206	struct sk_buff *skb_copy = NULL;
 207
 208	BT_DBG("hdev %p len %d", hdev, skb->len);
 209
 210	read_lock(&hci_sk_list.lock);
 211
 212	sk_for_each(sk, &hci_sk_list.head) {
 213		struct sk_buff *nskb;
 214
 215		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
 216			continue;
 217
 218		/* Don't send frame to the socket it came from */
 219		if (skb->sk == sk)
 220			continue;
 221
 222		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
 223			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
 224			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 225			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 226			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
 227			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
 228				continue;
 229			if (is_filtered_packet(sk, skb))
 230				continue;
 231		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 232			if (!bt_cb(skb)->incoming)
 233				continue;
 234			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 235			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 236			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
 237			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
 238				continue;
 239		} else {
 240			/* Don't send frame to other channel types */
 241			continue;
 242		}
 243
 244		if (!skb_copy) {
 245			/* Create a private copy with headroom */
 246			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
 247			if (!skb_copy)
 248				continue;
 249
 250			/* Put type byte before the data */
 251			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
 252		}
 253
 254		nskb = skb_clone(skb_copy, GFP_ATOMIC);
 255		if (!nskb)
 256			continue;
 257
 258		if (sock_queue_rcv_skb(sk, nskb))
 259			kfree_skb(nskb);
 260	}
 261
 262	read_unlock(&hci_sk_list.lock);
 263
 264	kfree_skb(skb_copy);
 265}
 266
 267static void hci_sock_copy_creds(struct sock *sk, struct sk_buff *skb)
 268{
 269	struct scm_creds *creds;
 270
 271	if (!sk || WARN_ON(!skb))
 272		return;
 273
 274	creds = &bt_cb(skb)->creds;
 275
  276	/* Check if peer credentials are set */
  277	if (!sk->sk_peer_pid) {
  278		/* Check if parent peer credentials are set */
 279		if (bt_sk(sk)->parent && bt_sk(sk)->parent->sk_peer_pid)
 280			sk = bt_sk(sk)->parent;
 281		else
 282			return;
 283	}
 284
 285	/* Check if scm_creds already set */
 286	if (creds->pid == pid_vnr(sk->sk_peer_pid))
 287		return;
 288
 289	memset(creds, 0, sizeof(*creds));
 290
 291	creds->pid = pid_vnr(sk->sk_peer_pid);
 292	if (sk->sk_peer_cred) {
 293		creds->uid = sk->sk_peer_cred->uid;
 294		creds->gid = sk->sk_peer_cred->gid;
 295	}
 296}
 297
 298static struct sk_buff *hci_skb_clone(struct sk_buff *skb)
 299{
 300	struct sk_buff *nskb;
 301
 302	if (!skb)
 303		return NULL;
 304
 305	nskb = skb_clone(skb, GFP_ATOMIC);
 306	if (!nskb)
 307		return NULL;
 308
 309	hci_sock_copy_creds(skb->sk, nskb);
 310
 311	return nskb;
 312}
 313
 314/* Send frame to sockets with specific channel */
 315static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 316				  int flag, struct sock *skip_sk)
 317{
 318	struct sock *sk;
 319
 320	BT_DBG("channel %u len %d", channel, skb->len);
  321
  322	sk_for_each(sk, &hci_sk_list.head) {
 323		struct sk_buff *nskb;
 324
 325		/* Ignore socket without the flag set */
 326		if (!hci_sock_test_flag(sk, flag))
 327			continue;
 328
 329		/* Skip the original socket */
 330		if (sk == skip_sk)
 331			continue;
 332
 333		if (sk->sk_state != BT_BOUND)
 334			continue;
 335
 336		if (hci_pi(sk)->channel != channel)
 337			continue;
 338
 339		nskb = hci_skb_clone(skb);
 340		if (!nskb)
 341			continue;
 342
 343		if (sock_queue_rcv_skb(sk, nskb))
 344			kfree_skb(nskb);
 345	}
 346
 347}
 348
 349void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 350			 int flag, struct sock *skip_sk)
 351{
 352	read_lock(&hci_sk_list.lock);
 353	__hci_send_to_channel(channel, skb, flag, skip_sk);
 354	read_unlock(&hci_sk_list.lock);
 355}
 356
 357/* Send frame to monitor socket */
 358void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 359{
 360	struct sk_buff *skb_copy = NULL;
 361	struct hci_mon_hdr *hdr;
 362	__le16 opcode;
 363
 364	if (!atomic_read(&monitor_promisc))
 365		return;
 366
 367	BT_DBG("hdev %p len %d", hdev, skb->len);
 368
 369	switch (hci_skb_pkt_type(skb)) {
 370	case HCI_COMMAND_PKT:
 371		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
 372		break;
 373	case HCI_EVENT_PKT:
 374		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
 375		break;
 376	case HCI_ACLDATA_PKT:
 377		if (bt_cb(skb)->incoming)
 378			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
 379		else
 380			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
 381		break;
 382	case HCI_SCODATA_PKT:
 383		if (bt_cb(skb)->incoming)
 384			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
 385		else
 386			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
 387		break;
 388	case HCI_ISODATA_PKT:
 389		if (bt_cb(skb)->incoming)
 390			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
 391		else
 392			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
 393		break;
 394	case HCI_DIAG_PKT:
 395		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
 396		break;
 397	default:
 398		return;
 399	}
 400
 401	/* Create a private copy with headroom */
 402	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
 403	if (!skb_copy)
 404		return;
 405
 406	hci_sock_copy_creds(skb->sk, skb_copy);
 407
 408	/* Put header before the data */
 409	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
 410	hdr->opcode = opcode;
 411	hdr->index = cpu_to_le16(hdev->id);
 412	hdr->len = cpu_to_le16(skb->len);
 413
 414	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
 415			    HCI_SOCK_TRUSTED, NULL);
 416	kfree_skb(skb_copy);
 417}
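/* The frame queued above is the btmon wire format: a six byte
 * little-endian header followed by the unmodified packet bytes.
 *
 *	struct hci_mon_hdr {
 *		__le16 opcode;	// HCI_MON_* value selected above
 *		__le16 index;	// controller identifier
 *		__le16 len;	// payload length
 *	};
 */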
 418
 419void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
 420				 void *data, u16 data_len, ktime_t tstamp,
 421				 int flag, struct sock *skip_sk)
 422{
 423	struct sock *sk;
 424	__le16 index;
 425
 426	if (hdev)
 427		index = cpu_to_le16(hdev->id);
 428	else
 429		index = cpu_to_le16(MGMT_INDEX_NONE);
 430
 431	read_lock(&hci_sk_list.lock);
 432
 433	sk_for_each(sk, &hci_sk_list.head) {
 434		struct hci_mon_hdr *hdr;
 435		struct sk_buff *skb;
 436
 437		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
 438			continue;
 439
 440		/* Ignore socket without the flag set */
 441		if (!hci_sock_test_flag(sk, flag))
 442			continue;
 443
 444		/* Skip the original socket */
 445		if (sk == skip_sk)
 446			continue;
 447
 448		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
 449		if (!skb)
 450			continue;
 451
 452		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 453		put_unaligned_le16(event, skb_put(skb, 2));
 454
 455		if (data)
 456			skb_put_data(skb, data, data_len);
 457
 458		skb->tstamp = tstamp;
 459
 460		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 461		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
 462		hdr->index = index;
 463		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 464
 465		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 466				      HCI_SOCK_TRUSTED, NULL);
 467		kfree_skb(skb);
 468	}
 469
 470	read_unlock(&hci_sk_list.lock);
 471}
 472
 473static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
 474{
 475	struct hci_mon_hdr *hdr;
 476	struct hci_mon_new_index *ni;
 477	struct hci_mon_index_info *ii;
 478	struct sk_buff *skb;
 479	__le16 opcode;
 480
 481	switch (event) {
 482	case HCI_DEV_REG:
 483		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
 484		if (!skb)
 485			return NULL;
 486
 487		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
 488		ni->type = 0x00; /* Old hdev->dev_type */
 489		ni->bus = hdev->bus;
 490		bacpy(&ni->bdaddr, &hdev->bdaddr);
 491		memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
 492			       strnlen(hdev->name, sizeof(ni->name)), '\0');
 493
 494		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
 495		break;
 496
 497	case HCI_DEV_UNREG:
 498		skb = bt_skb_alloc(0, GFP_ATOMIC);
 499		if (!skb)
 500			return NULL;
 501
 502		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
 503		break;
 504
 505	case HCI_DEV_SETUP:
 506		if (hdev->manufacturer == 0xffff)
 507			return NULL;
  508		fallthrough;
  509
 510	case HCI_DEV_UP:
 511		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
 512		if (!skb)
 513			return NULL;
 514
 515		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
 516		bacpy(&ii->bdaddr, &hdev->bdaddr);
 517		ii->manufacturer = cpu_to_le16(hdev->manufacturer);
 518
 519		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
 520		break;
 521
 522	case HCI_DEV_OPEN:
 523		skb = bt_skb_alloc(0, GFP_ATOMIC);
 524		if (!skb)
 525			return NULL;
 526
 527		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
 528		break;
 529
 530	case HCI_DEV_CLOSE:
 531		skb = bt_skb_alloc(0, GFP_ATOMIC);
 532		if (!skb)
 533			return NULL;
 534
 535		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
 536		break;
 537
 538	default:
 539		return NULL;
 540	}
 541
 542	__net_timestamp(skb);
 543
 544	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 545	hdr->opcode = opcode;
 546	hdr->index = cpu_to_le16(hdev->id);
 547	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 548
 549	return skb;
 550}
 551
 552static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
 553{
 554	struct hci_mon_hdr *hdr;
 555	struct sk_buff *skb;
 556	u16 format;
 557	u8 ver[3];
 558	u32 flags;
 559
 560	/* No message needed when cookie is not present */
 561	if (!hci_pi(sk)->cookie)
 562		return NULL;
 563
 564	switch (hci_pi(sk)->channel) {
 565	case HCI_CHANNEL_RAW:
 566		format = 0x0000;
 567		ver[0] = BT_SUBSYS_VERSION;
 568		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
 569		break;
 570	case HCI_CHANNEL_USER:
 571		format = 0x0001;
 572		ver[0] = BT_SUBSYS_VERSION;
 573		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
 574		break;
 575	case HCI_CHANNEL_CONTROL:
 576		format = 0x0002;
 577		mgmt_fill_version_info(ver);
 578		break;
 579	default:
 580		/* No message for unsupported format */
 581		return NULL;
 582	}
 583
 584	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
 585	if (!skb)
 586		return NULL;
 587
 588	hci_sock_copy_creds(sk, skb);
 589
 590	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
 591
 592	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 593	put_unaligned_le16(format, skb_put(skb, 2));
 594	skb_put_data(skb, ver, sizeof(ver));
 595	put_unaligned_le32(flags, skb_put(skb, 4));
 596	skb_put_u8(skb, TASK_COMM_LEN);
 597	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);
 598
 599	__net_timestamp(skb);
 600
 601	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 602	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
 603	if (hci_pi(sk)->hdev)
 604		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
 605	else
 606		hdr->index = cpu_to_le16(HCI_DEV_NONE);
 607	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 608
 609	return skb;
 610}
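/* Payload layout of the HCI_MON_CTRL_OPEN message built above, after
 * the common monitor header:
 *
 *	cookie (le32), format (le16), version (3 bytes),
 *	flags (le32, bit 0 = trusted), comm length (u8),
 *	comm (TASK_COMM_LEN bytes)
 */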
 611
 612static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
 613{
 614	struct hci_mon_hdr *hdr;
 615	struct sk_buff *skb;
 616
 617	/* No message needed when cookie is not present */
 618	if (!hci_pi(sk)->cookie)
 619		return NULL;
 620
 621	switch (hci_pi(sk)->channel) {
 622	case HCI_CHANNEL_RAW:
 623	case HCI_CHANNEL_USER:
 624	case HCI_CHANNEL_CONTROL:
 625		break;
 626	default:
 627		/* No message for unsupported format */
 628		return NULL;
 629	}
 630
 631	skb = bt_skb_alloc(4, GFP_ATOMIC);
 632	if (!skb)
 633		return NULL;
 634
 635	hci_sock_copy_creds(sk, skb);
 636
 637	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 638
 639	__net_timestamp(skb);
 640
 641	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 642	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
 643	if (hci_pi(sk)->hdev)
 644		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
 645	else
 646		hdr->index = cpu_to_le16(HCI_DEV_NONE);
 647	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 648
 649	return skb;
 650}
 651
 652static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
 653						   u16 opcode, u16 len,
 654						   const void *buf)
 655{
 656	struct hci_mon_hdr *hdr;
 657	struct sk_buff *skb;
 658
 659	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
 660	if (!skb)
 661		return NULL;
 662
 663	hci_sock_copy_creds(sk, skb);
 664
 665	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 666	put_unaligned_le16(opcode, skb_put(skb, 2));
 667
 668	if (buf)
 669		skb_put_data(skb, buf, len);
 670
 671	__net_timestamp(skb);
 672
 673	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
 674	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
 675	hdr->index = cpu_to_le16(index);
 676	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 677
 678	return skb;
 679}
 680
 681static void __printf(2, 3)
 682send_monitor_note(struct sock *sk, const char *fmt, ...)
 683{
 684	size_t len;
 685	struct hci_mon_hdr *hdr;
 686	struct sk_buff *skb;
 687	va_list args;
 688
 689	va_start(args, fmt);
 690	len = vsnprintf(NULL, 0, fmt, args);
 691	va_end(args);
 692
 693	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
 694	if (!skb)
 695		return;
 696
 697	hci_sock_copy_creds(sk, skb);
 698
 699	va_start(args, fmt);
 700	vsprintf(skb_put(skb, len), fmt, args);
 701	*(u8 *)skb_put(skb, 1) = 0;
 702	va_end(args);
 703
 704	__net_timestamp(skb);
 705
 706	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 707	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
 708	hdr->index = cpu_to_le16(HCI_DEV_NONE);
 709	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 710
 711	if (sock_queue_rcv_skb(sk, skb))
 712		kfree_skb(skb);
 713}
 714
 715static void send_monitor_replay(struct sock *sk)
 716{
 717	struct hci_dev *hdev;
 718
 719	read_lock(&hci_dev_list_lock);
 720
 721	list_for_each_entry(hdev, &hci_dev_list, list) {
 722		struct sk_buff *skb;
 723
 724		skb = create_monitor_event(hdev, HCI_DEV_REG);
 725		if (!skb)
 726			continue;
 727
 728		if (sock_queue_rcv_skb(sk, skb))
 729			kfree_skb(skb);
 730
 731		if (!test_bit(HCI_RUNNING, &hdev->flags))
 732			continue;
 733
 734		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
 735		if (!skb)
 736			continue;
 737
 738		if (sock_queue_rcv_skb(sk, skb))
 739			kfree_skb(skb);
 740
 741		if (test_bit(HCI_UP, &hdev->flags))
 742			skb = create_monitor_event(hdev, HCI_DEV_UP);
 743		else if (hci_dev_test_flag(hdev, HCI_SETUP))
 744			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
 745		else
 746			skb = NULL;
 747
 748		if (skb) {
 749			if (sock_queue_rcv_skb(sk, skb))
 750				kfree_skb(skb);
 751		}
 752	}
 753
 754	read_unlock(&hci_dev_list_lock);
 755}
 756
 757static void send_monitor_control_replay(struct sock *mon_sk)
 758{
 759	struct sock *sk;
 760
 761	read_lock(&hci_sk_list.lock);
 762
 763	sk_for_each(sk, &hci_sk_list.head) {
 764		struct sk_buff *skb;
 765
 766		skb = create_monitor_ctrl_open(sk);
 767		if (!skb)
 768			continue;
 769
 770		if (sock_queue_rcv_skb(mon_sk, skb))
 771			kfree_skb(skb);
 772	}
 773
 774	read_unlock(&hci_sk_list.lock);
 775}
 776
 777/* Generate internal stack event */
 778static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
 779{
 780	struct hci_event_hdr *hdr;
 781	struct hci_ev_stack_internal *ev;
 782	struct sk_buff *skb;
 783
 784	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
 785	if (!skb)
 786		return;
 787
 788	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
 789	hdr->evt  = HCI_EV_STACK_INTERNAL;
 790	hdr->plen = sizeof(*ev) + dlen;
 791
 792	ev = skb_put(skb, sizeof(*ev) + dlen);
 793	ev->type = type;
 794	memcpy(ev->data, data, dlen);
 795
 796	bt_cb(skb)->incoming = 1;
 797	__net_timestamp(skb);
 798
 799	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 800	hci_send_to_sock(hdev, skb);
 801	kfree_skb(skb);
 802}
 803
 804void hci_sock_dev_event(struct hci_dev *hdev, int event)
 805{
 806	BT_DBG("hdev %s event %d", hdev->name, event);
 807
 808	if (atomic_read(&monitor_promisc)) {
 809		struct sk_buff *skb;
 810
 811		/* Send event to monitor */
 812		skb = create_monitor_event(hdev, event);
 813		if (skb) {
 814			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 815					    HCI_SOCK_TRUSTED, NULL);
 816			kfree_skb(skb);
 817		}
 818	}
 819
 820	if (event <= HCI_DEV_DOWN) {
 821		struct hci_ev_si_device ev;
 822
 823		/* Send event to sockets */
 824		ev.event  = event;
 825		ev.dev_id = hdev->id;
 826		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
 827	}
 828
 829	if (event == HCI_DEV_UNREG) {
 830		struct sock *sk;
 831
 832		/* Wake up sockets using this dead device */
 833		read_lock(&hci_sk_list.lock);
  834		sk_for_each(sk, &hci_sk_list.head) {
  835			if (hci_pi(sk)->hdev == hdev) {
  836				sk->sk_err = EPIPE;
  837				sk->sk_state_change(sk);
  838			}
  839		}
 840		read_unlock(&hci_sk_list.lock);
 841	}
 842}
 843
 844static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
 845{
 846	struct hci_mgmt_chan *c;
 847
 848	list_for_each_entry(c, &mgmt_chan_list, list) {
 849		if (c->channel == channel)
 850			return c;
 851	}
 852
 853	return NULL;
 854}
 855
 856static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
 857{
 858	struct hci_mgmt_chan *c;
 859
 860	mutex_lock(&mgmt_chan_list_lock);
 861	c = __hci_mgmt_chan_find(channel);
 862	mutex_unlock(&mgmt_chan_list_lock);
 863
 864	return c;
 865}
 866
 867int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
 868{
 869	if (c->channel < HCI_CHANNEL_CONTROL)
 870		return -EINVAL;
 871
 872	mutex_lock(&mgmt_chan_list_lock);
 873	if (__hci_mgmt_chan_find(c->channel)) {
 874		mutex_unlock(&mgmt_chan_list_lock);
 875		return -EALREADY;
 876	}
 877
 878	list_add_tail(&c->list, &mgmt_chan_list);
 879
 880	mutex_unlock(&mgmt_chan_list_lock);
 881
 882	return 0;
 883}
 884EXPORT_SYMBOL(hci_mgmt_chan_register);
 885
 886void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
 887{
 888	mutex_lock(&mgmt_chan_list_lock);
 889	list_del(&c->list);
 890	mutex_unlock(&mgmt_chan_list_lock);
 891}
 892EXPORT_SYMBOL(hci_mgmt_chan_unregister);
 893
 894static int hci_sock_release(struct socket *sock)
 895{
 896	struct sock *sk = sock->sk;
 897	struct hci_dev *hdev;
 898	struct sk_buff *skb;
 899
 900	BT_DBG("sock %p sk %p", sock, sk);
 901
 902	if (!sk)
 903		return 0;
 904
 905	lock_sock(sk);
 906
 907	switch (hci_pi(sk)->channel) {
 908	case HCI_CHANNEL_MONITOR:
 909		atomic_dec(&monitor_promisc);
 910		break;
 911	case HCI_CHANNEL_RAW:
 912	case HCI_CHANNEL_USER:
 913	case HCI_CHANNEL_CONTROL:
 914		/* Send event to monitor */
 915		skb = create_monitor_ctrl_close(sk);
 916		if (skb) {
 917			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 918					    HCI_SOCK_TRUSTED, NULL);
 919			kfree_skb(skb);
 920		}
 921
 922		hci_sock_free_cookie(sk);
 923		break;
 924	}
 925
 926	bt_sock_unlink(&hci_sk_list, sk);
 927
 928	hdev = hci_pi(sk)->hdev;
 929	if (hdev) {
 930		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
 931		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
 932			/* When releasing a user channel exclusive access,
 933			 * call hci_dev_do_close directly instead of calling
 934			 * hci_dev_close to ensure the exclusive access will
 935			 * be released and the controller brought back down.
 936			 *
 937			 * The checking of HCI_AUTO_OFF is not needed in this
 938			 * case since it will have been cleared already when
 939			 * opening the user channel.
 940			 *
  941			 * Also make sure that the device hasn't already been
  942			 * unregistered, since in that case all the cleanup
  943			 * has already been done and hdev will be released
  944			 * by the hci_dev_put() below.
 945			 */
 946			hci_dev_do_close(hdev);
 947			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 948			mgmt_index_added(hdev);
 949		}
 950
 951		atomic_dec(&hdev->promisc);
 952		hci_dev_put(hdev);
 953	}
 954
 955	sock_orphan(sk);
  956	release_sock(sk);
  957	sock_put(sk);
 958	return 0;
 959}
 960
 961static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
 962{
 963	bdaddr_t bdaddr;
 964	int err;
 965
 966	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 967		return -EFAULT;
 968
 969	hci_dev_lock(hdev);
 970
 971	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
 972
 973	hci_dev_unlock(hdev);
 974
 975	return err;
 976}
 977
 978static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
 979{
 980	bdaddr_t bdaddr;
 981	int err;
 982
 983	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 984		return -EFAULT;
 985
 986	hci_dev_lock(hdev);
 987
 988	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
 989
 990	hci_dev_unlock(hdev);
 991
 992	return err;
 993}
 994
 995/* Ioctls that require bound socket */
 996static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
 997				unsigned long arg)
 998{
 999	struct hci_dev *hdev = hci_hdev_from_sock(sk);
1000
1001	if (IS_ERR(hdev))
1002		return PTR_ERR(hdev);
1003
1004	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1005		return -EBUSY;
1006
1007	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1008		return -EOPNOTSUPP;
 1009
 1010	switch (cmd) {
1011	case HCISETRAW:
1012		if (!capable(CAP_NET_ADMIN))
1013			return -EPERM;
1014		return -EOPNOTSUPP;
1015
1016	case HCIGETCONNINFO:
1017		return hci_get_conn_info(hdev, (void __user *)arg);
1018
1019	case HCIGETAUTHINFO:
1020		return hci_get_auth_info(hdev, (void __user *)arg);
1021
1022	case HCIBLOCKADDR:
1023		if (!capable(CAP_NET_ADMIN))
1024			return -EPERM;
1025		return hci_sock_reject_list_add(hdev, (void __user *)arg);
1026
1027	case HCIUNBLOCKADDR:
1028		if (!capable(CAP_NET_ADMIN))
1029			return -EPERM;
1030		return hci_sock_reject_list_del(hdev, (void __user *)arg);
1031	}
1032
1033	return -ENOIOCTLCMD;
1034}
1035
1036static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
1037			  unsigned long arg)
1038{
1039	void __user *argp = (void __user *)arg;
1040	struct sock *sk = sock->sk;
1041	int err;
1042
1043	BT_DBG("cmd %x arg %lx", cmd, arg);
1044
1045	/* Make sure the cmd is valid before doing anything */
1046	switch (cmd) {
1047	case HCIGETDEVLIST:
1048	case HCIGETDEVINFO:
1049	case HCIGETCONNLIST:
1050	case HCIDEVUP:
1051	case HCIDEVDOWN:
1052	case HCIDEVRESET:
1053	case HCIDEVRESTAT:
1054	case HCISETSCAN:
1055	case HCISETAUTH:
1056	case HCISETENCRYPT:
1057	case HCISETPTYPE:
1058	case HCISETLINKPOL:
1059	case HCISETLINKMODE:
1060	case HCISETACLMTU:
1061	case HCISETSCOMTU:
1062	case HCIINQUIRY:
1063	case HCISETRAW:
1064	case HCIGETCONNINFO:
1065	case HCIGETAUTHINFO:
1066	case HCIBLOCKADDR:
1067	case HCIUNBLOCKADDR:
1068		break;
1069	default:
1070		return -ENOIOCTLCMD;
1071	}
1072
1073	lock_sock(sk);
1074
1075	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1076		err = -EBADFD;
1077		goto done;
1078	}
1079
 1080	/* When calling an ioctl on an unbound raw socket, ensure that
 1081	 * the monitor gets informed. Ensure that the resulting event is
 1082	 * only sent once by checking whether the cookie exists. The
 1083	 * socket cookie will only ever be generated once for the
 1084	 * lifetime of a given socket.
 1085	 */
1086	if (hci_sock_gen_cookie(sk)) {
1087		struct sk_buff *skb;
1088
1089		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
1090		 * flag. Make sure that not only the current task but also
1091		 * the socket opener has the required capability, since
1092		 * privileged programs can be tricked into making ioctl calls
1093		 * on HCI sockets, and the socket should not be marked as
1094		 * trusted simply because the ioctl caller is privileged.
1095		 */
1096		if (sk_capable(sk, CAP_NET_ADMIN))
1097			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1098
1099		/* Send event to monitor */
1100		skb = create_monitor_ctrl_open(sk);
1101		if (skb) {
1102			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1103					    HCI_SOCK_TRUSTED, NULL);
1104			kfree_skb(skb);
1105		}
1106	}
1107
1108	release_sock(sk);
1109
1110	switch (cmd) {
1111	case HCIGETDEVLIST:
1112		return hci_get_dev_list(argp);
1113
1114	case HCIGETDEVINFO:
1115		return hci_get_dev_info(argp);
1116
1117	case HCIGETCONNLIST:
1118		return hci_get_conn_list(argp);
1119
1120	case HCIDEVUP:
1121		if (!capable(CAP_NET_ADMIN))
1122			return -EPERM;
1123		return hci_dev_open(arg);
1124
1125	case HCIDEVDOWN:
1126		if (!capable(CAP_NET_ADMIN))
1127			return -EPERM;
1128		return hci_dev_close(arg);
1129
1130	case HCIDEVRESET:
1131		if (!capable(CAP_NET_ADMIN))
1132			return -EPERM;
1133		return hci_dev_reset(arg);
1134
1135	case HCIDEVRESTAT:
1136		if (!capable(CAP_NET_ADMIN))
1137			return -EPERM;
1138		return hci_dev_reset_stat(arg);
1139
1140	case HCISETSCAN:
1141	case HCISETAUTH:
1142	case HCISETENCRYPT:
1143	case HCISETPTYPE:
1144	case HCISETLINKPOL:
1145	case HCISETLINKMODE:
1146	case HCISETACLMTU:
1147	case HCISETSCOMTU:
1148		if (!capable(CAP_NET_ADMIN))
1149			return -EPERM;
1150		return hci_dev_cmd(cmd, argp);
1151
1152	case HCIINQUIRY:
1153		return hci_inquiry(argp);
1154	}
1155
1156	lock_sock(sk);
1157
1158	err = hci_sock_bound_ioctl(sk, cmd, arg);
1159
1160done:
1161	release_sock(sk);
1162	return err;
1163}
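/* A minimal userspace sketch of the ioctl path above (constants from
 * the BlueZ <bluetooth/hci.h> header; error handling elided):
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	// Bring controller hci0 up; requires CAP_NET_ADMIN as checked
 *	// above, and EALREADY simply means it is already up.
 *	ioctl(fd, HCIDEVUP, 0);
 */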
1164
1165#ifdef CONFIG_COMPAT
1166static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
1167				 unsigned long arg)
1168{
1169	switch (cmd) {
1170	case HCIDEVUP:
1171	case HCIDEVDOWN:
1172	case HCIDEVRESET:
1173	case HCIDEVRESTAT:
1174		return hci_sock_ioctl(sock, cmd, arg);
1175	}
1176
1177	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
1178}
1179#endif
1180
1181static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
1182			 int addr_len)
1183{
1184	struct sockaddr_hci haddr;
1185	struct sock *sk = sock->sk;
1186	struct hci_dev *hdev = NULL;
1187	struct sk_buff *skb;
1188	int len, err = 0;
1189
1190	BT_DBG("sock %p sk %p", sock, sk);
1191
1192	if (!addr)
1193		return -EINVAL;
1194
1195	memset(&haddr, 0, sizeof(haddr));
1196	len = min_t(unsigned int, sizeof(haddr), addr_len);
1197	memcpy(&haddr, addr, len);
1198
1199	if (haddr.hci_family != AF_BLUETOOTH)
1200		return -EINVAL;
1201
1202	lock_sock(sk);
1203
1204	/* Allow detaching from dead device and attaching to alive device, if
1205	 * the caller wants to re-bind (instead of close) this socket in
1206	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
1207	 */
1208	hdev = hci_pi(sk)->hdev;
1209	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1210		hci_pi(sk)->hdev = NULL;
1211		sk->sk_state = BT_OPEN;
1212		hci_dev_put(hdev);
1213	}
1214	hdev = NULL;
1215
1216	if (sk->sk_state == BT_BOUND) {
1217		err = -EALREADY;
1218		goto done;
1219	}
1220
1221	switch (haddr.hci_channel) {
1222	case HCI_CHANNEL_RAW:
1223		if (hci_pi(sk)->hdev) {
1224			err = -EALREADY;
1225			goto done;
1226		}
1227
1228		if (haddr.hci_dev != HCI_DEV_NONE) {
1229			hdev = hci_dev_get(haddr.hci_dev);
1230			if (!hdev) {
1231				err = -ENODEV;
1232				goto done;
1233			}
1234
1235			atomic_inc(&hdev->promisc);
1236		}
1237
1238		hci_pi(sk)->channel = haddr.hci_channel;
1239
1240		if (!hci_sock_gen_cookie(sk)) {
 1241			/* If a cookie has already been assigned, then an
 1242			 * ioctl has previously been issued against the
 1243			 * unbound socket and triggered an open notification.
 1244			 * Send a close notification first to allow a clean
 1245			 * transition to the bound state.
 1246			 */
1247			skb = create_monitor_ctrl_close(sk);
1248			if (skb) {
1249				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1250						    HCI_SOCK_TRUSTED, NULL);
1251				kfree_skb(skb);
1252			}
1253		}
1254
1255		if (capable(CAP_NET_ADMIN))
1256			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1257
1258		hci_pi(sk)->hdev = hdev;
1259
1260		/* Send event to monitor */
1261		skb = create_monitor_ctrl_open(sk);
1262		if (skb) {
1263			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1264					    HCI_SOCK_TRUSTED, NULL);
1265			kfree_skb(skb);
1266		}
1267		break;
1268
1269	case HCI_CHANNEL_USER:
1270		if (hci_pi(sk)->hdev) {
1271			err = -EALREADY;
1272			goto done;
1273		}
1274
1275		if (haddr.hci_dev == HCI_DEV_NONE) {
1276			err = -EINVAL;
1277			goto done;
1278		}
1279
1280		if (!capable(CAP_NET_ADMIN)) {
1281			err = -EPERM;
1282			goto done;
1283		}
1284
1285		hdev = hci_dev_get(haddr.hci_dev);
1286		if (!hdev) {
1287			err = -ENODEV;
1288			goto done;
1289		}
1290
1291		if (test_bit(HCI_INIT, &hdev->flags) ||
1292		    hci_dev_test_flag(hdev, HCI_SETUP) ||
1293		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1294		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1295		     test_bit(HCI_UP, &hdev->flags))) {
1296			err = -EBUSY;
1297			hci_dev_put(hdev);
1298			goto done;
1299		}
1300
1301		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
1302			err = -EUSERS;
1303			hci_dev_put(hdev);
1304			goto done;
1305		}
1306
1307		mgmt_index_removed(hdev);
1308
1309		err = hci_dev_open(hdev->id);
1310		if (err) {
1311			if (err == -EALREADY) {
1312				/* In case the transport is already up and
1313				 * running, clear the error here.
1314				 *
1315				 * This can happen when opening a user
1316				 * channel and HCI_AUTO_OFF grace period
1317				 * is still active.
1318				 */
1319				err = 0;
1320			} else {
1321				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
1322				mgmt_index_added(hdev);
1323				hci_dev_put(hdev);
1324				goto done;
1325			}
1326		}
1327
1328		hci_pi(sk)->channel = haddr.hci_channel;
1329
1330		if (!hci_sock_gen_cookie(sk)) {
1331			/* In the case when a cookie has already been assigned,
1332			 * this socket will transition from a raw socket into
1333			 * a user channel socket. For a clean transition, send
1334			 * the close notification first.
1335			 */
1336			skb = create_monitor_ctrl_close(sk);
1337			if (skb) {
1338				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1339						    HCI_SOCK_TRUSTED, NULL);
1340				kfree_skb(skb);
1341			}
1342		}
1343
1344		/* The user channel is restricted to CAP_NET_ADMIN
1345		 * capabilities and with that implicitly trusted.
1346		 */
1347		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1348
1349		hci_pi(sk)->hdev = hdev;
1350
1351		/* Send event to monitor */
1352		skb = create_monitor_ctrl_open(sk);
1353		if (skb) {
1354			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1355					    HCI_SOCK_TRUSTED, NULL);
1356			kfree_skb(skb);
1357		}
1358
1359		atomic_inc(&hdev->promisc);
1360		break;
1361
1362	case HCI_CHANNEL_MONITOR:
1363		if (haddr.hci_dev != HCI_DEV_NONE) {
1364			err = -EINVAL;
1365			goto done;
1366		}
1367
1368		if (!capable(CAP_NET_RAW)) {
1369			err = -EPERM;
1370			goto done;
1371		}
1372
1373		hci_pi(sk)->channel = haddr.hci_channel;
1374
1375		/* The monitor interface is restricted to CAP_NET_RAW
1376		 * capabilities and with that implicitly trusted.
1377		 */
1378		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1379
1380		send_monitor_note(sk, "Linux version %s (%s)",
1381				  init_utsname()->release,
1382				  init_utsname()->machine);
1383		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
1384				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
1385		send_monitor_replay(sk);
1386		send_monitor_control_replay(sk);
1387
1388		atomic_inc(&monitor_promisc);
1389		break;
1390
1391	case HCI_CHANNEL_LOGGING:
1392		if (haddr.hci_dev != HCI_DEV_NONE) {
1393			err = -EINVAL;
1394			goto done;
1395		}
1396
1397		if (!capable(CAP_NET_ADMIN)) {
1398			err = -EPERM;
1399			goto done;
1400		}
1401
1402		hci_pi(sk)->channel = haddr.hci_channel;
1403		break;
1404
1405	default:
1406		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
1407			err = -EINVAL;
1408			goto done;
1409		}
1410
1411		if (haddr.hci_dev != HCI_DEV_NONE) {
1412			err = -EINVAL;
1413			goto done;
1414		}
1415
1416		/* Users with CAP_NET_ADMIN capabilities are allowed
1417		 * access to all management commands and events. For
1418		 * untrusted users the interface is restricted and
1419		 * also only untrusted events are sent.
1420		 */
1421		if (capable(CAP_NET_ADMIN))
1422			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1423
1424		hci_pi(sk)->channel = haddr.hci_channel;
1425
1426		/* At the moment the index and unconfigured index events
1427		 * are enabled unconditionally. Setting them on each
1428		 * socket when binding keeps this functionality. They
1429		 * however might be cleared later and then sending of these
1430		 * events will be disabled, but that is then intentional.
1431		 *
1432		 * This also enables generic events that are safe to be
1433		 * received by untrusted users. Example for such events
1434		 * are changes to settings, class of device, name etc.
1435		 */
1436		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
1437			if (!hci_sock_gen_cookie(sk)) {
1438				/* In the case when a cookie has already been
1439				 * assigned, this socket will transition from
1440				 * a raw socket into a control socket. To
1441				 * allow for a clean transition, send the
1442				 * close notification first.
1443				 */
1444				skb = create_monitor_ctrl_close(sk);
1445				if (skb) {
1446					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1447							    HCI_SOCK_TRUSTED, NULL);
1448					kfree_skb(skb);
1449				}
1450			}
1451
1452			/* Send event to monitor */
1453			skb = create_monitor_ctrl_open(sk);
1454			if (skb) {
1455				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1456						    HCI_SOCK_TRUSTED, NULL);
1457				kfree_skb(skb);
1458			}
1459
1460			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
1461			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
1462			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
1463			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
1464			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1465			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1466		}
1467		break;
1468	}
1469
1470	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
1471	if (!hci_pi(sk)->mtu)
1472		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;
1473
1474	sk->sk_state = BT_BOUND;
1475
1476done:
1477	release_sock(sk);
1478	return err;
1479}
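/* Userspace sketch of the bind path above (struct sockaddr_hci and
 * the HCI_CHANNEL_* constants as exposed through the BlueZ headers;
 * a sketch, not a complete program):
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC,
 *			BTPROTO_HCI);
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,		// hci0, or HCI_DEV_NONE
 *		.hci_channel = HCI_CHANNEL_RAW,	// or USER/MONITOR/...
 *	};
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 */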
1480
1481static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1482			    int peer)
1483{
1484	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1485	struct sock *sk = sock->sk;
1486	struct hci_dev *hdev;
1487	int err = 0;
1488
1489	BT_DBG("sock %p sk %p", sock, sk);
1490
1491	if (peer)
1492		return -EOPNOTSUPP;
1493
1494	lock_sock(sk);
1495
1496	hdev = hci_hdev_from_sock(sk);
1497	if (IS_ERR(hdev)) {
1498		err = PTR_ERR(hdev);
1499		goto done;
1500	}
 1501
 1502	haddr->hci_family = AF_BLUETOOTH;
1503	haddr->hci_dev    = hdev->id;
 1504	haddr->hci_channel = hci_pi(sk)->channel;
1505	err = sizeof(*haddr);
1506
1507done:
1508	release_sock(sk);
1509	return err;
1510}
1511
1512static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
1513			  struct sk_buff *skb)
1514{
1515	__u8 mask = hci_pi(sk)->cmsg_mask;
1516
1517	if (mask & HCI_CMSG_DIR) {
1518		int incoming = bt_cb(skb)->incoming;
1519		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
1520			 &incoming);
1521	}
1522
1523	if (mask & HCI_CMSG_TSTAMP) {
1524#ifdef CONFIG_COMPAT
1525		struct old_timeval32 ctv;
1526#endif
1527		struct __kernel_old_timeval tv;
1528		void *data;
1529		int len;
1530
1531		skb_get_timestamp(skb, &tv);
1532
1533		data = &tv;
1534		len = sizeof(tv);
1535#ifdef CONFIG_COMPAT
1536		if (!COMPAT_USE_64BIT_TIME &&
1537		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
1538			ctv.tv_sec = tv.tv_sec;
1539			ctv.tv_usec = tv.tv_usec;
1540			data = &ctv;
1541			len = sizeof(ctv);
1542		}
1543#endif
1544
1545		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
1546	}
1547}
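/* Receive-side sketch for the ancillary data above, with HCI_DATA_DIR
 * enabled via setsockopt() (see hci_sock_setsockopt_old() below):
 *
 *	char buf[512], cbuf[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *c;
 *	int incoming = 0;
 *
 *	recvmsg(fd, &msg, 0);
 *	for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
 *		if (c->cmsg_level == SOL_HCI &&
 *		    c->cmsg_type == HCI_CMSG_DIR)
 *			memcpy(&incoming, CMSG_DATA(c), sizeof(int));
 */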
1548
1549static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1550			    size_t len, int flags)
1551{
1552	struct scm_cookie scm;
1553	struct sock *sk = sock->sk;
1554	struct sk_buff *skb;
1555	int copied, err;
1556	unsigned int skblen;
1557
1558	BT_DBG("sock %p, sk %p", sock, sk);
1559
1560	if (flags & MSG_OOB)
1561		return -EOPNOTSUPP;
1562
1563	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1564		return -EOPNOTSUPP;
1565
1566	if (sk->sk_state == BT_CLOSED)
1567		return 0;
1568
1569	skb = skb_recv_datagram(sk, flags, &err);
1570	if (!skb)
1571		return err;
1572
1573	skblen = skb->len;
1574	copied = skb->len;
1575	if (len < copied) {
1576		msg->msg_flags |= MSG_TRUNC;
1577		copied = len;
1578	}
1579
1580	skb_reset_transport_header(skb);
1581	err = skb_copy_datagram_msg(skb, 0, msg, copied);
1582
1583	switch (hci_pi(sk)->channel) {
1584	case HCI_CHANNEL_RAW:
1585		hci_sock_cmsg(sk, msg, skb);
1586		break;
1587	case HCI_CHANNEL_USER:
1588	case HCI_CHANNEL_MONITOR:
1589		sock_recv_timestamp(msg, sk, skb);
1590		break;
1591	default:
1592		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1593			sock_recv_timestamp(msg, sk, skb);
1594		break;
1595	}
1596
1597	memset(&scm, 0, sizeof(scm));
1598	scm.creds = bt_cb(skb)->creds;
1599
1600	skb_free_datagram(sk, skb);
1601
1602	if (flags & MSG_TRUNC)
1603		copied = skblen;
1604
1605	scm_recv(sock, msg, &scm, flags);
1606
1607	return err ? : copied;
1608}
1609
1610static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1611			struct sk_buff *skb)
 1612{
 1613	u8 *cp;
1614	struct mgmt_hdr *hdr;
1615	u16 opcode, index, len;
1616	struct hci_dev *hdev = NULL;
1617	const struct hci_mgmt_handler *handler;
1618	bool var_len, no_hdev;
1619	int err;
1620
1621	BT_DBG("got %d bytes", skb->len);
1622
1623	if (skb->len < sizeof(*hdr))
1624		return -EINVAL;
1625
 1626	hdr = (void *)skb->data;
 1627	opcode = __le16_to_cpu(hdr->opcode);
1628	index = __le16_to_cpu(hdr->index);
1629	len = __le16_to_cpu(hdr->len);
1630
1631	if (len != skb->len - sizeof(*hdr)) {
1632		err = -EINVAL;
1633		goto done;
1634	}
1635
1636	if (chan->channel == HCI_CHANNEL_CONTROL) {
1637		struct sk_buff *cmd;
1638
1639		/* Send event to monitor */
1640		cmd = create_monitor_ctrl_command(sk, index, opcode, len,
1641						  skb->data + sizeof(*hdr));
1642		if (cmd) {
1643			hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
1644					    HCI_SOCK_TRUSTED, NULL);
1645			kfree_skb(cmd);
1646		}
1647	}
1648
1649	if (opcode >= chan->handler_count ||
1650	    chan->handlers[opcode].func == NULL) {
1651		BT_DBG("Unknown op %u", opcode);
1652		err = mgmt_cmd_status(sk, index, opcode,
1653				      MGMT_STATUS_UNKNOWN_COMMAND);
1654		goto done;
1655	}
1656
1657	handler = &chan->handlers[opcode];
1658
1659	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1660	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1661		err = mgmt_cmd_status(sk, index, opcode,
1662				      MGMT_STATUS_PERMISSION_DENIED);
1663		goto done;
1664	}
1665
1666	if (index != MGMT_INDEX_NONE) {
1667		hdev = hci_dev_get(index);
1668		if (!hdev) {
1669			err = mgmt_cmd_status(sk, index, opcode,
1670					      MGMT_STATUS_INVALID_INDEX);
1671			goto done;
1672		}
1673
1674		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1675		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1676		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1677			err = mgmt_cmd_status(sk, index, opcode,
1678					      MGMT_STATUS_INVALID_INDEX);
1679			goto done;
1680		}
1681
1682		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1683		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1684			err = mgmt_cmd_status(sk, index, opcode,
1685					      MGMT_STATUS_INVALID_INDEX);
1686			goto done;
1687		}
1688	}
1689
1690	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
1691		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1692		if (no_hdev != !hdev) {
1693			err = mgmt_cmd_status(sk, index, opcode,
1694					      MGMT_STATUS_INVALID_INDEX);
1695			goto done;
1696		}
1697	}
1698
1699	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1700	if ((var_len && len < handler->data_len) ||
1701	    (!var_len && len != handler->data_len)) {
1702		err = mgmt_cmd_status(sk, index, opcode,
1703				      MGMT_STATUS_INVALID_PARAMS);
1704		goto done;
1705	}
1706
1707	if (hdev && chan->hdev_init)
1708		chan->hdev_init(sk, hdev);
1709
1710	cp = skb->data + sizeof(*hdr);
1711
1712	err = handler->func(sk, hdev, cp, len);
1713	if (err < 0)
1714		goto done;
1715
1716	err = skb->len;
1717
1718done:
1719	if (hdev)
1720		hci_dev_put(hdev);
 1721
 1722	return err;
1723}
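/* Every management command starts with the fixed six byte header
 * parsed above (struct mgmt_hdr: opcode, index and len, all little
 * endian). For example, MGMT_OP_READ_VERSION with no parameters and
 * no bound controller is just the bytes:
 *
 *	01 00 ff ff 00 00	// opcode 0x0001, MGMT_INDEX_NONE, len 0
 */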
1724
1725static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
1726			     unsigned int flags)
1727{
 1728	struct hci_mon_hdr *hdr;
 1729	struct hci_dev *hdev;
1730	u16 index;
1731	int err;
1732
1733	/* The logging frame consists at minimum of the standard header,
1734	 * the priority byte, the ident length byte and at least one string
 1735	 * terminator NUL byte. Anything shorter is an invalid packet.
1736	 */
1737	if (skb->len < sizeof(*hdr) + 3)
1738		return -EINVAL;
 1739
 1740	hdr = (void *)skb->data;
1741
1742	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
 1743		return -EINVAL;
 1744
1745	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
1746		__u8 priority = skb->data[sizeof(*hdr)];
1747		__u8 ident_len = skb->data[sizeof(*hdr) + 1];
1748
 1749		/* Only the priorities 0-7 are valid; any other
 1750		 * value results in an invalid packet.
1751		 *
1752		 * The priority byte is followed by an ident length byte and
1753		 * the NUL terminated ident string. Check that the ident
1754		 * length is not overflowing the packet and also that the
1755		 * ident string itself is NUL terminated. In case the ident
 1756		 * length is zero, the zero length byte itself doubles
 1757		 * as the NUL terminator.
1758		 *
1759		 * The message follows the ident string (if present) and
1760		 * must be NUL terminated. Otherwise it is not a valid packet.
1761		 */
1762		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
1763		    ident_len > skb->len - sizeof(*hdr) - 3 ||
1764		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
 1765			return -EINVAL;
 1766	} else {
 1767		return -EINVAL;
 1768	}
1769
1770	index = __le16_to_cpu(hdr->index);
1771
1772	if (index != MGMT_INDEX_NONE) {
1773		hdev = hci_dev_get(index);
1774		if (!hdev)
 1775			return -ENODEV;
 1776	} else {
1777		hdev = NULL;
1778	}
1779
1780	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
1781
1782	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
1783	err = skb->len;
1784
1785	if (hdev)
1786		hci_dev_put(hdev);
 1787
 1788	return err;
1789}
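/* An accepted logging frame therefore looks like:
 *
 *	struct hci_mon_hdr hdr;	// opcode 0x0000, index, len
 *	__u8 priority;		// 0-7, syslog style
 *	__u8 ident_len;		// length of ident including its NUL
 *	char ident[ident_len];	// NUL terminated (absent if len is 0)
 *	char message[];		// NUL terminated
 *
 * and is forwarded to the monitor channel with the opcode rewritten
 * to HCI_MON_USER_LOGGING.
 */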
1790
1791static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1792			    size_t len)
1793{
1794	struct sock *sk = sock->sk;
1795	struct hci_mgmt_chan *chan;
1796	struct hci_dev *hdev;
1797	struct sk_buff *skb;
1798	int err;
1799	const unsigned int flags = msg->msg_flags;
1800
1801	BT_DBG("sock %p sk %p", sock, sk);
1802
1803	if (flags & MSG_OOB)
1804		return -EOPNOTSUPP;
1805
1806	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
1807		return -EINVAL;
1808
1809	if (len < 4 || len > hci_pi(sk)->mtu)
1810		return -EINVAL;
1811
1812	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
1813	if (IS_ERR(skb))
1814		return PTR_ERR(skb);
1815
1816	lock_sock(sk);
1817
1818	switch (hci_pi(sk)->channel) {
1819	case HCI_CHANNEL_RAW:
1820	case HCI_CHANNEL_USER:
1821		break;
1822	case HCI_CHANNEL_MONITOR:
1823		err = -EOPNOTSUPP;
1824		goto drop;
1825	case HCI_CHANNEL_LOGGING:
1826		err = hci_logging_frame(sk, skb, flags);
1827		goto drop;
1828	default:
1829		mutex_lock(&mgmt_chan_list_lock);
1830		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1831		if (chan)
1832			err = hci_mgmt_cmd(chan, sk, skb);
1833		else
1834			err = -EINVAL;
1835
1836		mutex_unlock(&mgmt_chan_list_lock);
1837		goto drop;
1838	}
1839
1840	hdev = hci_hdev_from_sock(sk);
1841	if (IS_ERR(hdev)) {
1842		err = PTR_ERR(hdev);
1843		goto drop;
1844	}
1845
1846	if (!test_bit(HCI_UP, &hdev->flags)) {
 1847		err = -ENETDOWN;
 1848		goto drop;
1849	}
1850
1851	hci_skb_pkt_type(skb) = skb->data[0];
1852	skb_pull(skb, 1);
1853
1854	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1855		/* No permission check is needed for user channel
1856		 * since that gets enforced when binding the socket.
1857		 *
1858		 * However check that the packet type is valid.
1859		 */
1860		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1861		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1862		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
1863		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
1864			err = -EINVAL;
1865			goto drop;
1866		}
1867
1868		skb_queue_tail(&hdev->raw_q, skb);
1869		queue_work(hdev->workqueue, &hdev->tx_work);
1870	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
1871		u16 opcode = get_unaligned_le16(skb->data);
1872		u16 ogf = hci_opcode_ogf(opcode);
1873		u16 ocf = hci_opcode_ocf(opcode);
1874
1875		if (((ogf > HCI_SFLT_MAX_OGF) ||
1876		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1877				   &hci_sec_filter.ocf_mask[ogf])) &&
1878		    !capable(CAP_NET_RAW)) {
1879			err = -EPERM;
1880			goto drop;
1881		}
1882
1883		/* Since the opcode has already been extracted here, store
1884		 * a copy of the value for later use by the drivers.
1885		 */
1886		hci_skb_opcode(skb) = opcode;
1887
1888		if (ogf == 0x3f) {
1889			skb_queue_tail(&hdev->raw_q, skb);
1890			queue_work(hdev->workqueue, &hdev->tx_work);
1891		} else {
1892			/* Stand-alone HCI commands must be flagged as
1893			 * single-command requests.
1894			 */
1895			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
1896
1897			skb_queue_tail(&hdev->cmd_q, skb);
1898			queue_work(hdev->workqueue, &hdev->cmd_work);
1899		}
1900	} else {
1901		if (!capable(CAP_NET_RAW)) {
1902			err = -EPERM;
1903			goto drop;
1904		}
1905
1906		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1907		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
1908		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
1909			err = -EINVAL;
1910			goto drop;
1911		}
1912
1913		skb_queue_tail(&hdev->raw_q, skb);
1914		queue_work(hdev->workqueue, &hdev->tx_work);
1915	}
1916
1917	err = len;
1918
1919done:
1920	release_sock(sk);
1921	return err;
1922
1923drop:
1924	kfree_skb(skb);
1925	goto done;
1926}
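/* Sending on a raw channel socket is a single write of the packet
 * type byte followed by the HCI packet. A userspace sketch issuing
 * Read Local Version Information (OGF 0x04, OCF 0x0001), which the
 * security filter above allows without CAP_NET_RAW:
 *
 *	const unsigned char cmd[] = {
 *		0x01,		// HCI_COMMAND_PKT
 *		0x01, 0x10,	// opcode 0x1001, little endian
 *		0x00,		// parameter length
 *	};
 *	send(fd, cmd, sizeof(cmd), 0);
 */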
1927
1928static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
1929				   sockptr_t optval, unsigned int optlen)
1930{
1931	struct hci_ufilter uf = { .opcode = 0 };
1932	struct sock *sk = sock->sk;
1933	int err = 0, opt = 0;
1934
1935	BT_DBG("sk %p, opt %d", sk, optname);
 1936
 1937	lock_sock(sk);
1938
1939	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1940		err = -EBADFD;
1941		goto done;
1942	}
1943
1944	switch (optname) {
1945	case HCI_DATA_DIR:
1946		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
1947		if (err)
 1948			break;
 1949
1950		if (opt)
1951			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1952		else
1953			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1954		break;
1955
1956	case HCI_TIME_STAMP:
1957		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
1958		if (err)
 1959			break;
 1960
1961		if (opt)
1962			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1963		else
1964			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1965		break;
1966
1967	case HCI_FILTER:
1968		{
1969			struct hci_filter *f = &hci_pi(sk)->filter;
1970
1971			uf.type_mask = f->type_mask;
1972			uf.opcode    = f->opcode;
1973			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1974			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1975		}
1976
1977		err = copy_safe_from_sockptr(&uf, sizeof(uf), optval, optlen);
 1978		if (err)
 1979			break;
 1980
1981		if (!capable(CAP_NET_RAW)) {
1982			uf.type_mask &= hci_sec_filter.type_mask;
1983			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1984			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1985		}
1986
1987		{
1988			struct hci_filter *f = &hci_pi(sk)->filter;
1989
1990			f->type_mask = uf.type_mask;
1991			f->opcode    = uf.opcode;
1992			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
1993			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
1994		}
1995		break;
1996
1997	default:
1998		err = -ENOPROTOOPT;
1999		break;
2000	}
2001
2002done:
2003	release_sock(sk);
2004	return err;
2005}
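/* Userspace sketch for HCI_FILTER (the BlueZ struct hci_filter has
 * the same layout as struct hci_ufilter; its helper macros such as
 * hci_filter_set_ptype() amount to the bit operations shown here):
 *
 *	struct hci_filter flt = { 0 };
 *
 *	flt.type_mask = 1 << HCI_EVENT_PKT;	// events only
 *	flt.event_mask[0] = 0xffffffff;		// events 0x00-0x1f
 *	flt.event_mask[1] = 0xffffffff;		// events 0x20-0x3f
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 *
 * Without CAP_NET_RAW the requested masks are silently intersected
 * with hci_sec_filter above.
 */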
2006
2007static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
2008			       sockptr_t optval, unsigned int optlen)
2009{
2010	struct sock *sk = sock->sk;
2011	int err = 0;
2012	u16 opt;
2013
2014	BT_DBG("sk %p, opt %d", sk, optname);
2015
2016	if (level == SOL_HCI)
2017		return hci_sock_setsockopt_old(sock, level, optname, optval,
2018					       optlen);
2019
2020	if (level != SOL_BLUETOOTH)
2021		return -ENOPROTOOPT;
2022
2023	lock_sock(sk);
2024
2025	switch (optname) {
2026	case BT_SNDMTU:
2027	case BT_RCVMTU:
2028		switch (hci_pi(sk)->channel) {
2029		/* Don't allow changing MTU for channels that are meant for HCI
2030		 * traffic only.
2031		 */
2032		case HCI_CHANNEL_RAW:
2033		case HCI_CHANNEL_USER:
2034			err = -ENOPROTOOPT;
2035			goto done;
2036		}
2037
2038		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
2039		if (err)
2040			break;
2041
2042		hci_pi(sk)->mtu = opt;
2043		break;
2044
2045	default:
2046		err = -ENOPROTOOPT;
2047		break;
2048	}
2049
2050done:
2051	release_sock(sk);
2052	return err;
2053}
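/* For the channels that allow it, the MTU is a plain u16 socket
 * option, e.g.:
 *
 *	uint16_t mtu = 1024;
 *
 *	setsockopt(fd, SOL_BLUETOOTH, BT_RCVMTU, &mtu, sizeof(mtu));
 */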
2054
2055static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
2056				   char __user *optval, int __user *optlen)
2057{
2058	struct hci_ufilter uf;
2059	struct sock *sk = sock->sk;
2060	int len, opt, err = 0;
2061
2062	BT_DBG("sk %p, opt %d", sk, optname);
 2063
 2064	if (get_user(len, optlen))
2065		return -EFAULT;
2066
2067	lock_sock(sk);
2068
2069	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
2070		err = -EBADFD;
2071		goto done;
2072	}
2073
2074	switch (optname) {
2075	case HCI_DATA_DIR:
2076		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
2077			opt = 1;
2078		else
2079			opt = 0;
2080
2081		if (put_user(opt, optval))
2082			err = -EFAULT;
2083		break;
2084
2085	case HCI_TIME_STAMP:
2086		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
2087			opt = 1;
2088		else
2089			opt = 0;
2090
2091		if (put_user(opt, optval))
2092			err = -EFAULT;
2093		break;
2094
2095	case HCI_FILTER:
2096		{
2097			struct hci_filter *f = &hci_pi(sk)->filter;
2098
2099			memset(&uf, 0, sizeof(uf));
2100			uf.type_mask = f->type_mask;
2101			uf.opcode    = f->opcode;
2102			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
2103			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
2104		}
2105
2106		len = min_t(unsigned int, len, sizeof(uf));
2107		if (copy_to_user(optval, &uf, len))
2108			err = -EFAULT;
2109		break;
2110
2111	default:
2112		err = -ENOPROTOOPT;
2113		break;
2114	}
2115
2116done:
2117	release_sock(sk);
2118	return err;
2119}
2120
2121static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
2122			       char __user *optval, int __user *optlen)
2123{
2124	struct sock *sk = sock->sk;
2125	int err = 0;
2126
2127	BT_DBG("sk %p, opt %d", sk, optname);
2128
2129	if (level == SOL_HCI)
2130		return hci_sock_getsockopt_old(sock, level, optname, optval,
2131					       optlen);
2132
2133	if (level != SOL_BLUETOOTH)
2134		return -ENOPROTOOPT;
2135
2136	lock_sock(sk);
2137
2138	switch (optname) {
2139	case BT_SNDMTU:
2140	case BT_RCVMTU:
2141		if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
2142			err = -EFAULT;
2143		break;
2144
2145	default:
2146		err = -ENOPROTOOPT;
2147		break;
2148	}
2149
2150	release_sock(sk);
2151	return err;
2152}
2153
2154static void hci_sock_destruct(struct sock *sk)
2155{
2156	mgmt_cleanup(sk);
2157	skb_queue_purge(&sk->sk_receive_queue);
2158	skb_queue_purge(&sk->sk_write_queue);
2159}
2160
2161static const struct proto_ops hci_sock_ops = {
2162	.family		= PF_BLUETOOTH,
2163	.owner		= THIS_MODULE,
2164	.release	= hci_sock_release,
2165	.bind		= hci_sock_bind,
2166	.getname	= hci_sock_getname,
2167	.sendmsg	= hci_sock_sendmsg,
2168	.recvmsg	= hci_sock_recvmsg,
2169	.ioctl		= hci_sock_ioctl,
2170#ifdef CONFIG_COMPAT
2171	.compat_ioctl	= hci_sock_compat_ioctl,
2172#endif
2173	.poll		= datagram_poll,
2174	.listen		= sock_no_listen,
2175	.shutdown	= sock_no_shutdown,
2176	.setsockopt	= hci_sock_setsockopt,
2177	.getsockopt	= hci_sock_getsockopt,
2178	.connect	= sock_no_connect,
2179	.socketpair	= sock_no_socketpair,
2180	.accept		= sock_no_accept,
2181	.mmap		= sock_no_mmap
2182};
2183
2184static struct proto hci_sk_proto = {
2185	.name		= "HCI",
2186	.owner		= THIS_MODULE,
2187	.obj_size	= sizeof(struct hci_pinfo)
2188};
2189
2190static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
2191			   int kern)
2192{
2193	struct sock *sk;
2194
2195	BT_DBG("sock %p", sock);
2196
2197	if (sock->type != SOCK_RAW)
2198		return -ESOCKTNOSUPPORT;
2199
2200	sock->ops = &hci_sock_ops;
2201
2202	sk = bt_sock_alloc(net, sock, &hci_sk_proto, protocol, GFP_ATOMIC,
2203			   kern);
2204	if (!sk)
2205		return -ENOMEM;
2206
2207	sock->state = SS_UNCONNECTED;
2208	sk->sk_destruct = hci_sock_destruct;
2209
2210	bt_sock_link(&hci_sk_list, sk);
2211	return 0;
2212}
2213
2214static const struct net_proto_family hci_sock_family_ops = {
2215	.family	= PF_BLUETOOTH,
2216	.owner	= THIS_MODULE,
2217	.create	= hci_sock_create,
2218};
2219
2220int __init hci_sock_init(void)
2221{
2222	int err;
2223
2224	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
2225
2226	err = proto_register(&hci_sk_proto, 0);
2227	if (err < 0)
2228		return err;
2229
2230	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
2231	if (err < 0) {
2232		BT_ERR("HCI socket registration failed");
2233		goto error;
2234	}
2235
2236	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
2237	if (err < 0) {
2238		BT_ERR("Failed to create HCI proc file");
2239		bt_sock_unregister(BTPROTO_HCI);
2240		goto error;
2241	}
2242
2243	BT_INFO("HCI socket layer initialized");
2244
2245	return 0;
2246
2247error:
2248	proto_unregister(&hci_sk_proto);
2249	return err;
2250}
2251
2252void hci_sock_cleanup(void)
2253{
2254	bt_procfs_cleanup(&init_net, "hci");
2255	bt_sock_unregister(BTPROTO_HCI);
2256	proto_unregister(&hci_sk_proto);
2257}
v4.10.11
  24
  25/* Bluetooth HCI sockets. */
  26
  27#include <linux/export.h>
  28#include <linux/utsname.h>
  29#include <linux/sched.h>
  30#include <asm/unaligned.h>
  31
  32#include <net/bluetooth/bluetooth.h>
  33#include <net/bluetooth/hci_core.h>
  34#include <net/bluetooth/hci_mon.h>
  35#include <net/bluetooth/mgmt.h>
  36
  37#include "mgmt_util.h"
  38
  39static LIST_HEAD(mgmt_chan_list);
  40static DEFINE_MUTEX(mgmt_chan_list_lock);
  41
  42static DEFINE_IDA(sock_cookie_ida);
  43
  44static atomic_t monitor_promisc = ATOMIC_INIT(0);
  45
  46/* ----- HCI socket interface ----- */
  47
  48/* Socket info */
  49#define hci_pi(sk) ((struct hci_pinfo *) sk)
  50
  51struct hci_pinfo {
  52	struct bt_sock    bt;
  53	struct hci_dev    *hdev;
  54	struct hci_filter filter;
  55	__u32             cmsg_mask;
  56	unsigned short    channel;
  57	unsigned long     flags;
  58	__u32             cookie;
  59	char              comm[TASK_COMM_LEN];
  60};
  61
  62void hci_sock_set_flag(struct sock *sk, int nr)
  63{
  64	set_bit(nr, &hci_pi(sk)->flags);
  65}
  66
  67void hci_sock_clear_flag(struct sock *sk, int nr)
  68{
  69	clear_bit(nr, &hci_pi(sk)->flags);
  70}
  71
  72int hci_sock_test_flag(struct sock *sk, int nr)
  73{
  74	return test_bit(nr, &hci_pi(sk)->flags);
  75}
  76
  77unsigned short hci_sock_get_channel(struct sock *sk)
  78{
  79	return hci_pi(sk)->channel;
  80}
  81
  82u32 hci_sock_get_cookie(struct sock *sk)
  83{
  84	return hci_pi(sk)->cookie;
  85}
  86
  87static bool hci_sock_gen_cookie(struct sock *sk)
  88{
  89	int id = hci_pi(sk)->cookie;
  90
  91	if (!id) {
  92		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
  93		if (id < 0)
  94			id = 0xffffffff;
  95
  96		hci_pi(sk)->cookie = id;
  97		get_task_comm(hci_pi(sk)->comm, current);
  98		return true;
  99	}
 100
 101	return false;
 102}
 103
 104static void hci_sock_free_cookie(struct sock *sk)
 105{
 106	int id = hci_pi(sk)->cookie;
 107
 108	if (id) {
 109		hci_pi(sk)->cookie = 0xffffffff;
 110		ida_simple_remove(&sock_cookie_ida, id);
 111	}
 112}
 113
 114static inline int hci_test_bit(int nr, const void *addr)
 115{
 116	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
 117}
 118
 119/* Security filter */
 120#define HCI_SFLT_MAX_OGF  5
 121
 122struct hci_sec_filter {
 123	__u32 type_mask;
 124	__u32 event_mask[2];
 125	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
 126};
 127
 128static const struct hci_sec_filter hci_sec_filter = {
 129	/* Packet types */
 130	0x10,
 131	/* Events */
 132	{ 0x1000d9fe, 0x0000b00c },
 133	/* Commands */
 134	{
 135		{ 0x0 },
 136		/* OGF_LINK_CTL */
 137		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
 138		/* OGF_LINK_POLICY */
 139		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
 140		/* OGF_HOST_CTL */
 141		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
 142		/* OGF_INFO_PARAM */
 143		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
 144		/* OGF_STATUS_PARAM */
 145		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
 146	}
 147};
 148
 149static struct bt_sock_list hci_sk_list = {
 150	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
 151};
 152
 153static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
 154{
 155	struct hci_filter *flt;
 156	int flt_type, flt_event;
 157
 158	/* Apply filter */
 159	flt = &hci_pi(sk)->filter;
 160
 161	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
 162
 163	if (!test_bit(flt_type, &flt->type_mask))
 164		return true;
 165
 166	/* Extra filter for event packets only */
 167	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
 168		return false;
 169
 170	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
 171
 172	if (!hci_test_bit(flt_event, &flt->event_mask))
 173		return true;
 174
 175	/* Check filter only when opcode is set */
 176	if (!flt->opcode)
 177		return false;
 178
 179	if (flt_event == HCI_EV_CMD_COMPLETE &&
 180	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
 181		return true;
 182
 183	if (flt_event == HCI_EV_CMD_STATUS &&
 184	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
 185		return true;
 186
 187	return false;
 188}
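/* Editor's sketch (not part of this file): installing the per-socket
 * filter that is_filtered_packet() evaluates. A freshly created raw
 * socket has an all-zero filter, so nothing is delivered until
 * userspace sets HCI_FILTER. Bit n of type_mask gates packet type n;
 * bit n of the 64-bit event_mask gates event n. The constant values
 * below are assumed to mirror the kernel UAPI headers.
 */
#include <stdint.h>
#include <sys/socket.h>

#define SOL_HCI             0
#define HCI_FILTER          2
#define HCI_EVENT_PKT       0x04
#define HCI_EV_CMD_COMPLETE 0x0e
#define HCI_EV_CMD_STATUS   0x0f

struct hci_ufilter {
	uint32_t type_mask;
	uint32_t event_mask[2];
	uint16_t opcode;
};

/* Pass only HCI event packets, and of those only Command Complete and
 * Command Status. Both events are within hci_sec_filter, so this works
 * without CAP_NET_RAW.
 */
static int allow_cmd_events(int fd)
{
	struct hci_ufilter uf = { 0 };

	uf.type_mask = 1U << HCI_EVENT_PKT;
	uf.event_mask[0] = (1U << HCI_EV_CMD_COMPLETE) |
			   (1U << HCI_EV_CMD_STATUS);
	uf.opcode = 0; /* 0 = do not match on a specific command opcode */

	return setsockopt(fd, SOL_HCI, HCI_FILTER, &uf, sizeof(uf));
}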
 189
 190/* Send frame to RAW socket */
 191void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 192{
 193	struct sock *sk;
 194	struct sk_buff *skb_copy = NULL;
 195
 196	BT_DBG("hdev %p len %d", hdev, skb->len);
 197
 198	read_lock(&hci_sk_list.lock);
 199
 200	sk_for_each(sk, &hci_sk_list.head) {
 201		struct sk_buff *nskb;
 202
 203		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
 204			continue;
 205
 206		/* Don't send frame to the socket it came from */
 207		if (skb->sk == sk)
 208			continue;
 209
 210		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
 211			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
 212			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 213			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 214			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
 215				continue;
 216			if (is_filtered_packet(sk, skb))
 217				continue;
 218		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 219			if (!bt_cb(skb)->incoming)
 220				continue;
 221			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 222			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 223			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
 224				continue;
 225		} else {
 226			/* Don't send frame to other channel types */
 227			continue;
 228		}
 229
 230		if (!skb_copy) {
 231			/* Create a private copy with headroom */
 232			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
 233			if (!skb_copy)
 234				continue;
 235
 236			/* Put type byte before the data */
 237			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
 238		}
 239
 240		nskb = skb_clone(skb_copy, GFP_ATOMIC);
 241		if (!nskb)
 242			continue;
 243
 244		if (sock_queue_rcv_skb(sk, nskb))
 245			kfree_skb(nskb);
 246	}
 247
 248	read_unlock(&hci_sk_list.lock);
 249
 250	kfree_skb(skb_copy);
 251}
 252
 253/* Send frame to sockets with specific channel */
 254void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 255			 int flag, struct sock *skip_sk)
 256{
 257	struct sock *sk;
 258
 259	BT_DBG("channel %u len %d", channel, skb->len);
 260
 261	read_lock(&hci_sk_list.lock);
 262
 263	sk_for_each(sk, &hci_sk_list.head) {
 264		struct sk_buff *nskb;
 265
 266		/* Ignore socket without the flag set */
 267		if (!hci_sock_test_flag(sk, flag))
 268			continue;
 269
 270		/* Skip the original socket */
 271		if (sk == skip_sk)
 272			continue;
 273
 274		if (sk->sk_state != BT_BOUND)
 275			continue;
 276
 277		if (hci_pi(sk)->channel != channel)
 278			continue;
 279
 280		nskb = skb_clone(skb, GFP_ATOMIC);
 281		if (!nskb)
 282			continue;
 283
 284		if (sock_queue_rcv_skb(sk, nskb))
 285			kfree_skb(nskb);
 286	}
 287
 288	read_unlock(&hci_sk_list.lock);
 289}
 290
 291/* Send frame to monitor socket */
 292void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 293{
 294	struct sk_buff *skb_copy = NULL;
 295	struct hci_mon_hdr *hdr;
 296	__le16 opcode;
 297
 298	if (!atomic_read(&monitor_promisc))
 299		return;
 300
 301	BT_DBG("hdev %p len %d", hdev, skb->len);
 302
 303	switch (hci_skb_pkt_type(skb)) {
 304	case HCI_COMMAND_PKT:
 305		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
 306		break;
 307	case HCI_EVENT_PKT:
 308		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
 309		break;
 310	case HCI_ACLDATA_PKT:
 311		if (bt_cb(skb)->incoming)
 312			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
 313		else
 314			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
 315		break;
 316	case HCI_SCODATA_PKT:
 317		if (bt_cb(skb)->incoming)
 318			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
 319		else
 320			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
 321		break;
 322	case HCI_DIAG_PKT:
 323		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
 324		break;
 325	default:
 326		return;
 327	}
 328
 329	/* Create a private copy with headroom */
 330	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
 331	if (!skb_copy)
 332		return;
 333
 334	/* Put header before the data */
 335	hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
 336	hdr->opcode = opcode;
 337	hdr->index = cpu_to_le16(hdev->id);
 338	hdr->len = cpu_to_le16(skb->len);
 339
 340	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
 341			    HCI_SOCK_TRUSTED, NULL);
 342	kfree_skb(skb_copy);
 343}
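/* Editor's sketch (not part of this file): a minimal monitor-channel
 * reader, roughly what btmon does. Binding HCI_CHANNEL_MONITOR needs
 * CAP_NET_RAW; every frame read() returns starts with the 6-byte header
 * that hci_send_to_monitor() pushes above, fields little endian on the
 * wire (printed raw here, so correct on little-endian hosts). Constant
 * values and the sockaddr layout are assumed to mirror the kernel.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#define BTPROTO_HCI         1
#define HCI_DEV_NONE        0xffff
#define HCI_CHANNEL_MONITOR 2

struct sockaddr_hci {
	sa_family_t    hci_family;
	unsigned short hci_dev;
	unsigned short hci_channel;
};

struct hci_mon_hdr {
	uint16_t opcode;
	uint16_t index;
	uint16_t len;
} __attribute__((packed));

int main(void)
{
	struct sockaddr_hci addr = {
		.hci_family  = AF_BLUETOOTH,
		.hci_dev     = HCI_DEV_NONE,
		.hci_channel = HCI_CHANNEL_MONITOR,
	};
	unsigned char buf[2048];
	int fd;

	fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("monitor socket");
		return 1;
	}

	for (;;) {
		struct hci_mon_hdr hdr;
		ssize_t n = read(fd, buf, sizeof(buf));

		if (n < (ssize_t)sizeof(hdr))
			break;
		memcpy(&hdr, buf, sizeof(hdr));
		printf("opcode 0x%04x index %u len %u\n",
		       hdr.opcode, hdr.index, hdr.len);
	}

	close(fd);
	return 0;
}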
 344
 345void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
 346				 void *data, u16 data_len, ktime_t tstamp,
 347				 int flag, struct sock *skip_sk)
 348{
 349	struct sock *sk;
 350	__le16 index;
 351
 352	if (hdev)
 353		index = cpu_to_le16(hdev->id);
 354	else
 355		index = cpu_to_le16(MGMT_INDEX_NONE);
 356
 357	read_lock(&hci_sk_list.lock);
 358
 359	sk_for_each(sk, &hci_sk_list.head) {
 360		struct hci_mon_hdr *hdr;
 361		struct sk_buff *skb;
 362
 363		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
 364			continue;
 365
 366		/* Ignore socket without the flag set */
 367		if (!hci_sock_test_flag(sk, flag))
 368			continue;
 369
 370		/* Skip the original socket */
 371		if (sk == skip_sk)
 372			continue;
 373
 374		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
 375		if (!skb)
 376			continue;
 377
 378		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 379		put_unaligned_le16(event, skb_put(skb, 2));
 380
 381		if (data)
 382			memcpy(skb_put(skb, data_len), data, data_len);
 383
 384		skb->tstamp = tstamp;
 385
 386		hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 387		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
 388		hdr->index = index;
 389		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 390
 391		hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 392				    HCI_SOCK_TRUSTED, NULL);
 393		kfree_skb(skb);
 394	}
 395
 396	read_unlock(&hci_sk_list.lock);
 397}
 398
 399static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
 400{
 401	struct hci_mon_hdr *hdr;
 402	struct hci_mon_new_index *ni;
 403	struct hci_mon_index_info *ii;
 404	struct sk_buff *skb;
 405	__le16 opcode;
 406
 407	switch (event) {
 408	case HCI_DEV_REG:
 409		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
 410		if (!skb)
 411			return NULL;
 412
 413		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
 414		ni->type = hdev->dev_type;
 415		ni->bus = hdev->bus;
 416		bacpy(&ni->bdaddr, &hdev->bdaddr);
 417		memcpy(ni->name, hdev->name, 8);
 418
 419		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
 420		break;
 421
 422	case HCI_DEV_UNREG:
 423		skb = bt_skb_alloc(0, GFP_ATOMIC);
 424		if (!skb)
 425			return NULL;
 426
 427		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
 428		break;
 429
 430	case HCI_DEV_SETUP:
 431		if (hdev->manufacturer == 0xffff)
 432			return NULL;
 433
 434		/* fall through */
 435
 436	case HCI_DEV_UP:
 437		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
 438		if (!skb)
 439			return NULL;
 440
 441		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
 442		bacpy(&ii->bdaddr, &hdev->bdaddr);
 443		ii->manufacturer = cpu_to_le16(hdev->manufacturer);
 444
 445		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
 446		break;
 447
 448	case HCI_DEV_OPEN:
 449		skb = bt_skb_alloc(0, GFP_ATOMIC);
 450		if (!skb)
 451			return NULL;
 452
 453		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
 454		break;
 455
 456	case HCI_DEV_CLOSE:
 457		skb = bt_skb_alloc(0, GFP_ATOMIC);
 458		if (!skb)
 459			return NULL;
 460
 461		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
 462		break;
 463
 464	default:
 465		return NULL;
 466	}
 467
 468	__net_timestamp(skb);
 469
 470	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 471	hdr->opcode = opcode;
 472	hdr->index = cpu_to_le16(hdev->id);
 473	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 474
 475	return skb;
 476}
 477
 478static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
 479{
 480	struct hci_mon_hdr *hdr;
 481	struct sk_buff *skb;
 482	u16 format;
 483	u8 ver[3];
 484	u32 flags;
 485
 486	/* No message needed when cookie is not present */
 487	if (!hci_pi(sk)->cookie)
 488		return NULL;
 489
 490	switch (hci_pi(sk)->channel) {
 491	case HCI_CHANNEL_RAW:
 492		format = 0x0000;
 493		ver[0] = BT_SUBSYS_VERSION;
 494		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
 495		break;
 496	case HCI_CHANNEL_USER:
 497		format = 0x0001;
 498		ver[0] = BT_SUBSYS_VERSION;
 499		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
 500		break;
 501	case HCI_CHANNEL_CONTROL:
 502		format = 0x0002;
 503		mgmt_fill_version_info(ver);
 504		break;
 505	default:
 506		/* No message for unsupported format */
 507		return NULL;
 508	}
 509
  510	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
 511	if (!skb)
 512		return NULL;
 513
 514	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
 515
 516	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 517	put_unaligned_le16(format, skb_put(skb, 2));
 518	memcpy(skb_put(skb, sizeof(ver)), ver, sizeof(ver));
 519	put_unaligned_le32(flags, skb_put(skb, 4));
 520	*skb_put(skb, 1) = TASK_COMM_LEN;
 521	memcpy(skb_put(skb, TASK_COMM_LEN), hci_pi(sk)->comm, TASK_COMM_LEN);
 522
 523	__net_timestamp(skb);
 524
 525	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 526	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
 527	if (hci_pi(sk)->hdev)
 528		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
 529	else
 530		hdr->index = cpu_to_le16(HCI_DEV_NONE);
 531	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 532
 533	return skb;
 534}
 535
 536static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
 537{
 538	struct hci_mon_hdr *hdr;
 539	struct sk_buff *skb;
 540
 541	/* No message needed when cookie is not present */
 542	if (!hci_pi(sk)->cookie)
 543		return NULL;
 544
 545	switch (hci_pi(sk)->channel) {
 546	case HCI_CHANNEL_RAW:
 547	case HCI_CHANNEL_USER:
 548	case HCI_CHANNEL_CONTROL:
 549		break;
 550	default:
 551		/* No message for unsupported format */
 552		return NULL;
 553	}
 554
 555	skb = bt_skb_alloc(4, GFP_ATOMIC);
 556	if (!skb)
 557		return NULL;
 558
 559	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 560
 561	__net_timestamp(skb);
 562
 563	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 564	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
 565	if (hci_pi(sk)->hdev)
 566		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
 567	else
 568		hdr->index = cpu_to_le16(HCI_DEV_NONE);
 569	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 570
 571	return skb;
 572}
 573
 574static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
 575						   u16 opcode, u16 len,
 576						   const void *buf)
 577{
 578	struct hci_mon_hdr *hdr;
 579	struct sk_buff *skb;
 580
 581	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
 582	if (!skb)
 583		return NULL;
 584
 585	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
 586	put_unaligned_le16(opcode, skb_put(skb, 2));
 587
 588	if (buf)
 589		memcpy(skb_put(skb, len), buf, len);
 590
 591	__net_timestamp(skb);
 592
 593	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 594	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
 595	hdr->index = cpu_to_le16(index);
 596	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 597
 598	return skb;
 599}
 600
 601static void __printf(2, 3)
 602send_monitor_note(struct sock *sk, const char *fmt, ...)
 603{
 604	size_t len;
 605	struct hci_mon_hdr *hdr;
 606	struct sk_buff *skb;
 607	va_list args;
 608
 609	va_start(args, fmt);
 610	len = vsnprintf(NULL, 0, fmt, args);
 611	va_end(args);
 612
 613	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
 614	if (!skb)
 615		return;
 616
 617	va_start(args, fmt);
 618	vsprintf(skb_put(skb, len), fmt, args);
 619	*skb_put(skb, 1) = 0;
 620	va_end(args);
 621
 622	__net_timestamp(skb);
 623
 624	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 625	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
 626	hdr->index = cpu_to_le16(HCI_DEV_NONE);
 627	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 628
 629	if (sock_queue_rcv_skb(sk, skb))
 630		kfree_skb(skb);
 631}
 632
 633static void send_monitor_replay(struct sock *sk)
 634{
 635	struct hci_dev *hdev;
 636
 637	read_lock(&hci_dev_list_lock);
 638
 639	list_for_each_entry(hdev, &hci_dev_list, list) {
 640		struct sk_buff *skb;
 641
 642		skb = create_monitor_event(hdev, HCI_DEV_REG);
 643		if (!skb)
 644			continue;
 645
 646		if (sock_queue_rcv_skb(sk, skb))
 647			kfree_skb(skb);
 648
 649		if (!test_bit(HCI_RUNNING, &hdev->flags))
 650			continue;
 651
 652		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
 653		if (!skb)
 654			continue;
 655
 656		if (sock_queue_rcv_skb(sk, skb))
 657			kfree_skb(skb);
 658
 659		if (test_bit(HCI_UP, &hdev->flags))
 660			skb = create_monitor_event(hdev, HCI_DEV_UP);
 661		else if (hci_dev_test_flag(hdev, HCI_SETUP))
 662			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
 663		else
 664			skb = NULL;
 665
 666		if (skb) {
 667			if (sock_queue_rcv_skb(sk, skb))
 668				kfree_skb(skb);
 669		}
 670	}
 671
 672	read_unlock(&hci_dev_list_lock);
 673}
 674
 675static void send_monitor_control_replay(struct sock *mon_sk)
 676{
 677	struct sock *sk;
 678
 679	read_lock(&hci_sk_list.lock);
 680
 681	sk_for_each(sk, &hci_sk_list.head) {
 682		struct sk_buff *skb;
 683
 684		skb = create_monitor_ctrl_open(sk);
 685		if (!skb)
 686			continue;
 687
 688		if (sock_queue_rcv_skb(mon_sk, skb))
 689			kfree_skb(skb);
 690	}
 691
 692	read_unlock(&hci_sk_list.lock);
 693}
 694
 695/* Generate internal stack event */
 696static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
 697{
 698	struct hci_event_hdr *hdr;
 699	struct hci_ev_stack_internal *ev;
 700	struct sk_buff *skb;
 701
 702	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
 703	if (!skb)
 704		return;
 705
 706	hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
 707	hdr->evt  = HCI_EV_STACK_INTERNAL;
 708	hdr->plen = sizeof(*ev) + dlen;
 709
 710	ev  = (void *)skb_put(skb, sizeof(*ev) + dlen);
 711	ev->type = type;
 712	memcpy(ev->data, data, dlen);
 713
 714	bt_cb(skb)->incoming = 1;
 715	__net_timestamp(skb);
 716
 717	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 718	hci_send_to_sock(hdev, skb);
 719	kfree_skb(skb);
 720}
 721
 722void hci_sock_dev_event(struct hci_dev *hdev, int event)
 723{
 724	BT_DBG("hdev %s event %d", hdev->name, event);
 725
 726	if (atomic_read(&monitor_promisc)) {
 727		struct sk_buff *skb;
 728
 729		/* Send event to monitor */
 730		skb = create_monitor_event(hdev, event);
 731		if (skb) {
 732			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 733					    HCI_SOCK_TRUSTED, NULL);
 734			kfree_skb(skb);
 735		}
 736	}
 737
 738	if (event <= HCI_DEV_DOWN) {
 739		struct hci_ev_si_device ev;
 740
 741		/* Send event to sockets */
 742		ev.event  = event;
 743		ev.dev_id = hdev->id;
 744		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
 745	}
 746
 747	if (event == HCI_DEV_UNREG) {
 748		struct sock *sk;
 749
 750		/* Detach sockets from device */
 751		read_lock(&hci_sk_list.lock);
 752		sk_for_each(sk, &hci_sk_list.head) {
 753			bh_lock_sock_nested(sk);
 754			if (hci_pi(sk)->hdev == hdev) {
 755				hci_pi(sk)->hdev = NULL;
 756				sk->sk_err = EPIPE;
 757				sk->sk_state = BT_OPEN;
 758				sk->sk_state_change(sk);
 759
 760				hci_dev_put(hdev);
 761			}
 762			bh_unlock_sock(sk);
 763		}
 764		read_unlock(&hci_sk_list.lock);
 765	}
 766}
 767
 768static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
 769{
 770	struct hci_mgmt_chan *c;
 771
 772	list_for_each_entry(c, &mgmt_chan_list, list) {
 773		if (c->channel == channel)
 774			return c;
 775	}
 776
 777	return NULL;
 778}
 779
 780static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
 781{
 782	struct hci_mgmt_chan *c;
 783
 784	mutex_lock(&mgmt_chan_list_lock);
 785	c = __hci_mgmt_chan_find(channel);
 786	mutex_unlock(&mgmt_chan_list_lock);
 787
 788	return c;
 789}
 790
 791int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
 792{
 793	if (c->channel < HCI_CHANNEL_CONTROL)
 794		return -EINVAL;
 795
 796	mutex_lock(&mgmt_chan_list_lock);
 797	if (__hci_mgmt_chan_find(c->channel)) {
 798		mutex_unlock(&mgmt_chan_list_lock);
 799		return -EALREADY;
 800	}
 801
 802	list_add_tail(&c->list, &mgmt_chan_list);
 803
 804	mutex_unlock(&mgmt_chan_list_lock);
 805
 806	return 0;
 807}
 808EXPORT_SYMBOL(hci_mgmt_chan_register);
 809
 810void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
 811{
 812	mutex_lock(&mgmt_chan_list_lock);
 813	list_del(&c->list);
 814	mutex_unlock(&mgmt_chan_list_lock);
 815}
 816EXPORT_SYMBOL(hci_mgmt_chan_unregister);
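/* Editor's note: a sketch (not part of this file) of how a management
 * channel is registered, modelled on what net/bluetooth/mgmt.c does for
 * HCI_CHANNEL_CONTROL. The handler table and its single command are
 * hypothetical; the handler signature is the one hci_mgmt_cmd() below
 * invokes via handler->func().
 */
static int example_cmd(struct sock *sk, struct hci_dev *hdev,
		       void *data, u16 data_len)
{
	return mgmt_cmd_status(sk, MGMT_INDEX_NONE, 0x0001,
			       MGMT_STATUS_NOT_SUPPORTED);
}

static const struct hci_mgmt_handler example_handlers[] = {
	{ NULL }, /* opcode 0x0000 is reserved */
	{ example_cmd, 0, HCI_MGMT_NO_HDEV | HCI_MGMT_UNTRUSTED },
};

static struct hci_mgmt_chan example_chan = {
	.channel       = HCI_CHANNEL_CONTROL,
	.handler_count = ARRAY_SIZE(example_handlers),
	.handlers      = example_handlers,
};

/* Typically called from subsystem init; returns -EALREADY if the
 * channel number is already claimed by another registration.
 */
static int example_register(void)
{
	return hci_mgmt_chan_register(&example_chan);
}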
 817
 818static int hci_sock_release(struct socket *sock)
 819{
 820	struct sock *sk = sock->sk;
 821	struct hci_dev *hdev;
 822	struct sk_buff *skb;
 823
 824	BT_DBG("sock %p sk %p", sock, sk);
 825
 826	if (!sk)
 827		return 0;
 828
 829	hdev = hci_pi(sk)->hdev;
 830
 831	switch (hci_pi(sk)->channel) {
 832	case HCI_CHANNEL_MONITOR:
 833		atomic_dec(&monitor_promisc);
 834		break;
 835	case HCI_CHANNEL_RAW:
 836	case HCI_CHANNEL_USER:
 837	case HCI_CHANNEL_CONTROL:
 838		/* Send event to monitor */
 839		skb = create_monitor_ctrl_close(sk);
 840		if (skb) {
 841			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 842					    HCI_SOCK_TRUSTED, NULL);
 843			kfree_skb(skb);
 844		}
 845
 846		hci_sock_free_cookie(sk);
 847		break;
 848	}
 849
 850	bt_sock_unlink(&hci_sk_list, sk);
 851
 852	if (hdev) {
 853		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
  854			/* When releasing a user channel's exclusive access,
  855			 * call hci_dev_do_close directly instead of calling
 856			 * hci_dev_close to ensure the exclusive access will
 857			 * be released and the controller brought back down.
 858			 *
 859			 * The checking of HCI_AUTO_OFF is not needed in this
 860			 * case since it will have been cleared already when
 861			 * opening the user channel.
 862			 */
 863			hci_dev_do_close(hdev);
 864			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 865			mgmt_index_added(hdev);
 866		}
 867
 868		atomic_dec(&hdev->promisc);
 869		hci_dev_put(hdev);
 870	}
 871
 872	sock_orphan(sk);
 873
 874	skb_queue_purge(&sk->sk_receive_queue);
 875	skb_queue_purge(&sk->sk_write_queue);
 876
 877	sock_put(sk);
 878	return 0;
 879}
 880
 881static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
 882{
 883	bdaddr_t bdaddr;
 884	int err;
 885
 886	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 887		return -EFAULT;
 888
 889	hci_dev_lock(hdev);
 890
 891	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 892
 893	hci_dev_unlock(hdev);
 894
 895	return err;
 896}
 897
 898static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
 899{
 900	bdaddr_t bdaddr;
 901	int err;
 902
 903	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 904		return -EFAULT;
 905
 906	hci_dev_lock(hdev);
 907
 908	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 909
 910	hci_dev_unlock(hdev);
 911
 912	return err;
 913}
 914
 915/* Ioctls that require bound socket */
 916static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
 917				unsigned long arg)
 918{
 919	struct hci_dev *hdev = hci_pi(sk)->hdev;
 920
 921	if (!hdev)
 922		return -EBADFD;
 923
 924	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
 925		return -EBUSY;
 926
 927	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
 928		return -EOPNOTSUPP;
 929
 930	if (hdev->dev_type != HCI_PRIMARY)
 931		return -EOPNOTSUPP;
 932
 933	switch (cmd) {
 934	case HCISETRAW:
 935		if (!capable(CAP_NET_ADMIN))
 936			return -EPERM;
 937		return -EOPNOTSUPP;
 938
 939	case HCIGETCONNINFO:
 940		return hci_get_conn_info(hdev, (void __user *)arg);
 941
 942	case HCIGETAUTHINFO:
 943		return hci_get_auth_info(hdev, (void __user *)arg);
 944
 945	case HCIBLOCKADDR:
 946		if (!capable(CAP_NET_ADMIN))
 947			return -EPERM;
 948		return hci_sock_blacklist_add(hdev, (void __user *)arg);
 949
 950	case HCIUNBLOCKADDR:
 951		if (!capable(CAP_NET_ADMIN))
 952			return -EPERM;
 953		return hci_sock_blacklist_del(hdev, (void __user *)arg);
 954	}
 955
 956	return -ENOIOCTLCMD;
 957}
 958
 959static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
 960			  unsigned long arg)
 961{
 962	void __user *argp = (void __user *)arg;
 963	struct sock *sk = sock->sk;
 964	int err;
 965
 966	BT_DBG("cmd %x arg %lx", cmd, arg);
 967
 968	lock_sock(sk);
 969
 970	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
 971		err = -EBADFD;
 972		goto done;
 973	}
 974
  975	/* When calling an ioctl on an unbound raw socket, ensure
  976	 * that the monitor gets informed. Ensure that the resulting event
  977	 * is only sent once by checking if the cookie exists or not. The
  978	 * socket cookie will only ever be generated once for the lifetime
  979	 * of a given socket.
 980	 */
 981	if (hci_sock_gen_cookie(sk)) {
 982		struct sk_buff *skb;
 983
 984		if (capable(CAP_NET_ADMIN))
 985			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
 986
 987		/* Send event to monitor */
 988		skb = create_monitor_ctrl_open(sk);
 989		if (skb) {
 990			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 991					    HCI_SOCK_TRUSTED, NULL);
 992			kfree_skb(skb);
 993		}
 994	}
 995
 996	release_sock(sk);
 997
 998	switch (cmd) {
 999	case HCIGETDEVLIST:
1000		return hci_get_dev_list(argp);
1001
1002	case HCIGETDEVINFO:
1003		return hci_get_dev_info(argp);
1004
1005	case HCIGETCONNLIST:
1006		return hci_get_conn_list(argp);
1007
1008	case HCIDEVUP:
1009		if (!capable(CAP_NET_ADMIN))
1010			return -EPERM;
1011		return hci_dev_open(arg);
1012
1013	case HCIDEVDOWN:
1014		if (!capable(CAP_NET_ADMIN))
1015			return -EPERM;
1016		return hci_dev_close(arg);
1017
1018	case HCIDEVRESET:
1019		if (!capable(CAP_NET_ADMIN))
1020			return -EPERM;
1021		return hci_dev_reset(arg);
1022
1023	case HCIDEVRESTAT:
1024		if (!capable(CAP_NET_ADMIN))
1025			return -EPERM;
1026		return hci_dev_reset_stat(arg);
1027
1028	case HCISETSCAN:
1029	case HCISETAUTH:
1030	case HCISETENCRYPT:
1031	case HCISETPTYPE:
1032	case HCISETLINKPOL:
1033	case HCISETLINKMODE:
1034	case HCISETACLMTU:
1035	case HCISETSCOMTU:
1036		if (!capable(CAP_NET_ADMIN))
1037			return -EPERM;
1038		return hci_dev_cmd(cmd, argp);
1039
1040	case HCIINQUIRY:
1041		return hci_inquiry(argp);
1042	}
1043
1044	lock_sock(sk);
1045
1046	err = hci_sock_bound_ioctl(sk, cmd, arg);
1047
1048done:
1049	release_sock(sk);
1050	return err;
1051}
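/* Editor's sketch (not part of this file): bringing a controller up via
 * the HCIDEVUP ioctl on an unbound raw socket, essentially what
 * "hciconfig hci0 up" does. Needs CAP_NET_ADMIN. The ioctl number below
 * is assumed to mirror the kernel definition.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

#define BTPROTO_HCI 1
#define HCIDEVUP    _IOW('H', 201, int) /* assumed to match the kernel */

int main(void)
{
	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* The argument is the device id: 0 selects hci0 */
	if (ioctl(fd, HCIDEVUP, 0) < 0)
		perror("ioctl(HCIDEVUP)");

	close(fd);
	return 0;
}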
1052
1053static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
1054			 int addr_len)
1055{
1056	struct sockaddr_hci haddr;
1057	struct sock *sk = sock->sk;
1058	struct hci_dev *hdev = NULL;
1059	struct sk_buff *skb;
1060	int len, err = 0;
1061
1062	BT_DBG("sock %p sk %p", sock, sk);
1063
1064	if (!addr)
1065		return -EINVAL;
1066
1067	memset(&haddr, 0, sizeof(haddr));
1068	len = min_t(unsigned int, sizeof(haddr), addr_len);
1069	memcpy(&haddr, addr, len);
1070
1071	if (haddr.hci_family != AF_BLUETOOTH)
1072		return -EINVAL;
1073
1074	lock_sock(sk);
1075
1076	if (sk->sk_state == BT_BOUND) {
1077		err = -EALREADY;
1078		goto done;
1079	}
1080
1081	switch (haddr.hci_channel) {
1082	case HCI_CHANNEL_RAW:
1083		if (hci_pi(sk)->hdev) {
1084			err = -EALREADY;
1085			goto done;
1086		}
1087
1088		if (haddr.hci_dev != HCI_DEV_NONE) {
1089			hdev = hci_dev_get(haddr.hci_dev);
1090			if (!hdev) {
1091				err = -ENODEV;
1092				goto done;
1093			}
1094
1095			atomic_inc(&hdev->promisc);
1096		}
1097
1098		hci_pi(sk)->channel = haddr.hci_channel;
1099
1100		if (!hci_sock_gen_cookie(sk)) {
1101			/* In the case when a cookie has already been assigned,
1102			 * an ioctl has already been issued against an unbound
1103			 * socket, which triggered an open notification. Send a
1104			 * close notification first to allow the state transition
1105			 * to bound.
1106			 */
1107			skb = create_monitor_ctrl_close(sk);
1108			if (skb) {
1109				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1110						    HCI_SOCK_TRUSTED, NULL);
1111				kfree_skb(skb);
1112			}
1113		}
1114
1115		if (capable(CAP_NET_ADMIN))
1116			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1117
1118		hci_pi(sk)->hdev = hdev;
1119
1120		/* Send event to monitor */
1121		skb = create_monitor_ctrl_open(sk);
1122		if (skb) {
1123			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1124					    HCI_SOCK_TRUSTED, NULL);
1125			kfree_skb(skb);
1126		}
1127		break;
1128
1129	case HCI_CHANNEL_USER:
1130		if (hci_pi(sk)->hdev) {
1131			err = -EALREADY;
1132			goto done;
1133		}
1134
1135		if (haddr.hci_dev == HCI_DEV_NONE) {
1136			err = -EINVAL;
1137			goto done;
1138		}
1139
1140		if (!capable(CAP_NET_ADMIN)) {
1141			err = -EPERM;
1142			goto done;
1143		}
1144
1145		hdev = hci_dev_get(haddr.hci_dev);
1146		if (!hdev) {
1147			err = -ENODEV;
1148			goto done;
1149		}
1150
1151		if (test_bit(HCI_INIT, &hdev->flags) ||
1152		    hci_dev_test_flag(hdev, HCI_SETUP) ||
1153		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1154		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1155		     test_bit(HCI_UP, &hdev->flags))) {
1156			err = -EBUSY;
1157			hci_dev_put(hdev);
1158			goto done;
1159		}
1160
1161		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
1162			err = -EUSERS;
1163			hci_dev_put(hdev);
1164			goto done;
1165		}
1166
1167		mgmt_index_removed(hdev);
1168
1169		err = hci_dev_open(hdev->id);
1170		if (err) {
1171			if (err == -EALREADY) {
1172				/* In case the transport is already up and
1173				 * running, clear the error here.
1174				 *
1175				 * This can happen when opening a user
1176				 * channel while the HCI_AUTO_OFF grace
1177				 * period is still active.
1178				 */
1179				err = 0;
1180			} else {
1181				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
1182				mgmt_index_added(hdev);
1183				hci_dev_put(hdev);
1184				goto done;
1185			}
1186		}
1187
1188		hci_pi(sk)->channel = haddr.hci_channel;
1189
1190		if (!hci_sock_gen_cookie(sk)) {
1191			/* In the case when a cookie has already been assigned,
1192			 * this socket will transition from a raw socket into
1193			 * a user channel socket. For a clean transition, send
1194			 * the close notification first.
1195			 */
1196			skb = create_monitor_ctrl_close(sk);
1197			if (skb) {
1198				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1199						    HCI_SOCK_TRUSTED, NULL);
1200				kfree_skb(skb);
1201			}
1202		}
1203
1204		/* The user channel is restricted to CAP_NET_ADMIN
1205		 * capabilities and with that implicitly trusted.
1206		 */
1207		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1208
1209		hci_pi(sk)->hdev = hdev;
1210
1211		/* Send event to monitor */
1212		skb = create_monitor_ctrl_open(sk);
1213		if (skb) {
1214			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1215					    HCI_SOCK_TRUSTED, NULL);
1216			kfree_skb(skb);
1217		}
1218
1219		atomic_inc(&hdev->promisc);
1220		break;
1221
1222	case HCI_CHANNEL_MONITOR:
1223		if (haddr.hci_dev != HCI_DEV_NONE) {
1224			err = -EINVAL;
1225			goto done;
1226		}
1227
1228		if (!capable(CAP_NET_RAW)) {
1229			err = -EPERM;
1230			goto done;
1231		}
1232
1233		hci_pi(sk)->channel = haddr.hci_channel;
1234
1235		/* The monitor interface is restricted to CAP_NET_RAW
1236		 * capabilities and with that implicitly trusted.
1237		 */
1238		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1239
1240		send_monitor_note(sk, "Linux version %s (%s)",
1241				  init_utsname()->release,
1242				  init_utsname()->machine);
1243		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
1244				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
1245		send_monitor_replay(sk);
1246		send_monitor_control_replay(sk);
1247
1248		atomic_inc(&monitor_promisc);
1249		break;
1250
1251	case HCI_CHANNEL_LOGGING:
1252		if (haddr.hci_dev != HCI_DEV_NONE) {
1253			err = -EINVAL;
1254			goto done;
1255		}
1256
1257		if (!capable(CAP_NET_ADMIN)) {
1258			err = -EPERM;
1259			goto done;
1260		}
1261
1262		hci_pi(sk)->channel = haddr.hci_channel;
1263		break;
1264
1265	default:
1266		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
1267			err = -EINVAL;
1268			goto done;
1269		}
1270
1271		if (haddr.hci_dev != HCI_DEV_NONE) {
1272			err = -EINVAL;
1273			goto done;
1274		}
1275
1276		/* Users with CAP_NET_ADMIN capabilities are allowed
1277		 * access to all management commands and events. For
1278		 * untrusted users the interface is restricted and
1279		 * also only untrusted events are sent.
1280		 */
1281		if (capable(CAP_NET_ADMIN))
1282			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1283
1284		hci_pi(sk)->channel = haddr.hci_channel;
1285
1286		/* At the moment the index and unconfigured index events
1287		 * are enabled unconditionally. Setting them on each
1288		 * socket when binding keeps this functionality. They
1289		 * however might be cleared later and then sending of these
1290		 * events will be disabled, but that is then intentional.
1291		 *
1292		 * This also enables generic events that are safe to be
1293		 * received by untrusted users. Example for such events
1294		 * are changes to settings, class of device, name etc.
1295		 */
1296		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
1297			if (!hci_sock_gen_cookie(sk)) {
1298				/* In the case when a cookie has already been
1299				 * assigned, this socket will transition from
1300				 * a raw socket into a control socket. To
1301				 * allow for a clean transition, send the
1302				 * close notification first.
1303				 */
1304				skb = create_monitor_ctrl_close(sk);
1305				if (skb) {
1306					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1307							    HCI_SOCK_TRUSTED, NULL);
1308					kfree_skb(skb);
1309				}
1310			}
1311
1312			/* Send event to monitor */
1313			skb = create_monitor_ctrl_open(sk);
1314			if (skb) {
1315				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1316						    HCI_SOCK_TRUSTED, NULL);
1317				kfree_skb(skb);
1318			}
1319
1320			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
1321			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
1322			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
1323			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
1324			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1325			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1326		}
1327		break;
1328	}
1329
1330	sk->sk_state = BT_BOUND;
1331
1332done:
1333	release_sock(sk);
1334	return err;
1335}
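/* Editor's sketch (not part of this file): taking exclusive "user
 * channel" access to a controller through the bind handler above. This
 * needs CAP_NET_ADMIN, fails with EBUSY while the stack has the device
 * up, and with EUSERS if another user channel owner already exists.
 * Constant values and the sockaddr layout are assumed to mirror the
 * kernel UAPI headers.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#define BTPROTO_HCI      1
#define HCI_CHANNEL_USER 1

struct sockaddr_hci {
	sa_family_t    hci_family;
	unsigned short hci_dev;
	unsigned short hci_channel;
};

int main(void)
{
	struct sockaddr_hci addr;
	int fd;

	fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.hci_family  = AF_BLUETOOTH;
	addr.hci_dev     = 0; /* the user channel must name a device: hci0 */
	addr.hci_channel = HCI_CHANNEL_USER;

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind(HCI_CHANNEL_USER)");
		close(fd);
		return 1;
	}

	/* From here this process, not the kernel stack, drives hci0:
	 * write() complete HCI packets (type byte first) and read()
	 * events and data until close() releases the device.
	 */
	close(fd);
	return 0;
}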
1336
1337static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1338			    int *addr_len, int peer)
1339{
1340	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1341	struct sock *sk = sock->sk;
1342	struct hci_dev *hdev;
1343	int err = 0;
1344
1345	BT_DBG("sock %p sk %p", sock, sk);
1346
1347	if (peer)
1348		return -EOPNOTSUPP;
1349
1350	lock_sock(sk);
1351
1352	hdev = hci_pi(sk)->hdev;
1353	if (!hdev) {
1354		err = -EBADFD;
1355		goto done;
1356	}
1357
1358	*addr_len = sizeof(*haddr);
1359	haddr->hci_family = AF_BLUETOOTH;
1360	haddr->hci_dev    = hdev->id;
1361	haddr->hci_channel = hci_pi(sk)->channel;
1362
1363done:
1364	release_sock(sk);
1365	return err;
1366}
1367
1368static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
1369			  struct sk_buff *skb)
1370{
1371	__u32 mask = hci_pi(sk)->cmsg_mask;
1372
1373	if (mask & HCI_CMSG_DIR) {
1374		int incoming = bt_cb(skb)->incoming;
1375		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
1376			 &incoming);
1377	}
1378
1379	if (mask & HCI_CMSG_TSTAMP) {
1380#ifdef CONFIG_COMPAT
1381		struct compat_timeval ctv;
1382#endif
1383		struct timeval tv;
1384		void *data;
1385		int len;
1386
1387		skb_get_timestamp(skb, &tv);
1388
1389		data = &tv;
1390		len = sizeof(tv);
1391#ifdef CONFIG_COMPAT
1392		if (!COMPAT_USE_64BIT_TIME &&
1393		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
1394			ctv.tv_sec = tv.tv_sec;
1395			ctv.tv_usec = tv.tv_usec;
1396			data = &ctv;
1397			len = sizeof(ctv);
1398		}
1399#endif
1400
1401		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
1402	}
1403}
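/* Editor's sketch (not part of this file): consuming the ancillary data
 * hci_sock_cmsg() attaches. Direction reporting must be switched on
 * first via HCI_DATA_DIR; the cmsg payload is an int, 1 for incoming
 * and 0 for outgoing frames. Constant values are assumed to mirror the
 * kernel UAPI headers.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

#define SOL_HCI      0
#define HCI_DATA_DIR 1
#define HCI_CMSG_DIR 0x01

static ssize_t recv_with_dir(int fd, unsigned char *buf, size_t len,
			     int *dir)
{
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov        = &iov,
		.msg_iovlen     = 1,
		.msg_control    = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	ssize_t n = recvmsg(fd, &msg, 0);

	*dir = -1; /* unknown unless a HCI_CMSG_DIR cmsg arrives */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
	     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_HCI &&
		    cmsg->cmsg_type == HCI_CMSG_DIR)
			memcpy(dir, CMSG_DATA(cmsg), sizeof(*dir));
	}

	return n;
}

/* Enable reporting once per socket:
 *	int one = 1;
 *	setsockopt(fd, SOL_HCI, HCI_DATA_DIR, &one, sizeof(one));
 */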
1404
1405static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1406			    size_t len, int flags)
1407{
1408	int noblock = flags & MSG_DONTWAIT;
1409	struct sock *sk = sock->sk;
1410	struct sk_buff *skb;
1411	int copied, err;
1412	unsigned int skblen;
1413
1414	BT_DBG("sock %p, sk %p", sock, sk);
1415
1416	if (flags & MSG_OOB)
1417		return -EOPNOTSUPP;
1418
1419	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1420		return -EOPNOTSUPP;
1421
1422	if (sk->sk_state == BT_CLOSED)
1423		return 0;
1424
1425	skb = skb_recv_datagram(sk, flags, noblock, &err);
1426	if (!skb)
1427		return err;
1428
1429	skblen = skb->len;
1430	copied = skb->len;
1431	if (len < copied) {
1432		msg->msg_flags |= MSG_TRUNC;
1433		copied = len;
1434	}
1435
1436	skb_reset_transport_header(skb);
1437	err = skb_copy_datagram_msg(skb, 0, msg, copied);
1438
1439	switch (hci_pi(sk)->channel) {
1440	case HCI_CHANNEL_RAW:
1441		hci_sock_cmsg(sk, msg, skb);
1442		break;
1443	case HCI_CHANNEL_USER:
1444	case HCI_CHANNEL_MONITOR:
1445		sock_recv_timestamp(msg, sk, skb);
1446		break;
1447	default:
1448		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1449			sock_recv_timestamp(msg, sk, skb);
1450		break;
1451	}
1452
1453	skb_free_datagram(sk, skb);
1454
1455	if (flags & MSG_TRUNC)
1456		copied = skblen;
1457
1458	return err ? : copied;
1459}
1460
1461static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1462			struct msghdr *msg, size_t msglen)
1463{
1464	void *buf;
1465	u8 *cp;
1466	struct mgmt_hdr *hdr;
1467	u16 opcode, index, len;
1468	struct hci_dev *hdev = NULL;
1469	const struct hci_mgmt_handler *handler;
1470	bool var_len, no_hdev;
1471	int err;
1472
1473	BT_DBG("got %zu bytes", msglen);
1474
1475	if (msglen < sizeof(*hdr))
1476		return -EINVAL;
1477
1478	buf = kmalloc(msglen, GFP_KERNEL);
1479	if (!buf)
1480		return -ENOMEM;
1481
1482	if (memcpy_from_msg(buf, msg, msglen)) {
1483		err = -EFAULT;
1484		goto done;
1485	}
1486
1487	hdr = buf;
1488	opcode = __le16_to_cpu(hdr->opcode);
1489	index = __le16_to_cpu(hdr->index);
1490	len = __le16_to_cpu(hdr->len);
1491
1492	if (len != msglen - sizeof(*hdr)) {
1493		err = -EINVAL;
1494		goto done;
1495	}
1496
1497	if (chan->channel == HCI_CHANNEL_CONTROL) {
1498		struct sk_buff *skb;
1499
1500		/* Send event to monitor */
1501		skb = create_monitor_ctrl_command(sk, index, opcode, len,
1502						  buf + sizeof(*hdr));
1503		if (skb) {
1504			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1505					    HCI_SOCK_TRUSTED, NULL);
1506			kfree_skb(skb);
1507		}
1508	}
1509
1510	if (opcode >= chan->handler_count ||
1511	    chan->handlers[opcode].func == NULL) {
1512		BT_DBG("Unknown op %u", opcode);
1513		err = mgmt_cmd_status(sk, index, opcode,
1514				      MGMT_STATUS_UNKNOWN_COMMAND);
1515		goto done;
1516	}
1517
1518	handler = &chan->handlers[opcode];
1519
1520	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1521	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1522		err = mgmt_cmd_status(sk, index, opcode,
1523				      MGMT_STATUS_PERMISSION_DENIED);
1524		goto done;
1525	}
1526
1527	if (index != MGMT_INDEX_NONE) {
1528		hdev = hci_dev_get(index);
1529		if (!hdev) {
1530			err = mgmt_cmd_status(sk, index, opcode,
1531					      MGMT_STATUS_INVALID_INDEX);
1532			goto done;
1533		}
1534
1535		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1536		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1537		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1538			err = mgmt_cmd_status(sk, index, opcode,
1539					      MGMT_STATUS_INVALID_INDEX);
1540			goto done;
1541		}
1542
1543		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1544		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1545			err = mgmt_cmd_status(sk, index, opcode,
1546					      MGMT_STATUS_INVALID_INDEX);
1547			goto done;
1548		}
1549	}
1550
1551	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1552	if (no_hdev != !hdev) {
1553		err = mgmt_cmd_status(sk, index, opcode,
1554				      MGMT_STATUS_INVALID_INDEX);
1555		goto done;
1556	}
1557
1558	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1559	if ((var_len && len < handler->data_len) ||
1560	    (!var_len && len != handler->data_len)) {
1561		err = mgmt_cmd_status(sk, index, opcode,
1562				      MGMT_STATUS_INVALID_PARAMS);
1563		goto done;
1564	}
1565
1566	if (hdev && chan->hdev_init)
1567		chan->hdev_init(sk, hdev);
1568
1569	cp = buf + sizeof(*hdr);
1570
1571	err = handler->func(sk, hdev, cp, len);
1572	if (err < 0)
1573		goto done;
1574
1575	err = msglen;
1576
1577done:
1578	if (hdev)
1579		hci_dev_put(hdev);
1580
1581	kfree(buf);
1582	return err;
1583}
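/* Editor's sketch (not part of this file): the wire format that
 * hci_mgmt_cmd() parses. A control-channel command is a 6-byte header
 * followed by the parameters, everything little endian (the direct
 * assignments below assume a little-endian host). Opcode 0x0001 is
 * MGMT_OP_READ_VERSION, which takes no parameters and no device index;
 * the constant values are assumed to mirror the kernel UAPI headers.
 */
#include <stdint.h>
#include <unistd.h>

#define MGMT_INDEX_NONE      0xFFFF
#define MGMT_OP_READ_VERSION 0x0001

struct mgmt_hdr {
	uint16_t opcode;
	uint16_t index;
	uint16_t len;	/* parameter length, excludes this header */
} __attribute__((packed));

/* fd must be bound to HCI_CHANNEL_CONTROL; the kernel answers with a
 * command-complete management event carrying version and revision.
 */
static ssize_t send_read_version(int fd)
{
	struct mgmt_hdr hdr = {
		.opcode = MGMT_OP_READ_VERSION,
		.index  = MGMT_INDEX_NONE,
		.len    = 0,
	};

	return write(fd, &hdr, sizeof(hdr));
}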
1584
1585static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
1586{
1587	struct hci_mon_hdr *hdr;
1588	struct sk_buff *skb;
1589	struct hci_dev *hdev;
1590	u16 index;
1591	int err;
1592
1593	/* The logging frame consists at minimum of the standard header,
1594	 * the priority byte, the ident length byte and at least one string
1595	 * terminator NUL byte. Anything shorter is an invalid packet.
1596	 */
1597	if (len < sizeof(*hdr) + 3)
1598		return -EINVAL;
1599
1600	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1601	if (!skb)
1602		return err;
1603
1604	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1605		err = -EFAULT;
1606		goto drop;
1607	}
1608
1609	hdr = (void *)skb->data;
1610
1611	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
1612		err = -EINVAL;
1613		goto drop;
1614	}
1615
1616	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
1617		__u8 priority = skb->data[sizeof(*hdr)];
1618		__u8 ident_len = skb->data[sizeof(*hdr) + 1];
1619
1620		/* Only the priorities 0-7 are valid; any other
1621		 * value results in an invalid packet.
1622		 *
1623		 * The priority byte is followed by an ident length byte and
1624		 * the NUL terminated ident string. Check that the ident
1625		 * length is not overflowing the packet and also that the
1626		 * ident string itself is NUL terminated. In case the ident
1627		 * length is zero, the length byte itself doubles as the
1628		 * NUL terminator.
1629		 *
1630		 * The message follows the ident string (if present) and
1631		 * must be NUL terminated. Otherwise it is not a valid packet.
1632		 */
1633		if (priority > 7 || skb->data[len - 1] != 0x00 ||
1634		    ident_len > len - sizeof(*hdr) - 3 ||
1635		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
1636			err = -EINVAL;
1637			goto drop;
1638		}
1639	} else {
1640		err = -EINVAL;
1641		goto drop;
1642	}
1643
1644	index = __le16_to_cpu(hdr->index);
1645
1646	if (index != MGMT_INDEX_NONE) {
1647		hdev = hci_dev_get(index);
1648		if (!hdev) {
1649			err = -ENODEV;
1650			goto drop;
1651		}
1652	} else {
1653		hdev = NULL;
1654	}
1655
1656	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
1657
1658	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
1659	err = len;
1660
1661	if (hdev)
1662		hci_dev_put(hdev);
1663
1664drop:
1665	kfree_skb(skb);
1666	return err;
1667}
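/* Editor's sketch (not part of this file): composing a logging frame
 * that passes the validation above. Layout: 6-byte monitor header with
 * opcode 0x0000, then the priority byte, the ident length byte (which
 * counts the NUL), the NUL-terminated ident, and the NUL-terminated
 * message. The socket must be bound to HCI_CHANNEL_LOGGING, which needs
 * CAP_NET_ADMIN; constant values are assumed to mirror the kernel UAPI.
 */
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#define MGMT_INDEX_NONE 0xFFFF

static ssize_t send_log(int fd, uint8_t priority, const char *ident,
			const char *msg)
{
	unsigned char buf[512];
	size_t ident_len = strlen(ident) + 1;	/* includes the NUL */
	size_t msg_len = strlen(msg) + 1;	/* includes the NUL */
	size_t payload = 2 + ident_len + msg_len;

	if (priority > 7 || ident_len > 0xff || 6 + payload > sizeof(buf))
		return -1;

	/* struct hci_mon_hdr, little endian on the wire */
	buf[0] = 0x00;				/* opcode 0x0000 */
	buf[1] = 0x00;
	buf[2] = MGMT_INDEX_NONE & 0xff;	/* index: no device */
	buf[3] = MGMT_INDEX_NONE >> 8;
	buf[4] = payload & 0xff;		/* len, header excluded */
	buf[5] = payload >> 8;

	buf[6] = priority;
	buf[7] = (unsigned char)ident_len;
	memcpy(buf + 8, ident, ident_len);
	memcpy(buf + 8 + ident_len, msg, msg_len);

	return write(fd, buf, 6 + payload);
}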
1668
1669static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1670			    size_t len)
1671{
1672	struct sock *sk = sock->sk;
1673	struct hci_mgmt_chan *chan;
1674	struct hci_dev *hdev;
1675	struct sk_buff *skb;
1676	int err;
1677
1678	BT_DBG("sock %p sk %p", sock, sk);
1679
1680	if (msg->msg_flags & MSG_OOB)
1681		return -EOPNOTSUPP;
1682
1683	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
1684		return -EINVAL;
1685
1686	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
1687		return -EINVAL;
1688
1689	lock_sock(sk);
1690
1691	switch (hci_pi(sk)->channel) {
1692	case HCI_CHANNEL_RAW:
1693	case HCI_CHANNEL_USER:
1694		break;
1695	case HCI_CHANNEL_MONITOR:
1696		err = -EOPNOTSUPP;
1697		goto done;
1698	case HCI_CHANNEL_LOGGING:
1699		err = hci_logging_frame(sk, msg, len);
1700		goto done;
1701	default:
1702		mutex_lock(&mgmt_chan_list_lock);
1703		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1704		if (chan)
1705			err = hci_mgmt_cmd(chan, sk, msg, len);
1706		else
1707			err = -EINVAL;
1708
1709		mutex_unlock(&mgmt_chan_list_lock);
1710		goto done;
1711	}
1712
1713	hdev = hci_pi(sk)->hdev;
1714	if (!hdev) {
1715		err = -EBADFD;
1716		goto done;
1717	}
1718
1719	if (!test_bit(HCI_UP, &hdev->flags)) {
1720		err = -ENETDOWN;
1721		goto done;
1722	}
1723
1724	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1725	if (!skb)
1726		goto done;
1727
1728	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1729		err = -EFAULT;
1730		goto drop;
1731	}
1732
1733	hci_skb_pkt_type(skb) = skb->data[0];
1734	skb_pull(skb, 1);
1735
1736	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1737		/* No permission check is needed for user channel
1738		 * since that gets enforced when binding the socket.
1739		 *
1740		 * However check that the packet type is valid.
1741		 */
1742		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1743		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1744		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1745			err = -EINVAL;
1746			goto drop;
1747		}
1748
1749		skb_queue_tail(&hdev->raw_q, skb);
1750		queue_work(hdev->workqueue, &hdev->tx_work);
1751	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
1752		u16 opcode = get_unaligned_le16(skb->data);
1753		u16 ogf = hci_opcode_ogf(opcode);
1754		u16 ocf = hci_opcode_ocf(opcode);
1755
1756		if (((ogf > HCI_SFLT_MAX_OGF) ||
1757		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1758				   &hci_sec_filter.ocf_mask[ogf])) &&
1759		    !capable(CAP_NET_RAW)) {
1760			err = -EPERM;
1761			goto drop;
1762		}
1763
1764		/* Since the opcode has already been extracted here, store
1765		 * a copy of the value for later use by the drivers.
1766		 */
1767		hci_skb_opcode(skb) = opcode;
1768
1769		if (ogf == 0x3f) {
1770			skb_queue_tail(&hdev->raw_q, skb);
1771			queue_work(hdev->workqueue, &hdev->tx_work);
1772		} else {
1773			/* Stand-alone HCI commands must be flagged as
1774			 * single-command requests.
1775			 */
1776			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
1777
1778			skb_queue_tail(&hdev->cmd_q, skb);
1779			queue_work(hdev->workqueue, &hdev->cmd_work);
1780		}
1781	} else {
1782		if (!capable(CAP_NET_RAW)) {
1783			err = -EPERM;
1784			goto drop;
1785		}
1786
1787		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1788		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1789			err = -EINVAL;
1790			goto drop;
1791		}
1792
1793		skb_queue_tail(&hdev->raw_q, skb);
1794		queue_work(hdev->workqueue, &hdev->tx_work);
1795	}
1796
1797	err = len;
1798
1799done:
1800	release_sock(sk);
1801	return err;
1802
1803drop:
1804	kfree_skb(skb);
1805	goto done;
1806}
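/* Editor's sketch (not part of this file): sending a command through
 * the raw-channel path above. The first byte is the packet type; for a
 * command it is followed by the little-endian opcode and a parameter
 * length byte. Opcode 0x1001 (OGF 0x04, OCF 0x0001, Read Local Version
 * Information) is permitted by hci_sec_filter even without CAP_NET_RAW.
 * The socket must be bound to a device on HCI_CHANNEL_RAW, and an event
 * filter (see the filter sketch after is_filtered_packet) must be
 * installed before the Command Complete reply is delivered.
 */
#include <unistd.h>

#define HCI_COMMAND_PKT 0x01

static ssize_t send_read_local_version(int fd)
{
	/* type | opcode LE (0x1001) | plen */
	unsigned char cmd[4] = { HCI_COMMAND_PKT, 0x01, 0x10, 0x00 };

	return write(fd, cmd, sizeof(cmd));
}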
1807
1808static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1809			       char __user *optval, unsigned int len)
1810{
1811	struct hci_ufilter uf = { .opcode = 0 };
1812	struct sock *sk = sock->sk;
1813	int err = 0, opt = 0;
1814
1815	BT_DBG("sk %p, opt %d", sk, optname);
1816
1817	if (level != SOL_HCI)
1818		return -ENOPROTOOPT;
1819
1820	lock_sock(sk);
1821
1822	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1823		err = -EBADFD;
1824		goto done;
1825	}
1826
1827	switch (optname) {
1828	case HCI_DATA_DIR:
1829		if (get_user(opt, (int __user *)optval)) {
1830			err = -EFAULT;
1831			break;
1832		}
1833
1834		if (opt)
1835			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1836		else
1837			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1838		break;
1839
1840	case HCI_TIME_STAMP:
1841		if (get_user(opt, (int __user *)optval)) {
1842			err = -EFAULT;
1843			break;
1844		}
1845
1846		if (opt)
1847			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1848		else
1849			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1850		break;
1851
1852	case HCI_FILTER:
1853		{
1854			struct hci_filter *f = &hci_pi(sk)->filter;
1855
1856			uf.type_mask = f->type_mask;
1857			uf.opcode    = f->opcode;
1858			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1859			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1860		}
1861
1862		len = min_t(unsigned int, len, sizeof(uf));
1863		if (copy_from_user(&uf, optval, len)) {
1864			err = -EFAULT;
1865			break;
1866		}
1867
1868		if (!capable(CAP_NET_RAW)) {
1869			uf.type_mask &= hci_sec_filter.type_mask;
1870			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1871			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1872		}
1873
1874		{
1875			struct hci_filter *f = &hci_pi(sk)->filter;
1876
1877			f->type_mask = uf.type_mask;
1878			f->opcode    = uf.opcode;
1879			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
1880			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
1881		}
1882		break;
1883
1884	default:
1885		err = -ENOPROTOOPT;
1886		break;
1887	}
1888
1889done:
1890	release_sock(sk);
1891	return err;
1892}
1893
1894static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1895			       char __user *optval, int __user *optlen)
1896{
1897	struct hci_ufilter uf;
1898	struct sock *sk = sock->sk;
1899	int len, opt, err = 0;
1900
1901	BT_DBG("sk %p, opt %d", sk, optname);
1902
1903	if (level != SOL_HCI)
1904		return -ENOPROTOOPT;
1905
1906	if (get_user(len, optlen))
1907		return -EFAULT;
1908
1909	lock_sock(sk);
1910
1911	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1912		err = -EBADFD;
1913		goto done;
1914	}
1915
1916	switch (optname) {
1917	case HCI_DATA_DIR:
1918		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1919			opt = 1;
1920		else
1921			opt = 0;
1922
1923		if (put_user(opt, optval))
1924			err = -EFAULT;
1925		break;
1926
1927	case HCI_TIME_STAMP:
1928		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1929			opt = 1;
1930		else
1931			opt = 0;
1932
1933		if (put_user(opt, optval))
1934			err = -EFAULT;
1935		break;
1936
1937	case HCI_FILTER:
1938		{
1939			struct hci_filter *f = &hci_pi(sk)->filter;
1940
1941			memset(&uf, 0, sizeof(uf));
1942			uf.type_mask = f->type_mask;
1943			uf.opcode    = f->opcode;
1944			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1945			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1946		}
1947
1948		len = min_t(unsigned int, len, sizeof(uf));
1949		if (copy_to_user(optval, &uf, len))
1950			err = -EFAULT;
1951		break;
1952
1953	default:
1954		err = -ENOPROTOOPT;
1955		break;
1956	}
1957
1958done:
1959	release_sock(sk);
1960	return err;
1961}
1962
1963static const struct proto_ops hci_sock_ops = {
1964	.family		= PF_BLUETOOTH,
1965	.owner		= THIS_MODULE,
1966	.release	= hci_sock_release,
1967	.bind		= hci_sock_bind,
1968	.getname	= hci_sock_getname,
1969	.sendmsg	= hci_sock_sendmsg,
1970	.recvmsg	= hci_sock_recvmsg,
1971	.ioctl		= hci_sock_ioctl,
1972	.poll		= datagram_poll,
1973	.listen		= sock_no_listen,
1974	.shutdown	= sock_no_shutdown,
1975	.setsockopt	= hci_sock_setsockopt,
1976	.getsockopt	= hci_sock_getsockopt,
1977	.connect	= sock_no_connect,
1978	.socketpair	= sock_no_socketpair,
1979	.accept		= sock_no_accept,
1980	.mmap		= sock_no_mmap
1981};
1982
1983static struct proto hci_sk_proto = {
1984	.name		= "HCI",
1985	.owner		= THIS_MODULE,
1986	.obj_size	= sizeof(struct hci_pinfo)
1987};
1988
1989static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1990			   int kern)
1991{
1992	struct sock *sk;
1993
1994	BT_DBG("sock %p", sock);
1995
1996	if (sock->type != SOCK_RAW)
1997		return -ESOCKTNOSUPPORT;
1998
1999	sock->ops = &hci_sock_ops;
2000
2001	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
2002	if (!sk)
2003		return -ENOMEM;
2004
2005	sock_init_data(sock, sk);
2006
2007	sock_reset_flag(sk, SOCK_ZAPPED);
2008
2009	sk->sk_protocol = protocol;
2010
2011	sock->state = SS_UNCONNECTED;
2012	sk->sk_state = BT_OPEN;
2013
2014	bt_sock_link(&hci_sk_list, sk);
2015	return 0;
2016}
2017
2018static const struct net_proto_family hci_sock_family_ops = {
2019	.family	= PF_BLUETOOTH,
2020	.owner	= THIS_MODULE,
2021	.create	= hci_sock_create,
2022};
2023
2024int __init hci_sock_init(void)
2025{
2026	int err;
2027
2028	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
2029
2030	err = proto_register(&hci_sk_proto, 0);
2031	if (err < 0)
2032		return err;
2033
2034	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
2035	if (err < 0) {
2036		BT_ERR("HCI socket registration failed");
2037		goto error;
2038	}
2039
2040	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
2041	if (err < 0) {
2042		BT_ERR("Failed to create HCI proc file");
2043		bt_sock_unregister(BTPROTO_HCI);
2044		goto error;
2045	}
2046
2047	BT_INFO("HCI socket layer initialized");
2048
2049	return 0;
2050
2051error:
2052	proto_unregister(&hci_sk_proto);
2053	return err;
2054}
2055
2056void hci_sock_cleanup(void)
2057{
2058	bt_procfs_cleanup(&init_net, "hci");
2059	bt_sock_unregister(BTPROTO_HCI);
2060	proto_unregister(&hci_sk_proto);
2061}