v5.9
// SPDX-License-Identifier: GPL-2.0
/*
 * The USB Monitor, inspired by Dave Harding's USBMon.
 *
 * This is a binary format reader.
 *
 * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it)
 * Copyright (C) 2006,2007 Pete Zaitcev (zaitcev@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/export.h>
#include <linux/usb.h>
#include <linux/poll.h>
#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/time64.h>

#include <linux/uaccess.h>

#include "usb_mon.h"

/*
 * Defined by USB 2.0 clause 9.3, table 9.2.
 */
#define SETUP_LEN  8

/* ioctl macros */
#define MON_IOC_MAGIC 0x92

#define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1)
/* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */
#define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
#define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4)
#define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5)
#define MON_IOCX_GET   _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8)
/* #9 was MON_IOCT_SETAPI */
#define MON_IOCX_GETX   _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get)

#ifdef CONFIG_COMPAT
#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
#define MON_IOCX_GETX32   _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get32)
#endif
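
/*
 * Added note: userland reaches these ioctls through the usbmon character
 * devices. A minimal sketch of a hypothetical reader (not part of this
 * file):
 *
 *	int fd = open("/dev/usbmon0", O_RDONLY);
 *	struct mon_bin_get getb = { .hdr = &hdr, .data = buf, .alloc = sizeof(buf) };
 *	ioctl(fd, MON_IOCX_GETX, &getb);	// one event, 64-byte header
 *
 * A fuller example appears at the end of this listing.
 */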

/*
 * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc).
 * But it's all right. Just use a simple way to make sure the chunk is never
 * smaller than a page.
 *
 * N.B. An application does not know our chunk size.
 *
 * Whoops, get_zeroed_page() returns a single page. I guess we're stuck with
 * page-sized chunks for the time being.
 */
#define CHUNK_SIZE   PAGE_SIZE
#define CHUNK_ALIGN(x)   (((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1))
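
/*
 * Worked example (added for clarity, assuming 4 KB pages): CHUNK_SIZE is
 * 4096, so CHUNK_ALIGN(5000) == 8192 while CHUNK_ALIGN(4096) == 4096.
 */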

/*
 * The magic limit was calculated so that it allows the monitoring
 * application to pick data once in two ticks. This way, another application,
 * which presumably drives the bus, gets to hog CPU, yet we collect our data.
 * If HZ is 100, a 480 Mbit/s bus drives 614 KB every jiffy. USB has an
 * enormous overhead built into the bus protocol, so we need about 1000 KB.
 *
 * This is still too much for most cases, where we just snoop a few
 * descriptor fetches for enumeration. So, the default is a "reasonable"
 * amount for systems with HZ=250 and incomplete bus saturation.
 *
 * XXX What about multi-megabyte URBs which take minutes to transfer?
 */
#define BUFF_MAX  CHUNK_ALIGN(1200*1024)
#define BUFF_DFL   CHUNK_ALIGN(300*1024)
#define BUFF_MIN     CHUNK_ALIGN(8*1024)

/*
 * The per-event API header (2 per URB).
 *
 * This structure is seen in userland as defined by the documentation.
 */
struct mon_bin_hdr {
	u64 id;			/* URB ID - from submission to callback */
	unsigned char type;	/* Same as in text API; extensible. */
	unsigned char xfer_type;	/* ISO, Intr, Control, Bulk */
	unsigned char epnum;	/* Endpoint number and transfer direction */
	unsigned char devnum;	/* Device address */
	unsigned short busnum;	/* Bus number */
	char flag_setup;
	char flag_data;
	s64 ts_sec;		/* ktime_get_real_ts64 */
	s32 ts_usec;		/* ktime_get_real_ts64 */
	int status;
	unsigned int len_urb;	/* Length of data (submitted or actual) */
	unsigned int len_cap;	/* Delivered length */
	union {
		unsigned char setup[SETUP_LEN];	/* Only for Control S-type */
		struct iso_rec {
			int error_count;
			int numdesc;
		} iso;
	} s;
	int interval;
	int start_frame;
	unsigned int xfer_flags;
	unsigned int ndesc;	/* Actual number of ISO descriptors */
};
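
/*
 * Layout note (an added observation): the fields down to and including the
 * union add up to 48 bytes (PKT_SZ_API0); the four trailing ints extend
 * that to 64 bytes (PKT_SZ_API1). This is why MON_IOCX_GET copies 48
 * header bytes to userland while MON_IOCX_GETX copies 64.
 */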

/*
 * ISO vector, packed into the head of data stream.
 * This has to take 16 bytes to make sure that the end of buffer
 * wrap is not happening in the middle of a descriptor.
 */
struct mon_bin_isodesc {
	int          iso_status;
	unsigned int iso_off;
	unsigned int iso_len;
	u32 _pad;
};

/* per-file statistics */
struct mon_bin_stats {
	u32 queued;
	u32 dropped;
};

struct mon_bin_get {
	struct mon_bin_hdr __user *hdr;	/* Can be 48 bytes or 64. */
	void __user *data;
	size_t alloc;		/* Length of data (can be zero) */
};

struct mon_bin_mfetch {
	u32 __user *offvec;	/* Vector of events fetched */
	u32 nfetch;		/* Number of events to fetch (out: fetched) */
	u32 nflush;		/* Number of events to flush */
};

#ifdef CONFIG_COMPAT
struct mon_bin_get32 {
	u32 hdr32;
	u32 data32;
	u32 alloc32;
};

struct mon_bin_mfetch32 {
	u32 offvec32;
	u32 nfetch32;
	u32 nflush32;
};
#endif

/* Having these two values the same prevents wrapping of the mon_bin_hdr */
#define PKT_ALIGN   64
#define PKT_SIZE    64
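
/*
 * Why this works (an added note): every allocation is rounded up to
 * PKT_ALIGN, so each header starts on a 64-byte boundary. Because the
 * buffer size is always a multiple of CHUNK_SIZE (itself a multiple of
 * 64), a PKT_SIZE header can never straddle a chunk boundary or the
 * end-of-buffer wrap point.
 */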

#define PKT_SZ_API0 48	/* API 0 (2.6.20) size */
#define PKT_SZ_API1 64	/* API 1 size: extra fields */

#define ISODESC_MAX   128	/* Same number as usbfs allows, 2048 bytes. */

/* max number of USB buses supported */
#define MON_BIN_MAX_MINOR 128

/*
 * The buffer: map of used pages.
 */
struct mon_pgmap {
	struct page *pg;
	unsigned char *ptr;	/* XXX just use page_to_virt everywhere? */
};

/*
 * This gets associated with an open file struct.
 */
struct mon_reader_bin {
	/* The buffer: one per open. */
	spinlock_t b_lock;		/* Protect b_cnt, b_in */
	unsigned int b_size;		/* Current size of the buffer - bytes */
	unsigned int b_cnt;		/* Bytes used */
	unsigned int b_in, b_out;	/* Offsets into buffer - bytes */
	unsigned int b_read;		/* Amount of read data in curr. pkt. */
	struct mon_pgmap *b_vec;	/* The map array */
	wait_queue_head_t b_wait;	/* Wait for data here */

	struct mutex fetch_lock;	/* Protect b_read, b_out */
	int mmap_active;

	/* A list of these is needed for "bus 0". Some time later. */
	struct mon_reader r;

	/* Stats */
	unsigned int cnt_lost;
};

static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
    unsigned int offset)
{
	return (struct mon_bin_hdr *)
	    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
}
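
/*
 * Worked example (added for clarity, assuming 4 KB chunks): offset 8256
 * maps to chunk 8256 / 4096 == 2, at byte 8256 % 4096 == 64 within it.
 */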

#define MON_RING_EMPTY(rp)	((rp)->b_cnt == 0)

static unsigned char xfer_to_pipe[4] = {
	PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};

static struct class *mon_bin_class;
static dev_t mon_bin_dev0;
static struct cdev mon_bin_cdev;

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size);
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp);
static int mon_alloc_buff(struct mon_pgmap *map, int npages);
static void mon_free_buff(struct mon_pgmap *map, int npages);

/*
 * This is a "chunked memcpy". It does not manipulate any counters.
 */
static unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
    unsigned int off, const unsigned char *from, unsigned int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		memcpy(buf, from, step_len);
		if ((off += step_len) >= this->b_size) off = 0;
		from += step_len;
		length -= step_len;
	}
	return off;
}

/*
 * This is a little worse than the above because it's "chunked copy_to_user".
 * The return value is an error code, not an offset.
 */
static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off,
    char __user *to, int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		if (copy_to_user(to, buf, step_len))
			return -EINVAL;
		if ((off += step_len) >= this->b_size) off = 0;
		to += step_len;
		length -= step_len;
	}
	return 0;
}

/*
 * Allocate an (aligned) area in the buffer.
 * This is called under b_lock.
 * Returns ~0 on failure.
 */
static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	offset = rp->b_in;
	rp->b_cnt += size;
	if ((rp->b_in += size) >= rp->b_size)
		rp->b_in -= rp->b_size;
	return offset;
}

/*
 * This is the same thing as mon_buff_area_alloc, only it does not allow
 * buffers to wrap. This is needed by applications which pass references
 * into mmap-ed buffers up their stacks (libpcap can do that).
 *
 * Currently, we always have the header stuck with the data, although
 * it is not strictly speaking necessary.
 *
 * When a buffer would wrap, we place a filler packet to mark the space.
 */
static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;
	unsigned int fill_size;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	if (rp->b_in + size > rp->b_size) {
		/*
		 * This would wrap. Find if we still have space after
		 * skipping to the end of the buffer. If we do, place
		 * a filler packet and allocate a new packet.
		 */
		fill_size = rp->b_size - rp->b_in;
		if (rp->b_cnt + size + fill_size > rp->b_size)
			return ~0;
		mon_buff_area_fill(rp, rp->b_in, fill_size);

		offset = 0;
		rp->b_in = size;
		rp->b_cnt += size + fill_size;
	} else if (rp->b_in + size == rp->b_size) {
		offset = rp->b_in;
		rp->b_in = 0;
		rp->b_cnt += size;
	} else {
		offset = rp->b_in;
		rp->b_in += size;
		rp->b_cnt += size;
	}
	return offset;
}
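
/*
 * Example (added): with b_size == 8192, b_in == 8000 and a 512-byte
 * request, the 192 bytes left before the end become a '@' filler packet
 * and the real packet lands at offset 0 with b_in set to 512.
 */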

/*
 * Return a few (kilo-)bytes to the head of the buffer.
 * This is used if a data fetch fails.
 */
static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
{

	/* size &= ~(PKT_ALIGN-1);  -- we're called with aligned size */
	rp->b_cnt -= size;
	if (rp->b_in < size)
		rp->b_in += rp->b_size;
	rp->b_in -= size;
}

/*
 * This has to be called under both b_lock and fetch_lock, because
 * it accesses both b_cnt and b_out.
 */
static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size)
{

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	rp->b_cnt -= size;
	if ((rp->b_out += size) >= rp->b_size)
		rp->b_out -= rp->b_size;
}

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size)
{
	struct mon_bin_hdr *ep;

	ep = MON_OFF2HDR(rp, offset);
	memset(ep, 0, PKT_SIZE);
	ep->type = '@';
	ep->len_cap = size - PKT_SIZE;
}

static inline char mon_bin_get_setup(unsigned char *setupb,
    const struct urb *urb, char ev_type)
{

	if (urb->setup_packet == NULL)
		return 'Z';
	memcpy(setupb, urb->setup_packet, SETUP_LEN);
	return 0;
}

static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, unsigned int length,
    char *flag)
{
	int i;
	struct scatterlist *sg;
	unsigned int this_len;

	*flag = 0;
	if (urb->num_sgs == 0) {
		if (urb->transfer_buffer == NULL) {
			*flag = 'Z';
			return length;
		}
		mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
		length = 0;

	} else {
		/* If IOMMU coalescing occurred, we cannot trust sg_page */
		if (urb->transfer_flags & URB_DMA_SG_COMBINED) {
			*flag = 'D';
			return length;
		}

		/* Copy up to the first non-addressable segment */
		for_each_sg(urb->sg, sg, urb->num_sgs, i) {
			if (length == 0 || PageHighMem(sg_page(sg)))
				break;
			this_len = min_t(unsigned int, sg->length, length);
			offset = mon_copy_to_buff(rp, offset, sg_virt(sg),
					this_len);
			length -= this_len;
		}
		if (i == 0)
			*flag = 'D';
	}

	return length;
}

/*
 * This is the look-ahead pass in case of 'C Zi', when actual_length cannot
 * be used to determine the length of the whole contiguous buffer.
 */
static unsigned int mon_bin_collate_isodesc(const struct mon_reader_bin *rp,
    struct urb *urb, unsigned int ndesc)
{
	struct usb_iso_packet_descriptor *fp;
	unsigned int length;

	length = 0;
	fp = urb->iso_frame_desc;
	while (ndesc-- != 0) {
		if (fp->actual_length != 0) {
			if (fp->offset + fp->actual_length > length)
				length = fp->offset + fp->actual_length;
		}
		fp++;
	}
	return length;
}

static void mon_bin_get_isodesc(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, char ev_type, unsigned int ndesc)
{
	struct mon_bin_isodesc *dp;
	struct usb_iso_packet_descriptor *fp;

	fp = urb->iso_frame_desc;
	while (ndesc-- != 0) {
		dp = (struct mon_bin_isodesc *)
		    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
		dp->iso_status = fp->status;
		dp->iso_off = fp->offset;
		dp->iso_len = (ev_type == 'S') ? fp->length : fp->actual_length;
		dp->_pad = 0;
		if ((offset += sizeof(struct mon_bin_isodesc)) >= rp->b_size)
			offset = 0;
		fp++;
	}
}

static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
    char ev_type, int status)
{
	const struct usb_endpoint_descriptor *epd = &urb->ep->desc;
	struct timespec64 ts;
	unsigned long flags;
	unsigned int urb_length;
	unsigned int offset;
	unsigned int length;
	unsigned int delta;
	unsigned int ndesc, lendesc;
	unsigned char dir;
	struct mon_bin_hdr *ep;
	char data_tag = 0;

	ktime_get_real_ts64(&ts);

	spin_lock_irqsave(&rp->b_lock, flags);

	/*
	 * Find the maximum allowable length, then allocate space.
	 */
	urb_length = (ev_type == 'S') ?
	    urb->transfer_buffer_length : urb->actual_length;
	length = urb_length;

	if (usb_endpoint_xfer_isoc(epd)) {
		if (urb->number_of_packets < 0) {
			ndesc = 0;
		} else if (urb->number_of_packets >= ISODESC_MAX) {
			ndesc = ISODESC_MAX;
		} else {
			ndesc = urb->number_of_packets;
		}
		if (ev_type == 'C' && usb_urb_dir_in(urb))
			length = mon_bin_collate_isodesc(rp, urb, ndesc);
	} else {
		ndesc = 0;
	}
	lendesc = ndesc*sizeof(struct mon_bin_isodesc);

	/* not an issue unless there's a subtle bug in an HCD somewhere */
	if (length >= urb->transfer_buffer_length)
		length = urb->transfer_buffer_length;

	if (length >= rp->b_size/5)
		length = rp->b_size/5;

	if (usb_urb_dir_in(urb)) {
		if (ev_type == 'S') {
			length = 0;
			data_tag = '<';
		}
		/* Cannot rely on endpoint number in case of control ep.0 */
		dir = USB_DIR_IN;
	} else {
		if (ev_type == 'C') {
			length = 0;
			data_tag = '>';
		}
		dir = 0;
	}

	if (rp->mmap_active) {
		offset = mon_buff_area_alloc_contiguous(rp,
						 length + PKT_SIZE + lendesc);
	} else {
		offset = mon_buff_area_alloc(rp, length + PKT_SIZE + lendesc);
	}
	if (offset == ~0) {
		rp->cnt_lost++;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);
	if ((offset += PKT_SIZE) >= rp->b_size) offset = 0;

	/*
	 * Fill the allocated area.
	 */
	memset(ep, 0, PKT_SIZE);
	ep->type = ev_type;
	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(epd)];
	ep->epnum = dir | usb_endpoint_num(epd);
	ep->devnum = urb->dev->devnum;
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->ts_sec = ts.tv_sec;
	ep->ts_usec = ts.tv_nsec / NSEC_PER_USEC;
	ep->status = status;
	ep->len_urb = urb_length;
	ep->len_cap = length + lendesc;
	ep->xfer_flags = urb->transfer_flags;

	if (usb_endpoint_xfer_int(epd)) {
		ep->interval = urb->interval;
	} else if (usb_endpoint_xfer_isoc(epd)) {
		ep->interval = urb->interval;
		ep->start_frame = urb->start_frame;
		ep->s.iso.error_count = urb->error_count;
		ep->s.iso.numdesc = urb->number_of_packets;
	}

	if (usb_endpoint_xfer_control(epd) && ev_type == 'S') {
		ep->flag_setup = mon_bin_get_setup(ep->s.setup, urb, ev_type);
	} else {
		ep->flag_setup = '-';
	}

	if (ndesc != 0) {
		ep->ndesc = ndesc;
		mon_bin_get_isodesc(rp, offset, urb, ev_type, ndesc);
		if ((offset += lendesc) >= rp->b_size)
			offset -= rp->b_size;
	}

	if (length != 0) {
		length = mon_bin_get_data(rp, offset, urb, length,
				&ep->flag_data);
		if (length > 0) {
			delta = (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
			ep->len_cap -= length;
			delta -= (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
			mon_buff_area_shrink(rp, delta);
		}
	} else {
		ep->flag_data = data_tag;
	}

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

static void mon_bin_submit(void *data, struct urb *urb)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'S', -EINPROGRESS);
}

static void mon_bin_complete(void *data, struct urb *urb, int status)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'C', status);
}

static void mon_bin_error(void *data, struct urb *urb, int error)
{
	struct mon_reader_bin *rp = data;
	struct timespec64 ts;
	unsigned long flags;
	unsigned int offset;
	struct mon_bin_hdr *ep;

	ktime_get_real_ts64(&ts);

	spin_lock_irqsave(&rp->b_lock, flags);

	offset = mon_buff_area_alloc(rp, PKT_SIZE);
	if (offset == ~0) {
		/* Not incrementing cnt_lost. Just because. */
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);

	memset(ep, 0, PKT_SIZE);
	ep->type = 'E';
	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(&urb->ep->desc)];
	ep->epnum = usb_urb_dir_in(urb) ? USB_DIR_IN : 0;
	ep->epnum |= usb_endpoint_num(&urb->ep->desc);
	ep->devnum = urb->dev->devnum;
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->ts_sec = ts.tv_sec;
	ep->ts_usec = ts.tv_nsec / NSEC_PER_USEC;
	ep->status = error;

	ep->flag_setup = '-';
	ep->flag_data = 'E';

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

static int mon_bin_open(struct inode *inode, struct file *file)
{
	struct mon_bus *mbus;
	struct mon_reader_bin *rp;
	size_t size;
	int rc;

	mutex_lock(&mon_lock);
	mbus = mon_bus_lookup(iminor(inode));
	if (mbus == NULL) {
		mutex_unlock(&mon_lock);
		return -ENODEV;
	}
	if (mbus != &mon_bus0 && mbus->u_bus == NULL) {
		printk(KERN_ERR TAG ": consistency error on open\n");
		mutex_unlock(&mon_lock);
		return -ENODEV;
	}

	rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL);
	if (rp == NULL) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	spin_lock_init(&rp->b_lock);
	init_waitqueue_head(&rp->b_wait);
	mutex_init(&rp->fetch_lock);
	rp->b_size = BUFF_DFL;

	size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);
	if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) {
		rc = -ENOMEM;
		goto err_allocvec;
	}

	if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0)
		goto err_allocbuff;

	rp->r.m_bus = mbus;
	rp->r.r_data = rp;
	rp->r.rnf_submit = mon_bin_submit;
	rp->r.rnf_error = mon_bin_error;
	rp->r.rnf_complete = mon_bin_complete;

	mon_reader_add(mbus, &rp->r);

	file->private_data = rp;
	mutex_unlock(&mon_lock);
	return 0;

err_allocbuff:
	kfree(rp->b_vec);
err_allocvec:
	kfree(rp);
err_alloc:
	mutex_unlock(&mon_lock);
	return rc;
}

/*
 * Extract an event from buffer and copy it to user space.
 * Wait if there is no event ready.
 * Returns zero or error.
 */
static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
    struct mon_bin_hdr __user *hdr, unsigned int hdrbytes,
    void __user *data, unsigned int nbytes)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	size_t step_len;
	unsigned int offset;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (copy_to_user(hdr, ep, hdrbytes)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	step_len = min(ep->len_cap, nbytes);
	if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size) offset = 0;

	if (copy_from_buf(rp, offset, data, step_len)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;

	mutex_unlock(&rp->fetch_lock);
	return 0;
}

static int mon_bin_release(struct inode *inode, struct file *file)
{
	struct mon_reader_bin *rp = file->private_data;
	struct mon_bus *mbus = rp->r.m_bus;

	mutex_lock(&mon_lock);

	if (mbus->nreaders <= 0) {
		printk(KERN_ERR TAG ": consistency error on close\n");
		mutex_unlock(&mon_lock);
		return 0;
	}
	mon_reader_del(mbus, &rp->r);

	mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
	kfree(rp->b_vec);
	kfree(rp);

	mutex_unlock(&mon_lock);
	return 0;
}

static ssize_t mon_bin_read(struct file *file, char __user *buf,
    size_t nbytes, loff_t *ppos)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned int hdrbytes = PKT_SZ_API0;
	unsigned long flags;
	struct mon_bin_hdr *ep;
	unsigned int offset;
	size_t step_len;
	char *ptr;
	ssize_t done = 0;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (rp->b_read < hdrbytes) {
		step_len = min(nbytes, (size_t)(hdrbytes - rp->b_read));
		ptr = ((char *)ep) + rp->b_read;
		if (step_len && copy_to_user(buf, ptr, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	if (rp->b_read >= hdrbytes) {
		step_len = ep->len_cap;
		step_len -= rp->b_read - hdrbytes;
		if (step_len > nbytes)
			step_len = nbytes;
		offset = rp->b_out + PKT_SIZE;
		offset += rp->b_read - hdrbytes;
		if (offset >= rp->b_size)
			offset -= rp->b_size;
		if (copy_from_buf(rp, offset, buf, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	/*
	 * Check if whole packet was read, and if so, jump to the next one.
	 */
	if (rp->b_read >= hdrbytes + ep->len_cap) {
		spin_lock_irqsave(&rp->b_lock, flags);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
		spin_unlock_irqrestore(&rp->b_lock, flags);
		rp->b_read = 0;
	}

	mutex_unlock(&rp->fetch_lock);
	return done;
}

/*
 * Remove at most nevents from chunked buffer.
 * Returns the number of removed events.
 */
static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	int i;

	mutex_lock(&rp->fetch_lock);
	spin_lock_irqsave(&rp->b_lock, flags);
	for (i = 0; i < nevents; ++i) {
		if (MON_RING_EMPTY(rp))
			break;

		ep = MON_OFF2HDR(rp, rp->b_out);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;
	mutex_unlock(&rp->fetch_lock);
	return i;
}

/*
 * Fetch at most max event offsets into the buffer and put them into vec.
 * The events are usually freed later with mon_bin_flush.
 * Return the effective number of events fetched.
 */
static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
    u32 __user *vec, unsigned int max)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		if (nevents >= max)
			break;

		ep = MON_OFF2HDR(rp, cur_out);
		if (put_user(cur_out, &vec[nevents])) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}

/*
 * Count events. This is almost the same as the above mon_bin_fetch,
 * only we do not store offsets into user vector, and we have no limit.
 */
static int mon_bin_queued(struct mon_reader_bin *rp)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	mutex_lock(&rp->fetch_lock);

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		ep = MON_OFF2HDR(rp, cur_out);

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}

/*
 * The ioctl handler for the binary API.
 */
static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct mon_reader_bin *rp = file->private_data;
	// struct mon_bus* mbus = rp->r.m_bus;
	int ret = 0;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	switch (cmd) {

	case MON_IOCQ_URB_LEN:
		/*
		 * N.B. This only returns the size of data, without the header.
		 */
		spin_lock_irqsave(&rp->b_lock, flags);
		if (!MON_RING_EMPTY(rp)) {
			ep = MON_OFF2HDR(rp, rp->b_out);
			ret = ep->len_cap;
		}
		spin_unlock_irqrestore(&rp->b_lock, flags);
		break;

	case MON_IOCQ_RING_SIZE:
		mutex_lock(&rp->fetch_lock);
		ret = rp->b_size;
		mutex_unlock(&rp->fetch_lock);
		break;

	case MON_IOCT_RING_SIZE:
		/*
		 * Changing the buffer size will flush its contents; the new
		 * buffer is allocated before releasing the old one to be sure
		 * the device will stay functional also in case of memory
		 * pressure.
		 */
		{
		int size;
		struct mon_pgmap *vec;

		if (arg < BUFF_MIN || arg > BUFF_MAX)
			return -EINVAL;

		size = CHUNK_ALIGN(arg);
		vec = kcalloc(size / CHUNK_SIZE, sizeof(struct mon_pgmap),
			      GFP_KERNEL);
		if (vec == NULL) {
			ret = -ENOMEM;
			break;
		}

		ret = mon_alloc_buff(vec, size/CHUNK_SIZE);
		if (ret < 0) {
			kfree(vec);
			break;
		}

		mutex_lock(&rp->fetch_lock);
		spin_lock_irqsave(&rp->b_lock, flags);
		if (rp->mmap_active) {
			mon_free_buff(vec, size/CHUNK_SIZE);
			kfree(vec);
			ret = -EBUSY;
		} else {
			mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
			kfree(rp->b_vec);
			rp->b_vec  = vec;
			rp->b_size = size;
			rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
			rp->cnt_lost = 0;
		}
		spin_unlock_irqrestore(&rp->b_lock, flags);
		mutex_unlock(&rp->fetch_lock);
		}
		break;

	case MON_IOCH_MFLUSH:
		ret = mon_bin_flush(rp, arg);
		break;

	case MON_IOCX_GET:
	case MON_IOCX_GETX:
		{
		struct mon_bin_get getb;

		if (copy_from_user(&getb, (void __user *)arg,
					    sizeof(struct mon_bin_get)))
			return -EFAULT;

		if (getb.alloc > 0x10000000)	/* Want to cast to u32 */
			return -EINVAL;
		ret = mon_bin_get_event(file, rp, getb.hdr,
		    (cmd == MON_IOCX_GET)? PKT_SZ_API0: PKT_SZ_API1,
		    getb.data, (unsigned int)getb.alloc);
		}
		break;

	case MON_IOCX_MFETCH:
		{
		struct mon_bin_mfetch mfetch;
		struct mon_bin_mfetch __user *uptr;

		uptr = (struct mon_bin_mfetch __user *)arg;

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush) {
			ret = mon_bin_flush(rp, mfetch.nflush);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch))
			return -EFAULT;
		ret = 0;
		}
		break;

	case MON_IOCG_STATS: {
		struct mon_bin_stats __user *sp;
		unsigned int nevents;
		unsigned int ndropped;

		spin_lock_irqsave(&rp->b_lock, flags);
		ndropped = rp->cnt_lost;
		rp->cnt_lost = 0;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		nevents = mon_bin_queued(rp);

		sp = (struct mon_bin_stats __user *)arg;
		if (put_user(ndropped, &sp->dropped))
			return -EFAULT;
		if (put_user(nevents, &sp->queued))
			return -EFAULT;

		}
		break;

	default:
		return -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static long mon_bin_compat_ioctl(struct file *file,
    unsigned int cmd, unsigned long arg)
{
	struct mon_reader_bin *rp = file->private_data;
	int ret;

	switch (cmd) {

	case MON_IOCX_GET32:
	case MON_IOCX_GETX32:
		{
		struct mon_bin_get32 getb;

		if (copy_from_user(&getb, (void __user *)arg,
					    sizeof(struct mon_bin_get32)))
			return -EFAULT;

		ret = mon_bin_get_event(file, rp, compat_ptr(getb.hdr32),
		    (cmd == MON_IOCX_GET32)? PKT_SZ_API0: PKT_SZ_API1,
		    compat_ptr(getb.data32), getb.alloc32);
		if (ret < 0)
			return ret;
		}
		return 0;

	case MON_IOCX_MFETCH32:
		{
		struct mon_bin_mfetch32 mfetch;
		struct mon_bin_mfetch32 __user *uptr;

		uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg);

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush32) {
			ret = mon_bin_flush(rp, mfetch.nflush32);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush32))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32),
		    mfetch.nfetch32);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch32))
			return -EFAULT;
		}
		return 0;

	case MON_IOCG_STATS:
		return mon_bin_ioctl(file, cmd, (unsigned long) compat_ptr(arg));

	case MON_IOCQ_URB_LEN:
	case MON_IOCQ_RING_SIZE:
	case MON_IOCT_RING_SIZE:
	case MON_IOCH_MFLUSH:
		return mon_bin_ioctl(file, cmd, arg);

	default:
		;
	}
	return -ENOTTY;
}
#endif /* CONFIG_COMPAT */

static __poll_t
mon_bin_poll(struct file *file, struct poll_table_struct *wait)
{
	struct mon_reader_bin *rp = file->private_data;
	__poll_t mask = 0;
	unsigned long flags;

	if (file->f_mode & FMODE_READ)
		poll_wait(file, &rp->b_wait, wait);

	spin_lock_irqsave(&rp->b_lock, flags);
	if (!MON_RING_EMPTY(rp))
		mask |= EPOLLIN | EPOLLRDNORM;    /* readable */
	spin_unlock_irqrestore(&rp->b_lock, flags);
	return mask;
}

/*
 * open and close: just keep track of how many times the device is
 * mapped, to use the proper memory allocation function.
 */
static void mon_bin_vma_open(struct vm_area_struct *vma)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	unsigned long flags;

	spin_lock_irqsave(&rp->b_lock, flags);
	rp->mmap_active++;
	spin_unlock_irqrestore(&rp->b_lock, flags);
}

static void mon_bin_vma_close(struct vm_area_struct *vma)
{
	unsigned long flags;

	struct mon_reader_bin *rp = vma->vm_private_data;
	spin_lock_irqsave(&rp->b_lock, flags);
	rp->mmap_active--;
	spin_unlock_irqrestore(&rp->b_lock, flags);
}

/*
 * Map ring pages to user space.
 */
static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf)
{
	struct mon_reader_bin *rp = vmf->vma->vm_private_data;
	unsigned long offset, chunk_idx;
	struct page *pageptr;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= rp->b_size)
		return VM_FAULT_SIGBUS;
	chunk_idx = offset / CHUNK_SIZE;
	pageptr = rp->b_vec[chunk_idx].pg;
	get_page(pageptr);
	vmf->page = pageptr;
	return 0;
}

static const struct vm_operations_struct mon_bin_vm_ops = {
	.open =     mon_bin_vma_open,
	.close =    mon_bin_vma_close,
	.fault =    mon_bin_vma_fault,
};

static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* don't do anything here: "fault" will set up page table entries */
	vma->vm_ops = &mon_bin_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = filp->private_data;
	mon_bin_vma_open(vma);
	return 0;
}

static const struct file_operations mon_fops_binary = {
	.owner =	THIS_MODULE,
	.open =		mon_bin_open,
	.llseek =	no_llseek,
	.read =		mon_bin_read,
	/* .write =	mon_text_write, */
	.poll =		mon_bin_poll,
	.unlocked_ioctl = mon_bin_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	mon_bin_compat_ioctl,
#endif
	.release =	mon_bin_release,
	.mmap =		mon_bin_mmap,
};

static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp)
{
	DECLARE_WAITQUEUE(waita, current);
	unsigned long flags;

	add_wait_queue(&rp->b_wait, &waita);
	set_current_state(TASK_INTERRUPTIBLE);

	spin_lock_irqsave(&rp->b_lock, flags);
	while (MON_RING_EMPTY(rp)) {
		spin_unlock_irqrestore(&rp->b_lock, flags);

		if (file->f_flags & O_NONBLOCK) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&rp->b_wait, &waita);
			return -EWOULDBLOCK; /* Same as EAGAIN in Linux */
		}
		schedule();
		if (signal_pending(current)) {
			remove_wait_queue(&rp->b_wait, &waita);
			return -EINTR;
		}
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&rp->b_lock, flags);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&rp->b_wait, &waita);
	return 0;
}
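
/*
 * Note (added): the function above is essentially an open-coded
 * wait_event_interruptible() that additionally honors O_NONBLOCK between
 * checks of the ring state.
 */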

static int mon_alloc_buff(struct mon_pgmap *map, int npages)
{
	int n;
	unsigned long vaddr;

	for (n = 0; n < npages; n++) {
		vaddr = get_zeroed_page(GFP_KERNEL);
		if (vaddr == 0) {
			while (n-- != 0)
				free_page((unsigned long) map[n].ptr);
			return -ENOMEM;
		}
		map[n].ptr = (unsigned char *) vaddr;
		map[n].pg = virt_to_page((void *) vaddr);
	}
	return 0;
}

static void mon_free_buff(struct mon_pgmap *map, int npages)
{
	int n;

	for (n = 0; n < npages; n++)
		free_page((unsigned long) map[n].ptr);
}

int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus)
{
	struct device *dev;
	unsigned minor = ubus? ubus->busnum: 0;

	if (minor >= MON_BIN_MAX_MINOR)
		return 0;

	dev = device_create(mon_bin_class, ubus ? ubus->controller : NULL,
			    MKDEV(MAJOR(mon_bin_dev0), minor), NULL,
			    "usbmon%d", minor);
	if (IS_ERR(dev))
		return 0;

	mbus->classdev = dev;
	return 1;
}

void mon_bin_del(struct mon_bus *mbus)
{
	device_destroy(mon_bin_class, mbus->classdev->devt);
}

int __init mon_bin_init(void)
{
	int rc;

	mon_bin_class = class_create(THIS_MODULE, "usbmon");
	if (IS_ERR(mon_bin_class)) {
		rc = PTR_ERR(mon_bin_class);
		goto err_class;
	}

	rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon");
	if (rc < 0)
		goto err_dev;

	cdev_init(&mon_bin_cdev, &mon_fops_binary);
	mon_bin_cdev.owner = THIS_MODULE;

	rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR);
	if (rc < 0)
		goto err_add;

	return 0;

err_add:
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
err_dev:
	class_destroy(mon_bin_class);
err_class:
	return rc;
}

void mon_bin_exit(void)
{
	cdev_del(&mon_bin_cdev);
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
	class_destroy(mon_bin_class);
}
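
For reference, a minimal userspace reader exercising the binary API above. This is a hypothetical sketch, not part of the kernel source: it assumes the usbmon module is loaded, a /dev/usbmon0 node exists (minor 0 captures all buses), the process has permission to open it, and the structure definitions are copied from the file above; error handling is minimal.

/* usbmon_dump.c - hypothetical example reader for the usbmon binary API */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ioctl.h>

#define MON_IOC_MAGIC 0x92

struct mon_bin_hdr {		/* 64-byte API 1 header, mirroring the kernel */
	uint64_t id;
	unsigned char type, xfer_type, epnum, devnum;
	uint16_t busnum;
	char flag_setup, flag_data;
	int64_t ts_sec;
	int32_t ts_usec;
	int32_t status;
	uint32_t len_urb, len_cap;
	unsigned char setup[8];	/* the union, viewed as raw setup bytes */
	int32_t interval, start_frame;
	uint32_t xfer_flags, ndesc;
};

struct mon_bin_get {
	struct mon_bin_hdr *hdr;
	void *data;
	size_t alloc;
};

#define MON_IOCX_GETX _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get)

int main(void)
{
	struct mon_bin_hdr hdr;
	unsigned char data[4096];
	struct mon_bin_get getb = { &hdr, data, sizeof(data) };

	/* usbmon0 is the "bus 0" device that sees traffic on all buses */
	int fd = open("/dev/usbmon0", O_RDONLY);
	if (fd < 0 || ioctl(fd, MON_IOCX_GETX, &getb) < 0) {
		perror("usbmon");
		return 1;
	}
	/* One event was copied: 64-byte header plus up to alloc data bytes */
	printf("event '%c' bus %u dev %u captured %u bytes\n",
	       hdr.type, hdr.busnum, hdr.devnum, hdr.len_cap);
	close(fd);
	return 0;
}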
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * The USB Monitor, inspired by Dave Harding's USBMon.
   4 *
   5 * This is a binary format reader.
   6 *
   7 * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it)
   8 * Copyright (C) 2006,2007 Pete Zaitcev (zaitcev@redhat.com)
   9 */
  10
  11#include <linux/kernel.h>
  12#include <linux/sched/signal.h>
  13#include <linux/types.h>
  14#include <linux/fs.h>
  15#include <linux/cdev.h>
  16#include <linux/export.h>
  17#include <linux/usb.h>
  18#include <linux/poll.h>
  19#include <linux/compat.h>
  20#include <linux/mm.h>
  21#include <linux/scatterlist.h>
  22#include <linux/slab.h>
  23#include <linux/time64.h>
  24
  25#include <linux/uaccess.h>
  26
  27#include "usb_mon.h"
  28
  29/*
  30 * Defined by USB 2.0 clause 9.3, table 9.2.
  31 */
  32#define SETUP_LEN  8
  33
  34/* ioctl macros */
  35#define MON_IOC_MAGIC 0x92
  36
  37#define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1)
  38/* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */
  39#define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
  40#define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4)
  41#define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5)
  42#define MON_IOCX_GET   _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
  43#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
  44#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8)
  45/* #9 was MON_IOCT_SETAPI */
  46#define MON_IOCX_GETX   _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get)
  47
  48#ifdef CONFIG_COMPAT
  49#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
  50#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
  51#define MON_IOCX_GETX32   _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get32)
  52#endif
  53
  54/*
  55 * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc).
  56 * But it's all right. Just use a simple way to make sure the chunk is never
  57 * smaller than a page.
  58 *
  59 * N.B. An application does not know our chunk size.
  60 *
  61 * Woops, get_zeroed_page() returns a single page. I guess we're stuck with
  62 * page-sized chunks for the time being.
  63 */
  64#define CHUNK_SIZE   PAGE_SIZE
  65#define CHUNK_ALIGN(x)   (((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1))
  66
  67/*
  68 * The magic limit was calculated so that it allows the monitoring
  69 * application to pick data once in two ticks. This way, another application,
  70 * which presumably drives the bus, gets to hog CPU, yet we collect our data.
  71 * If HZ is 100, a 480 mbit/s bus drives 614 KB every jiffy. USB has an
  72 * enormous overhead built into the bus protocol, so we need about 1000 KB.
  73 *
  74 * This is still too much for most cases, where we just snoop a few
  75 * descriptor fetches for enumeration. So, the default is a "reasonable"
  76 * amount for systems with HZ=250 and incomplete bus saturation.
  77 *
  78 * XXX What about multi-megabyte URBs which take minutes to transfer?
  79 */
  80#define BUFF_MAX  CHUNK_ALIGN(1200*1024)
  81#define BUFF_DFL   CHUNK_ALIGN(300*1024)
  82#define BUFF_MIN     CHUNK_ALIGN(8*1024)
  83
  84/*
  85 * The per-event API header (2 per URB).
  86 *
  87 * This structure is seen in userland as defined by the documentation.
  88 */
  89struct mon_bin_hdr {
  90	u64 id;			/* URB ID - from submission to callback */
  91	unsigned char type;	/* Same as in text API; extensible. */
  92	unsigned char xfer_type;	/* ISO, Intr, Control, Bulk */
  93	unsigned char epnum;	/* Endpoint number and transfer direction */
  94	unsigned char devnum;	/* Device address */
  95	unsigned short busnum;	/* Bus number */
  96	char flag_setup;
  97	char flag_data;
  98	s64 ts_sec;		/* getnstimeofday64 */
  99	s32 ts_usec;		/* getnstimeofday64 */
 100	int status;
 101	unsigned int len_urb;	/* Length of data (submitted or actual) */
 102	unsigned int len_cap;	/* Delivered length */
 103	union {
 104		unsigned char setup[SETUP_LEN];	/* Only for Control S-type */
 105		struct iso_rec {
 106			int error_count;
 107			int numdesc;
 108		} iso;
 109	} s;
 110	int interval;
 111	int start_frame;
 112	unsigned int xfer_flags;
 113	unsigned int ndesc;	/* Actual number of ISO descriptors */
 114};
 115
 116/*
 117 * ISO vector, packed into the head of data stream.
 118 * This has to take 16 bytes to make sure that the end of buffer
 119 * wrap is not happening in the middle of a descriptor.
 120 */
 121struct mon_bin_isodesc {
 122	int          iso_status;
 123	unsigned int iso_off;
 124	unsigned int iso_len;
 125	u32 _pad;
 126};
 127
 128/* per file statistic */
 129struct mon_bin_stats {
 130	u32 queued;
 131	u32 dropped;
 132};
 133
 134struct mon_bin_get {
 135	struct mon_bin_hdr __user *hdr;	/* Can be 48 bytes or 64. */
 136	void __user *data;
 137	size_t alloc;		/* Length of data (can be zero) */
 138};
 139
 140struct mon_bin_mfetch {
 141	u32 __user *offvec;	/* Vector of events fetched */
 142	u32 nfetch;		/* Number of events to fetch (out: fetched) */
 143	u32 nflush;		/* Number of events to flush */
 144};
 145
 146#ifdef CONFIG_COMPAT
 147struct mon_bin_get32 {
 148	u32 hdr32;
 149	u32 data32;
 150	u32 alloc32;
 151};
 152
 153struct mon_bin_mfetch32 {
 154        u32 offvec32;
 155        u32 nfetch32;
 156        u32 nflush32;
 157};
 158#endif
 159
 160/* Having these two values same prevents wrapping of the mon_bin_hdr */
 161#define PKT_ALIGN   64
 162#define PKT_SIZE    64
 163
 164#define PKT_SZ_API0 48	/* API 0 (2.6.20) size */
 165#define PKT_SZ_API1 64	/* API 1 size: extra fields */
 166
 167#define ISODESC_MAX   128	/* Same number as usbfs allows, 2048 bytes. */
 168
 169/* max number of USB bus supported */
 170#define MON_BIN_MAX_MINOR 128
 171
 172/*
 173 * The buffer: map of used pages.
 174 */
 175struct mon_pgmap {
 176	struct page *pg;
 177	unsigned char *ptr;	/* XXX just use page_to_virt everywhere? */
 178};
 179
 180/*
 181 * This gets associated with an open file struct.
 182 */
 183struct mon_reader_bin {
 184	/* The buffer: one per open. */
 185	spinlock_t b_lock;		/* Protect b_cnt, b_in */
 186	unsigned int b_size;		/* Current size of the buffer - bytes */
 187	unsigned int b_cnt;		/* Bytes used */
 188	unsigned int b_in, b_out;	/* Offsets into buffer - bytes */
 189	unsigned int b_read;		/* Amount of read data in curr. pkt. */
 190	struct mon_pgmap *b_vec;	/* The map array */
 191	wait_queue_head_t b_wait;	/* Wait for data here */
 192
 193	struct mutex fetch_lock;	/* Protect b_read, b_out */
 194	int mmap_active;
 195
 196	/* A list of these is needed for "bus 0". Some time later. */
 197	struct mon_reader r;
 198
 199	/* Stats */
 200	unsigned int cnt_lost;
 201};
 202
 203static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
 204    unsigned int offset)
 205{
 206	return (struct mon_bin_hdr *)
 207	    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
 208}
 209
 210#define MON_RING_EMPTY(rp)	((rp)->b_cnt == 0)
 211
 212static unsigned char xfer_to_pipe[4] = {
 213	PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
 214};
 215
 216static struct class *mon_bin_class;
 217static dev_t mon_bin_dev0;
 218static struct cdev mon_bin_cdev;
 219
 220static void mon_buff_area_fill(const struct mon_reader_bin *rp,
 221    unsigned int offset, unsigned int size);
 222static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp);
 223static int mon_alloc_buff(struct mon_pgmap *map, int npages);
 224static void mon_free_buff(struct mon_pgmap *map, int npages);
 225
 226/*
 227 * This is a "chunked memcpy". It does not manipulate any counters.
 228 */
 229static unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
 230    unsigned int off, const unsigned char *from, unsigned int length)
 231{
 232	unsigned int step_len;
 233	unsigned char *buf;
 234	unsigned int in_page;
 235
 236	while (length) {
 237		/*
 238		 * Determine step_len.
 239		 */
 240		step_len = length;
 241		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
 242		if (in_page < step_len)
 243			step_len = in_page;
 244
 245		/*
 246		 * Copy data and advance pointers.
 247		 */
 248		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
 249		memcpy(buf, from, step_len);
 250		if ((off += step_len) >= this->b_size) off = 0;
 251		from += step_len;
 252		length -= step_len;
 253	}
 254	return off;
 255}
 256
 257/*
 258 * This is a little worse than the above because it's "chunked copy_to_user".
 259 * The return value is an error code, not an offset.
 260 */
 261static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off,
 262    char __user *to, int length)
 263{
 264	unsigned int step_len;
 265	unsigned char *buf;
 266	unsigned int in_page;
 267
 268	while (length) {
 269		/*
 270		 * Determine step_len.
 271		 */
 272		step_len = length;
 273		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
 274		if (in_page < step_len)
 275			step_len = in_page;
 276
 277		/*
 278		 * Copy data and advance pointers.
 279		 */
 280		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
 281		if (copy_to_user(to, buf, step_len))
 282			return -EINVAL;
 283		if ((off += step_len) >= this->b_size) off = 0;
 284		to += step_len;
 285		length -= step_len;
 286	}
 287	return 0;
 288}
 289
 290/*
 291 * Allocate an (aligned) area in the buffer.
 292 * This is called under b_lock.
 293 * Returns ~0 on failure.
 294 */
 295static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp,
 296    unsigned int size)
 297{
 298	unsigned int offset;
 299
 300	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
 301	if (rp->b_cnt + size > rp->b_size)
 302		return ~0;
 303	offset = rp->b_in;
 304	rp->b_cnt += size;
 305	if ((rp->b_in += size) >= rp->b_size)
 306		rp->b_in -= rp->b_size;
 307	return offset;
 308}
 309
 310/*
 311 * This is the same thing as mon_buff_area_alloc, only it does not allow
 312 * buffers to wrap. This is needed by applications which pass references
 313 * into mmap-ed buffers up their stacks (libpcap can do that).
 314 *
 315 * Currently, we always have the header stuck with the data, although
 316 * it is not strictly speaking necessary.
 317 *
 318 * When a buffer would wrap, we place a filler packet to mark the space.
 319 */
 320static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
 321    unsigned int size)
 322{
 323	unsigned int offset;
 324	unsigned int fill_size;
 325
 326	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
 327	if (rp->b_cnt + size > rp->b_size)
 328		return ~0;
 329	if (rp->b_in + size > rp->b_size) {
 330		/*
 331		 * This would wrap. Find if we still have space after
 332		 * skipping to the end of the buffer. If we do, place
 333		 * a filler packet and allocate a new packet.
 334		 */
 335		fill_size = rp->b_size - rp->b_in;
 336		if (rp->b_cnt + size + fill_size > rp->b_size)
 337			return ~0;
 338		mon_buff_area_fill(rp, rp->b_in, fill_size);
 339
 340		offset = 0;
 341		rp->b_in = size;
 342		rp->b_cnt += size + fill_size;
 343	} else if (rp->b_in + size == rp->b_size) {
 344		offset = rp->b_in;
 345		rp->b_in = 0;
 346		rp->b_cnt += size;
 347	} else {
 348		offset = rp->b_in;
 349		rp->b_in += size;
 350		rp->b_cnt += size;
 351	}
 352	return offset;
 353}
 354
 355/*
 356 * Return a few (kilo-)bytes to the head of the buffer.
 357 * This is used if a data fetch fails.
 358 */
 359static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
 360{
 361
 362	/* size &= ~(PKT_ALIGN-1);  -- we're called with aligned size */
 363	rp->b_cnt -= size;
 364	if (rp->b_in < size)
 365		rp->b_in += rp->b_size;
 366	rp->b_in -= size;
 367}
 368
 369/*
 370 * This has to be called under both b_lock and fetch_lock, because
 371 * it accesses both b_cnt and b_out.
 372 */
 373static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size)
 374{
 375
 376	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
 377	rp->b_cnt -= size;
 378	if ((rp->b_out += size) >= rp->b_size)
 379		rp->b_out -= rp->b_size;
 380}
 381
 382static void mon_buff_area_fill(const struct mon_reader_bin *rp,
 383    unsigned int offset, unsigned int size)
 384{
 385	struct mon_bin_hdr *ep;
 386
 387	ep = MON_OFF2HDR(rp, offset);
 388	memset(ep, 0, PKT_SIZE);
 389	ep->type = '@';
 390	ep->len_cap = size - PKT_SIZE;
 391}
 392
 393static inline char mon_bin_get_setup(unsigned char *setupb,
 394    const struct urb *urb, char ev_type)
 395{
 396
 397	if (urb->setup_packet == NULL)
 398		return 'Z';
 399	memcpy(setupb, urb->setup_packet, SETUP_LEN);
 400	return 0;
 401}
 402
 403static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp,
 404    unsigned int offset, struct urb *urb, unsigned int length,
 405    char *flag)
 406{
 407	int i;
 408	struct scatterlist *sg;
 409	unsigned int this_len;
 410
 411	*flag = 0;
 412	if (urb->num_sgs == 0) {
 413		if (urb->transfer_buffer == NULL) {
 414			*flag = 'Z';
 415			return length;
 416		}
 417		mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
 418		length = 0;
 419
 420	} else {
 421		/* If IOMMU coalescing occurred, we cannot trust sg_page */
 422		if (urb->transfer_flags & URB_DMA_SG_COMBINED) {
 423			*flag = 'D';
 424			return length;
 425		}
 426
 427		/* Copy up to the first non-addressable segment */
 428		for_each_sg(urb->sg, sg, urb->num_sgs, i) {
 429			if (length == 0 || PageHighMem(sg_page(sg)))
 430				break;
 431			this_len = min_t(unsigned int, sg->length, length);
 432			offset = mon_copy_to_buff(rp, offset, sg_virt(sg),
 433					this_len);
 434			length -= this_len;
 435		}
 436		if (i == 0)
 437			*flag = 'D';
 438	}
 439
 440	return length;
 441}
 442
 443/*
 444 * This is the look-ahead pass in case of 'C Zi', when actual_length cannot
 445 * be used to determine the length of the whole contiguous buffer.
 446 */
 447static unsigned int mon_bin_collate_isodesc(const struct mon_reader_bin *rp,
 448    struct urb *urb, unsigned int ndesc)
 449{
 450	struct usb_iso_packet_descriptor *fp;
 451	unsigned int length;
 452
 453	length = 0;
 454	fp = urb->iso_frame_desc;
 455	while (ndesc-- != 0) {
 456		if (fp->actual_length != 0) {
 457			if (fp->offset + fp->actual_length > length)
 458				length = fp->offset + fp->actual_length;
 459		}
 460		fp++;
 461	}
 462	return length;
 463}
 464
 465static void mon_bin_get_isodesc(const struct mon_reader_bin *rp,
 466    unsigned int offset, struct urb *urb, char ev_type, unsigned int ndesc)
 467{
 468	struct mon_bin_isodesc *dp;
 469	struct usb_iso_packet_descriptor *fp;
 470
 471	fp = urb->iso_frame_desc;
 472	while (ndesc-- != 0) {
 473		dp = (struct mon_bin_isodesc *)
 474		    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
 475		dp->iso_status = fp->status;
 476		dp->iso_off = fp->offset;
 477		dp->iso_len = (ev_type == 'S') ? fp->length : fp->actual_length;
 478		dp->_pad = 0;
 479		if ((offset += sizeof(struct mon_bin_isodesc)) >= rp->b_size)
 480			offset = 0;
 481		fp++;
 482	}
 483}
 484
 485static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
 486    char ev_type, int status)
 487{
 488	const struct usb_endpoint_descriptor *epd = &urb->ep->desc;
 489	struct timespec64 ts;
 490	unsigned long flags;
 491	unsigned int urb_length;
 492	unsigned int offset;
 493	unsigned int length;
 494	unsigned int delta;
 495	unsigned int ndesc, lendesc;
 496	unsigned char dir;
 497	struct mon_bin_hdr *ep;
 498	char data_tag = 0;
 499
 500	getnstimeofday64(&ts);
 501
 502	spin_lock_irqsave(&rp->b_lock, flags);
 503
 504	/*
 505	 * Find the maximum allowable length, then allocate space.
 506	 */
 507	urb_length = (ev_type == 'S') ?
 508	    urb->transfer_buffer_length : urb->actual_length;
 509	length = urb_length;
 510
 511	if (usb_endpoint_xfer_isoc(epd)) {
 512		if (urb->number_of_packets < 0) {
 513			ndesc = 0;
 514		} else if (urb->number_of_packets >= ISODESC_MAX) {
 515			ndesc = ISODESC_MAX;
 516		} else {
 517			ndesc = urb->number_of_packets;
 518		}
 519		if (ev_type == 'C' && usb_urb_dir_in(urb))
 520			length = mon_bin_collate_isodesc(rp, urb, ndesc);
 521	} else {
 522		ndesc = 0;
 523	}
 524	lendesc = ndesc*sizeof(struct mon_bin_isodesc);
 525
 526	/* not an issue unless there's a subtle bug in a HCD somewhere */
 527	if (length >= urb->transfer_buffer_length)
 528		length = urb->transfer_buffer_length;
 529
 530	if (length >= rp->b_size/5)
 531		length = rp->b_size/5;
 532
 533	if (usb_urb_dir_in(urb)) {
 534		if (ev_type == 'S') {
 535			length = 0;
 536			data_tag = '<';
 537		}
 538		/* Cannot rely on endpoint number in case of control ep.0 */
 539		dir = USB_DIR_IN;
 540	} else {
 541		if (ev_type == 'C') {
 542			length = 0;
 543			data_tag = '>';
 544		}
 545		dir = 0;
 546	}
 547
 548	if (rp->mmap_active) {
 549		offset = mon_buff_area_alloc_contiguous(rp,
 550						 length + PKT_SIZE + lendesc);
 551	} else {
 552		offset = mon_buff_area_alloc(rp, length + PKT_SIZE + lendesc);
 553	}
 554	if (offset == ~0) {
 555		rp->cnt_lost++;
 556		spin_unlock_irqrestore(&rp->b_lock, flags);
 557		return;
 558	}
 559
 560	ep = MON_OFF2HDR(rp, offset);
561		if ((offset += PKT_SIZE) >= rp->b_size)
		offset = 0;
 562
 563	/*
 564	 * Fill the allocated area.
 565	 */
 566	memset(ep, 0, PKT_SIZE);
 567	ep->type = ev_type;
 568	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(epd)];
 569	ep->epnum = dir | usb_endpoint_num(epd);
 570	ep->devnum = urb->dev->devnum;
 571	ep->busnum = urb->dev->bus->busnum;
 572	ep->id = (unsigned long) urb;
 573	ep->ts_sec = ts.tv_sec;
 574	ep->ts_usec = ts.tv_nsec / NSEC_PER_USEC;
 575	ep->status = status;
 576	ep->len_urb = urb_length;
 577	ep->len_cap = length + lendesc;
 578	ep->xfer_flags = urb->transfer_flags;
 579
 580	if (usb_endpoint_xfer_int(epd)) {
 581		ep->interval = urb->interval;
 582	} else if (usb_endpoint_xfer_isoc(epd)) {
 583		ep->interval = urb->interval;
 584		ep->start_frame = urb->start_frame;
 585		ep->s.iso.error_count = urb->error_count;
 586		ep->s.iso.numdesc = urb->number_of_packets;
 587	}
 588
 589	if (usb_endpoint_xfer_control(epd) && ev_type == 'S') {
 590		ep->flag_setup = mon_bin_get_setup(ep->s.setup, urb, ev_type);
 591	} else {
 592		ep->flag_setup = '-';
 593	}
 594
 595	if (ndesc != 0) {
 596		ep->ndesc = ndesc;
 597		mon_bin_get_isodesc(rp, offset, urb, ev_type, ndesc);
 598		if ((offset += lendesc) >= rp->b_size)
 599			offset -= rp->b_size;
 600	}
 601
 602	if (length != 0) {
 603		length = mon_bin_get_data(rp, offset, urb, length,
 604				&ep->flag_data);
 605		if (length > 0) {
 606			delta = (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
 607			ep->len_cap -= length;
 608			delta -= (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
 609			mon_buff_area_shrink(rp, delta);
 610		}
 611	} else {
 612		ep->flag_data = data_tag;
 613	}
 614
 615	spin_unlock_irqrestore(&rp->b_lock, flags);
 616
 617	wake_up(&rp->b_wait);
 618}
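/*
 * Example of the shrink arithmetic above, assuming PKT_ALIGN is 64 as
 * defined earlier in this file: if len_cap was 200 and mon_bin_get_data
 * left 70 bytes uncopied (a highmem SG page, say), len_cap drops to 130
 * and delta = 256 - 192 = 64 bytes are handed back to the ring by
 * mon_buff_area_shrink.
 */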
 619
 620static void mon_bin_submit(void *data, struct urb *urb)
 621{
 622	struct mon_reader_bin *rp = data;
 623	mon_bin_event(rp, urb, 'S', -EINPROGRESS);
 624}
 625
 626static void mon_bin_complete(void *data, struct urb *urb, int status)
 627{
 628	struct mon_reader_bin *rp = data;
 629	mon_bin_event(rp, urb, 'C', status);
 630}
 631
 632static void mon_bin_error(void *data, struct urb *urb, int error)
 633{
 634	struct mon_reader_bin *rp = data;
 635	struct timespec64 ts;
 636	unsigned long flags;
 637	unsigned int offset;
 638	struct mon_bin_hdr *ep;
 639
 640	getnstimeofday64(&ts);
 641
 642	spin_lock_irqsave(&rp->b_lock, flags);
 643
 644	offset = mon_buff_area_alloc(rp, PKT_SIZE);
 645	if (offset == ~0) {
 646		/* Not incrementing cnt_lost. Just because. */
 647		spin_unlock_irqrestore(&rp->b_lock, flags);
 648		return;
 649	}
 650
 651	ep = MON_OFF2HDR(rp, offset);
 652
 653	memset(ep, 0, PKT_SIZE);
 654	ep->type = 'E';
 655	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(&urb->ep->desc)];
 656	ep->epnum = usb_urb_dir_in(urb) ? USB_DIR_IN : 0;
 657	ep->epnum |= usb_endpoint_num(&urb->ep->desc);
 658	ep->devnum = urb->dev->devnum;
 659	ep->busnum = urb->dev->bus->busnum;
 660	ep->id = (unsigned long) urb;
 661	ep->ts_sec = ts.tv_sec;
 662	ep->ts_usec = ts.tv_nsec / NSEC_PER_USEC;
 663	ep->status = error;
 664
 665	ep->flag_setup = '-';
 666	ep->flag_data = 'E';
 667
 668	spin_unlock_irqrestore(&rp->b_lock, flags);
 669
 670	wake_up(&rp->b_wait);
 671}
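/*
 * Note that an 'E' event consists of the header alone: flag_data is set
 * to 'E' so that userland knows no payload follows, and len_cap stays 0.
 */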
 672
 673static int mon_bin_open(struct inode *inode, struct file *file)
 674{
 675	struct mon_bus *mbus;
 676	struct mon_reader_bin *rp;
 677	size_t size;
 678	int rc;
 679
 680	mutex_lock(&mon_lock);
 681	mbus = mon_bus_lookup(iminor(inode));
 682	if (mbus == NULL) {
 683		mutex_unlock(&mon_lock);
 684		return -ENODEV;
 685	}
 686	if (mbus != &mon_bus0 && mbus->u_bus == NULL) {
 687		printk(KERN_ERR TAG ": consistency error on open\n");
 688		mutex_unlock(&mon_lock);
 689		return -ENODEV;
 690	}
 691
 692	rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL);
 693	if (rp == NULL) {
 694		rc = -ENOMEM;
 695		goto err_alloc;
 696	}
 697	spin_lock_init(&rp->b_lock);
 698	init_waitqueue_head(&rp->b_wait);
 699	mutex_init(&rp->fetch_lock);
 700	rp->b_size = BUFF_DFL;
 701
 702	size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);
 703	if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) {
 704		rc = -ENOMEM;
 705		goto err_allocvec;
 706	}
 707
 708	if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0)
 709		goto err_allocbuff;
 710
 711	rp->r.m_bus = mbus;
 712	rp->r.r_data = rp;
 713	rp->r.rnf_submit = mon_bin_submit;
 714	rp->r.rnf_error = mon_bin_error;
 715	rp->r.rnf_complete = mon_bin_complete;
 716
 717	mon_reader_add(mbus, &rp->r);
 718
 719	file->private_data = rp;
 720	mutex_unlock(&mon_lock);
 721	return 0;
 722
 723err_allocbuff:
 724	kfree(rp->b_vec);
 725err_allocvec:
 726	kfree(rp);
 727err_alloc:
 728	mutex_unlock(&mon_lock);
 729	return rc;
 730}
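/*
 * From userland, each open of a /dev/usbmonN node (a sketch; N is the
 * bus number, with usbmon0 capturing all buses) creates one independent
 * reader with its own BUFF_DFL-sized ring:
 *
 *	int fd = open("/dev/usbmon0", O_RDONLY);
 *	if (fd < 0)
 *		perror("open");
 */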
 731
 732/*
733	 * Extract an event from the buffer and copy it to user space.
 734 * Wait if there is no event ready.
 735 * Returns zero or error.
 736 */
 737static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
 738    struct mon_bin_hdr __user *hdr, unsigned int hdrbytes,
 739    void __user *data, unsigned int nbytes)
 740{
 741	unsigned long flags;
 742	struct mon_bin_hdr *ep;
 743	size_t step_len;
 744	unsigned int offset;
 745	int rc;
 746
 747	mutex_lock(&rp->fetch_lock);
 748
 749	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
 750		mutex_unlock(&rp->fetch_lock);
 751		return rc;
 752	}
 753
 754	ep = MON_OFF2HDR(rp, rp->b_out);
 755
 756	if (copy_to_user(hdr, ep, hdrbytes)) {
 757		mutex_unlock(&rp->fetch_lock);
 758		return -EFAULT;
 759	}
 760
 761	step_len = min(ep->len_cap, nbytes);
762		if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size)
		offset = 0;
 763
 764	if (copy_from_buf(rp, offset, data, step_len)) {
 765		mutex_unlock(&rp->fetch_lock);
 766		return -EFAULT;
 767	}
 768
 769	spin_lock_irqsave(&rp->b_lock, flags);
 770	mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
 771	spin_unlock_irqrestore(&rp->b_lock, flags);
 772	rp->b_read = 0;
 773
 774	mutex_unlock(&rp->fetch_lock);
 775	return 0;
 776}
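/*
 * Userland reaches this through MON_IOCX_GET (48-byte header) or
 * MON_IOCX_GETX (64-byte header). A sketch, assuming the documented
 * structure names ("pkt" and "data_buf" are hypothetical):
 *
 *	struct usbmon_packet pkt;
 *	unsigned char data_buf[4096];
 *	struct mon_bin_get getb = {
 *		.hdr   = &pkt,
 *		.data  = data_buf,
 *		.alloc = sizeof(data_buf),
 *	};
 *	if (ioctl(fd, MON_IOCX_GETX, &getb) < 0)
 *		perror("MON_IOCX_GETX");
 *
 * Note that the event is consumed whole: anything beyond "alloc" bytes
 * of captured data is dropped, not kept for a later call.
 */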
 777
 778static int mon_bin_release(struct inode *inode, struct file *file)
 779{
 780	struct mon_reader_bin *rp = file->private_data;
 781	struct mon_bus* mbus = rp->r.m_bus;
 782
 783	mutex_lock(&mon_lock);
 784
 785	if (mbus->nreaders <= 0) {
 786		printk(KERN_ERR TAG ": consistency error on close\n");
 787		mutex_unlock(&mon_lock);
 788		return 0;
 789	}
 790	mon_reader_del(mbus, &rp->r);
 791
 792	mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
 793	kfree(rp->b_vec);
 794	kfree(rp);
 795
 796	mutex_unlock(&mon_lock);
 797	return 0;
 798}
 799
 800static ssize_t mon_bin_read(struct file *file, char __user *buf,
 801    size_t nbytes, loff_t *ppos)
 802{
 803	struct mon_reader_bin *rp = file->private_data;
 804	unsigned int hdrbytes = PKT_SZ_API0;
 805	unsigned long flags;
 806	struct mon_bin_hdr *ep;
 807	unsigned int offset;
 808	size_t step_len;
 809	char *ptr;
 810	ssize_t done = 0;
 811	int rc;
 812
 813	mutex_lock(&rp->fetch_lock);
 814
 815	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
 816		mutex_unlock(&rp->fetch_lock);
 817		return rc;
 818	}
 819
 820	ep = MON_OFF2HDR(rp, rp->b_out);
 821
 822	if (rp->b_read < hdrbytes) {
 823		step_len = min(nbytes, (size_t)(hdrbytes - rp->b_read));
 824		ptr = ((char *)ep) + rp->b_read;
 825		if (step_len && copy_to_user(buf, ptr, step_len)) {
 826			mutex_unlock(&rp->fetch_lock);
 827			return -EFAULT;
 828		}
 829		nbytes -= step_len;
 830		buf += step_len;
 831		rp->b_read += step_len;
 832		done += step_len;
 833	}
 834
 835	if (rp->b_read >= hdrbytes) {
 836		step_len = ep->len_cap;
 837		step_len -= rp->b_read - hdrbytes;
 838		if (step_len > nbytes)
 839			step_len = nbytes;
 840		offset = rp->b_out + PKT_SIZE;
 841		offset += rp->b_read - hdrbytes;
 842		if (offset >= rp->b_size)
 843			offset -= rp->b_size;
 844		if (copy_from_buf(rp, offset, buf, step_len)) {
 845			mutex_unlock(&rp->fetch_lock);
 846			return -EFAULT;
 847		}
 848		nbytes -= step_len;
 849		buf += step_len;
 850		rp->b_read += step_len;
 851		done += step_len;
 852	}
 853
 854	/*
 855	 * Check if whole packet was read, and if so, jump to the next one.
 856	 */
 857	if (rp->b_read >= hdrbytes + ep->len_cap) {
 858		spin_lock_irqsave(&rp->b_lock, flags);
 859		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
 860		spin_unlock_irqrestore(&rp->b_lock, flags);
 861		rp->b_read = 0;
 862	}
 863
 864	mutex_unlock(&rp->fetch_lock);
 865	return done;
 866}
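/*
 * So a plain read(2) hands out one event at a time, the 48-byte API 0
 * header first, then the captured data; a short read leaves b_read in
 * place and the event is only freed once header plus len_cap bytes are
 * gone, e.g.:
 *
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 * may have to be repeated to drain one large event.
 */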
 867
 868/*
869	 * Remove at most nevents events from the chunked buffer.
 870 * Returns the number of removed events.
 871 */
 872static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
 873{
 874	unsigned long flags;
 875	struct mon_bin_hdr *ep;
 876	int i;
 877
 878	mutex_lock(&rp->fetch_lock);
 879	spin_lock_irqsave(&rp->b_lock, flags);
 880	for (i = 0; i < nevents; ++i) {
 881		if (MON_RING_EMPTY(rp))
 882			break;
 883
 884		ep = MON_OFF2HDR(rp, rp->b_out);
 885		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
 886	}
 887	spin_unlock_irqrestore(&rp->b_lock, flags);
 888	rp->b_read = 0;
 889	mutex_unlock(&rp->fetch_lock);
 890	return i;
 891}
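/*
 * Exposed to userland as MON_IOCH_MFLUSH, e.g.
 * ioctl(fd, MON_IOCH_MFLUSH, n) to drop the n oldest events after they
 * have been inspected through the mmap'ed ring.
 */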
 892
 893/*
 894 * Fetch at most max event offsets into the buffer and put them into vec.
 895 * The events are usually freed later with mon_bin_flush.
 896 * Return the effective number of events fetched.
 897 */
 898static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
 899    u32 __user *vec, unsigned int max)
 900{
 901	unsigned int cur_out;
 902	unsigned int bytes, avail;
 903	unsigned int size;
 904	unsigned int nevents;
 905	struct mon_bin_hdr *ep;
 906	unsigned long flags;
 907	int rc;
 908
 909	mutex_lock(&rp->fetch_lock);
 910
 911	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
 912		mutex_unlock(&rp->fetch_lock);
 913		return rc;
 914	}
 915
 916	spin_lock_irqsave(&rp->b_lock, flags);
 917	avail = rp->b_cnt;
 918	spin_unlock_irqrestore(&rp->b_lock, flags);
 919
 920	cur_out = rp->b_out;
 921	nevents = 0;
 922	bytes = 0;
 923	while (bytes < avail) {
 924		if (nevents >= max)
 925			break;
 926
 927		ep = MON_OFF2HDR(rp, cur_out);
 928		if (put_user(cur_out, &vec[nevents])) {
 929			mutex_unlock(&rp->fetch_lock);
 930			return -EFAULT;
 931		}
 932
 933		nevents++;
 934		size = ep->len_cap + PKT_SIZE;
 935		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
 936		if ((cur_out += size) >= rp->b_size)
 937			cur_out -= rp->b_size;
 938		bytes += size;
 939	}
 940
 941	mutex_unlock(&rp->fetch_lock);
 942	return nevents;
 943}
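/*
 * Together with mmap this gives zero-copy reading. A sketch, assuming
 * the documented mon_bin_mfetch layout ("ring" is a hypothetical
 * unsigned char pointer to the mapped buffer and handle_event a
 * hypothetical consumer):
 *
 *	uint32_t offv[32];
 *	struct mon_bin_mfetch mf = {
 *		.offvec = offv,
 *		.nfetch = 32,
 *		.nflush = 0,
 *	};
 *	if (ioctl(fd, MON_IOCX_MFETCH, &mf) == 0) {
 *		for (uint32_t i = 0; i < mf.nfetch; i++)
 *			handle_event(ring + offv[i]);
 *		mf.nflush = mf.nfetch;
 *	}
 *
 * The kernel writes the effective counts back into nfetch and nflush;
 * setting nflush on the next call frees the events just consumed.
 */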
 944
 945/*
 946 * Count events. This is almost the same as the above mon_bin_fetch,
947	 * only we do not store offsets into the user vector, and we have no limit.
 948 */
 949static int mon_bin_queued(struct mon_reader_bin *rp)
 950{
 951	unsigned int cur_out;
 952	unsigned int bytes, avail;
 953	unsigned int size;
 954	unsigned int nevents;
 955	struct mon_bin_hdr *ep;
 956	unsigned long flags;
 957
 958	mutex_lock(&rp->fetch_lock);
 959
 960	spin_lock_irqsave(&rp->b_lock, flags);
 961	avail = rp->b_cnt;
 962	spin_unlock_irqrestore(&rp->b_lock, flags);
 963
 964	cur_out = rp->b_out;
 965	nevents = 0;
 966	bytes = 0;
 967	while (bytes < avail) {
 968		ep = MON_OFF2HDR(rp, cur_out);
 969
 970		nevents++;
 971		size = ep->len_cap + PKT_SIZE;
 972		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
 973		if ((cur_out += size) >= rp->b_size)
 974			cur_out -= rp->b_size;
 975		bytes += size;
 976	}
 977
 978	mutex_unlock(&rp->fetch_lock);
 979	return nevents;
 980}
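/*
 * The count feeds MON_IOCG_STATS, e.g.:
 *
 *	struct mon_bin_stats st;
 *	if (ioctl(fd, MON_IOCG_STATS, &st) == 0)
 *		fprintf(stderr, "queued %u dropped %u\n", st.queued, st.dropped);
 *
 * Reading the stats resets the dropped counter, as seen below.
 */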
 981
982	/* Ioctls of the binary reader. */
 984static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 985{
 986	struct mon_reader_bin *rp = file->private_data;
 987	// struct mon_bus* mbus = rp->r.m_bus;
 988	int ret = 0;
 989	struct mon_bin_hdr *ep;
 990	unsigned long flags;
 991
 992	switch (cmd) {
 993
 994	case MON_IOCQ_URB_LEN:
 995		/*
 996		 * N.B. This only returns the size of data, without the header.
 997		 */
 998		spin_lock_irqsave(&rp->b_lock, flags);
 999		if (!MON_RING_EMPTY(rp)) {
1000			ep = MON_OFF2HDR(rp, rp->b_out);
1001			ret = ep->len_cap;
1002		}
1003		spin_unlock_irqrestore(&rp->b_lock, flags);
1004		break;
1005
1006	case MON_IOCQ_RING_SIZE:
1007		mutex_lock(&rp->fetch_lock);
1008		ret = rp->b_size;
1009		mutex_unlock(&rp->fetch_lock);
1010		break;
1011
1012	case MON_IOCT_RING_SIZE:
1013		/*
1014	 * Changing the buffer size flushes its contents; the new
1015	 * buffer is allocated before the old one is released, to be
1016	 * sure the device stays functional even under memory
1017	 * pressure.
1018		 */
1019		{
1020		int size;
1021		struct mon_pgmap *vec;
1022
1023		if (arg < BUFF_MIN || arg > BUFF_MAX)
1024			return -EINVAL;
1025
1026		size = CHUNK_ALIGN(arg);
1027		vec = kzalloc(sizeof(struct mon_pgmap) * (size / CHUNK_SIZE), GFP_KERNEL);
1028		if (vec == NULL) {
1029			ret = -ENOMEM;
1030			break;
1031		}
1032
1033		ret = mon_alloc_buff(vec, size/CHUNK_SIZE);
1034		if (ret < 0) {
1035			kfree(vec);
1036			break;
1037		}
1038
1039		mutex_lock(&rp->fetch_lock);
1040		spin_lock_irqsave(&rp->b_lock, flags);
1041		mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
1042		kfree(rp->b_vec);
1043		rp->b_vec  = vec;
1044		rp->b_size = size;
1045		rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
1046		rp->cnt_lost = 0;
1047		spin_unlock_irqrestore(&rp->b_lock, flags);
1048		mutex_unlock(&rp->fetch_lock);
1049		}
1050		break;
1051
1052	case MON_IOCH_MFLUSH:
1053		ret = mon_bin_flush(rp, arg);
1054		break;
1055
1056	case MON_IOCX_GET:
1057	case MON_IOCX_GETX:
1058		{
1059		struct mon_bin_get getb;
1060
1061		if (copy_from_user(&getb, (void __user *)arg,
1062					    sizeof(struct mon_bin_get)))
1063			return -EFAULT;
1064
1065		if (getb.alloc > 0x10000000)	/* Want to cast to u32 */
1066			return -EINVAL;
1067		ret = mon_bin_get_event(file, rp, getb.hdr,
1068		    (cmd == MON_IOCX_GET)? PKT_SZ_API0: PKT_SZ_API1,
1069		    getb.data, (unsigned int)getb.alloc);
1070		}
1071		break;
1072
1073	case MON_IOCX_MFETCH:
1074		{
1075		struct mon_bin_mfetch mfetch;
1076		struct mon_bin_mfetch __user *uptr;
1077
1078		uptr = (struct mon_bin_mfetch __user *)arg;
1079
1080		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
1081			return -EFAULT;
1082
1083		if (mfetch.nflush) {
1084			ret = mon_bin_flush(rp, mfetch.nflush);
1085			if (ret < 0)
1086				return ret;
1087			if (put_user(ret, &uptr->nflush))
1088				return -EFAULT;
1089		}
1090		ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch);
1091		if (ret < 0)
1092			return ret;
1093		if (put_user(ret, &uptr->nfetch))
1094			return -EFAULT;
1095		ret = 0;
1096		}
1097		break;
1098
1099	case MON_IOCG_STATS: {
1100		struct mon_bin_stats __user *sp;
1101		unsigned int nevents;
1102		unsigned int ndropped;
1103
1104		spin_lock_irqsave(&rp->b_lock, flags);
1105		ndropped = rp->cnt_lost;
1106		rp->cnt_lost = 0;
1107		spin_unlock_irqrestore(&rp->b_lock, flags);
1108		nevents = mon_bin_queued(rp);
1109
1110		sp = (struct mon_bin_stats __user *)arg;
1111		if (put_user(ndropped, &sp->dropped))
1112			return -EFAULT;
1113		if (put_user(nevents, &sp->queued))
1114			return -EFAULT;
1115
1116		}
1117		break;
1118
1119	default:
1120		return -ENOTTY;
1121	}
1122
1123	return ret;
1124}
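/*
 * A typical ring resize before a bulk-heavy capture looks like this
 * (a sketch; sizes outside [BUFF_MIN, BUFF_MAX] get -EINVAL, and the
 * request is rounded up to whole chunks):
 *
 *	if (ioctl(fd, MON_IOCT_RING_SIZE, 1024 * 1024) < 0)
 *		perror("MON_IOCT_RING_SIZE");
 *	long sz = ioctl(fd, MON_IOCQ_RING_SIZE);
 *
 * Remember that resizing discards anything already captured.
 */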
1125
1126#ifdef CONFIG_COMPAT
1127static long mon_bin_compat_ioctl(struct file *file,
1128    unsigned int cmd, unsigned long arg)
1129{
1130	struct mon_reader_bin *rp = file->private_data;
1131	int ret;
1132
1133	switch (cmd) {
1134
1135	case MON_IOCX_GET32:
1136	case MON_IOCX_GETX32:
1137		{
1138		struct mon_bin_get32 getb;
1139
1140		if (copy_from_user(&getb, (void __user *)arg,
1141					    sizeof(struct mon_bin_get32)))
1142			return -EFAULT;
1143
1144		ret = mon_bin_get_event(file, rp, compat_ptr(getb.hdr32),
1145		    (cmd == MON_IOCX_GET32)? PKT_SZ_API0: PKT_SZ_API1,
1146		    compat_ptr(getb.data32), getb.alloc32);
1147		if (ret < 0)
1148			return ret;
1149		}
1150		return 0;
1151
1152	case MON_IOCX_MFETCH32:
1153		{
1154		struct mon_bin_mfetch32 mfetch;
1155		struct mon_bin_mfetch32 __user *uptr;
1156
1157		uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg);
1158
1159		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
1160			return -EFAULT;
1161
1162		if (mfetch.nflush32) {
1163			ret = mon_bin_flush(rp, mfetch.nflush32);
1164			if (ret < 0)
1165				return ret;
1166			if (put_user(ret, &uptr->nflush32))
1167				return -EFAULT;
1168		}
1169		ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32),
1170		    mfetch.nfetch32);
1171		if (ret < 0)
1172			return ret;
1173		if (put_user(ret, &uptr->nfetch32))
1174			return -EFAULT;
1175		}
1176		return 0;
1177
1178	case MON_IOCG_STATS:
1179		return mon_bin_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
1180
1181	case MON_IOCQ_URB_LEN:
1182	case MON_IOCQ_RING_SIZE:
1183	case MON_IOCT_RING_SIZE:
1184	case MON_IOCH_MFLUSH:
1185		return mon_bin_ioctl(file, cmd, arg);
1186
1187	default:
1188		;
1189	}
1190	return -ENOTTY;
1191}
1192#endif /* CONFIG_COMPAT */
1193
1194static __poll_t
1195mon_bin_poll(struct file *file, struct poll_table_struct *wait)
1196{
1197	struct mon_reader_bin *rp = file->private_data;
1198	__poll_t mask = 0;
1199	unsigned long flags;
1200
1201	if (file->f_mode & FMODE_READ)
1202		poll_wait(file, &rp->b_wait, wait);
1203
1204	spin_lock_irqsave(&rp->b_lock, flags);
1205	if (!MON_RING_EMPTY(rp))
1206		mask |= EPOLLIN | EPOLLRDNORM;    /* readable */
1207	spin_unlock_irqrestore(&rp->b_lock, flags);
1208	return mask;
1209}
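/*
 * This lets a reader multiplex with poll(2) or select(2); once POLLIN is
 * signalled at least one complete event is waiting, e.g.:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN))
 *		fetch_events(fd);
 *
 * ("fetch_events" is a hypothetical consumer.) The device never becomes
 * writable.
 */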
1210
1211/*
1212	 * open and close: just keep track of how many times the device is
1213	 * mapped, so mon_bin_event can pick the contiguous allocator.
1214 */
1215	static void mon_bin_vma_open(struct vm_area_struct *vma)
1216	{
1217		struct mon_reader_bin *rp = vma->vm_private_data;
	unsigned long flags;

	/*
	 * mmap_active is read under b_lock in mon_bin_event, so take the
	 * same lock here: a bare increment would race with a concurrent
	 * mmap or munmap in another thread.
	 */
	spin_lock_irqsave(&rp->b_lock, flags);
1218		rp->mmap_active++;
	spin_unlock_irqrestore(&rp->b_lock, flags);
1219	}
1220
1221	static void mon_bin_vma_close(struct vm_area_struct *vma)
1222	{
	unsigned long flags;

1223		struct mon_reader_bin *rp = vma->vm_private_data;
	spin_lock_irqsave(&rp->b_lock, flags);
1224		rp->mmap_active--;
	spin_unlock_irqrestore(&rp->b_lock, flags);
1225	}
1226
1227/*
1228 * Map ring pages to user space.
1229 */
1230static int mon_bin_vma_fault(struct vm_fault *vmf)
1231{
1232	struct mon_reader_bin *rp = vmf->vma->vm_private_data;
1233	unsigned long offset, chunk_idx;
1234	struct page *pageptr;
1235
1236	mutex_lock(&rp->fetch_lock);
1237	offset = vmf->pgoff << PAGE_SHIFT;
1238	if (offset >= rp->b_size) {
1239		mutex_unlock(&rp->fetch_lock);
1240		return VM_FAULT_SIGBUS;
1241	}
1242	chunk_idx = offset / CHUNK_SIZE;
1243	pageptr = rp->b_vec[chunk_idx].pg;
1244	get_page(pageptr);
1245	mutex_unlock(&rp->fetch_lock);
1246	vmf->page = pageptr;
1247	return 0;
1248}
1249
1250static const struct vm_operations_struct mon_bin_vm_ops = {
1251	.open =     mon_bin_vma_open,
1252	.close =    mon_bin_vma_close,
1253	.fault =    mon_bin_vma_fault,
1254};
1255
1256static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
1257{
1258	/* don't do anything here: "fault" will set up page table entries */
1259	vma->vm_ops = &mon_bin_vm_ops;
1260	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
1261	vma->vm_private_data = filp->private_data;
1262	mon_bin_vma_open(vma);
1263	return 0;
1264}
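/*
 * The matching userland side maps the whole ring read-only (a sketch;
 * the length should match the current MON_IOCQ_RING_SIZE):
 *
 *	size_t len = ioctl(fd, MON_IOCQ_RING_SIZE);
 *	void *ring = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *
 * The offsets returned by MON_IOCX_MFETCH then index into this mapping.
 */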
1265
1266static const struct file_operations mon_fops_binary = {
1267	.owner =	THIS_MODULE,
1268	.open =		mon_bin_open,
1269	.llseek =	no_llseek,
1270	.read =		mon_bin_read,
1271	/* .write =	mon_text_write, */
1272	.poll =		mon_bin_poll,
1273	.unlocked_ioctl = mon_bin_ioctl,
1274#ifdef CONFIG_COMPAT
1275	.compat_ioctl =	mon_bin_compat_ioctl,
1276#endif
1277	.release =	mon_bin_release,
1278	.mmap =		mon_bin_mmap,
1279};
1280
1281static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp)
1282{
1283	DECLARE_WAITQUEUE(waita, current);
1284	unsigned long flags;
1285
1286	add_wait_queue(&rp->b_wait, &waita);
1287	set_current_state(TASK_INTERRUPTIBLE);
1288
1289	spin_lock_irqsave(&rp->b_lock, flags);
1290	while (MON_RING_EMPTY(rp)) {
1291		spin_unlock_irqrestore(&rp->b_lock, flags);
1292
1293		if (file->f_flags & O_NONBLOCK) {
1294			set_current_state(TASK_RUNNING);
1295			remove_wait_queue(&rp->b_wait, &waita);
1296			return -EWOULDBLOCK; /* Same as EAGAIN in Linux */
1297		}
1298		schedule();
1299		if (signal_pending(current)) {
1300			remove_wait_queue(&rp->b_wait, &waita);
1301			return -EINTR;
1302		}
1303		set_current_state(TASK_INTERRUPTIBLE);
1304
1305		spin_lock_irqsave(&rp->b_lock, flags);
1306	}
1307	spin_unlock_irqrestore(&rp->b_lock, flags);
1308
1309	set_current_state(TASK_RUNNING);
1310	remove_wait_queue(&rp->b_wait, &waita);
1311	return 0;
1312}
1313
1314static int mon_alloc_buff(struct mon_pgmap *map, int npages)
1315{
1316	int n;
1317	unsigned long vaddr;
1318
1319	for (n = 0; n < npages; n++) {
1320		vaddr = get_zeroed_page(GFP_KERNEL);
1321		if (vaddr == 0) {
1322			while (n-- != 0)
1323				free_page((unsigned long) map[n].ptr);
1324			return -ENOMEM;
1325		}
1326		map[n].ptr = (unsigned char *) vaddr;
1327		map[n].pg = virt_to_page((void *) vaddr);
1328	}
1329	return 0;
1330}
1331
1332static void mon_free_buff(struct mon_pgmap *map, int npages)
1333{
1334	int n;
1335
1336	for (n = 0; n < npages; n++)
1337		free_page((unsigned long) map[n].ptr);
1338}
1339
1340int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus)
1341{
1342	struct device *dev;
1343	unsigned minor = ubus ? ubus->busnum : 0;
1344
1345	if (minor >= MON_BIN_MAX_MINOR)
1346		return 0;
1347
1348	dev = device_create(mon_bin_class, ubus ? ubus->controller : NULL,
1349			    MKDEV(MAJOR(mon_bin_dev0), minor), NULL,
1350			    "usbmon%d", minor);
1351	if (IS_ERR(dev))
1352		return 0;
1353
1354	mbus->classdev = dev;
1355	return 1;
1356}
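/*
 * With the usual udev rules this makes the nodes appear as /dev/usbmonN,
 * one per bus, plus usbmon0 for the pseudo-bus that sees everything.
 */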
1357
1358void mon_bin_del(struct mon_bus *mbus)
1359{
1360	device_destroy(mon_bin_class, mbus->classdev->devt);
1361}
1362
1363int __init mon_bin_init(void)
1364{
1365	int rc;
1366
1367	mon_bin_class = class_create(THIS_MODULE, "usbmon");
1368	if (IS_ERR(mon_bin_class)) {
1369		rc = PTR_ERR(mon_bin_class);
1370		goto err_class;
1371	}
1372
1373	rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon");
1374	if (rc < 0)
1375		goto err_dev;
1376
1377	cdev_init(&mon_bin_cdev, &mon_fops_binary);
1378	mon_bin_cdev.owner = THIS_MODULE;
1379
1380	rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR);
1381	if (rc < 0)
1382		goto err_add;
1383
1384	return 0;
1385
1386err_add:
1387	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
1388err_dev:
1389	class_destroy(mon_bin_class);
1390err_class:
1391	return rc;
1392}
1393
1394void mon_bin_exit(void)
1395{
1396	cdev_del(&mon_bin_cdev);
1397	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
1398	class_destroy(mon_bin_class);
1399}