   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2017 - Cambridge Greys Limited
   4 * Copyright (C) 2011 - 2014 Cisco Systems Inc
   5 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
   6 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
   7 * James Leu (jleu@mindspring.net).
   8 * Copyright (C) 2001 by various other people who didn't put their name here.
   9 */
  10
  11#include <linux/version.h>
  12#include <linux/memblock.h>
  13#include <linux/etherdevice.h>
  14#include <linux/ethtool.h>
  15#include <linux/inetdevice.h>
  16#include <linux/init.h>
  17#include <linux/list.h>
  18#include <linux/netdevice.h>
  19#include <linux/platform_device.h>
  20#include <linux/rtnetlink.h>
  21#include <linux/skbuff.h>
  22#include <linux/slab.h>
  23#include <linux/interrupt.h>
  24#include <init.h>
  25#include <irq_kern.h>
  26#include <irq_user.h>
  27#include <net_kern.h>
  28#include <os.h>
  29#include "mconsole_kern.h"
  30#include "vector_user.h"
  31#include "vector_kern.h"
  32
  33/*
  34 * Adapted from network devices with the following major changes:
  35 * All transports are static - simplifies the code significantly
  36 * Multiple FDs/IRQs per device
  37 * Vector IO optionally used for read/write, falling back to legacy
  38 * based on configuration and/or availability
  39 * Configuration is no longer positional - L2TPv3 and GRE require up to
  40 * 10 parameters; passing these positionally is not fit for purpose.
  41 * Only socket transports are supported
  42 */
  43
  44
  45#define DRIVER_NAME "uml-vector"
  46#define DRIVER_VERSION "01"
  47struct vector_cmd_line_arg {
  48	struct list_head list;
  49	int unit;
  50	char *arguments;
  51};
  52
  53struct vector_device {
  54	struct list_head list;
  55	struct net_device *dev;
  56	struct platform_device pdev;
  57	int unit;
  58	int opened;
  59};
  60
  61static LIST_HEAD(vec_cmd_line);
  62
  63static DEFINE_SPINLOCK(vector_devices_lock);
  64static LIST_HEAD(vector_devices);
  65
  66static int driver_registered;
  67
  68static void vector_eth_configure(int n, struct arglist *def);
  69
  70/* Argument accessors to set variables (and/or set default values):
  71 * mtu, buffer sizing, default headroom, etc.
  72 */
  73
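/* Sizing constants used below: SAFETY_MARGIN is extra slack added on top
 * of mtu + headroom when sizing receive buffers, TX_SMALL_PACKET is the
 * cut-off below which a transmit kicks the queue immediately instead of
 * deferring to the TX tasklet, MAX_IOV_SIZE bounds the number of iovec
 * entries per packet (linear area plus page fragments) and
 * MAX_ITERATIONS caps the RX loop in vector_rx().
 */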
  74#define DEFAULT_HEADROOM 2
  75#define SAFETY_MARGIN 32
  76#define DEFAULT_VECTOR_SIZE 64
  77#define TX_SMALL_PACKET 128
  78#define MAX_IOV_SIZE (MAX_SKB_FRAGS + 1)
  79#define MAX_ITERATIONS 64
  80
  81static const struct {
  82	const char string[ETH_GSTRING_LEN];
  83} ethtool_stats_keys[] = {
  84	{ "rx_queue_max" },
  85	{ "rx_queue_running_average" },
  86	{ "tx_queue_max" },
  87	{ "tx_queue_running_average" },
  88	{ "rx_encaps_errors" },
  89	{ "tx_timeout_count" },
  90	{ "tx_restart_queue" },
  91	{ "tx_kicks" },
  92	{ "tx_flow_control_xon" },
  93	{ "tx_flow_control_xoff" },
  94	{ "rx_csum_offload_good" },
  95	{ "rx_csum_offload_errors"},
  96	{ "sg_ok"},
  97	{ "sg_linearized"},
  98};
  99
 100#define VECTOR_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
 101
 102static void vector_reset_stats(struct vector_private *vp)
 103{
 104	vp->estats.rx_queue_max = 0;
 105	vp->estats.rx_queue_running_average = 0;
 106	vp->estats.tx_queue_max = 0;
 107	vp->estats.tx_queue_running_average = 0;
 108	vp->estats.rx_encaps_errors = 0;
 109	vp->estats.tx_timeout_count = 0;
 110	vp->estats.tx_restart_queue = 0;
 111	vp->estats.tx_kicks = 0;
 112	vp->estats.tx_flow_control_xon = 0;
 113	vp->estats.tx_flow_control_xoff = 0;
 114	vp->estats.sg_ok = 0;
 115	vp->estats.sg_linearized = 0;
 116}
 117
 118static int get_mtu(struct arglist *def)
 119{
 120	char *mtu = uml_vector_fetch_arg(def, "mtu");
 121	long result;
 122
 123	if (mtu != NULL) {
 124		if (kstrtoul(mtu, 10, &result) == 0)
 125			if ((result < (1 << 16) - 1) && (result >= 576))
 126				return result;
 127	}
 128	return ETH_MAX_PACKET;
 129}
 130
 131static int get_depth(struct arglist *def)
 132{
 133	char *depth = uml_vector_fetch_arg(def, "depth");
 134	long result;
 135
 136	if (depth != NULL) {
 137		if (kstrtoul(depth, 10, &result) == 0)
 138			return result;
 139	}
 140	return DEFAULT_VECTOR_SIZE;
 141}
 142
 143static int get_headroom(struct arglist *def)
 144{
 145	char *headroom = uml_vector_fetch_arg(def, "headroom");
 146	long result;
 147
 148	if (headroom != NULL) {
 149		if (kstrtoul(headroom, 10, &result) == 0)
 150			return result;
 151	}
 152	return DEFAULT_HEADROOM;
 153}
 154
 155static int get_req_size(struct arglist *def)
 156{
 157	char *gro = uml_vector_fetch_arg(def, "gro");
 158	long result;
 159
 160	if (gro != NULL) {
 161		if (kstrtoul(gro, 10, &result) == 0) {
 162			if (result > 0)
 163				return 65536;
 164		}
 165	}
 166	return get_mtu(def) + ETH_HEADER_OTHER +
 167		get_headroom(def) + SAFETY_MARGIN;
 168}
 169
 170
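/* Translate the "transport" and "vec" arguments into VECTOR_* option
 * flags: tap is always packet-at-a-time, hybrid gets vectored RX plus a
 * BPF filter, raw gets vectored RX/TX with qdisc bypass, and any other
 * transport gets plain vectored RX/TX. Passing vec=0 disables vectoring
 * for the non-tap cases.
 */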
 171static int get_transport_options(struct arglist *def)
 172{
 173	char *transport = uml_vector_fetch_arg(def, "transport");
 174	char *vector = uml_vector_fetch_arg(def, "vec");
 175
 176	int vec_rx = VECTOR_RX;
 177	int vec_tx = VECTOR_TX;
 178	long parsed;
 179
 180	if (vector != NULL) {
 181		if (kstrtoul(vector, 10, &parsed) == 0) {
 182			if (parsed == 0) {
 183				vec_rx = 0;
 184				vec_tx = 0;
 185			}
 186		}
 187	}
 188
 189
 190	if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
 191		return 0;
 192	if (strncmp(transport, TRANS_HYBRID, TRANS_HYBRID_LEN) == 0)
 193		return (vec_rx | VECTOR_BPF);
 194	if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
 195		return (vec_rx | vec_tx | VECTOR_QDISC_BYPASS);
 196	return (vec_rx | vec_tx);
 197}
 198
 199
 200/* A mini-buffer for packet drop read
 201 * All of our supported transports are datagram oriented and we always
 202 * read using recvmsg or recvmmsg. If we pass a buffer which is smaller
 203 * than the packet size it still counts as a full packet read and will
 204 * clean the incoming stream to keep sigio/epoll happy
 205 */
 206
 207#define DROP_BUFFER_SIZE 32
 208
 209static char *drop_buffer;
 210
 211/* Array backed queues optimized for bulk enqueue/dequeue and
 212 * 1:N (small values of N) or 1:1 enqueuer/dequeuer ratios.
 213 * For more details and full design rationale see
 214 * http://foswiki.cambridgegreys.com/Main/EatYourTailAndEnjoyIt
 215 */
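
/* The ring is described by head (next slot to dequeue), tail (next slot
 * to enqueue) and queue_depth. Enqueuers advance the tail under the
 * tail_lock, dequeuers advance the head under the head_lock, and each
 * side briefly takes the opposite lock to update queue_depth so both
 * see a consistent fill level.
 */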
 216
 217
 218/*
 219 * Advance the mmsg queue head by n = advance. Resets the queue to
 220 * maximum enqueue/dequeue-at-once capacity if possible. Called by
 221 * dequeuers. Caller must hold the head_lock!
 222 */
 223
 224static int vector_advancehead(struct vector_queue *qi, int advance)
 225{
 226	int queue_depth;
 227
 228	qi->head =
 229		(qi->head + advance)
 230			% qi->max_depth;
 231
 232
 233	spin_lock(&qi->tail_lock);
 234	qi->queue_depth -= advance;
 235
 236	/* we are at 0, use this to
 237	 * reset head and tail so we can use max size vectors
 238	 */
 239
 240	if (qi->queue_depth == 0) {
 241		qi->head = 0;
 242		qi->tail = 0;
 243	}
 244	queue_depth = qi->queue_depth;
 245	spin_unlock(&qi->tail_lock);
 246	return queue_depth;
 247}
 248
 249/*	Advance the queue tail by n = advance.
 250 *	This is called by enqueuers which should hold the
 251 *	head lock already
 252 */
 253
 254static int vector_advancetail(struct vector_queue *qi, int advance)
 255{
 256	int queue_depth;
 257
 258	qi->tail =
 259		(qi->tail + advance)
 260			% qi->max_depth;
 261	spin_lock(&qi->head_lock);
 262	qi->queue_depth += advance;
 263	queue_depth = qi->queue_depth;
 264	spin_unlock(&qi->head_lock);
 265	return queue_depth;
 266}
 267
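/* Build the scatter/gather iovec for one skb: an optional transport
 * header slot, the linear data area, and one entry per page fragment
 * (linearizing first if there are too many fragments). Returns the
 * number of iovec entries used, or -1 if linearization fails.
 */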
 268static int prep_msg(struct vector_private *vp,
 269	struct sk_buff *skb,
 270	struct iovec *iov)
 271{
 272	int iov_index = 0;
 273	int nr_frags, frag;
 274	skb_frag_t *skb_frag;
 275
 276	nr_frags = skb_shinfo(skb)->nr_frags;
 277	if (nr_frags > MAX_IOV_SIZE) {
 278		if (skb_linearize(skb) != 0)
 279			goto drop;
 280	}
 281	if (vp->header_size > 0) {
 282		iov[iov_index].iov_len = vp->header_size;
 283		vp->form_header(iov[iov_index].iov_base, skb, vp);
 284		iov_index++;
 285	}
 286	iov[iov_index].iov_base = skb->data;
 287	if (nr_frags > 0) {
 288		iov[iov_index].iov_len = skb->len - skb->data_len;
 289		vp->estats.sg_ok++;
 290	} else
 291		iov[iov_index].iov_len = skb->len;
 292	iov_index++;
 293	for (frag = 0; frag < nr_frags; frag++) {
 294		skb_frag = &skb_shinfo(skb)->frags[frag];
 295		iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
 296		iov[iov_index].iov_len = skb_frag_size(skb_frag);
 297		iov_index++;
 298	}
 299	return iov_index;
 300drop:
 301	return -1;
 302}
 303/*
 304 * Generic vector enqueue with support for forming headers using transport
 305 * specific callback. Allows GRE, L2TPv3, RAW and other transports
 306 * to use a common enqueue procedure in vector mode
 307 */
 308
 309static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb)
 310{
 311	struct vector_private *vp = netdev_priv(qi->dev);
 312	int queue_depth;
 313	int packet_len;
 314	struct mmsghdr *mmsg_vector = qi->mmsg_vector;
 315	int iov_count;
 316
 317	spin_lock(&qi->tail_lock);
 318	spin_lock(&qi->head_lock);
 319	queue_depth = qi->queue_depth;
 320	spin_unlock(&qi->head_lock);
 321
 322	if (skb)
 323		packet_len = skb->len;
 324
 325	if (queue_depth < qi->max_depth) {
 326
 327		*(qi->skbuff_vector + qi->tail) = skb;
 328		mmsg_vector += qi->tail;
 329		iov_count = prep_msg(
 330			vp,
 331			skb,
 332			mmsg_vector->msg_hdr.msg_iov
 333		);
 334		if (iov_count < 1)
 335			goto drop;
 336		mmsg_vector->msg_hdr.msg_iovlen = iov_count;
 337		mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr;
 338		mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size;
 339		queue_depth = vector_advancetail(qi, 1);
 340	} else
 341		goto drop;
 342	spin_unlock(&qi->tail_lock);
 343	return queue_depth;
 344drop:
 345	qi->dev->stats.tx_dropped++;
 346	if (skb != NULL) {
 347		packet_len = skb->len;
 348		dev_consume_skb_any(skb);
 349		netdev_completed_queue(qi->dev, 1, packet_len);
 350	}
 351	spin_unlock(&qi->tail_lock);
 352	return queue_depth;
 353}
 354
 355static int consume_vector_skbs(struct vector_queue *qi, int count)
 356{
 357	struct sk_buff *skb;
 358	int skb_index;
 359	int bytes_compl = 0;
 360
 361	for (skb_index = qi->head; skb_index < qi->head + count; skb_index++) {
 362		skb = *(qi->skbuff_vector + skb_index);
 363		/* mark as empty to ensure correct destruction if
 364		 * needed
 365		 */
 366		bytes_compl += skb->len;
 367		*(qi->skbuff_vector + skb_index) = NULL;
 368		dev_consume_skb_any(skb);
 369	}
 370	qi->dev->stats.tx_bytes += bytes_compl;
 371	qi->dev->stats.tx_packets += count;
 372	netdev_completed_queue(qi->dev, count, bytes_compl);
 373	return vector_advancehead(qi, count);
 374}
 375
 376/*
 377 * Generic vector dequeue via sendmmsg with support for forming headers
 378 * using transport specific callback. Allows GRE, L2TPv3, RAW and
 379 * other transports to use a common dequeue procedure in vector mode
 380 */
 381
 382
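/* Drain the TX queue via sendmmsg. The locks are taken with trylock so a
 * contending sender backs off instead of spinning (rescheduling the TX
 * tasklet if the head lock is busy), and each batch is capped at the
 * distance from head to the end of the ring to handle wraparound.
 */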
 383static int vector_send(struct vector_queue *qi)
 384{
 385	struct vector_private *vp = netdev_priv(qi->dev);
 386	struct mmsghdr *send_from;
 387	int result = 0, send_len, queue_depth = qi->max_depth;
 388
 389	if (spin_trylock(&qi->head_lock)) {
 390		if (spin_trylock(&qi->tail_lock)) {
 391			/* update queue_depth to current value */
 392			queue_depth = qi->queue_depth;
 393			spin_unlock(&qi->tail_lock);
 394			while (queue_depth > 0) {
 395				/* Calculate the start of the vector */
 396				send_len = queue_depth;
 397				send_from = qi->mmsg_vector;
 398				send_from += qi->head;
 399				/* Adjust vector size if wraparound */
 400				if (send_len + qi->head > qi->max_depth)
 401					send_len = qi->max_depth - qi->head;
 402				/* Try to TX as many packets as possible */
 403				if (send_len > 0) {
 404					result = uml_vector_sendmmsg(
 405						 vp->fds->tx_fd,
 406						 send_from,
 407						 send_len,
 408						 0
 409					);
 410					vp->in_write_poll =
 411						(result != send_len);
 412				}
 413				/* For some of the sendmmsg error scenarios
 414				 * we may end up being unsure of the TX success
 415				 * of all packets. It is safer to declare
 416				 * them all TX-ed and blame the network.
 417				 */
 418				if (result < 0) {
 419					if (net_ratelimit())
 420						netdev_err(vp->dev, "sendmmsg err=%i\n",
 421							result);
 422					vp->in_error = true;
 423					result = send_len;
 424				}
 425				if (result > 0) {
 426					queue_depth =
 427						consume_vector_skbs(qi, result);
 428					/* This is equivalent to a TX IRQ.
 429					 * Restart the upper layers to feed us
 430					 * more packets.
 431					 */
 432					if (result > vp->estats.tx_queue_max)
 433						vp->estats.tx_queue_max = result;
 434					vp->estats.tx_queue_running_average =
 435						(vp->estats.tx_queue_running_average + result) >> 1;
 436				}
 437				netif_trans_update(qi->dev);
 438				netif_wake_queue(qi->dev);
 439				/* if TX is busy, break out of the send loop;
 440				 * the write poll IRQ will reschedule xmit for us
 441				 */
 442				if (result != send_len) {
 443					vp->estats.tx_restart_queue++;
 444					break;
 445				}
 446			}
 447		}
 448		spin_unlock(&qi->head_lock);
 449	} else {
 450		tasklet_schedule(&vp->tx_poll);
 451	}
 452	return queue_depth;
 453}
 454
 455/* Queue destructor. Deliberately stateless so we can use
 456 * it in queue cleanup if initialization fails.
 457 */
 458
 459static void destroy_queue(struct vector_queue *qi)
 460{
 461	int i;
 462	struct iovec *iov;
 463	struct vector_private *vp = netdev_priv(qi->dev);
 464	struct mmsghdr *mmsg_vector;
 465
 466	if (qi == NULL)
 467		return;
 468	/* deallocate any skbuffs - we rely on any unused ones being
 469	 * set to NULL.
 470	 */
 471	if (qi->skbuff_vector != NULL) {
 472		for (i = 0; i < qi->max_depth; i++) {
 473			if (*(qi->skbuff_vector + i) != NULL)
 474				dev_kfree_skb_any(*(qi->skbuff_vector + i));
 475		}
 476		kfree(qi->skbuff_vector);
 477	}
 478	/* deallocate matching IOV structures including header buffs */
 479	if (qi->mmsg_vector != NULL) {
 480		mmsg_vector = qi->mmsg_vector;
 481		for (i = 0; i < qi->max_depth; i++) {
 482			iov = mmsg_vector->msg_hdr.msg_iov;
 483			if (iov != NULL) {
 484				if ((vp->header_size > 0) &&
 485					(iov->iov_base != NULL))
 486					kfree(iov->iov_base);
 487				kfree(iov);
 488			}
 489			mmsg_vector++;
 490		}
 491		kfree(qi->mmsg_vector);
 492	}
 493	kfree(qi);
 494}
 495
 496/*
 497 * Queue constructor. Create a queue with a given size.
 498 */
 499static struct vector_queue *create_queue(
 500	struct vector_private *vp,
 501	int max_size,
 502	int header_size,
 503	int num_extra_frags)
 504{
 505	struct vector_queue *result;
 506	int i;
 507	struct iovec *iov;
 508	struct mmsghdr *mmsg_vector;
 509
 510	result = kmalloc(sizeof(struct vector_queue), GFP_KERNEL);
 511	if (result == NULL)
 512		return NULL;
 513	result->max_depth = max_size;
 514	result->dev = vp->dev;
 515	result->mmsg_vector = kmalloc(
 516		(sizeof(struct mmsghdr) * max_size), GFP_KERNEL);
 517	if (result->mmsg_vector == NULL)
 518		goto out_mmsg_fail;
 519	result->skbuff_vector = kmalloc(
 520		(sizeof(void *) * max_size), GFP_KERNEL);
 521	if (result->skbuff_vector == NULL)
 522		goto out_skb_fail;
 523
 524	/* further failures can be handled safely by destroy_queue */
 525
 526	mmsg_vector = result->mmsg_vector;
 527	for (i = 0; i < max_size; i++) {
 528		/* Clear all pointers - we use non-NULL as a marker for
 529		 * what to free on destruction
 530		 */
 531		*(result->skbuff_vector + i) = NULL;
 532		mmsg_vector->msg_hdr.msg_iov = NULL;
 533		mmsg_vector++;
 534	}
 535	mmsg_vector = result->mmsg_vector;
 536	result->max_iov_frags = num_extra_frags;
 537	for (i = 0; i < max_size; i++) {
 538		if (vp->header_size > 0)
 539			iov = kmalloc_array(3 + num_extra_frags,
 540					    sizeof(struct iovec),
 541					    GFP_KERNEL
 542			);
 543		else
 544			iov = kmalloc_array(2 + num_extra_frags,
 545					    sizeof(struct iovec),
 546					    GFP_KERNEL
 547			);
 548		if (iov == NULL)
 549			goto out_fail;
 550		mmsg_vector->msg_hdr.msg_iov = iov;
 551		mmsg_vector->msg_hdr.msg_iovlen = 1;
 552		mmsg_vector->msg_hdr.msg_control = NULL;
 553		mmsg_vector->msg_hdr.msg_controllen = 0;
 554		mmsg_vector->msg_hdr.msg_flags = MSG_DONTWAIT;
 555		mmsg_vector->msg_hdr.msg_name = NULL;
 556		mmsg_vector->msg_hdr.msg_namelen = 0;
 557		if (vp->header_size > 0) {
 558			iov->iov_base = kmalloc(header_size, GFP_KERNEL);
 559			if (iov->iov_base == NULL)
 560				goto out_fail;
 561			iov->iov_len = header_size;
 562			mmsg_vector->msg_hdr.msg_iovlen = 2;
 563			iov++;
 564		}
 565		iov->iov_base = NULL;
 566		iov->iov_len = 0;
 567		mmsg_vector++;
 568	}
 569	spin_lock_init(&result->head_lock);
 570	spin_lock_init(&result->tail_lock);
 571	result->queue_depth = 0;
 572	result->head = 0;
 573	result->tail = 0;
 574	return result;
 575out_skb_fail:
 576	kfree(result->mmsg_vector);
 577out_mmsg_fail:
 578	kfree(result);
 579	return NULL;
 580out_fail:
 581	destroy_queue(result);
 582	return NULL;
 583}
 584
 585/*
 586 * We do not use the RX queue as a proper wraparound queue for now.
 587 * This is not necessary because the consumption via netif_rx()
 588 * happens in-line. While we could try using the return code of
 589 * netif_rx() for flow control, there are no drivers doing this today.
 590 * For this RX specific use we ignore the tail/head locks and
 591 * just read into a prepared queue filled with skbuffs.
 592 */
 593
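/* Allocate an skb sized for vp->req_size (linear area plus page
 * fragments) and describe it in the caller's msghdr iovec so
 * recvmsg/recvmmsg can write straight into it. On allocation failure the
 * iovec is left as NULL/0, so the read still consumes (and drops) a
 * packet.
 */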
 594static struct sk_buff *prep_skb(
 595	struct vector_private *vp,
 596	struct user_msghdr *msg)
 597{
 598	int linear = vp->max_packet + vp->headroom + SAFETY_MARGIN;
 599	struct sk_buff *result;
 600	int iov_index = 0, len;
 601	struct iovec *iov = msg->msg_iov;
 602	int err, nr_frags, frag;
 603	skb_frag_t *skb_frag;
 604
 605	if (vp->req_size <= linear)
 606		len = linear;
 607	else
 608		len = vp->req_size;
 609	result = alloc_skb_with_frags(
 610		linear,
 611		len - vp->max_packet,
 612		3,
 613		&err,
 614		GFP_ATOMIC
 615	);
 616	if (vp->header_size > 0)
 617		iov_index++;
 618	if (result == NULL) {
 619		iov[iov_index].iov_base = NULL;
 620		iov[iov_index].iov_len = 0;
 621		goto done;
 622	}
 623	skb_reserve(result, vp->headroom);
 624	result->dev = vp->dev;
 625	skb_put(result, vp->max_packet);
 626	result->data_len = len - vp->max_packet;
 627	result->len += len - vp->max_packet;
 628	skb_reset_mac_header(result);
 629	result->ip_summed = CHECKSUM_NONE;
 630	iov[iov_index].iov_base = result->data;
 631	iov[iov_index].iov_len = vp->max_packet;
 632	iov_index++;
 633
 634	nr_frags = skb_shinfo(result)->nr_frags;
 635	for (frag = 0; frag < nr_frags; frag++) {
 636		skb_frag = &skb_shinfo(result)->frags[frag];
 637		iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
 638		if (iov[iov_index].iov_base != NULL)
 639			iov[iov_index].iov_len = skb_frag_size(skb_frag);
 640		else
 641			iov[iov_index].iov_len = 0;
 642		iov_index++;
 643	}
 644done:
 645	msg->msg_iovlen = iov_index;
 646	return result;
 647}
 648
 649
 650/* Prepare queue for recvmmsg one-shot rx - fill with fresh sk_buffs*/
 651
 652static void prep_queue_for_rx(struct vector_queue *qi)
 653{
 654	struct vector_private *vp = netdev_priv(qi->dev);
 655	struct mmsghdr *mmsg_vector = qi->mmsg_vector;
 656	void **skbuff_vector = qi->skbuff_vector;
 657	int i;
 658
 659	if (qi->queue_depth == 0)
 660		return;
 661	for (i = 0; i < qi->queue_depth; i++) {
 662		/* it is OK if allocation fails - recvmmsg with NULL data in
 663		 * iov argument still performs an RX, it just drops the packet.
 664		 * This allows us to stop faffing around with a "drop buffer"
 665		 */
 666
 667		*skbuff_vector = prep_skb(vp, &mmsg_vector->msg_hdr);
 668		skbuff_vector++;
 669		mmsg_vector++;
 670	}
 671	qi->queue_depth = 0;
 672}
 673
 674static struct vector_device *find_device(int n)
 675{
 676	struct vector_device *device;
 677	struct list_head *ele;
 678
 679	spin_lock(&vector_devices_lock);
 680	list_for_each(ele, &vector_devices) {
 681		device = list_entry(ele, struct vector_device, list);
 682		if (device->unit == n)
 683			goto out;
 684	}
 685	device = NULL;
 686 out:
 687	spin_unlock(&vector_devices_lock);
 688	return device;
 689}
 690
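/* Parse a "<unit>:<options>" specification (from the kernel command line
 * or mconsole), returning the unit number and a pointer to the option
 * string that follows the ':'.
 */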
 691static int vector_parse(char *str, int *index_out, char **str_out,
 692			char **error_out)
 693{
 694	int n, len, err;
 695	char *start = str;
 696
 697	len = strlen(str);
 698
 699	while ((*str != ':') && (strlen(str) > 1))
 700		str++;
 701	if (*str != ':') {
 702		*error_out = "Expected ':' after device number";
 703		return -EINVAL;
 704	}
 705	*str = '\0';
 706
 707	err = kstrtouint(start, 0, &n);
 708	if (err < 0) {
 709		*error_out = "Bad device number";
 710		return err;
 711	}
 712
 713	str++;
 714	if (find_device(n)) {
 715		*error_out = "Device already configured";
 716		return -EINVAL;
 717	}
 718
 719	*index_out = n;
 720	*str_out = str;
 721	return 0;
 722}
 723
 724static int vector_config(char *str, char **error_out)
 725{
 726	int err, n;
 727	char *params;
 728	struct arglist *parsed;
 729
 730	err = vector_parse(str, &n, &params, error_out);
 731	if (err != 0)
 732		return err;
 733
 734	/* This string is broken up and the pieces used by the underlying
 735	 * driver. We should copy it to make sure things do not go wrong
 736	 * later.
 737	 */
 738
 739	params = kstrdup(params, GFP_KERNEL);
 740	if (params == NULL) {
 741		*error_out = "vector_config failed to strdup string";
 742		return -ENOMEM;
 743	}
 744
 745	parsed = uml_parse_vector_ifspec(params);
 746
 747	if (parsed == NULL) {
 748		*error_out = "vector_config failed to parse parameters";
 749		return -EINVAL;
 750	}
 751
 752	vector_eth_configure(n, parsed);
 753	return 0;
 754}
 755
 756static int vector_id(char **str, int *start_out, int *end_out)
 757{
 758	char *end;
 759	int n;
 760
 761	n = simple_strtoul(*str, &end, 0);
 762	if ((*end != '\0') || (end == *str))
 763		return -1;
 764
 765	*start_out = n;
 766	*end_out = n;
 767	*str = end;
 768	return n;
 769}
 770
 771static int vector_remove(int n, char **error_out)
 772{
 773	struct vector_device *vec_d;
 774	struct net_device *dev;
 775	struct vector_private *vp;
 776
 777	vec_d = find_device(n);
 778	if (vec_d == NULL)
 779		return -ENODEV;
 780	dev = vec_d->dev;
 781	vp = netdev_priv(dev);
 782	if (vp->fds != NULL)
 783		return -EBUSY;
 784	unregister_netdev(dev);
 785	platform_device_unregister(&vec_d->pdev);
 786	return 0;
 787}
 788
 789/*
 790 * There is no shared per-transport initialization code, so
 791 * we will just initialize each interface one by one and
 792 * add them to a list
 793 */
 794
 795static struct platform_driver uml_net_driver = {
 796	.driver = {
 797		.name = DRIVER_NAME,
 798	},
 799};
 800
 801
 802static void vector_device_release(struct device *dev)
 803{
 804	struct vector_device *device = dev_get_drvdata(dev);
 805	struct net_device *netdev = device->dev;
 806
 807	list_del(&device->list);
 808	kfree(device);
 809	free_netdev(netdev);
 810}
 811
 812/* Bog standard recv using recvmsg - not used normally unless the user
 813 * explicitly specifies not to use recvmmsg vector RX.
 814 */
 815
 816static int vector_legacy_rx(struct vector_private *vp)
 817{
 818	int pkt_len;
 819	struct user_msghdr hdr;
 820	struct iovec iov[2 + MAX_IOV_SIZE]; /* header + data use case only */
 821	int iovpos = 0;
 822	struct sk_buff *skb;
 823	int header_check;
 824
 825	hdr.msg_name = NULL;
 826	hdr.msg_namelen = 0;
 827	hdr.msg_iov = (struct iovec *) &iov;
 828	hdr.msg_control = NULL;
 829	hdr.msg_controllen = 0;
 830	hdr.msg_flags = 0;
 831
 832	if (vp->header_size > 0) {
 833		iov[0].iov_base = vp->header_rxbuffer;
 834		iov[0].iov_len = vp->header_size;
 835	}
 836
 837	skb = prep_skb(vp, &hdr);
 838
 839	if (skb == NULL) {
 840		/* Read a packet into drop_buffer and don't do
 841		 * anything with it.
 842		 */
 843		iov[iovpos].iov_base = drop_buffer;
 844		iov[iovpos].iov_len = DROP_BUFFER_SIZE;
 845		hdr.msg_iovlen = 1;
 846		vp->dev->stats.rx_dropped++;
 847	}
 848
 849	pkt_len = uml_vector_recvmsg(vp->fds->rx_fd, &hdr, 0);
 850	if (pkt_len < 0) {
 851		vp->in_error = true;
 852		return pkt_len;
 853	}
 854
 855	if (skb != NULL) {
 856		if (pkt_len > vp->header_size) {
 857			if (vp->header_size > 0) {
 858				header_check = vp->verify_header(
 859					vp->header_rxbuffer, skb, vp);
 860				if (header_check < 0) {
 861					dev_kfree_skb_irq(skb);
 862					vp->dev->stats.rx_dropped++;
 863					vp->estats.rx_encaps_errors++;
 864					return 0;
 865				}
 866				if (header_check > 0) {
 867					vp->estats.rx_csum_offload_good++;
 868					skb->ip_summed = CHECKSUM_UNNECESSARY;
 869				}
 870			}
 871			pskb_trim(skb, pkt_len - vp->rx_header_size);
 872			skb->protocol = eth_type_trans(skb, skb->dev);
 873			vp->dev->stats.rx_bytes += skb->len;
 874			vp->dev->stats.rx_packets++;
 875			netif_rx(skb);
 876		} else {
 877			dev_kfree_skb_irq(skb);
 878		}
 879	}
 880	return pkt_len;
 881}
 882
 883/*
 884 * Packet at a time TX which falls back to vector TX if the
 885 * underlying transport is busy.
 886 */
 887
 888
 889
 890static int writev_tx(struct vector_private *vp, struct sk_buff *skb)
 891{
 892	struct iovec iov[3 + MAX_IOV_SIZE];
 893	int iov_count, pkt_len = 0;
 894
 895	iov[0].iov_base = vp->header_txbuffer;
 896	iov_count = prep_msg(vp, skb, (struct iovec *) &iov);
 897
 898	if (iov_count < 1)
 899		goto drop;
 900
 901	pkt_len = uml_vector_writev(
 902		vp->fds->tx_fd,
 903		(struct iovec *) &iov,
 904		iov_count
 905	);
 906
 907	if (pkt_len < 0)
 908		goto drop;
 909
 910	netif_trans_update(vp->dev);
 911	netif_wake_queue(vp->dev);
 912
 913	if (pkt_len > 0) {
 914		vp->dev->stats.tx_bytes += skb->len;
 915		vp->dev->stats.tx_packets++;
 916	} else {
 917		vp->dev->stats.tx_dropped++;
 918	}
 919	consume_skb(skb);
 920	return pkt_len;
 921drop:
 922	vp->dev->stats.tx_dropped++;
 923	consume_skb(skb);
 924	if (pkt_len < 0)
 925		vp->in_error = true;
 926	return pkt_len;
 927}
 928
 929/*
 930 * Receive as many messages as we can in one call using the special
 931 * mmsg vector matched to an skb vector which we prepared earlier.
 932 */
 933
 934static int vector_mmsg_rx(struct vector_private *vp)
 935{
 936	int packet_count, i;
 937	struct vector_queue *qi = vp->rx_queue;
 938	struct sk_buff *skb;
 939	struct mmsghdr *mmsg_vector = qi->mmsg_vector;
 940	void **skbuff_vector = qi->skbuff_vector;
 941	int header_check;
 942
 943	/* Refresh the vector and make sure it is filled with new skbs and
 944	 * the iovs are updated to point to them.
 945	 */
 946
 947	prep_queue_for_rx(qi);
 948
 949	/* Fire the Lazy Gun - get as many packets as we can in one go. */
 950
 951	packet_count = uml_vector_recvmmsg(
 952		vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0);
 953
 954	if (packet_count < 0)
 955		vp->in_error = true;
 956
 957	if (packet_count <= 0)
 958		return packet_count;
 959
 960	/* We treat packet processing as enqueue and buffer refresh as dequeue.
 961	 * The queue_depth tells us how many buffers have been used and how
 962	 * many we need to prep the next time prep_queue_for_rx() is called.
 963	 */
 964
 965	qi->queue_depth = packet_count;
 966
 967	for (i = 0; i < packet_count; i++) {
 968		skb = (*skbuff_vector);
 969		if (mmsg_vector->msg_len > vp->header_size) {
 970			if (vp->header_size > 0) {
 971				header_check = vp->verify_header(
 972					mmsg_vector->msg_hdr.msg_iov->iov_base,
 973					skb,
 974					vp
 975				);
 976				if (header_check < 0) {
 977				/* Overlay header failed to verify - discard.
 978				 * We can actually keep this skb and reuse it,
 979				 * but that will make the prep logic too
 980				 * complex.
 981				 */
 982					dev_kfree_skb_irq(skb);
 983					vp->estats.rx_encaps_errors++;
 984					continue;
 985				}
 986				if (header_check > 0) {
 987					vp->estats.rx_csum_offload_good++;
 988					skb->ip_summed = CHECKSUM_UNNECESSARY;
 989				}
 990			}
 991			pskb_trim(skb,
 992				mmsg_vector->msg_len - vp->rx_header_size);
 993			skb->protocol = eth_type_trans(skb, skb->dev);
 994			/*
 995			 * We do not need to lock on updating stats here
 996			 * The interrupt loop is non-reentrant.
 997			 */
 998			vp->dev->stats.rx_bytes += skb->len;
 999			vp->dev->stats.rx_packets++;
1000			netif_rx(skb);
1001		} else {
1002			/* Overlay header too short to do anything - discard.
1003			 * We can actually keep this skb and reuse it,
1004			 * but that will make the prep logic too complex.
1005			 */
1006			if (skb != NULL)
1007				dev_kfree_skb_irq(skb);
1008		}
1009		(*skbuff_vector) = NULL;
1010		/* Move to the next buffer element */
1011		mmsg_vector++;
1012		skbuff_vector++;
1013	}
1014	if (packet_count > 0) {
1015		if (vp->estats.rx_queue_max < packet_count)
1016			vp->estats.rx_queue_max = packet_count;
1017		vp->estats.rx_queue_running_average =
1018			(vp->estats.rx_queue_running_average + packet_count) >> 1;
1019	}
1020	return packet_count;
1021}
1022
1023static void vector_rx(struct vector_private *vp)
1024{
1025	int err;
1026	int iter = 0;
1027
1028	if ((vp->options & VECTOR_RX) > 0)
1029		while (((err = vector_mmsg_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
1030			iter++;
1031	else
1032		while (((err = vector_legacy_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
1033			iter++;
1034	if ((err != 0) && net_ratelimit())
1035		netdev_err(vp->dev, "vector_rx: error(%d)\n", err);
1036	if (iter == MAX_ITERATIONS)
1037		netdev_err(vp->dev, "vector_rx: device stuck, remote end may have closed the connection\n");
1038}
1039
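/* Transmit entry point. Without VECTOR_TX each packet is written out
 * immediately via writev_tx(). In vector mode packets are enqueued and
 * flushed in batches: right away for small packets or a nearly full
 * queue, otherwise via the coalescing timer (xmit_more) or the TX
 * tasklet.
 */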
1040static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
1041{
1042	struct vector_private *vp = netdev_priv(dev);
1043	int queue_depth = 0;
1044
1045	if (vp->in_error) {
1046		deactivate_fd(vp->fds->rx_fd, vp->rx_irq);
1047		if ((vp->fds->rx_fd != vp->fds->tx_fd) && (vp->tx_irq != 0))
1048			deactivate_fd(vp->fds->tx_fd, vp->tx_irq);
1049		return NETDEV_TX_BUSY;
1050	}
1051
1052	if ((vp->options & VECTOR_TX) == 0) {
1053		writev_tx(vp, skb);
1054		return NETDEV_TX_OK;
1055	}
1056
1057	/* We do BQL only in the vector path; there is no point doing it in
1058	 * packet-at-a-time mode as there is no device queue
1059	 */
1060
1061	netdev_sent_queue(vp->dev, skb->len);
1062	queue_depth = vector_enqueue(vp->tx_queue, skb);
1063
1064	/* if the device queue is full, stop the upper layers and
1065	 * flush it.
1066	 */
1067
1068	if (queue_depth >= vp->tx_queue->max_depth - 1) {
1069		vp->estats.tx_kicks++;
1070		netif_stop_queue(dev);
1071		vector_send(vp->tx_queue);
1072		return NETDEV_TX_OK;
1073	}
1074	if (netdev_xmit_more()) {
1075		mod_timer(&vp->tl, vp->coalesce);
1076		return NETDEV_TX_OK;
1077	}
1078	if (skb->len < TX_SMALL_PACKET) {
1079		vp->estats.tx_kicks++;
1080		vector_send(vp->tx_queue);
1081	} else
1082		tasklet_schedule(&vp->tx_poll);
1083	return NETDEV_TX_OK;
1084}
1085
1086static irqreturn_t vector_rx_interrupt(int irq, void *dev_id)
1087{
1088	struct net_device *dev = dev_id;
1089	struct vector_private *vp = netdev_priv(dev);
1090
1091	if (!netif_running(dev))
1092		return IRQ_NONE;
1093	vector_rx(vp);
1094	return IRQ_HANDLED;
1095
1096}
1097
1098static irqreturn_t vector_tx_interrupt(int irq, void *dev_id)
1099{
1100	struct net_device *dev = dev_id;
1101	struct vector_private *vp = netdev_priv(dev);
1102
1103	if (!netif_running(dev))
1104		return IRQ_NONE;
1105	/* We need to pay attention to it only if we got
1106	 * -EAGAIN or -ENOBUFS from sendmmsg. Otherwise
1107	 * we ignore it. In the future, it may be worth
1108	 * improving the IRQ controller a bit to make
1109	 * tweaking the IRQ mask less costly
1110	 */
1111
1112	if (vp->in_write_poll)
1113		tasklet_schedule(&vp->tx_poll);
1114	return IRQ_HANDLED;
1115
1116}
1117
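/* Round-robin counter used to spread the per-device RX/TX IRQ numbers
 * across the VECTOR_IRQ_SPACE range starting at VECTOR_BASE_IRQ.
 */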
1118static int irq_rr;
1119
1120static int vector_net_close(struct net_device *dev)
1121{
1122	struct vector_private *vp = netdev_priv(dev);
1123	unsigned long flags;
1124
1125	netif_stop_queue(dev);
1126	del_timer(&vp->tl);
1127
1128	if (vp->fds == NULL)
1129		return 0;
1130
1131	/* Disable and free all IRQs */
1132	if (vp->rx_irq > 0) {
1133		um_free_irq(vp->rx_irq, dev);
1134		vp->rx_irq = 0;
1135	}
1136	if (vp->tx_irq > 0) {
1137		um_free_irq(vp->tx_irq, dev);
1138		vp->tx_irq = 0;
1139	}
1140	tasklet_kill(&vp->tx_poll);
1141	if (vp->fds->rx_fd > 0) {
1142		os_close_file(vp->fds->rx_fd);
1143		vp->fds->rx_fd = -1;
1144	}
1145	if (vp->fds->tx_fd > 0) {
1146		os_close_file(vp->fds->tx_fd);
1147		vp->fds->tx_fd = -1;
1148	}
1149	kfree(vp->bpf);
1150	kfree(vp->fds->remote_addr);
1151	kfree(vp->transport_data);
1152	kfree(vp->header_rxbuffer);
1153	kfree(vp->header_txbuffer);
1154	if (vp->rx_queue != NULL)
1155		destroy_queue(vp->rx_queue);
1156	if (vp->tx_queue != NULL)
1157		destroy_queue(vp->tx_queue);
1158	kfree(vp->fds);
1159	vp->fds = NULL;
1160	spin_lock_irqsave(&vp->lock, flags);
1161	vp->opened = false;
1162	vp->in_error = false;
1163	spin_unlock_irqrestore(&vp->lock, flags);
1164	return 0;
1165}
1166
1167/* TX tasklet */
1168
1169static void vector_tx_poll(unsigned long data)
1170{
1171	struct vector_private *vp = (struct vector_private *)data;
1172
1173	vp->estats.tx_kicks++;
1174	vector_send(vp->tx_queue);
1175}
1176static void vector_reset_tx(struct work_struct *work)
1177{
1178	struct vector_private *vp =
1179		container_of(work, struct vector_private, reset_tx);
1180	netdev_reset_queue(vp->dev);
1181	netif_start_queue(vp->dev);
1182	netif_wake_queue(vp->dev);
1183}
1184static int vector_net_open(struct net_device *dev)
1185{
1186	struct vector_private *vp = netdev_priv(dev);
1187	unsigned long flags;
1188	int err = -EINVAL;
1189	struct vector_device *vdevice;
1190
1191	spin_lock_irqsave(&vp->lock, flags);
1192	if (vp->opened) {
1193		spin_unlock_irqrestore(&vp->lock, flags);
1194		return -ENXIO;
1195	}
1196	vp->opened = true;
1197	spin_unlock_irqrestore(&vp->lock, flags);
1198
1199	vp->fds = uml_vector_user_open(vp->unit, vp->parsed);
1200
1201	if (vp->fds == NULL)
1202		goto out_close;
1203
1204	if (build_transport_data(vp) < 0)
1205		goto out_close;
1206
1207	if ((vp->options & VECTOR_RX) > 0) {
1208		vp->rx_queue = create_queue(
1209			vp,
1210			get_depth(vp->parsed),
1211			vp->rx_header_size,
1212			MAX_IOV_SIZE
1213		);
1214		vp->rx_queue->queue_depth = get_depth(vp->parsed);
1215	} else {
1216		vp->header_rxbuffer = kmalloc(
1217			vp->rx_header_size,
1218			GFP_KERNEL
1219		);
1220		if (vp->header_rxbuffer == NULL)
1221			goto out_close;
1222	}
1223	if ((vp->options & VECTOR_TX) > 0) {
1224		vp->tx_queue = create_queue(
1225			vp,
1226			get_depth(vp->parsed),
1227			vp->header_size,
1228			MAX_IOV_SIZE
1229		);
1230	} else {
1231		vp->header_txbuffer = kmalloc(vp->header_size, GFP_KERNEL);
1232		if (vp->header_txbuffer == NULL)
1233			goto out_close;
1234	}
1235
1236	/* READ IRQ */
1237	err = um_request_irq(
1238		irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd,
1239			IRQ_READ, vector_rx_interrupt,
1240			IRQF_SHARED, dev->name, dev);
1241	if (err != 0) {
1242		netdev_err(dev, "vector_open: failed to get rx irq(%d)\n", err);
1243		err = -ENETUNREACH;
1244		goto out_close;
1245	}
1246	vp->rx_irq = irq_rr + VECTOR_BASE_IRQ;
1247	dev->irq = irq_rr + VECTOR_BASE_IRQ;
1248	irq_rr = (irq_rr + 1) % VECTOR_IRQ_SPACE;
1249
1250	/* WRITE IRQ - we need it only if we have vector TX */
1251	if ((vp->options & VECTOR_TX) > 0) {
1252		err = um_request_irq(
1253			irq_rr + VECTOR_BASE_IRQ, vp->fds->tx_fd,
1254				IRQ_WRITE, vector_tx_interrupt,
1255				IRQF_SHARED, dev->name, dev);
1256		if (err != 0) {
1257			netdev_err(dev,
1258				"vector_open: failed to get tx irq(%d)\n", err);
1259			err = -ENETUNREACH;
1260			goto out_close;
1261		}
1262		vp->tx_irq = irq_rr + VECTOR_BASE_IRQ;
1263		irq_rr = (irq_rr + 1) % VECTOR_IRQ_SPACE;
1264	}
1265
1266	if ((vp->options & VECTOR_QDISC_BYPASS) != 0) {
1267		if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd))
1268			vp->options |= VECTOR_BPF;
1269	}
1270	if ((vp->options & VECTOR_BPF) != 0)
1271		vp->bpf = uml_vector_default_bpf(vp->fds->rx_fd, dev->dev_addr);
1272
1273	netif_start_queue(dev);
1274
1275	/* clear buffer - it can happen that the host side of the interface
1276	 * is full when we get here. In this case, new data is never queued,
1277	 * SIGIOs never arrive, and the net never works.
1278	 */
1279
1280	vector_rx(vp);
1281
1282	vector_reset_stats(vp);
1283	vdevice = find_device(vp->unit);
1284	vdevice->opened = 1;
1285
1286	if ((vp->options & VECTOR_TX) != 0)
1287		add_timer(&vp->tl);
1288	return 0;
1289out_close:
1290	vector_net_close(dev);
1291	return err;
1292}
1293
1294
1295static void vector_net_set_multicast_list(struct net_device *dev)
1296{
1297	/* TODO - we can do some BPF games here */
1298	return;
1299}
1300
1301static void vector_net_tx_timeout(struct net_device *dev)
1302{
1303	struct vector_private *vp = netdev_priv(dev);
1304
1305	vp->estats.tx_timeout_count++;
1306	netif_trans_update(dev);
1307	schedule_work(&vp->reset_tx);
1308}
1309
1310static netdev_features_t vector_fix_features(struct net_device *dev,
1311	netdev_features_t features)
1312{
1313	features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
1314	return features;
1315}
1316
1317static int vector_set_features(struct net_device *dev,
1318	netdev_features_t features)
1319{
1320	struct vector_private *vp = netdev_priv(dev);
1321	/* Adjust buffer sizes for GSO/GRO. Unfortunately, there is
1322	 * no way to negotiate it on raw sockets, so we can change
1323	 * only our side.
1324	 */
1325	if (features & NETIF_F_GRO)
1326		/* All new frame buffers will be GRO-sized */
1327		vp->req_size = 65536;
1328	else
1329		/* All new frame buffers will be normal sized */
1330		vp->req_size = vp->max_packet + vp->headroom + SAFETY_MARGIN;
1331	return 0;
1332}
1333
1334#ifdef CONFIG_NET_POLL_CONTROLLER
1335static void vector_net_poll_controller(struct net_device *dev)
1336{
1337	disable_irq(dev->irq);
1338	vector_rx_interrupt(dev->irq, dev);
1339	enable_irq(dev->irq);
1340}
1341#endif
1342
1343static void vector_net_get_drvinfo(struct net_device *dev,
1344				struct ethtool_drvinfo *info)
1345{
1346	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1347	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
1348}
1349
1350static void vector_get_ringparam(struct net_device *netdev,
1351				struct ethtool_ringparam *ring)
1352{
1353	struct vector_private *vp = netdev_priv(netdev);
1354
1355	ring->rx_max_pending = vp->rx_queue->max_depth;
1356	ring->tx_max_pending = vp->tx_queue->max_depth;
1357	ring->rx_pending = vp->rx_queue->max_depth;
1358	ring->tx_pending = vp->tx_queue->max_depth;
1359}
1360
1361static void vector_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
1362{
1363	switch (stringset) {
1364	case ETH_SS_TEST:
1365		*buf = '\0';
1366		break;
1367	case ETH_SS_STATS:
1368		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1369		break;
1370	default:
1371		WARN_ON(1);
1372		break;
1373	}
1374}
1375
1376static int vector_get_sset_count(struct net_device *dev, int sset)
1377{
1378	switch (sset) {
1379	case ETH_SS_TEST:
1380		return 0;
1381	case ETH_SS_STATS:
1382		return VECTOR_NUM_STATS;
1383	default:
1384		return -EOPNOTSUPP;
1385	}
1386}
1387
1388static void vector_get_ethtool_stats(struct net_device *dev,
1389	struct ethtool_stats *estats,
1390	u64 *tmp_stats)
1391{
1392	struct vector_private *vp = netdev_priv(dev);
1393
1394	memcpy(tmp_stats, &vp->estats, sizeof(struct vector_estats));
1395}
1396
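/* The TX coalescing interval is kept internally in jiffies; the ethtool
 * handlers below convert to and from microseconds and clamp the value to
 * at least one jiffy.
 */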
1397static int vector_get_coalesce(struct net_device *netdev,
1398					struct ethtool_coalesce *ec)
1399{
1400	struct vector_private *vp = netdev_priv(netdev);
1401
1402	ec->tx_coalesce_usecs = (vp->coalesce * 1000000) / HZ;
1403	return 0;
1404}
1405
1406static int vector_set_coalesce(struct net_device *netdev,
1407					struct ethtool_coalesce *ec)
1408{
1409	struct vector_private *vp = netdev_priv(netdev);
1410
1411	vp->coalesce = (ec->tx_coalesce_usecs * HZ) / 1000000;
1412	if (vp->coalesce == 0)
1413		vp->coalesce = 1;
1414	return 0;
1415}
1416
1417static const struct ethtool_ops vector_net_ethtool_ops = {
1418	.get_drvinfo	= vector_net_get_drvinfo,
1419	.get_link	= ethtool_op_get_link,
1420	.get_ts_info	= ethtool_op_get_ts_info,
1421	.get_ringparam	= vector_get_ringparam,
1422	.get_strings	= vector_get_strings,
1423	.get_sset_count	= vector_get_sset_count,
1424	.get_ethtool_stats = vector_get_ethtool_stats,
1425	.get_coalesce	= vector_get_coalesce,
1426	.set_coalesce	= vector_set_coalesce,
1427};
1428
1429
1430static const struct net_device_ops vector_netdev_ops = {
1431	.ndo_open		= vector_net_open,
1432	.ndo_stop		= vector_net_close,
1433	.ndo_start_xmit		= vector_net_start_xmit,
1434	.ndo_set_rx_mode	= vector_net_set_multicast_list,
1435	.ndo_tx_timeout		= vector_net_tx_timeout,
1436	.ndo_set_mac_address	= eth_mac_addr,
1437	.ndo_validate_addr	= eth_validate_addr,
1438	.ndo_fix_features	= vector_fix_features,
1439	.ndo_set_features	= vector_set_features,
1440#ifdef CONFIG_NET_POLL_CONTROLLER
1441	.ndo_poll_controller = vector_net_poll_controller,
1442#endif
1443};
1444
1445
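/* Deferred TX kick: fires when xmit_more() postponed the flush in
 * vector_net_start_xmit() and sends whatever is queued.
 */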
1446static void vector_timer_expire(struct timer_list *t)
1447{
1448	struct vector_private *vp = from_timer(vp, t, tl);
1449
1450	vp->estats.tx_kicks++;
1451	vector_send(vp->tx_queue);
1452}
1453
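/* Create and register one vector device: allocate the net_device and its
 * platform_device, fill in the private data from the parsed argument
 * list and register the interface with the network stack.
 */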
1454static void vector_eth_configure(
1455		int n,
1456		struct arglist *def
1457	)
1458{
1459	struct vector_device *device;
1460	struct net_device *dev;
1461	struct vector_private *vp;
1462	int err;
1463
1464	device = kzalloc(sizeof(*device), GFP_KERNEL);
1465	if (device == NULL) {
1466		printk(KERN_ERR "eth_configure failed to allocate struct "
1467				 "vector_device\n");
1468		return;
1469	}
1470	dev = alloc_etherdev(sizeof(struct vector_private));
1471	if (dev == NULL) {
1472		printk(KERN_ERR "eth_configure: failed to allocate struct "
1473				 "net_device for vec%d\n", n);
1474		goto out_free_device;
1475	}
1476
1477	dev->mtu = get_mtu(def);
1478
1479	INIT_LIST_HEAD(&device->list);
1480	device->unit = n;
1481
1482	/* If this name ends up conflicting with an existing registered
1483	 * netdevice, that is OK, register_netdev{,ice}() will notice this
1484	 * and fail.
1485	 */
1486	snprintf(dev->name, sizeof(dev->name), "vec%d", n);
1487	uml_net_setup_etheraddr(dev, uml_vector_fetch_arg(def, "mac"));
1488	vp = netdev_priv(dev);
1489
1490	/* sysfs register */
1491	if (!driver_registered) {
1492		platform_driver_register(&uml_net_driver);
1493		driver_registered = 1;
1494	}
1495	device->pdev.id = n;
1496	device->pdev.name = DRIVER_NAME;
1497	device->pdev.dev.release = vector_device_release;
1498	dev_set_drvdata(&device->pdev.dev, device);
1499	if (platform_device_register(&device->pdev))
1500		goto out_free_netdev;
1501	SET_NETDEV_DEV(dev, &device->pdev.dev);
1502
1503	device->dev = dev;
1504
1505	*vp = ((struct vector_private)
1506		{
1507		.list			= LIST_HEAD_INIT(vp->list),
1508		.dev			= dev,
1509		.unit			= n,
1510		.options		= get_transport_options(def),
1511		.rx_irq			= 0,
1512		.tx_irq			= 0,
1513		.parsed			= def,
1514		.max_packet		= get_mtu(def) + ETH_HEADER_OTHER,
1515		/* TODO - we need to calculate headroom so that the IP header
1516		 * is 16-byte aligned all the time
1517		 */
1518		.headroom		= get_headroom(def),
1519		.form_header		= NULL,
1520		.verify_header		= NULL,
1521		.header_rxbuffer	= NULL,
1522		.header_txbuffer	= NULL,
1523		.header_size		= 0,
1524		.rx_header_size		= 0,
1525		.rexmit_scheduled	= false,
1526		.opened			= false,
1527		.transport_data		= NULL,
1528		.in_write_poll		= false,
1529		.coalesce		= 2,
1530		.req_size		= get_req_size(def),
1531		.in_error		= false
1532		});
1533
1534	dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST);
1535	tasklet_init(&vp->tx_poll, vector_tx_poll, (unsigned long)vp);
1536	INIT_WORK(&vp->reset_tx, vector_reset_tx);
1537
1538	timer_setup(&vp->tl, vector_timer_expire, 0);
1539	spin_lock_init(&vp->lock);
1540
1541	/* FIXME */
1542	dev->netdev_ops = &vector_netdev_ops;
1543	dev->ethtool_ops = &vector_net_ethtool_ops;
1544	dev->watchdog_timeo = (HZ >> 1);
1545	/* primary IRQ - fixme */
1546	dev->irq = 0; /* we will adjust this once opened */
1547
1548	rtnl_lock();
1549	err = register_netdevice(dev);
1550	rtnl_unlock();
1551	if (err)
1552		goto out_undo_user_init;
1553
1554	spin_lock(&vector_devices_lock);
1555	list_add(&device->list, &vector_devices);
1556	spin_unlock(&vector_devices_lock);
1557
1558	return;
1559
1560out_undo_user_init:
1561	return;
1562out_free_netdev:
1563	free_netdev(dev);
1564out_free_device:
1565	kfree(device);
1566}
1567
1568
1569
1570
1571/*
1572 * Invoked late in the init
1573 */
1574
1575static int __init vector_init(void)
1576{
1577	struct list_head *ele;
1578	struct vector_cmd_line_arg *def;
1579	struct arglist *parsed;
1580
1581	list_for_each(ele, &vec_cmd_line) {
1582		def = list_entry(ele, struct vector_cmd_line_arg, list);
1583		parsed = uml_parse_vector_ifspec(def->arguments);
1584		if (parsed != NULL)
1585			vector_eth_configure(def->unit, parsed);
1586	}
1587	return 0;
1588}
1589
1590
1591/* Invoked at initial argument parsing. Only stores the
1592 * arguments until the proper vector_init() runs
1593 * later
1594 */
1595
1596static int __init vector_setup(char *str)
1597{
1598	char *error;
1599	int n, err;
1600	struct vector_cmd_line_arg *new;
1601
1602	err = vector_parse(str, &n, &str, &error);
1603	if (err) {
1604		printk(KERN_ERR "vector_setup - Couldn't parse '%s' : %s\n",
1605				 str, error);
1606		return 1;
1607	}
1608	new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
1609	if (!new)
1610		panic("%s: Failed to allocate %zu bytes\n", __func__,
1611		      sizeof(*new));
1612	INIT_LIST_HEAD(&new->list);
1613	new->unit = n;
1614	new->arguments = str;
1615	list_add_tail(&new->list, &vec_cmd_line);
1616	return 1;
1617}
1618
1619__setup("vec", vector_setup);
1620__uml_help(vector_setup,
1621"vec[0-9]+:<option>=<value>,<option>=<value>\n"
1622"	 Configure a vector io network device.\n\n"
1623);
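
/* A hypothetical example (option values are illustrative only; transports
 * take additional transport-specific options parsed elsewhere):
 *   vec0:transport=tap,mac=52:54:00:12:34:56,mtu=1500,depth=128,gro=1
 */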
1624
1625late_initcall(vector_init);
1626
1627static struct mc_device vector_mc = {
1628	.list		= LIST_HEAD_INIT(vector_mc.list),
1629	.name		= "vec",
1630	.config		= vector_config,
1631	.get_config	= NULL,
1632	.id		= vector_id,
1633	.remove		= vector_remove,
1634};
1635
1636#ifdef CONFIG_INET
1637static int vector_inetaddr_event(
1638	struct notifier_block *this,
1639	unsigned long event,
1640	void *ptr)
1641{
1642	return NOTIFY_DONE;
1643}
1644
1645static struct notifier_block vector_inetaddr_notifier = {
1646	.notifier_call		= vector_inetaddr_event,
1647};
1648
1649static void inet_register(void)
1650{
1651	register_inetaddr_notifier(&vector_inetaddr_notifier);
1652}
1653#else
1654static inline void inet_register(void)
1655{
1656}
1657#endif
1658
1659static int vector_net_init(void)
1660{
1661	mconsole_register_dev(&vector_mc);
1662	inet_register();
1663	return 0;
1664}
1665
1666__initcall(vector_net_init);
1667
1668
1669