   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Authors:
   4 * Copyright 2001, 2002 by Robert Olsson <robert.olsson@its.uu.se>
   5 *                             Uppsala University and
   6 *                             Swedish University of Agricultural Sciences
   7 *
   8 * Alexey Kuznetsov  <kuznet@ms2.inr.ac.ru>
   9 * Ben Greear <greearb@candelatech.com>
  10 * Jens Låås <jens.laas@data.slu.se>
  11 *
  12 * A tool for loading the network with preconfigured packets.
  13 * The tool is implemented as a Linux module.  Parameters are the output
  14 * device, delay (to hard_xmit), number of packets, and whether
  15 * to use multiple SKBs or just the same one.
  16 * pktgen uses the installed interface's output routine.
  17 *
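     * Quick usage sketch (device name and values are illustrative; see
     * Documentation/networking/pktgen.rst for the full command set):
     *
     *   echo "add_device eth0" > /proc/net/pktgen/kpktgend_0
     *   echo "count 10000"     > /proc/net/pktgen/eth0
     *   echo "pkt_size 300"    > /proc/net/pktgen/eth0
     *   echo "start"           > /proc/net/pktgen/pgctrl
     *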
  18 * Additional hacking by:
  19 *
  20 * Jens.Laas@data.slu.se
  21 * Improved by ANK. 010120.
  22 * Improved by ANK even more. 010212.
  23 * MAC address typo fixed. 010417 --ro
  24 * Integrated.  020301 --DaveM
  25 * Added multiskb option 020301 --DaveM
  26 * Scaling of results. 020417--sigurdur@linpro.no
  27 * Significant re-work of the module:
  28 *   *  Converted to a threaded model to transmit and receive more
  29 *       efficiently on multiple interfaces at once.
  30 *   *  Converted many counters to __u64 to allow longer runs.
  31 *   *  Allow configuration of ranges, like min/max IP address, MACs,
  32 *       and UDP-ports, for both source and destination, which can
  33 *       either use a random distribution or sequentially walk the range.
  34 *   *  Can now change most values after starting.
  35 *   *  Place 12-byte packet in UDP payload with magic number,
  36 *       sequence number, and timestamp.
  37 *   *  Add receiver code that detects dropped pkts, re-ordered pkts, and
  38 *       latencies (with micro-second precision).
  39 *   *  Add IOCTL interface to easily get counters & configuration.
  40 *   --Ben Greear <greearb@candelatech.com>
  41 *
  42 * Renamed multiskb to clone_skb and cleaned up sending core for two distinct
  43 * skb modes. A clone_skb=0 mode for Ben's "ranges" work and clone_skb != 0
  44 * as a "fastpath" with a configurable number of clones after each alloc.
  45 * clone_skb=0 means all packets are allocated fresh, which also means that
  46 * ranges, time stamps etc. can be used. clone_skb=100 means 1 alloc is
  47 * followed by 100 clones.
  48 *
  49 * Also moved to /proc/net/pktgen/
  50 * --ro
  51 *
  52 * Sept 10:  Fixed threading/locking.  Lots of bone-headed and more clever
  53 *    mistakes.  Also merged in DaveM's patch in the -pre6 patch.
  54 * --Ben Greear <greearb@candelatech.com>
  55 *
  56 * Integrated to 2.5.x 021029 --Lucio Maciel (luciomaciel@zipmail.com.br)
  57 *
  58 * 021124 Finished major redesign and rewrite for new functionality.
  59 * See Documentation/networking/pktgen.rst for how to use this.
  60 *
  61 * The new operation:
  62 * For each CPU one thread/process is created at start. This process checks
  63 * for running devices in the if_list and sends packets until count is 0.
  64 * The thread also checks thread->control, which is used for inter-process
  65 * communication; the controlling process "posts" operations to the threads
  66 * this way.
  67 * The if_list is RCU protected, and the if_lock remains to protect updating
  68 * of if_list from "add_device", as it is invoked from userspace (via proc write).
  69 *
  70 * By design there should only be *one* "controlling" process. In practice
  71 * multiple write accesses give unpredictable results. Note that a "write"
  72 * to /proc returns a result code that should be read back by the "writer".
  73 * For practical use this should be no problem.
  74 *
  75 * Note: when adding devices to a specific CPU, it is a good idea to also
  76 * assign /proc/irq/XX/smp_affinity so TX interrupts get bound to the same CPU.
  77 * --ro
  78 *
  79 * Fix refcount off by one if first packet fails, potential null deref,
  80 * memleak 030710- KJP
  81 *
  82 * First "ranges" functionality for ipv6 030726 --ro
  83 *
  84 * Included flow support. 030802 ANK.
  85 *
  86 * Fixed unaligned access on IA-64 Grant Grundler <grundler@parisc-linux.org>
  87 *
  88 * Remove if fix from added Harald Welte <laforge@netfilter.org> 040419
  89 * ia64 compilation fix from  Aron Griffis <aron@hp.com> 040604
  90 *
  91 * New xmit() return, do_div and misc clean up by Stephen Hemminger
  92 * <shemminger@osdl.org> 040923
  93 *
  94 * Randy Dunlap fixed u64 printk compiler warning
  95 *
  96 * Remove FCS from BW calculation.  Lennert Buytenhek <buytenh@wantstofly.org>
  97 * New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213
  98 *
  99 * Corrections from Nikolai Malykh (nmalykh@bilim.com)
 100 * Removed unused flags F_SET_SRCMAC & F_SET_SRCIP 041230
 101 *
 102 * interruptible_sleep_on_timeout() replaced Nishanth Aravamudan <nacc@us.ibm.com>
 103 * 050103
 104 *
 105 * MPLS support by Steven Whitehouse <steve@chygwyn.com>
 106 *
 107 * 802.1Q/Q-in-Q support by Francesco Fondelli (FF) <francesco.fondelli@gmail.com>
 108 *
 109 * Fixed src_mac command to set source mac of packet to value specified in
 110 * command by Adit Ranadive <adit.262@gmail.com>
 111 */
 112
 113#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 114
 115#include <linux/sys.h>
 116#include <linux/types.h>
 117#include <linux/module.h>
 118#include <linux/moduleparam.h>
 119#include <linux/kernel.h>
 120#include <linux/mutex.h>
 121#include <linux/sched.h>
 122#include <linux/slab.h>
 123#include <linux/vmalloc.h>
 124#include <linux/unistd.h>
 125#include <linux/string.h>
 126#include <linux/ptrace.h>
 127#include <linux/errno.h>
 128#include <linux/ioport.h>
 129#include <linux/interrupt.h>
 130#include <linux/capability.h>
 131#include <linux/hrtimer.h>
 132#include <linux/freezer.h>
 133#include <linux/delay.h>
 134#include <linux/timer.h>
 135#include <linux/list.h>
 136#include <linux/init.h>
 137#include <linux/skbuff.h>
 138#include <linux/netdevice.h>
 139#include <linux/inet.h>
 140#include <linux/inetdevice.h>
 141#include <linux/rtnetlink.h>
 142#include <linux/if_arp.h>
 143#include <linux/if_vlan.h>
 144#include <linux/in.h>
 145#include <linux/ip.h>
 146#include <linux/ipv6.h>
 147#include <linux/udp.h>
 148#include <linux/proc_fs.h>
 149#include <linux/seq_file.h>
 150#include <linux/wait.h>
 151#include <linux/etherdevice.h>
 152#include <linux/kthread.h>
 153#include <linux/prefetch.h>
 154#include <linux/mmzone.h>
 155#include <net/net_namespace.h>
 156#include <net/checksum.h>
 157#include <net/ipv6.h>
 158#include <net/udp.h>
 159#include <net/ip6_checksum.h>
 160#include <net/addrconf.h>
 161#ifdef CONFIG_XFRM
 162#include <net/xfrm.h>
 163#endif
 164#include <net/netns/generic.h>
 165#include <asm/byteorder.h>
 166#include <linux/rcupdate.h>
 167#include <linux/bitops.h>
 168#include <linux/io.h>
 169#include <linux/timex.h>
 170#include <linux/uaccess.h>
 171#include <asm/dma.h>
 172#include <asm/div64.h>		/* do_div */
 173
 174#define VERSION	"2.75"
 175#define IP_NAME_SZ 32
 176#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
 177#define MPLS_STACK_BOTTOM htonl(0x00000100)
 178/* Max number of internet mix entries that can be specified in imix_weights. */
 179#define MAX_IMIX_ENTRIES 20
 180#define IMIX_PRECISION 100 /* Precision of IMIX distribution */
 181
 182#define func_enter() pr_debug("entering %s\n", __func__);
 183
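    /*
     * X-macro: the PKT_FLAGS list is expanded three times below to generate
     * the *_SHIFT enum, the F_<flag> device-flag masks and pkt_flag_names[].
     */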
 184#define PKT_FLAGS							\
 185	pf(IPV6)		/* Interface in IPV6 Mode */		\
 186	pf(IPSRC_RND)		/* IP-Src Random  */			\
 187	pf(IPDST_RND)		/* IP-Dst Random  */			\
 188	pf(TXSIZE_RND)		/* Transmit size is random */		\
 189	pf(UDPSRC_RND)		/* UDP-Src Random */			\
 190	pf(UDPDST_RND)		/* UDP-Dst Random */			\
 191	pf(UDPCSUM)		/* Include UDP checksum */		\
 192	pf(NO_TIMESTAMP)	/* Don't timestamp packets (default TS) */ \
 193	pf(MPLS_RND)		/* Random MPLS labels */		\
 194	pf(QUEUE_MAP_RND)	/* queue map Random */			\
 195	pf(QUEUE_MAP_CPU)	/* queue map mirrors smp_processor_id() */ \
 196	pf(FLOW_SEQ)		/* Sequential flows */			\
 197	pf(IPSEC)		/* ipsec on for flows */		\
 198	pf(MACSRC_RND)		/* MAC-Src Random */			\
 199	pf(MACDST_RND)		/* MAC-Dst Random */			\
 200	pf(VID_RND)		/* Random VLAN ID */			\
 201	pf(SVID_RND)		/* Random SVLAN ID */			\
 202	pf(NODE)		/* Node memory alloc*/			\
 203	pf(SHARED)		/* Shared SKB */			\
 204
 205#define pf(flag)		flag##_SHIFT,
 206enum pkt_flags {
 207	PKT_FLAGS
 208};
 209#undef pf
 210
 211/* Device flag bits */
 212#define pf(flag)		static const __u32 F_##flag = (1<<flag##_SHIFT);
 213PKT_FLAGS
 214#undef pf
 215
 216#define pf(flag)		__stringify(flag),
 217static char *pkt_flag_names[] = {
 218	PKT_FLAGS
 219};
 220#undef pf
 221
 222#define NR_PKT_FLAGS		ARRAY_SIZE(pkt_flag_names)
 223
 224/* Thread control flag bits */
 225#define T_STOP        (1<<0)	/* Stop run */
 226#define T_RUN         (1<<1)	/* Start run */
 227#define T_REMDEVALL   (1<<2)	/* Remove all devs */
 228#define T_REMDEV      (1<<3)	/* Remove one dev */
 229
 230/* Xmit modes */
 231#define M_START_XMIT		0	/* Default normal TX */
  232#define M_NETIF_RECEIVE		1	/* Inject packets into stack */
 233#define M_QUEUE_XMIT		2	/* Inject packet into qdisc */
 234
 235/* If lock -- protects updating of if_list */
  236#define if_lock(t)		mutex_lock(&(t->if_lock))
  237#define if_unlock(t)		mutex_unlock(&(t->if_lock))
 238
 239/* Used to help with determining the pkts on receive */
 240#define PKTGEN_MAGIC 0xbe9be955
 241#define PG_PROC_DIR "pktgen"
 242#define PGCTRL	    "pgctrl"
 243
 244#define MAX_CFLOWS  65536
 245
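    /* A VLAN/SVLAN tag adds 4 bytes; vlan_id/svlan_id 0xffff means "no tag". */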
 246#define VLAN_TAG_SIZE(x) ((x)->vlan_id == 0xffff ? 0 : 4)
 247#define SVLAN_TAG_SIZE(x) ((x)->svlan_id == 0xffff ? 0 : 4)
 248
 249struct imix_pkt {
 250	u64 size;
 251	u64 weight;
 252	u64 count_so_far;
 253};
 254
 255struct flow_state {
 256	__be32 cur_daddr;
 257	int count;
 258#ifdef CONFIG_XFRM
 259	struct xfrm_state *x;
 260#endif
 261	__u32 flags;
 262};
 263
 264/* flow flag bits */
 265#define F_INIT   (1<<0)		/* flow has been initialized */
 266
 267struct pktgen_dev {
 268	/*
 269	 * Try to keep frequently/infrequently used vars separated.
 270	 */
 271	struct proc_dir_entry *entry;	/* proc file */
 272	struct pktgen_thread *pg_thread;/* the owner */
 273	struct list_head list;		/* chaining in the thread's run-queue */
 274	struct rcu_head	 rcu;		/* freed by RCU */
 275
 276	int running;		/* if false, the test will stop */
 277
 278	/* If min != max, then we will either do a linear iteration, or
 279	 * we will do a random selection from within the range.
 280	 */
 281	__u32 flags;
 282	int xmit_mode;
 283	int min_pkt_size;
 284	int max_pkt_size;
 285	int pkt_overhead;	/* overhead for MPLS, VLANs, IPSEC etc */
 286	int nfrags;
 287	int removal_mark;	/* non-zero => the device is marked for
 288				 * removal by worker thread */
 289
 290	struct page *page;
 291	u64 delay;		/* nano-seconds */
 292
 293	__u64 count;		/* Default number of packets to send */
 294	__u64 sofar;		/* How many pkts we've sent so far */
 295	__u64 tx_bytes;		/* How many bytes we've transmitted */
 296	__u64 errors;		/* Errors when trying to transmit, */
 297
 298	/* runtime counters relating to clone_skb */
 299
 300	__u32 clone_count;
 301	int last_ok;		/* Was last skb sent?
 302				 * Or a failed transmit of some sort?
 303				 * This will keep sequence numbers in order
 304				 */
 305	ktime_t next_tx;
 306	ktime_t started_at;
 307	ktime_t stopped_at;
 308	u64	idle_acc;	/* nano-seconds */
 309
 310	__u32 seq_num;
 311
 312	int clone_skb;		/*
 313				 * Use multiple SKBs during packet gen.
 314				 * If this number is greater than 1, then
 315				 * that many copies of the same packet will be
 316				 * sent before a new packet is allocated.
 317				 * If you want to send 1024 identical packets
 318				 * before creating a new packet,
 319				 * set clone_skb to 1024.
 320				 */
 321
 322	char dst_min[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
 323	char dst_max[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
 324	char src_min[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
 325	char src_max[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
 326
 327	struct in6_addr in6_saddr;
 328	struct in6_addr in6_daddr;
 329	struct in6_addr cur_in6_daddr;
 330	struct in6_addr cur_in6_saddr;
 331	/* For ranges */
 332	struct in6_addr min_in6_daddr;
 333	struct in6_addr max_in6_daddr;
 334	struct in6_addr min_in6_saddr;
 335	struct in6_addr max_in6_saddr;
 336
 337	/* If we're doing ranges, random or incremental, then this
 338	 * defines the min/max for those ranges.
 339	 */
 340	__be32 saddr_min;	/* inclusive, source IP address */
 341	__be32 saddr_max;	/* exclusive, source IP address */
 342	__be32 daddr_min;	/* inclusive, dest IP address */
 343	__be32 daddr_max;	/* exclusive, dest IP address */
 344
 345	__u16 udp_src_min;	/* inclusive, source UDP port */
 346	__u16 udp_src_max;	/* exclusive, source UDP port */
 347	__u16 udp_dst_min;	/* inclusive, dest UDP port */
 348	__u16 udp_dst_max;	/* exclusive, dest UDP port */
 349
 350	/* DSCP + ECN */
 351	__u8 tos;            /* six MSB of (former) IPv4 TOS
 352				are for dscp codepoint */
 353	__u8 traffic_class;  /* ditto for the (former) Traffic Class in IPv6
 354				(see RFC 3260, sec. 4) */
 355
 356	/* IMIX */
 357	unsigned int n_imix_entries;
 358	struct imix_pkt imix_entries[MAX_IMIX_ENTRIES];
 359	/* Maps 0-IMIX_PRECISION range to imix_entry based on probability*/
 360	__u8 imix_distribution[IMIX_PRECISION];
 361
 362	/* MPLS */
 363	unsigned int nr_labels;	/* Depth of stack, 0 = no MPLS */
 364	__be32 labels[MAX_MPLS_LABELS];
 365
 366	/* VLAN/SVLAN (802.1Q/Q-in-Q) */
 367	__u8  vlan_p;
 368	__u8  vlan_cfi;
 369	__u16 vlan_id;  /* 0xffff means no vlan tag */
 370
 371	__u8  svlan_p;
 372	__u8  svlan_cfi;
 373	__u16 svlan_id; /* 0xffff means no svlan tag */
 374
 375	__u32 src_mac_count;	/* How many MACs to iterate through */
 376	__u32 dst_mac_count;	/* How many MACs to iterate through */
 377
 378	unsigned char dst_mac[ETH_ALEN];
 379	unsigned char src_mac[ETH_ALEN];
 380
 381	__u32 cur_dst_mac_offset;
 382	__u32 cur_src_mac_offset;
 383	__be32 cur_saddr;
 384	__be32 cur_daddr;
 385	__u16 ip_id;
 386	__u16 cur_udp_dst;
 387	__u16 cur_udp_src;
 388	__u16 cur_queue_map;
 389	__u32 cur_pkt_size;
 390	__u32 last_pkt_size;
 391
 392	__u8 hh[14];
 393	/* = {
 394	   0x00, 0x80, 0xC8, 0x79, 0xB3, 0xCB,
 395
 396	   We fill in SRC address later
 397	   0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 398	   0x08, 0x00
 399	   };
 400	 */
 401	__u16 pad;		/* pad out the hh struct to an even 16 bytes */
 402
 403	struct sk_buff *skb;	/* skb we are to transmit next, used for when we
 404				 * are transmitting the same one multiple times
 405				 */
 406	struct net_device *odev; /* The out-going device.
 407				  * Note that the device should have its
 408				  * pg_info pointer pointing back to this
 409				  * device.
 410				  * Set when the user specifies the out-going
 411				  * device name (not when the inject is
 412				  * started as it used to do.)
 413				  */
 414	netdevice_tracker dev_tracker;
 415	char odevname[32];
 416	struct flow_state *flows;
 417	unsigned int cflows;	/* Concurrent flows (config) */
 418	unsigned int lflow;		/* Flow length  (config) */
 419	unsigned int nflows;	/* accumulated flows (stats) */
 420	unsigned int curfl;		/* current sequenced flow (state)*/
 421
 422	u16 queue_map_min;
 423	u16 queue_map_max;
 424	__u32 skb_priority;	/* skb priority field */
 425	unsigned int burst;	/* number of duplicated packets to burst */
 426	int node;               /* Memory node */
 427
 428#ifdef CONFIG_XFRM
 429	__u8	ipsmode;		/* IPSEC mode (config) */
 430	__u8	ipsproto;		/* IPSEC type (config) */
 431	__u32	spi;
 432	struct xfrm_dst xdst;
 433	struct dst_ops dstops;
 434#endif
 435	char result[512];
 436};
 437
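    /* Header placed in the UDP payload: magic, sequence number, TX time. */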
 438struct pktgen_hdr {
 439	__be32 pgh_magic;
 440	__be32 seq_num;
 441	__be32 tv_sec;
 442	__be32 tv_usec;
 443};
 444
 445
 446static unsigned int pg_net_id __read_mostly;
 447
 448struct pktgen_net {
 449	struct net		*net;
 450	struct proc_dir_entry	*proc_dir;
 451	struct list_head	pktgen_threads;
 452	bool			pktgen_exiting;
 453};
 454
 455struct pktgen_thread {
 456	struct mutex if_lock;		/* for list of devices */
 457	struct list_head if_list;	/* All devices here */
 458	struct list_head th_list;
 459	struct task_struct *tsk;
 460	char result[512];
 461
 462	/* Field for thread to receive "posted" events: terminate,
 463	   stop ifs, etc. */
 464
 465	u32 control;
 466	int cpu;
 467
 468	wait_queue_head_t queue;
 469	struct completion start_done;
 470	struct pktgen_net *net;
 471};
 472
 473#define REMOVE 1
 474#define FIND   0
 475
 476static const char version[] =
 477	"Packet Generator for packet performance testing. "
 478	"Version: " VERSION "\n";
 479
 480static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
 481static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
 482static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
 483					  const char *ifname, bool exact);
 484static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
 485static void pktgen_run_all_threads(struct pktgen_net *pn);
 486static void pktgen_reset_all_threads(struct pktgen_net *pn);
 487static void pktgen_stop_all_threads(struct pktgen_net *pn);
 488
 489static void pktgen_stop(struct pktgen_thread *t);
 490static void pktgen_clear_counters(struct pktgen_dev *pkt_dev);
 491static void fill_imix_distribution(struct pktgen_dev *pkt_dev);
 492
 493/* Module parameters, defaults. */
 494static int pg_count_d __read_mostly = 1000;
 495static int pg_delay_d __read_mostly;
 496static int pg_clone_skb_d  __read_mostly;
 497static int debug  __read_mostly;
 498
 499static DEFINE_MUTEX(pktgen_thread_lock);
 500
 501static struct notifier_block pktgen_notifier_block = {
 502	.notifier_call = pktgen_device_event,
 503};
 504
 505/*
 506 * /proc handling functions
 507 *
 508 */
 509
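    /*
     * The main control file /proc/net/pktgen/pgctrl accepts the commands
     * "start", "stop" and "reset" (parsed in pgctrl_write() below), e.g.:
     *
     *	echo "start" > /proc/net/pktgen/pgctrl
     */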
 510static int pgctrl_show(struct seq_file *seq, void *v)
 511{
 512	seq_puts(seq, version);
 513	return 0;
 514}
 515
 516static ssize_t pgctrl_write(struct file *file, const char __user *buf,
 517			    size_t count, loff_t *ppos)
 518{
 519	char data[128];
 520	struct pktgen_net *pn = net_generic(current->nsproxy->net_ns, pg_net_id);
 521
 522	if (!capable(CAP_NET_ADMIN))
 523		return -EPERM;
 524
 525	if (count == 0)
 526		return -EINVAL;
 527
 528	if (count > sizeof(data))
 529		count = sizeof(data);
 530
 531	if (copy_from_user(data, buf, count))
 532		return -EFAULT;
 533
 534	data[count - 1] = 0;	/* Strip trailing '\n' and terminate string */
 535
 536	if (!strcmp(data, "stop"))
 537		pktgen_stop_all_threads(pn);
 538	else if (!strcmp(data, "start"))
 539		pktgen_run_all_threads(pn);
 540	else if (!strcmp(data, "reset"))
 541		pktgen_reset_all_threads(pn);
 542	else
 543		return -EINVAL;
 544
 545	return count;
 546}
 547
 548static int pgctrl_open(struct inode *inode, struct file *file)
 549{
 550	return single_open(file, pgctrl_show, pde_data(inode));
 551}
 552
 553static const struct proc_ops pktgen_proc_ops = {
 554	.proc_open	= pgctrl_open,
 555	.proc_read	= seq_read,
 556	.proc_lseek	= seq_lseek,
 557	.proc_write	= pgctrl_write,
 558	.proc_release	= single_release,
 559};
 560
 561static int pktgen_if_show(struct seq_file *seq, void *v)
 562{
 563	const struct pktgen_dev *pkt_dev = seq->private;
 564	ktime_t stopped;
 565	unsigned int i;
 566	u64 idle;
 567
 568	seq_printf(seq,
 569		   "Params: count %llu  min_pkt_size: %u  max_pkt_size: %u\n",
 570		   (unsigned long long)pkt_dev->count, pkt_dev->min_pkt_size,
 571		   pkt_dev->max_pkt_size);
 572
 573	if (pkt_dev->n_imix_entries > 0) {
 574		seq_puts(seq, "     imix_weights: ");
 575		for (i = 0; i < pkt_dev->n_imix_entries; i++) {
 576			seq_printf(seq, "%llu,%llu ",
 577				   pkt_dev->imix_entries[i].size,
 578				   pkt_dev->imix_entries[i].weight);
 579		}
 580		seq_puts(seq, "\n");
 581	}
 582
 583	seq_printf(seq,
 584		   "     frags: %d  delay: %llu  clone_skb: %d  ifname: %s\n",
 585		   pkt_dev->nfrags, (unsigned long long) pkt_dev->delay,
 586		   pkt_dev->clone_skb, pkt_dev->odevname);
 587
 588	seq_printf(seq, "     flows: %u flowlen: %u\n", pkt_dev->cflows,
 589		   pkt_dev->lflow);
 590
 591	seq_printf(seq,
 592		   "     queue_map_min: %u  queue_map_max: %u\n",
 593		   pkt_dev->queue_map_min,
 594		   pkt_dev->queue_map_max);
 595
 596	if (pkt_dev->skb_priority)
 597		seq_printf(seq, "     skb_priority: %u\n",
 598			   pkt_dev->skb_priority);
 599
 600	if (pkt_dev->flags & F_IPV6) {
 601		seq_printf(seq,
 602			   "     saddr: %pI6c  min_saddr: %pI6c  max_saddr: %pI6c\n"
 603			   "     daddr: %pI6c  min_daddr: %pI6c  max_daddr: %pI6c\n",
 604			   &pkt_dev->in6_saddr,
 605			   &pkt_dev->min_in6_saddr, &pkt_dev->max_in6_saddr,
 606			   &pkt_dev->in6_daddr,
 607			   &pkt_dev->min_in6_daddr, &pkt_dev->max_in6_daddr);
 608	} else {
 609		seq_printf(seq,
 610			   "     dst_min: %s  dst_max: %s\n",
 611			   pkt_dev->dst_min, pkt_dev->dst_max);
 612		seq_printf(seq,
 613			   "     src_min: %s  src_max: %s\n",
 614			   pkt_dev->src_min, pkt_dev->src_max);
 615	}
 616
 617	seq_puts(seq, "     src_mac: ");
 618
 619	seq_printf(seq, "%pM ",
 620		   is_zero_ether_addr(pkt_dev->src_mac) ?
 621			     pkt_dev->odev->dev_addr : pkt_dev->src_mac);
 622
 623	seq_puts(seq, "dst_mac: ");
 624	seq_printf(seq, "%pM\n", pkt_dev->dst_mac);
 625
 626	seq_printf(seq,
 627		   "     udp_src_min: %d  udp_src_max: %d"
 628		   "  udp_dst_min: %d  udp_dst_max: %d\n",
 629		   pkt_dev->udp_src_min, pkt_dev->udp_src_max,
 630		   pkt_dev->udp_dst_min, pkt_dev->udp_dst_max);
 631
 632	seq_printf(seq,
 633		   "     src_mac_count: %d  dst_mac_count: %d\n",
 634		   pkt_dev->src_mac_count, pkt_dev->dst_mac_count);
 635
 636	if (pkt_dev->nr_labels) {
 637		seq_puts(seq, "     mpls: ");
 638		for (i = 0; i < pkt_dev->nr_labels; i++)
 639			seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
 640				   i == pkt_dev->nr_labels-1 ? "\n" : ", ");
 641	}
 642
 643	if (pkt_dev->vlan_id != 0xffff)
 644		seq_printf(seq, "     vlan_id: %u  vlan_p: %u  vlan_cfi: %u\n",
 645			   pkt_dev->vlan_id, pkt_dev->vlan_p,
 646			   pkt_dev->vlan_cfi);
 647
 648	if (pkt_dev->svlan_id != 0xffff)
 649		seq_printf(seq, "     svlan_id: %u  vlan_p: %u  vlan_cfi: %u\n",
 650			   pkt_dev->svlan_id, pkt_dev->svlan_p,
 651			   pkt_dev->svlan_cfi);
 652
 653	if (pkt_dev->tos)
 654		seq_printf(seq, "     tos: 0x%02x\n", pkt_dev->tos);
 655
 656	if (pkt_dev->traffic_class)
 657		seq_printf(seq, "     traffic_class: 0x%02x\n", pkt_dev->traffic_class);
 658
 659	if (pkt_dev->burst > 1)
 660		seq_printf(seq, "     burst: %d\n", pkt_dev->burst);
 661
 662	if (pkt_dev->node >= 0)
 663		seq_printf(seq, "     node: %d\n", pkt_dev->node);
 664
 665	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE)
 666		seq_puts(seq, "     xmit_mode: netif_receive\n");
 667	else if (pkt_dev->xmit_mode == M_QUEUE_XMIT)
 668		seq_puts(seq, "     xmit_mode: xmit_queue\n");
 669
 670	seq_puts(seq, "     Flags: ");
 671
 672	for (i = 0; i < NR_PKT_FLAGS; i++) {
 673		if (i == FLOW_SEQ_SHIFT)
 674			if (!pkt_dev->cflows)
 675				continue;
 676
 677		if (pkt_dev->flags & (1 << i)) {
 678			seq_printf(seq, "%s  ", pkt_flag_names[i]);
 679#ifdef CONFIG_XFRM
 680			if (i == IPSEC_SHIFT && pkt_dev->spi)
 681				seq_printf(seq, "spi:%u  ", pkt_dev->spi);
 682#endif
 683		} else if (i == FLOW_SEQ_SHIFT) {
 684			seq_puts(seq, "FLOW_RND  ");
 685		}
 686	}
 687
 688	seq_puts(seq, "\n");
 689
 690	/* not really stopped, more like last-running-at */
 691	stopped = pkt_dev->running ? ktime_get() : pkt_dev->stopped_at;
 692	idle = pkt_dev->idle_acc;
 693	do_div(idle, NSEC_PER_USEC);
 694
 695	seq_printf(seq,
 696		   "Current:\n     pkts-sofar: %llu  errors: %llu\n",
 697		   (unsigned long long)pkt_dev->sofar,
 698		   (unsigned long long)pkt_dev->errors);
 699
 700	if (pkt_dev->n_imix_entries > 0) {
 701		int i;
 702
 703		seq_puts(seq, "     imix_size_counts: ");
 704		for (i = 0; i < pkt_dev->n_imix_entries; i++) {
 705			seq_printf(seq, "%llu,%llu ",
 706				   pkt_dev->imix_entries[i].size,
 707				   pkt_dev->imix_entries[i].count_so_far);
 708		}
 709		seq_puts(seq, "\n");
 710	}
 711
 712	seq_printf(seq,
 713		   "     started: %lluus  stopped: %lluus idle: %lluus\n",
 714		   (unsigned long long) ktime_to_us(pkt_dev->started_at),
 715		   (unsigned long long) ktime_to_us(stopped),
 716		   (unsigned long long) idle);
 717
 718	seq_printf(seq,
 719		   "     seq_num: %d  cur_dst_mac_offset: %d  cur_src_mac_offset: %d\n",
 720		   pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset,
 721		   pkt_dev->cur_src_mac_offset);
 722
 723	if (pkt_dev->flags & F_IPV6) {
 724		seq_printf(seq, "     cur_saddr: %pI6c  cur_daddr: %pI6c\n",
 725				&pkt_dev->cur_in6_saddr,
 726				&pkt_dev->cur_in6_daddr);
 727	} else
 728		seq_printf(seq, "     cur_saddr: %pI4  cur_daddr: %pI4\n",
 729			   &pkt_dev->cur_saddr, &pkt_dev->cur_daddr);
 730
 731	seq_printf(seq, "     cur_udp_dst: %d  cur_udp_src: %d\n",
 732		   pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src);
 733
 734	seq_printf(seq, "     cur_queue_map: %u\n", pkt_dev->cur_queue_map);
 735
 736	seq_printf(seq, "     flows: %u\n", pkt_dev->nflows);
 737
 738	if (pkt_dev->result[0])
 739		seq_printf(seq, "Result: %s\n", pkt_dev->result);
 740	else
 741		seq_puts(seq, "Result: Idle\n");
 742
 743	return 0;
 744}
 745
 746
 747static int hex32_arg(const char __user *user_buffer, unsigned long maxlen,
 748		     __u32 *num)
 749{
 750	int i = 0;
 751	*num = 0;
 752
 753	for (; i < maxlen; i++) {
 754		int value;
 755		char c;
 756		*num <<= 4;
 757		if (get_user(c, &user_buffer[i]))
 758			return -EFAULT;
 759		value = hex_to_bin(c);
 760		if (value >= 0)
 761			*num |= value;
 762		else
 763			break;
 764	}
 765	return i;
 766}
 767
 768static int count_trail_chars(const char __user * user_buffer,
 769			     unsigned int maxlen)
 770{
 771	int i;
 772
 773	for (i = 0; i < maxlen; i++) {
 774		char c;
 775		if (get_user(c, &user_buffer[i]))
 776			return -EFAULT;
 777		switch (c) {
 778		case '\"':
 779		case '\n':
 780		case '\r':
 781		case '\t':
 782		case ' ':
 783		case '=':
 784			break;
 785		default:
 786			goto done;
 787		}
 788	}
 789done:
 790	return i;
 791}
 792
 793static long num_arg(const char __user *user_buffer, unsigned long maxlen,
 794				unsigned long *num)
 795{
 796	int i;
 797	*num = 0;
 798
 799	for (i = 0; i < maxlen; i++) {
 800		char c;
 801		if (get_user(c, &user_buffer[i]))
 802			return -EFAULT;
 803		if ((c >= '0') && (c <= '9')) {
 804			*num *= 10;
 805			*num += c - '0';
 806		} else
 807			break;
 808	}
 809	return i;
 810}
 811
 812static int strn_len(const char __user * user_buffer, unsigned int maxlen)
 813{
 814	int i;
 815
 816	for (i = 0; i < maxlen; i++) {
 817		char c;
 818		if (get_user(c, &user_buffer[i]))
 819			return -EFAULT;
 820		switch (c) {
 821		case '\"':
 822		case '\n':
 823		case '\r':
 824		case '\t':
 825		case ' ':
 826			goto done_str;
 827		default:
 828			break;
 829		}
 830	}
 831done_str:
 832	return i;
 833}
 834
 835/* Parses imix entries from user buffer.
 836 * The user buffer should consist of imix entries separated by spaces
 837 * where each entry consists of size and weight delimited by commas.
 838 * "size1,weight_1 size2,weight_2 ... size_n,weight_n" for example.
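     * E.g. (an illustrative setting, cf. pktgen.rst):
     *	echo "imix_weights 40,7 576,4 1500,1" > /proc/net/pktgen/eth0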
 839 */
 840static ssize_t get_imix_entries(const char __user *buffer,
 841				struct pktgen_dev *pkt_dev)
 842{
 843	const int max_digits = 10;
 844	int i = 0;
 845	long len;
 846	char c;
 847
 848	pkt_dev->n_imix_entries = 0;
 849
 850	do {
 851		unsigned long weight;
 852		unsigned long size;
 853
 854		if (pkt_dev->n_imix_entries >= MAX_IMIX_ENTRIES)
 855			return -E2BIG;
 856
 857		len = num_arg(&buffer[i], max_digits, &size);
 858		if (len < 0)
 859			return len;
 860		i += len;
 861		if (get_user(c, &buffer[i]))
 862			return -EFAULT;
 863		/* Check for comma between size_i and weight_i */
 864		if (c != ',')
 865			return -EINVAL;
 866		i++;
 867
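    		/* enforce minimum size: Ethernet (14) + IPv4 (20) + UDP (8) */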
 868		if (size < 14 + 20 + 8)
 869			size = 14 + 20 + 8;
 870
 871		len = num_arg(&buffer[i], max_digits, &weight);
 872		if (len < 0)
 873			return len;
 874		if (weight <= 0)
 875			return -EINVAL;
 876
 877		pkt_dev->imix_entries[pkt_dev->n_imix_entries].size = size;
 878		pkt_dev->imix_entries[pkt_dev->n_imix_entries].weight = weight;
 879
 880		i += len;
 881		if (get_user(c, &buffer[i]))
 882			return -EFAULT;
 883
 884		i++;
 885		pkt_dev->n_imix_entries++;
 886	} while (c == ' ');
 887
 888	return i;
 889}
 890
 891static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev)
 892{
 893	unsigned int n = 0;
 894	char c;
 895	ssize_t i = 0;
 896	int len;
 897
 898	pkt_dev->nr_labels = 0;
 899	do {
 900		__u32 tmp;
 901		len = hex32_arg(&buffer[i], 8, &tmp);
 902		if (len <= 0)
 903			return len;
 904		pkt_dev->labels[n] = htonl(tmp);
 905		if (pkt_dev->labels[n] & MPLS_STACK_BOTTOM)
 906			pkt_dev->flags |= F_MPLS_RND;
 907		i += len;
 908		if (get_user(c, &buffer[i]))
 909			return -EFAULT;
 910		i++;
 911		n++;
 912		if (n >= MAX_MPLS_LABELS)
 913			return -E2BIG;
 914	} while (c == ',');
 915
 916	pkt_dev->nr_labels = n;
 917	return i;
 918}
 919
 920static __u32 pktgen_read_flag(const char *f, bool *disable)
 921{
 922	__u32 i;
 923
 924	if (f[0] == '!') {
 925		*disable = true;
 926		f++;
 927	}
 928
 929	for (i = 0; i < NR_PKT_FLAGS; i++) {
 930		if (!IS_ENABLED(CONFIG_XFRM) && i == IPSEC_SHIFT)
 931			continue;
 932
 933		/* allow only disabling ipv6 flag */
 934		if (!*disable && i == IPV6_SHIFT)
 935			continue;
 936
 937		if (strcmp(f, pkt_flag_names[i]) == 0)
 938			return 1 << i;
 939	}
 940
 941	if (strcmp(f, "FLOW_RND") == 0) {
 942		*disable = !*disable;
 943		return F_FLOW_SEQ;
 944	}
 945
 946	return 0;
 947}
 948
 949static ssize_t pktgen_if_write(struct file *file,
 950			       const char __user * user_buffer, size_t count,
 951			       loff_t * offset)
 952{
 953	struct seq_file *seq = file->private_data;
 954	struct pktgen_dev *pkt_dev = seq->private;
 955	int i, max, len;
 956	char name[16], valstr[32];
 957	unsigned long value = 0;
 958	char *pg_result = NULL;
 959	int tmp = 0;
 960	char buf[128];
 961
 962	pg_result = &(pkt_dev->result[0]);
 963
 964	if (count < 1) {
 965		pr_warn("wrong command format\n");
 966		return -EINVAL;
 967	}
 968
 969	max = count;
 970	tmp = count_trail_chars(user_buffer, max);
 971	if (tmp < 0) {
 972		pr_warn("illegal format\n");
 973		return tmp;
 974	}
 975	i = tmp;
 976
 977	/* Read variable name */
 978
 979	len = strn_len(&user_buffer[i], sizeof(name) - 1);
 980	if (len < 0)
 981		return len;
 982
 983	memset(name, 0, sizeof(name));
 984	if (copy_from_user(name, &user_buffer[i], len))
 985		return -EFAULT;
 986	i += len;
 987
 988	max = count - i;
 989	len = count_trail_chars(&user_buffer[i], max);
 990	if (len < 0)
 991		return len;
 992
 993	i += len;
 994
 995	if (debug) {
 996		size_t copy = min_t(size_t, count + 1, 1024);
 997		char *tp = strndup_user(user_buffer, copy);
 998
 999		if (IS_ERR(tp))
1000			return PTR_ERR(tp);
1001
1002		pr_debug("%s,%zu  buffer -:%s:-\n", name, count, tp);
1003		kfree(tp);
1004	}
1005
1006	if (!strcmp(name, "min_pkt_size")) {
1007		len = num_arg(&user_buffer[i], 10, &value);
1008		if (len < 0)
1009			return len;
1010
1011		i += len;
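    		/* clamp to minimum: Ethernet (14) + IPv4 (20) + UDP (8) headers */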
1012		if (value < 14 + 20 + 8)
1013			value = 14 + 20 + 8;
1014		if (value != pkt_dev->min_pkt_size) {
1015			pkt_dev->min_pkt_size = value;
1016			pkt_dev->cur_pkt_size = value;
1017		}
1018		sprintf(pg_result, "OK: min_pkt_size=%d",
1019			pkt_dev->min_pkt_size);
1020		return count;
1021	}
1022
1023	if (!strcmp(name, "max_pkt_size")) {
1024		len = num_arg(&user_buffer[i], 10, &value);
1025		if (len < 0)
1026			return len;
1027
1028		i += len;
1029		if (value < 14 + 20 + 8)
1030			value = 14 + 20 + 8;
1031		if (value != pkt_dev->max_pkt_size) {
1032			pkt_dev->max_pkt_size = value;
1033			pkt_dev->cur_pkt_size = value;
1034		}
1035		sprintf(pg_result, "OK: max_pkt_size=%d",
1036			pkt_dev->max_pkt_size);
1037		return count;
1038	}
1039
1040	/* Shortcut for min = max */
1041
1042	if (!strcmp(name, "pkt_size")) {
1043		len = num_arg(&user_buffer[i], 10, &value);
1044		if (len < 0)
1045			return len;
1046
1047		i += len;
1048		if (value < 14 + 20 + 8)
1049			value = 14 + 20 + 8;
1050		if (value != pkt_dev->min_pkt_size) {
1051			pkt_dev->min_pkt_size = value;
1052			pkt_dev->max_pkt_size = value;
1053			pkt_dev->cur_pkt_size = value;
1054		}
1055		sprintf(pg_result, "OK: pkt_size=%d", pkt_dev->min_pkt_size);
1056		return count;
1057	}
1058
1059	if (!strcmp(name, "imix_weights")) {
1060		if (pkt_dev->clone_skb > 0)
1061			return -EINVAL;
1062
1063		len = get_imix_entries(&user_buffer[i], pkt_dev);
1064		if (len < 0)
1065			return len;
1066
1067		fill_imix_distribution(pkt_dev);
1068
1069		i += len;
1070		return count;
1071	}
1072
1073	if (!strcmp(name, "debug")) {
1074		len = num_arg(&user_buffer[i], 10, &value);
1075		if (len < 0)
1076			return len;
1077
1078		i += len;
1079		debug = value;
1080		sprintf(pg_result, "OK: debug=%u", debug);
1081		return count;
1082	}
1083
1084	if (!strcmp(name, "frags")) {
1085		len = num_arg(&user_buffer[i], 10, &value);
1086		if (len < 0)
1087			return len;
1088
1089		i += len;
1090		pkt_dev->nfrags = value;
1091		sprintf(pg_result, "OK: frags=%d", pkt_dev->nfrags);
1092		return count;
1093	}
1094	if (!strcmp(name, "delay")) {
1095		len = num_arg(&user_buffer[i], 10, &value);
1096		if (len < 0)
1097			return len;
1098
1099		i += len;
1100		if (value == 0x7FFFFFFF)
1101			pkt_dev->delay = ULLONG_MAX;
1102		else
1103			pkt_dev->delay = (u64)value;
1104
1105		sprintf(pg_result, "OK: delay=%llu",
1106			(unsigned long long) pkt_dev->delay);
1107		return count;
1108	}
1109	if (!strcmp(name, "rate")) {
1110		len = num_arg(&user_buffer[i], 10, &value);
1111		if (len < 0)
1112			return len;
1113
1114		i += len;
1115		if (!value)
1116			return len;
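    		/* value is interpreted as Mbit/s: delay(ns) = pkt bits * 1000 / value */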
1117		pkt_dev->delay = pkt_dev->min_pkt_size*8*NSEC_PER_USEC/value;
1118		if (debug)
1119			pr_info("Delay set at: %llu ns\n", pkt_dev->delay);
1120
1121		sprintf(pg_result, "OK: rate=%lu", value);
1122		return count;
1123	}
1124	if (!strcmp(name, "ratep")) {
1125		len = num_arg(&user_buffer[i], 10, &value);
1126		if (len < 0)
1127			return len;
1128
1129		i += len;
1130		if (!value)
1131			return len;
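    		/* value is packets per second: delay(ns) = NSEC_PER_SEC / value */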
1132		pkt_dev->delay = NSEC_PER_SEC/value;
1133		if (debug)
1134			pr_info("Delay set at: %llu ns\n", pkt_dev->delay);
1135
1136		sprintf(pg_result, "OK: rate=%lu", value);
1137		return count;
1138	}
1139	if (!strcmp(name, "udp_src_min")) {
1140		len = num_arg(&user_buffer[i], 10, &value);
1141		if (len < 0)
1142			return len;
1143
1144		i += len;
1145		if (value != pkt_dev->udp_src_min) {
1146			pkt_dev->udp_src_min = value;
1147			pkt_dev->cur_udp_src = value;
1148		}
1149		sprintf(pg_result, "OK: udp_src_min=%u", pkt_dev->udp_src_min);
1150		return count;
1151	}
1152	if (!strcmp(name, "udp_dst_min")) {
1153		len = num_arg(&user_buffer[i], 10, &value);
1154		if (len < 0)
1155			return len;
1156
1157		i += len;
1158		if (value != pkt_dev->udp_dst_min) {
1159			pkt_dev->udp_dst_min = value;
1160			pkt_dev->cur_udp_dst = value;
1161		}
1162		sprintf(pg_result, "OK: udp_dst_min=%u", pkt_dev->udp_dst_min);
1163		return count;
1164	}
1165	if (!strcmp(name, "udp_src_max")) {
1166		len = num_arg(&user_buffer[i], 10, &value);
1167		if (len < 0)
1168			return len;
1169
1170		i += len;
1171		if (value != pkt_dev->udp_src_max) {
1172			pkt_dev->udp_src_max = value;
1173			pkt_dev->cur_udp_src = value;
1174		}
1175		sprintf(pg_result, "OK: udp_src_max=%u", pkt_dev->udp_src_max);
1176		return count;
1177	}
1178	if (!strcmp(name, "udp_dst_max")) {
1179		len = num_arg(&user_buffer[i], 10, &value);
1180		if (len < 0)
1181			return len;
1182
1183		i += len;
1184		if (value != pkt_dev->udp_dst_max) {
1185			pkt_dev->udp_dst_max = value;
1186			pkt_dev->cur_udp_dst = value;
1187		}
1188		sprintf(pg_result, "OK: udp_dst_max=%u", pkt_dev->udp_dst_max);
1189		return count;
1190	}
1191	if (!strcmp(name, "clone_skb")) {
1192		len = num_arg(&user_buffer[i], 10, &value);
1193		if (len < 0)
1194			return len;
1195		/* clone_skb is not supported for netif_receive xmit_mode and
1196		 * IMIX mode.
1197		 */
1198		if ((value > 0) &&
1199		    ((pkt_dev->xmit_mode == M_NETIF_RECEIVE) ||
1200		     !(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
1201			return -ENOTSUPP;
1202		if (value > 0 && (pkt_dev->n_imix_entries > 0 ||
1203				  !(pkt_dev->flags & F_SHARED)))
1204			return -EINVAL;
1205
1206		i += len;
1207		pkt_dev->clone_skb = value;
1208
1209		sprintf(pg_result, "OK: clone_skb=%d", pkt_dev->clone_skb);
1210		return count;
1211	}
1212	if (!strcmp(name, "count")) {
1213		len = num_arg(&user_buffer[i], 10, &value);
1214		if (len < 0)
1215			return len;
1216
1217		i += len;
1218		pkt_dev->count = value;
1219		sprintf(pg_result, "OK: count=%llu",
1220			(unsigned long long)pkt_dev->count);
1221		return count;
1222	}
1223	if (!strcmp(name, "src_mac_count")) {
1224		len = num_arg(&user_buffer[i], 10, &value);
1225		if (len < 0)
1226			return len;
1227
1228		i += len;
1229		if (pkt_dev->src_mac_count != value) {
1230			pkt_dev->src_mac_count = value;
1231			pkt_dev->cur_src_mac_offset = 0;
1232		}
1233		sprintf(pg_result, "OK: src_mac_count=%d",
1234			pkt_dev->src_mac_count);
1235		return count;
1236	}
1237	if (!strcmp(name, "dst_mac_count")) {
1238		len = num_arg(&user_buffer[i], 10, &value);
1239		if (len < 0)
1240			return len;
1241
1242		i += len;
1243		if (pkt_dev->dst_mac_count != value) {
1244			pkt_dev->dst_mac_count = value;
1245			pkt_dev->cur_dst_mac_offset = 0;
1246		}
1247		sprintf(pg_result, "OK: dst_mac_count=%d",
1248			pkt_dev->dst_mac_count);
1249		return count;
1250	}
1251	if (!strcmp(name, "burst")) {
1252		len = num_arg(&user_buffer[i], 10, &value);
1253		if (len < 0)
1254			return len;
1255
1256		i += len;
1257		if ((value > 1) &&
1258		    ((pkt_dev->xmit_mode == M_QUEUE_XMIT) ||
1259		     ((pkt_dev->xmit_mode == M_START_XMIT) &&
1260		     (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))))
1261			return -ENOTSUPP;
1262
1263		if (value > 1 && !(pkt_dev->flags & F_SHARED))
1264			return -EINVAL;
1265
1266		pkt_dev->burst = value < 1 ? 1 : value;
1267		sprintf(pg_result, "OK: burst=%u", pkt_dev->burst);
1268		return count;
1269	}
1270	if (!strcmp(name, "node")) {
1271		len = num_arg(&user_buffer[i], 10, &value);
1272		if (len < 0)
1273			return len;
1274
1275		i += len;
1276
1277		if (node_possible(value)) {
1278			pkt_dev->node = value;
1279			sprintf(pg_result, "OK: node=%d", pkt_dev->node);
1280			if (pkt_dev->page) {
1281				put_page(pkt_dev->page);
1282				pkt_dev->page = NULL;
1283			}
1284		} else {
1285			sprintf(pg_result, "ERROR: node not possible");
1286		}
1287		return count;
1288	}
1289	if (!strcmp(name, "xmit_mode")) {
1290		char f[32];
1291
1292		memset(f, 0, 32);
1293		len = strn_len(&user_buffer[i], sizeof(f) - 1);
1294		if (len < 0)
1295			return len;
1296
1297		if (copy_from_user(f, &user_buffer[i], len))
1298			return -EFAULT;
1299		i += len;
1300
1301		if (strcmp(f, "start_xmit") == 0) {
1302			pkt_dev->xmit_mode = M_START_XMIT;
1303		} else if (strcmp(f, "netif_receive") == 0) {
1304			/* clone_skb set earlier, not supported in this mode */
1305			if (pkt_dev->clone_skb > 0)
1306				return -ENOTSUPP;
1307
1308			pkt_dev->xmit_mode = M_NETIF_RECEIVE;
1309
1310			/* make sure new packet is allocated every time
1311			 * pktgen_xmit() is called
1312			 */
1313			pkt_dev->last_ok = 1;
1314		} else if (strcmp(f, "queue_xmit") == 0) {
1315			pkt_dev->xmit_mode = M_QUEUE_XMIT;
1316			pkt_dev->last_ok = 1;
1317		} else {
1318			sprintf(pg_result,
1319				"xmit_mode -:%s:- unknown\nAvailable modes: %s",
1320				f, "start_xmit, netif_receive, queue_xmit\n");
1321			return count;
1322		}
1323		sprintf(pg_result, "OK: xmit_mode=%s", f);
1324		return count;
1325	}
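    	/* e.g. echo "flag UDPSRC_RND" > /proc/net/pktgen/eth0; prepend "!" to clear */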
1326	if (!strcmp(name, "flag")) {
1327		bool disable = false;
1328		__u32 flag;
1329		char f[32];
1330		char *end;
1331
1332		memset(f, 0, 32);
1333		len = strn_len(&user_buffer[i], sizeof(f) - 1);
1334		if (len < 0)
1335			return len;
1336
1337		if (copy_from_user(f, &user_buffer[i], len))
1338			return -EFAULT;
1339		i += len;
1340
1341		flag = pktgen_read_flag(f, &disable);
1342		if (flag) {
1343			if (disable) {
1344				/* If "clone_skb", or "burst" parameters are
1345				 * configured, it means that the skb still
1346				 * needs to be referenced by the pktgen, so
1347				 * the skb must be shared.
1348				 */
1349				if (flag == F_SHARED && (pkt_dev->clone_skb ||
1350							 pkt_dev->burst > 1))
1351					return -EINVAL;
1352				pkt_dev->flags &= ~flag;
1353			} else {
1354				pkt_dev->flags |= flag;
1355			}
1356
1357			sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags);
1358			return count;
1359		}
1360
1361		/* Unknown flag */
1362		end = pkt_dev->result + sizeof(pkt_dev->result);
1363		pg_result += sprintf(pg_result,
1364			"Flag -:%s:- unknown\n"
1365			"Available flags, (prepend ! to un-set flag):\n", f);
1366
1367		for (int n = 0; n < NR_PKT_FLAGS && pg_result < end; n++) {
1368			if (!IS_ENABLED(CONFIG_XFRM) && n == IPSEC_SHIFT)
1369				continue;
1370			pg_result += snprintf(pg_result, end - pg_result,
1371					      "%s, ", pkt_flag_names[n]);
1372		}
1373		if (!WARN_ON_ONCE(pg_result >= end)) {
1374			/* Remove the comma and whitespace at the end */
1375			*(pg_result - 2) = '\0';
1376		}
1377
1378		return count;
1379	}
1380	if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) {
1381		len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_min) - 1);
1382		if (len < 0)
1383			return len;
1384
1385		if (copy_from_user(buf, &user_buffer[i], len))
1386			return -EFAULT;
1387		buf[len] = 0;
1388		if (strcmp(buf, pkt_dev->dst_min) != 0) {
1389			memset(pkt_dev->dst_min, 0, sizeof(pkt_dev->dst_min));
1390			strcpy(pkt_dev->dst_min, buf);
1391			pkt_dev->daddr_min = in_aton(pkt_dev->dst_min);
1392			pkt_dev->cur_daddr = pkt_dev->daddr_min;
1393		}
1394		if (debug)
1395			pr_debug("dst_min set to: %s\n", pkt_dev->dst_min);
1396		i += len;
1397		sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min);
1398		return count;
1399	}
1400	if (!strcmp(name, "dst_max")) {
1401		len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_max) - 1);
1402		if (len < 0)
1403			return len;
1404
1405		if (copy_from_user(buf, &user_buffer[i], len))
1406			return -EFAULT;
1407		buf[len] = 0;
1408		if (strcmp(buf, pkt_dev->dst_max) != 0) {
1409			memset(pkt_dev->dst_max, 0, sizeof(pkt_dev->dst_max));
1410			strcpy(pkt_dev->dst_max, buf);
1411			pkt_dev->daddr_max = in_aton(pkt_dev->dst_max);
1412			pkt_dev->cur_daddr = pkt_dev->daddr_max;
1413		}
1414		if (debug)
1415			pr_debug("dst_max set to: %s\n", pkt_dev->dst_max);
1416		i += len;
1417		sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max);
1418		return count;
1419	}
1420	if (!strcmp(name, "dst6")) {
1421		len = strn_len(&user_buffer[i], sizeof(buf) - 1);
1422		if (len < 0)
1423			return len;
1424
1425		pkt_dev->flags |= F_IPV6;
1426
1427		if (copy_from_user(buf, &user_buffer[i], len))
1428			return -EFAULT;
1429		buf[len] = 0;
1430
1431		in6_pton(buf, -1, pkt_dev->in6_daddr.s6_addr, -1, NULL);
1432		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_daddr);
1433
1434		pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr;
1435
1436		if (debug)
1437			pr_debug("dst6 set to: %s\n", buf);
1438
1439		i += len;
1440		sprintf(pg_result, "OK: dst6=%s", buf);
1441		return count;
1442	}
1443	if (!strcmp(name, "dst6_min")) {
1444		len = strn_len(&user_buffer[i], sizeof(buf) - 1);
1445		if (len < 0)
1446			return len;
1447
1448		pkt_dev->flags |= F_IPV6;
1449
1450		if (copy_from_user(buf, &user_buffer[i], len))
1451			return -EFAULT;
1452		buf[len] = 0;
1453
1454		in6_pton(buf, -1, pkt_dev->min_in6_daddr.s6_addr, -1, NULL);
1455		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_daddr);
1456
1457		pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr;
1458		if (debug)
1459			pr_debug("dst6_min set to: %s\n", buf);
1460
1461		i += len;
1462		sprintf(pg_result, "OK: dst6_min=%s", buf);
1463		return count;
1464	}
1465	if (!strcmp(name, "dst6_max")) {
1466		len = strn_len(&user_buffer[i], sizeof(buf) - 1);
1467		if (len < 0)
1468			return len;
1469
1470		pkt_dev->flags |= F_IPV6;
1471
1472		if (copy_from_user(buf, &user_buffer[i], len))
1473			return -EFAULT;
1474		buf[len] = 0;
1475
1476		in6_pton(buf, -1, pkt_dev->max_in6_daddr.s6_addr, -1, NULL);
1477		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_daddr);
1478
1479		if (debug)
1480			pr_debug("dst6_max set to: %s\n", buf);
1481
1482		i += len;
1483		sprintf(pg_result, "OK: dst6_max=%s", buf);
1484		return count;
1485	}
1486	if (!strcmp(name, "src6")) {
1487		len = strn_len(&user_buffer[i], sizeof(buf) - 1);
1488		if (len < 0)
1489			return len;
1490
1491		pkt_dev->flags |= F_IPV6;
1492
1493		if (copy_from_user(buf, &user_buffer[i], len))
1494			return -EFAULT;
1495		buf[len] = 0;
1496
1497		in6_pton(buf, -1, pkt_dev->in6_saddr.s6_addr, -1, NULL);
1498		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_saddr);
1499
1500		pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr;
1501
1502		if (debug)
1503			pr_debug("src6 set to: %s\n", buf);
1504
1505		i += len;
1506		sprintf(pg_result, "OK: src6=%s", buf);
1507		return count;
1508	}
1509	if (!strcmp(name, "src_min")) {
1510		len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_min) - 1);
1511		if (len < 0)
1512			return len;
1513
1514		if (copy_from_user(buf, &user_buffer[i], len))
1515			return -EFAULT;
1516		buf[len] = 0;
1517		if (strcmp(buf, pkt_dev->src_min) != 0) {
1518			memset(pkt_dev->src_min, 0, sizeof(pkt_dev->src_min));
1519			strcpy(pkt_dev->src_min, buf);
1520			pkt_dev->saddr_min = in_aton(pkt_dev->src_min);
1521			pkt_dev->cur_saddr = pkt_dev->saddr_min;
1522		}
1523		if (debug)
1524			pr_debug("src_min set to: %s\n", pkt_dev->src_min);
1525		i += len;
1526		sprintf(pg_result, "OK: src_min=%s", pkt_dev->src_min);
1527		return count;
1528	}
1529	if (!strcmp(name, "src_max")) {
1530		len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_max) - 1);
1531		if (len < 0)
1532			return len;
1533
1534		if (copy_from_user(buf, &user_buffer[i], len))
1535			return -EFAULT;
1536		buf[len] = 0;
1537		if (strcmp(buf, pkt_dev->src_max) != 0) {
1538			memset(pkt_dev->src_max, 0, sizeof(pkt_dev->src_max));
1539			strcpy(pkt_dev->src_max, buf);
1540			pkt_dev->saddr_max = in_aton(pkt_dev->src_max);
1541			pkt_dev->cur_saddr = pkt_dev->saddr_max;
1542		}
1543		if (debug)
1544			pr_debug("src_max set to: %s\n", pkt_dev->src_max);
1545		i += len;
1546		sprintf(pg_result, "OK: src_max=%s", pkt_dev->src_max);
1547		return count;
1548	}
1549	if (!strcmp(name, "dst_mac")) {
1550		len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
1551		if (len < 0)
1552			return len;
1553
1554		memset(valstr, 0, sizeof(valstr));
1555		if (copy_from_user(valstr, &user_buffer[i], len))
1556			return -EFAULT;
1557
1558		if (!mac_pton(valstr, pkt_dev->dst_mac))
1559			return -EINVAL;
1560		/* Set up Dest MAC */
1561		ether_addr_copy(&pkt_dev->hh[0], pkt_dev->dst_mac);
1562
1563		sprintf(pg_result, "OK: dstmac %pM", pkt_dev->dst_mac);
1564		return count;
1565	}
1566	if (!strcmp(name, "src_mac")) {
1567		len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
1568		if (len < 0)
1569			return len;
1570
1571		memset(valstr, 0, sizeof(valstr));
1572		if (copy_from_user(valstr, &user_buffer[i], len))
1573			return -EFAULT;
1574
1575		if (!mac_pton(valstr, pkt_dev->src_mac))
1576			return -EINVAL;
1577		/* Set up Src MAC */
1578		ether_addr_copy(&pkt_dev->hh[6], pkt_dev->src_mac);
1579
1580		sprintf(pg_result, "OK: srcmac %pM", pkt_dev->src_mac);
1581		return count;
1582	}
1583
1584	if (!strcmp(name, "clear_counters")) {
1585		pktgen_clear_counters(pkt_dev);
1586		sprintf(pg_result, "OK: Clearing counters.\n");
1587		return count;
1588	}
1589
1590	if (!strcmp(name, "flows")) {
1591		len = num_arg(&user_buffer[i], 10, &value);
1592		if (len < 0)
1593			return len;
1594
1595		i += len;
1596		if (value > MAX_CFLOWS)
1597			value = MAX_CFLOWS;
1598
1599		pkt_dev->cflows = value;
1600		sprintf(pg_result, "OK: flows=%u", pkt_dev->cflows);
1601		return count;
1602	}
1603#ifdef CONFIG_XFRM
1604	if (!strcmp(name, "spi")) {
1605		len = num_arg(&user_buffer[i], 10, &value);
1606		if (len < 0)
1607			return len;
1608
1609		i += len;
1610		pkt_dev->spi = value;
1611		sprintf(pg_result, "OK: spi=%u", pkt_dev->spi);
1612		return count;
1613	}
1614#endif
1615	if (!strcmp(name, "flowlen")) {
1616		len = num_arg(&user_buffer[i], 10, &value);
1617		if (len < 0)
1618			return len;
1619
1620		i += len;
1621		pkt_dev->lflow = value;
1622		sprintf(pg_result, "OK: flowlen=%u", pkt_dev->lflow);
1623		return count;
1624	}
1625
1626	if (!strcmp(name, "queue_map_min")) {
1627		len = num_arg(&user_buffer[i], 5, &value);
1628		if (len < 0)
1629			return len;
1630
1631		i += len;
1632		pkt_dev->queue_map_min = value;
1633		sprintf(pg_result, "OK: queue_map_min=%u", pkt_dev->queue_map_min);
1634		return count;
1635	}
1636
1637	if (!strcmp(name, "queue_map_max")) {
1638		len = num_arg(&user_buffer[i], 5, &value);
1639		if (len < 0)
1640			return len;
1641
1642		i += len;
1643		pkt_dev->queue_map_max = value;
1644		sprintf(pg_result, "OK: queue_map_max=%u", pkt_dev->queue_map_max);
1645		return count;
1646	}
1647
1648	if (!strcmp(name, "mpls")) {
1649		unsigned int n, cnt;
1650
1651		len = get_labels(&user_buffer[i], pkt_dev);
1652		if (len < 0)
1653			return len;
1654		i += len;
1655		cnt = sprintf(pg_result, "OK: mpls=");
1656		for (n = 0; n < pkt_dev->nr_labels; n++)
1657			cnt += sprintf(pg_result + cnt,
1658				       "%08x%s", ntohl(pkt_dev->labels[n]),
1659				       n == pkt_dev->nr_labels-1 ? "" : ",");
1660
1661		if (pkt_dev->nr_labels && pkt_dev->vlan_id != 0xffff) {
1662			pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */
1663			pkt_dev->svlan_id = 0xffff;
1664
1665			if (debug)
1666				pr_debug("VLAN/SVLAN auto turned off\n");
1667		}
1668		return count;
1669	}
1670
1671	if (!strcmp(name, "vlan_id")) {
1672		len = num_arg(&user_buffer[i], 4, &value);
1673		if (len < 0)
1674			return len;
1675
1676		i += len;
1677		if (value <= 4095) {
1678			pkt_dev->vlan_id = value;  /* turn on VLAN */
1679
1680			if (debug)
1681				pr_debug("VLAN turned on\n");
1682
1683			if (debug && pkt_dev->nr_labels)
1684				pr_debug("MPLS auto turned off\n");
1685
1686			pkt_dev->nr_labels = 0;    /* turn off MPLS */
1687			sprintf(pg_result, "OK: vlan_id=%u", pkt_dev->vlan_id);
1688		} else {
1689			pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */
1690			pkt_dev->svlan_id = 0xffff;
1691
1692			if (debug)
1693				pr_debug("VLAN/SVLAN turned off\n");
1694		}
1695		return count;
1696	}
1697
1698	if (!strcmp(name, "vlan_p")) {
1699		len = num_arg(&user_buffer[i], 1, &value);
1700		if (len < 0)
1701			return len;
1702
1703		i += len;
1704		if ((value <= 7) && (pkt_dev->vlan_id != 0xffff)) {
1705			pkt_dev->vlan_p = value;
1706			sprintf(pg_result, "OK: vlan_p=%u", pkt_dev->vlan_p);
1707		} else {
1708			sprintf(pg_result, "ERROR: vlan_p must be 0-7");
1709		}
1710		return count;
1711	}
1712
1713	if (!strcmp(name, "vlan_cfi")) {
1714		len = num_arg(&user_buffer[i], 1, &value);
1715		if (len < 0)
1716			return len;
1717
1718		i += len;
1719		if ((value <= 1) && (pkt_dev->vlan_id != 0xffff)) {
1720			pkt_dev->vlan_cfi = value;
1721			sprintf(pg_result, "OK: vlan_cfi=%u", pkt_dev->vlan_cfi);
1722		} else {
1723			sprintf(pg_result, "ERROR: vlan_cfi must be 0-1");
1724		}
1725		return count;
1726	}
1727
1728	if (!strcmp(name, "svlan_id")) {
1729		len = num_arg(&user_buffer[i], 4, &value);
1730		if (len < 0)
1731			return len;
1732
1733		i += len;
1734		if ((value <= 4095) && ((pkt_dev->vlan_id != 0xffff))) {
1735			pkt_dev->svlan_id = value;  /* turn on SVLAN */
1736
1737			if (debug)
1738				pr_debug("SVLAN turned on\n");
1739
1740			if (debug && pkt_dev->nr_labels)
1741				pr_debug("MPLS auto turned off\n");
1742
1743			pkt_dev->nr_labels = 0;    /* turn off MPLS */
1744			sprintf(pg_result, "OK: svlan_id=%u", pkt_dev->svlan_id);
1745		} else {
1746			pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */
1747			pkt_dev->svlan_id = 0xffff;
1748
1749			if (debug)
1750				pr_debug("VLAN/SVLAN turned off\n");
1751		}
1752		return count;
1753	}
1754
1755	if (!strcmp(name, "svlan_p")) {
1756		len = num_arg(&user_buffer[i], 1, &value);
1757		if (len < 0)
1758			return len;
1759
1760		i += len;
1761		if ((value <= 7) && (pkt_dev->svlan_id != 0xffff)) {
1762			pkt_dev->svlan_p = value;
1763			sprintf(pg_result, "OK: svlan_p=%u", pkt_dev->svlan_p);
1764		} else {
1765			sprintf(pg_result, "ERROR: svlan_p must be 0-7");
1766		}
1767		return count;
1768	}
1769
1770	if (!strcmp(name, "svlan_cfi")) {
1771		len = num_arg(&user_buffer[i], 1, &value);
1772		if (len < 0)
1773			return len;
1774
1775		i += len;
1776		if ((value <= 1) && (pkt_dev->svlan_id != 0xffff)) {
1777			pkt_dev->svlan_cfi = value;
1778			sprintf(pg_result, "OK: svlan_cfi=%u", pkt_dev->svlan_cfi);
1779		} else {
1780			sprintf(pg_result, "ERROR: svlan_cfi must be 0-1");
1781		}
1782		return count;
1783	}
1784
1785	if (!strcmp(name, "tos")) {
1786		__u32 tmp_value = 0;
1787		len = hex32_arg(&user_buffer[i], 2, &tmp_value);
1788		if (len < 0)
1789			return len;
1790
1791		i += len;
1792		if (len == 2) {
1793			pkt_dev->tos = tmp_value;
1794			sprintf(pg_result, "OK: tos=0x%02x", pkt_dev->tos);
1795		} else {
1796			sprintf(pg_result, "ERROR: tos must be 00-ff");
1797		}
1798		return count;
1799	}
1800
1801	if (!strcmp(name, "traffic_class")) {
1802		__u32 tmp_value = 0;
1803		len = hex32_arg(&user_buffer[i], 2, &tmp_value);
1804		if (len < 0)
1805			return len;
1806
1807		i += len;
1808		if (len == 2) {
1809			pkt_dev->traffic_class = tmp_value;
1810			sprintf(pg_result, "OK: traffic_class=0x%02x", pkt_dev->traffic_class);
1811		} else {
1812			sprintf(pg_result, "ERROR: traffic_class must be 00-ff");
1813		}
1814		return count;
1815	}
1816
1817	if (!strcmp(name, "skb_priority")) {
1818		len = num_arg(&user_buffer[i], 9, &value);
1819		if (len < 0)
1820			return len;
1821
1822		i += len;
1823		pkt_dev->skb_priority = value;
1824		sprintf(pg_result, "OK: skb_priority=%i",
1825			pkt_dev->skb_priority);
1826		return count;
1827	}
1828
1829	sprintf(pkt_dev->result, "No such parameter \"%s\"", name);
1830	return -EINVAL;
1831}
1832
1833static int pktgen_if_open(struct inode *inode, struct file *file)
1834{
1835	return single_open(file, pktgen_if_show, pde_data(inode));
1836}
1837
1838static const struct proc_ops pktgen_if_proc_ops = {
1839	.proc_open	= pktgen_if_open,
1840	.proc_read	= seq_read,
1841	.proc_lseek	= seq_lseek,
1842	.proc_write	= pktgen_if_write,
1843	.proc_release	= single_release,
1844};
1845
1846static int pktgen_thread_show(struct seq_file *seq, void *v)
1847{
1848	struct pktgen_thread *t = seq->private;
1849	const struct pktgen_dev *pkt_dev;
1850
1851	BUG_ON(!t);
1852
1853	seq_puts(seq, "Running: ");
1854
1855	rcu_read_lock();
1856	list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
1857		if (pkt_dev->running)
1858			seq_printf(seq, "%s ", pkt_dev->odevname);
1859
1860	seq_puts(seq, "\nStopped: ");
1861
1862	list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
1863		if (!pkt_dev->running)
1864			seq_printf(seq, "%s ", pkt_dev->odevname);
1865
1866	if (t->result[0])
1867		seq_printf(seq, "\nResult: %s\n", t->result);
1868	else
1869		seq_puts(seq, "\nResult: NA\n");
1870
1871	rcu_read_unlock();
1872
1873	return 0;
1874}
1875
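    /*
     * Per-thread control file (/proc/net/pktgen/kpktgend_N, named after the
     * kthread). Accepts "add_device", "rem_device_all" and the obsolete
     * "max_before_softirq", e.g.:
     *
     *	echo "add_device eth0" > /proc/net/pktgen/kpktgend_0
     */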
1876static ssize_t pktgen_thread_write(struct file *file,
1877				   const char __user * user_buffer,
1878				   size_t count, loff_t * offset)
1879{
1880	struct seq_file *seq = file->private_data;
1881	struct pktgen_thread *t = seq->private;
1882	int i, max, len, ret;
1883	char name[40];
1884	char *pg_result;
1885
1886	if (count < 1) {
1887		//      sprintf(pg_result, "Wrong command format");
1888		return -EINVAL;
1889	}
1890
1891	max = count;
1892	len = count_trail_chars(user_buffer, max);
1893	if (len < 0)
1894		return len;
1895
1896	i = len;
1897
1898	/* Read variable name */
1899
1900	len = strn_len(&user_buffer[i], sizeof(name) - 1);
1901	if (len < 0)
1902		return len;
1903
1904	memset(name, 0, sizeof(name));
1905	if (copy_from_user(name, &user_buffer[i], len))
1906		return -EFAULT;
1907	i += len;
1908
1909	max = count - i;
1910	len = count_trail_chars(&user_buffer[i], max);
1911	if (len < 0)
1912		return len;
1913
1914	i += len;
1915
1916	if (debug)
1917		pr_debug("t=%s, count=%lu\n", name, (unsigned long)count);
1918
1919	if (!t) {
1920		pr_err("ERROR: No thread\n");
1921		ret = -EINVAL;
1922		goto out;
1923	}
1924
1925	pg_result = &(t->result[0]);
1926
1927	if (!strcmp(name, "add_device")) {
1928		char f[32];
1929		memset(f, 0, 32);
1930		len = strn_len(&user_buffer[i], sizeof(f) - 1);
1931		if (len < 0) {
1932			ret = len;
1933			goto out;
1934		}
1935		if (copy_from_user(f, &user_buffer[i], len))
1936			return -EFAULT;
1937		i += len;
1938		mutex_lock(&pktgen_thread_lock);
1939		ret = pktgen_add_device(t, f);
1940		mutex_unlock(&pktgen_thread_lock);
1941		if (!ret) {
1942			ret = count;
1943			sprintf(pg_result, "OK: add_device=%s", f);
1944		} else
1945			sprintf(pg_result, "ERROR: can not add device %s", f);
1946		goto out;
1947	}
1948
1949	if (!strcmp(name, "rem_device_all")) {
1950		mutex_lock(&pktgen_thread_lock);
1951		t->control |= T_REMDEVALL;
1952		mutex_unlock(&pktgen_thread_lock);
1953		schedule_timeout_interruptible(msecs_to_jiffies(125));	/* Propagate thread->control  */
1954		ret = count;
1955		sprintf(pg_result, "OK: rem_device_all");
1956		goto out;
1957	}
1958
1959	if (!strcmp(name, "max_before_softirq")) {
1960		sprintf(pg_result, "OK: Note! max_before_softirq is obsolete -- Do not use");
1961		ret = count;
1962		goto out;
1963	}
1964
1965	ret = -EINVAL;
1966out:
1967	return ret;
1968}
1969
1970static int pktgen_thread_open(struct inode *inode, struct file *file)
1971{
1972	return single_open(file, pktgen_thread_show, pde_data(inode));
1973}
1974
1975static const struct proc_ops pktgen_thread_proc_ops = {
1976	.proc_open	= pktgen_thread_open,
1977	.proc_read	= seq_read,
1978	.proc_lseek	= seq_lseek,
1979	.proc_write	= pktgen_thread_write,
1980	.proc_release	= single_release,
1981};
1982
1983	/* Find a device by name across all ("NN") threads; optionally mark it for removal. */
1984static struct pktgen_dev *__pktgen_NN_threads(const struct pktgen_net *pn,
1985					      const char *ifname, int remove)
1986{
1987	struct pktgen_thread *t;
1988	struct pktgen_dev *pkt_dev = NULL;
1989	bool exact = (remove == FIND);
1990
1991	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
1992		pkt_dev = pktgen_find_dev(t, ifname, exact);
1993		if (pkt_dev) {
1994			if (remove) {
1995				pkt_dev->removal_mark = 1;
1996				t->control |= T_REMDEV;
1997			}
1998			break;
1999		}
2000	}
2001	return pkt_dev;
2002}
2003
2004/*
2005 * mark a device for removal
2006 */
2007static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname)
2008{
2009	struct pktgen_dev *pkt_dev = NULL;
2010	const int max_tries = 10, msec_per_try = 125;
2011	int i = 0;
2012
2013	mutex_lock(&pktgen_thread_lock);
2014	pr_debug("%s: marking %s for removal\n", __func__, ifname);
2015
2016	while (1) {
2017
2018		pkt_dev = __pktgen_NN_threads(pn, ifname, REMOVE);
2019		if (pkt_dev == NULL)
2020			break;	/* success */
2021
2022		mutex_unlock(&pktgen_thread_lock);
2023		pr_debug("%s: waiting for %s to disappear....\n",
2024			 __func__, ifname);
2025		schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
2026		mutex_lock(&pktgen_thread_lock);
2027
2028		if (++i >= max_tries) {
2029			pr_err("%s: timed out after waiting %d msec for device %s to be removed\n",
2030			       __func__, msec_per_try * i, ifname);
2031			break;
2032		}
2033
2034	}
2035
2036	mutex_unlock(&pktgen_thread_lock);
2037}
2038
2039static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *dev)
2040{
2041	struct pktgen_thread *t;
2042
2043	mutex_lock(&pktgen_thread_lock);
2044
2045	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
2046		struct pktgen_dev *pkt_dev;
2047
2048		if_lock(t);
2049		list_for_each_entry(pkt_dev, &t->if_list, list) {
2050			if (pkt_dev->odev != dev)
2051				continue;
2052
2053			proc_remove(pkt_dev->entry);
2054
2055			pkt_dev->entry = proc_create_data(dev->name, 0600,
2056							  pn->proc_dir,
2057							  &pktgen_if_proc_ops,
2058							  pkt_dev);
2059			if (!pkt_dev->entry)
2060				pr_err("can't move proc entry for '%s'\n",
2061				       dev->name);
2062			break;
2063		}
2064		if_unlock(t);
2065	}
2066	mutex_unlock(&pktgen_thread_lock);
2067}
2068
2069static int pktgen_device_event(struct notifier_block *unused,
2070			       unsigned long event, void *ptr)
2071{
2072	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2073	struct pktgen_net *pn = net_generic(dev_net(dev), pg_net_id);
2074
2075	if (pn->pktgen_exiting)
2076		return NOTIFY_DONE;
2077
2078	/* It is OK that we do not hold the group lock right now,
2079	 * as we run under the RTNL lock.
2080	 */
2081
2082	switch (event) {
2083	case NETDEV_CHANGENAME:
2084		pktgen_change_name(pn, dev);
2085		break;
2086
2087	case NETDEV_UNREGISTER:
2088		pktgen_mark_device(pn, dev->name);
2089		break;
2090	}
2091
2092	return NOTIFY_DONE;
2093}
2094
2095static struct net_device *pktgen_dev_get_by_name(const struct pktgen_net *pn,
2096						 struct pktgen_dev *pkt_dev,
2097						 const char *ifname)
2098{
2099	char b[IFNAMSIZ+5];
2100	int i;
2101
2102	for (i = 0; ifname[i] != '@'; i++) {
2103		if (i == IFNAMSIZ)
2104			break;
2105
2106		b[i] = ifname[i];
2107	}
2108	b[i] = 0;
2109
2110	return dev_get_by_name(pn->net, b);
2111}
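/* Note: a pktgen device name may carry an "@" suffix so the same netdev
 * can be driven by several pktgen threads, e.g. "eth0@0" and "eth0@1";
 * everything from the '@' on is stripped before the netdev lookup:
 *
 *   echo "add_device eth0@0" > /proc/net/pktgen/kpktgend_0
 *   echo "add_device eth0@1" > /proc/net/pktgen/kpktgend_1
 */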
2112
2113
2114/* Associate pktgen_dev with a device. */
2115
2116static int pktgen_setup_dev(const struct pktgen_net *pn,
2117			    struct pktgen_dev *pkt_dev, const char *ifname)
2118{
2119	struct net_device *odev;
2120	int err;
2121
2122	/* Clean old setups */
2123	if (pkt_dev->odev) {
2124		netdev_put(pkt_dev->odev, &pkt_dev->dev_tracker);
2125		pkt_dev->odev = NULL;
2126	}
2127
2128	odev = pktgen_dev_get_by_name(pn, pkt_dev, ifname);
2129	if (!odev) {
2130		pr_err("no such netdevice: \"%s\"\n", ifname);
2131		return -ENODEV;
2132	}
2133
2134	if (odev->type != ARPHRD_ETHER && odev->type != ARPHRD_LOOPBACK) {
2135		pr_err("not an ethernet or loopback device: \"%s\"\n", ifname);
2136		err = -EINVAL;
2137	} else if (!netif_running(odev)) {
2138		pr_err("device is down: \"%s\"\n", ifname);
2139		err = -ENETDOWN;
2140	} else {
2141		pkt_dev->odev = odev;
2142		netdev_tracker_alloc(odev, &pkt_dev->dev_tracker, GFP_KERNEL);
2143		return 0;
2144	}
2145
2146	dev_put(odev);
2147	return err;
2148}
2149
2150/* Read pkt_dev from the interface and set up internal pktgen_dev
2151 * structure to have the right information to create/send packets
2152 */
2153static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
2154{
2155	int ntxq;
2156
2157	if (!pkt_dev->odev) {
2158		pr_err("ERROR: pkt_dev->odev == NULL in setup_inject\n");
2159		sprintf(pkt_dev->result,
2160			"ERROR: pkt_dev->odev == NULL in setup_inject.\n");
2161		return;
2162	}
2163
2164	/* make sure that we don't pick a non-existing transmit queue */
2165	ntxq = pkt_dev->odev->real_num_tx_queues;
2166
2167	if (ntxq <= pkt_dev->queue_map_min) {
2168		pr_warn("WARNING: Requested queue_map_min (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
2169			pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
2170			pkt_dev->odevname);
2171		pkt_dev->queue_map_min = (ntxq ?: 1) - 1;
2172	}
2173	if (pkt_dev->queue_map_max >= ntxq) {
2174		pr_warn("WARNING: Requested queue_map_max (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
2175			pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
2176			pkt_dev->odevname);
2177		pkt_dev->queue_map_max = (ntxq ?: 1) - 1;
2178	}
2179
2180	/* Default to the interface's mac if not explicitly set. */
2181
2182	if (is_zero_ether_addr(pkt_dev->src_mac))
2183		ether_addr_copy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr);
2184
2185	/* Set up Dest MAC */
2186	ether_addr_copy(&(pkt_dev->hh[0]), pkt_dev->dst_mac);
2187
2188	if (pkt_dev->flags & F_IPV6) {
2189		int i, set = 0, err = 1;
2190		struct inet6_dev *idev;
2191
2192		if (pkt_dev->min_pkt_size == 0) {
2193			pkt_dev->min_pkt_size = 14 + sizeof(struct ipv6hdr)
2194						+ sizeof(struct udphdr)
2195						+ sizeof(struct pktgen_hdr)
2196						+ pkt_dev->pkt_overhead;
2197		}
2198
2199		for (i = 0; i < sizeof(struct in6_addr); i++)
2200			if (pkt_dev->cur_in6_saddr.s6_addr[i]) {
2201				set = 1;
2202				break;
2203			}
2204
2205		if (!set) {
2206
2207			/*
2208			 * Use the link-level address if unconfigured.
2209			 *
2210			 * Switch to ipv6_get_lladdr() if/when it gets exported.
2211			 */
2212
2213			rcu_read_lock();
2214			idev = __in6_dev_get(pkt_dev->odev);
2215			if (idev) {
2216				struct inet6_ifaddr *ifp;
2217
2218				read_lock_bh(&idev->lock);
2219				list_for_each_entry(ifp, &idev->addr_list, if_list) {
2220					if ((ifp->scope & IFA_LINK) &&
2221					    !(ifp->flags & IFA_F_TENTATIVE)) {
2222						pkt_dev->cur_in6_saddr = ifp->addr;
2223						err = 0;
2224						break;
2225					}
2226				}
2227				read_unlock_bh(&idev->lock);
2228			}
2229			rcu_read_unlock();
2230			if (err)
2231				pr_err("ERROR: IPv6 link address not available\n");
2232		}
2233	} else {
2234		if (pkt_dev->min_pkt_size == 0) {
2235			pkt_dev->min_pkt_size = 14 + sizeof(struct iphdr)
2236						+ sizeof(struct udphdr)
2237						+ sizeof(struct pktgen_hdr)
2238						+ pkt_dev->pkt_overhead;
2239		}
2240
2241		pkt_dev->saddr_min = 0;
2242		pkt_dev->saddr_max = 0;
2243		if (strlen(pkt_dev->src_min) == 0) {
2244
2245			struct in_device *in_dev;
2246
2247			rcu_read_lock();
2248			in_dev = __in_dev_get_rcu(pkt_dev->odev);
2249			if (in_dev) {
2250				const struct in_ifaddr *ifa;
2251
2252				ifa = rcu_dereference(in_dev->ifa_list);
2253				if (ifa) {
2254					pkt_dev->saddr_min = ifa->ifa_address;
2255					pkt_dev->saddr_max = pkt_dev->saddr_min;
2256				}
2257			}
2258			rcu_read_unlock();
2259		} else {
2260			pkt_dev->saddr_min = in_aton(pkt_dev->src_min);
2261			pkt_dev->saddr_max = in_aton(pkt_dev->src_max);
2262		}
2263
2264		pkt_dev->daddr_min = in_aton(pkt_dev->dst_min);
2265		pkt_dev->daddr_max = in_aton(pkt_dev->dst_max);
2266	}
2267	/* Initialize current values. */
2268	pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size;
2269	if (pkt_dev->min_pkt_size > pkt_dev->max_pkt_size)
2270		pkt_dev->max_pkt_size = pkt_dev->min_pkt_size;
2271
2272	pkt_dev->cur_dst_mac_offset = 0;
2273	pkt_dev->cur_src_mac_offset = 0;
2274	pkt_dev->cur_saddr = pkt_dev->saddr_min;
2275	pkt_dev->cur_daddr = pkt_dev->daddr_min;
2276	pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min;
2277	pkt_dev->cur_udp_src = pkt_dev->udp_src_min;
2278	pkt_dev->nflows = 0;
2279}
2280
2281
2282static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
2283{
2284	ktime_t start_time, end_time;
2285	s64 remaining;
2286	struct hrtimer_sleeper t;
2287
2288	hrtimer_setup_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2289	hrtimer_set_expires(&t.timer, spin_until);
2290
2291	remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
2292	if (remaining <= 0)
2293		goto out;
2294
2295	start_time = ktime_get();
2296	if (remaining < 100000) {
2297		/* for small delays (<100us), just loop until limit is reached */
2298		do {
2299			end_time = ktime_get();
2300		} while (ktime_compare(end_time, spin_until) < 0);
2301	} else {
2302		do {
2303			set_current_state(TASK_INTERRUPTIBLE);
2304			hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_ABS);
2305
2306			if (likely(t.task))
2307				schedule();
2308
2309			hrtimer_cancel(&t.timer);
2310		} while (t.task && pkt_dev->running && !signal_pending(current));
2311		__set_current_state(TASK_RUNNING);
2312		end_time = ktime_get();
2313	}
2314
2315	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
2316out:
2317	pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
2318	destroy_hrtimer_on_stack(&t.timer);
2319}
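/* Usage sketch (illustrative): spin() is driven by the "delay"
 * parameter, the inter-packet gap in nanoseconds.  Gaps below 100 us
 * are busy-waited, longer ones sleep on the hrtimer set up above:
 *
 *   echo "delay 10000"   > /proc/net/pktgen/eth0  # 10 us -> busy-wait
 *   echo "delay 1000000" > /proc/net/pktgen/eth0  # 1 ms  -> hrtimer sleep
 */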
2320
2321static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
2322{
2323	pkt_dev->pkt_overhead = 0;
2324	pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
2325	pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
2326	pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
2327}
2328
2329static inline int f_seen(const struct pktgen_dev *pkt_dev, int flow)
2330{
2331	return !!(pkt_dev->flows[flow].flags & F_INIT);
2332}
2333
2334static inline int f_pick(struct pktgen_dev *pkt_dev)
2335{
2336	int flow = pkt_dev->curfl;
2337
2338	if (pkt_dev->flags & F_FLOW_SEQ) {
2339		if (pkt_dev->flows[flow].count >= pkt_dev->lflow) {
2340			/* reset time */
2341			pkt_dev->flows[flow].count = 0;
2342			pkt_dev->flows[flow].flags = 0;
2343			pkt_dev->curfl += 1;
2344			if (pkt_dev->curfl >= pkt_dev->cflows)
2345				pkt_dev->curfl = 0; /*reset */
2346		}
2347	} else {
2348		flow = get_random_u32_below(pkt_dev->cflows);
2349		pkt_dev->curfl = flow;
2350
2351		if (pkt_dev->flows[flow].count > pkt_dev->lflow) {
2352			pkt_dev->flows[flow].count = 0;
2353			pkt_dev->flows[flow].flags = 0;
2354		}
2355	}
2356
2357	return pkt_dev->curfl;
2358}
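/* Worked example: with "flows 4" (cflows) and "flowlen 100" (lflow) in
 * F_FLOW_SEQ mode, f_pick() returns flow 0 for 100 packets, then flow 1
 * for the next 100, and so on, wrapping back to 0 after flow 3.  Without
 * F_FLOW_SEQ a flow is drawn at random for every packet and is reset
 * once its count exceeds lflow.
 */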
2359
2360
2361#ifdef CONFIG_XFRM
2362	/* If there is already an IPsec SA, keep it as is; otherwise
2363	 * go look one up.
2364	 */
2365#define DUMMY_MARK 0
2366static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
2367{
2368	struct xfrm_state *x = pkt_dev->flows[flow].x;
2369	struct pktgen_net *pn = net_generic(dev_net(pkt_dev->odev), pg_net_id);
2370	if (!x) {
2371
2372		if (pkt_dev->spi) {
2373			/* We need to find the right SA as quickly as possible,
2374			 * so search with the minimum criteria needed.
2375			 */
2376			x = xfrm_state_lookup_byspi(pn->net, htonl(pkt_dev->spi), AF_INET);
2377		} else {
2378			/* slow path: we don't already have xfrm_state */
2379			x = xfrm_stateonly_find(pn->net, DUMMY_MARK, 0,
2380						(xfrm_address_t *)&pkt_dev->cur_daddr,
2381						(xfrm_address_t *)&pkt_dev->cur_saddr,
2382						AF_INET,
2383						pkt_dev->ipsmode,
2384						pkt_dev->ipsproto, 0);
2385		}
2386		if (x) {
2387			pkt_dev->flows[flow].x = x;
2388			set_pkt_overhead(pkt_dev);
2389			pkt_dev->pkt_overhead += x->props.header_len;
2390		}
2391
2392	}
2393}
2394#endif
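/* Usage sketch (illustrative): IPsec processing is enabled per device
 * with the IPSEC flag; pinning one SA by SPI takes the fast
 * xfrm_state_lookup_byspi() path above instead of the tuple search:
 *
 *   echo "flag IPSEC" > /proc/net/pktgen/eth0
 *   echo "spi 256"    > /proc/net/pktgen/eth0
 */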
2395static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
2396{
2397
2398	if (pkt_dev->flags & F_QUEUE_MAP_CPU)
2399		pkt_dev->cur_queue_map = smp_processor_id();
2400
2401	else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) {
2402		__u16 t;
2403		if (pkt_dev->flags & F_QUEUE_MAP_RND) {
2404			t = get_random_u32_inclusive(pkt_dev->queue_map_min,
2405						     pkt_dev->queue_map_max);
2406		} else {
2407			t = pkt_dev->cur_queue_map + 1;
2408			if (t > pkt_dev->queue_map_max)
2409				t = pkt_dev->queue_map_min;
2410		}
2411		pkt_dev->cur_queue_map = t;
2412	}
2413	pkt_dev->cur_queue_map  = pkt_dev->cur_queue_map % pkt_dev->odev->real_num_tx_queues;
2414}
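/* Usage sketch (illustrative): constraining transmission to TX queues
 * 2..5 and walking them round-robin; add the QUEUE_MAP_RND flag to draw
 * a random queue from the range instead:
 *
 *   echo "queue_map_min 2" > /proc/net/pktgen/eth0
 *   echo "queue_map_max 5" > /proc/net/pktgen/eth0
 */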
2415
2416/* Increment/randomize headers according to flags and current values
2417 * for IP src/dest, UDP src/dst port, MAC-Addr src/dst
2418 */
2419static void mod_cur_headers(struct pktgen_dev *pkt_dev)
2420{
2421	__u32 imn;
2422	__u32 imx;
2423	int flow = 0;
2424
2425	if (pkt_dev->cflows)
2426		flow = f_pick(pkt_dev);
2427
2428	/*  Deal with source MAC */
2429	if (pkt_dev->src_mac_count > 1) {
2430		__u32 mc;
2431		__u32 tmp;
2432
2433		if (pkt_dev->flags & F_MACSRC_RND)
2434			mc = get_random_u32_below(pkt_dev->src_mac_count);
2435		else {
2436			mc = pkt_dev->cur_src_mac_offset++;
2437			if (pkt_dev->cur_src_mac_offset >=
2438			    pkt_dev->src_mac_count)
2439				pkt_dev->cur_src_mac_offset = 0;
2440		}
2441
2442		tmp = pkt_dev->src_mac[5] + (mc & 0xFF);
2443		pkt_dev->hh[11] = tmp;
2444		tmp = (pkt_dev->src_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8));
2445		pkt_dev->hh[10] = tmp;
2446		tmp = (pkt_dev->src_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8));
2447		pkt_dev->hh[9] = tmp;
2448		tmp = (pkt_dev->src_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8));
2449		pkt_dev->hh[8] = tmp;
2450		tmp = (pkt_dev->src_mac[1] + (tmp >> 8));
2451		pkt_dev->hh[7] = tmp;
2452	}
2453
2454	/*  Deal with Destination MAC */
2455	if (pkt_dev->dst_mac_count > 1) {
2456		__u32 mc;
2457		__u32 tmp;
2458
2459		if (pkt_dev->flags & F_MACDST_RND)
2460			mc = get_random_u32_below(pkt_dev->dst_mac_count);
2461
2462		else {
2463			mc = pkt_dev->cur_dst_mac_offset++;
2464			if (pkt_dev->cur_dst_mac_offset >=
2465			    pkt_dev->dst_mac_count) {
2466				pkt_dev->cur_dst_mac_offset = 0;
2467			}
2468		}
2469
2470		tmp = pkt_dev->dst_mac[5] + (mc & 0xFF);
2471		pkt_dev->hh[5] = tmp;
2472		tmp = (pkt_dev->dst_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8));
2473		pkt_dev->hh[4] = tmp;
2474		tmp = (pkt_dev->dst_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8));
2475		pkt_dev->hh[3] = tmp;
2476		tmp = (pkt_dev->dst_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8));
2477		pkt_dev->hh[2] = tmp;
2478		tmp = (pkt_dev->dst_mac[1] + (tmp >> 8));
2479		pkt_dev->hh[1] = tmp;
2480	}
2481
2482	if (pkt_dev->flags & F_MPLS_RND) {
2483		unsigned int i;
2484		for (i = 0; i < pkt_dev->nr_labels; i++)
2485			if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM)
2486				pkt_dev->labels[i] = MPLS_STACK_BOTTOM |
2487					     ((__force __be32)get_random_u32() &
2488						      htonl(0x000fffff));
2489	}
2490
2491	if ((pkt_dev->flags & F_VID_RND) && (pkt_dev->vlan_id != 0xffff)) {
2492		pkt_dev->vlan_id = get_random_u32_below(4096);
2493	}
2494
2495	if ((pkt_dev->flags & F_SVID_RND) && (pkt_dev->svlan_id != 0xffff)) {
2496		pkt_dev->svlan_id = get_random_u32_below(4096);
2497	}
2498
2499	if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) {
2500		if (pkt_dev->flags & F_UDPSRC_RND)
2501			pkt_dev->cur_udp_src = get_random_u32_inclusive(pkt_dev->udp_src_min,
2502									pkt_dev->udp_src_max - 1);
2503
2504		else {
2505			pkt_dev->cur_udp_src++;
2506			if (pkt_dev->cur_udp_src >= pkt_dev->udp_src_max)
2507				pkt_dev->cur_udp_src = pkt_dev->udp_src_min;
2508		}
2509	}
2510
2511	if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) {
2512		if (pkt_dev->flags & F_UDPDST_RND) {
2513			pkt_dev->cur_udp_dst = get_random_u32_inclusive(pkt_dev->udp_dst_min,
2514									pkt_dev->udp_dst_max - 1);
2515		} else {
2516			pkt_dev->cur_udp_dst++;
2517			if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max)
2518				pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min;
2519		}
2520	}
2521
2522	if (!(pkt_dev->flags & F_IPV6)) {
2523
2524		imn = ntohl(pkt_dev->saddr_min);
2525		imx = ntohl(pkt_dev->saddr_max);
2526		if (imn < imx) {
2527			__u32 t;
2528			if (pkt_dev->flags & F_IPSRC_RND)
2529				t = get_random_u32_inclusive(imn, imx - 1);
2530			else {
2531				t = ntohl(pkt_dev->cur_saddr);
2532				t++;
2533				if (t > imx)
2534					t = imn;
2535
2536			}
2537			pkt_dev->cur_saddr = htonl(t);
2538		}
2539
2540		if (pkt_dev->cflows && f_seen(pkt_dev, flow)) {
2541			pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr;
2542		} else {
2543			imn = ntohl(pkt_dev->daddr_min);
2544			imx = ntohl(pkt_dev->daddr_max);
2545			if (imn < imx) {
2546				__u32 t;
2547				__be32 s;
2548				if (pkt_dev->flags & F_IPDST_RND) {
2549
2550					do {
2551						t = get_random_u32_inclusive(imn, imx - 1);
2552						s = htonl(t);
2553					} while (ipv4_is_loopback(s) ||
2554						ipv4_is_multicast(s) ||
2555						ipv4_is_lbcast(s) ||
2556						ipv4_is_zeronet(s) ||
2557						ipv4_is_local_multicast(s));
2558					pkt_dev->cur_daddr = s;
2559				} else {
2560					t = ntohl(pkt_dev->cur_daddr);
2561					t++;
2562					if (t > imx) {
2563						t = imn;
2564					}
2565					pkt_dev->cur_daddr = htonl(t);
2566				}
2567			}
2568			if (pkt_dev->cflows) {
2569				pkt_dev->flows[flow].flags |= F_INIT;
2570				pkt_dev->flows[flow].cur_daddr =
2571				    pkt_dev->cur_daddr;
2572#ifdef CONFIG_XFRM
2573				if (pkt_dev->flags & F_IPSEC)
2574					get_ipsec_sa(pkt_dev, flow);
2575#endif
2576				pkt_dev->nflows++;
2577			}
2578		}
2579	} else {		/* IPV6 * */
2580
2581		if (!ipv6_addr_any(&pkt_dev->min_in6_daddr)) {
2582			int i;
2583
2584			/* Only random destinations are supported so far */
2585
2586			for (i = 0; i < 4; i++) {
2587				pkt_dev->cur_in6_daddr.s6_addr32[i] =
2588				    (((__force __be32)get_random_u32() |
2589				      pkt_dev->min_in6_daddr.s6_addr32[i]) &
2590				     pkt_dev->max_in6_daddr.s6_addr32[i]);
2591			}
2592		}
2593	}
2594
2595	if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) {
2596		__u32 t;
2597		if (pkt_dev->flags & F_TXSIZE_RND) {
2598			t = get_random_u32_inclusive(pkt_dev->min_pkt_size,
2599						     pkt_dev->max_pkt_size - 1);
2600		} else {
2601			t = pkt_dev->cur_pkt_size + 1;
2602			if (t > pkt_dev->max_pkt_size)
2603				t = pkt_dev->min_pkt_size;
2604		}
2605		pkt_dev->cur_pkt_size = t;
2606	} else if (pkt_dev->n_imix_entries > 0) {
2607		struct imix_pkt *entry;
2608		__u32 t = get_random_u32_below(IMIX_PRECISION);
2609		__u8 entry_index = pkt_dev->imix_distribution[t];
2610
2611		entry = &pkt_dev->imix_entries[entry_index];
2612		entry->count_so_far++;
2613		pkt_dev->cur_pkt_size = entry->size;
2614	}
2615
2616	set_cur_queue_map(pkt_dev);
2617
2618	pkt_dev->flows[flow].count++;
2619}
2620
2621static void fill_imix_distribution(struct pktgen_dev *pkt_dev)
2622{
2623	int cumulative_probabilities[MAX_IMIX_ENTRIES];
2624	int j = 0;
2625	__u64 cumulative_prob = 0;
2626	__u64 total_weight = 0;
2627	int i = 0;
2628
2629	for (i = 0; i < pkt_dev->n_imix_entries; i++)
2630		total_weight += pkt_dev->imix_entries[i].weight;
2631
2632	/* Fill cumulative_probabilities with the running sum of normalized probabilities */
2633	for (i = 0; i < pkt_dev->n_imix_entries - 1; i++) {
2634		cumulative_prob += div64_u64(pkt_dev->imix_entries[i].weight *
2635						     IMIX_PRECISION,
2636					     total_weight);
2637		cumulative_probabilities[i] = cumulative_prob;
2638	}
2639	cumulative_probabilities[pkt_dev->n_imix_entries - 1] = 100;
2640
2641	for (i = 0; i < IMIX_PRECISION; i++) {
2642		if (i == cumulative_probabilities[j])
2643			j++;
2644		pkt_dev->imix_distribution[i] = j;
2645	}
2646}
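/* Worked example: with IMIX_PRECISION == 100 and imix entries of sizes
 * 64/576/1500 weighted 7/2/1, the running sums become 70 and 90, so
 * imix_distribution[0..69] = 0, [70..89] = 1 and [90..99] = 2; the
 * uniform draw in mod_cur_headers() then picks each size in a 7:2:1
 * ratio.
 */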
2647
2648#ifdef CONFIG_XFRM
2649static u32 pktgen_dst_metrics[RTAX_MAX + 1] = {
2650
2651	[RTAX_HOPLIMIT] = 0x5, /* Set a static hoplimit */
2652};
2653
2654static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
2655{
2656	struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
2657	int err = 0;
2658	struct net *net = dev_net(pkt_dev->odev);
2659
2660	if (!x)
2661		return 0;
2662	/* XXX: we don't support tunnel mode for now until
2663	 * we resolve the dst issue */
2664	if ((x->props.mode != XFRM_MODE_TRANSPORT) && (pkt_dev->spi == 0))
2665		return 0;
2666
2667	/* But when the user specifies a valid SPI, the transformation
2668	 * supports both transport/tunnel modes and ESP/AH types.
2669	 */
2670	if ((x->props.mode == XFRM_MODE_TUNNEL) && (pkt_dev->spi != 0))
2671		skb->_skb_refdst = (unsigned long)&pkt_dev->xdst.u.dst | SKB_DST_NOREF;
2672
2673	rcu_read_lock_bh();
2674	err = pktgen_xfrm_outer_mode_output(x, skb);
2675	rcu_read_unlock_bh();
2676	if (err) {
2677		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
2678		goto error;
2679	}
2680	err = x->type->output(x, skb);
2681	if (err) {
2682		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
2683		goto error;
2684	}
2685	spin_lock_bh(&x->lock);
2686	x->curlft.bytes += skb->len;
2687	x->curlft.packets++;
2688	spin_unlock_bh(&x->lock);
2689error:
2690	return err;
2691}
2692
2693static void free_SAs(struct pktgen_dev *pkt_dev)
2694{
2695	if (pkt_dev->cflows) {
2696		/* let go of the SAs if we have them */
2697		int i;
2698		for (i = 0; i < pkt_dev->cflows; i++) {
2699			struct xfrm_state *x = pkt_dev->flows[i].x;
2700			if (x) {
2701				xfrm_state_put(x);
2702				pkt_dev->flows[i].x = NULL;
2703			}
2704		}
2705	}
2706}
2707
2708static int process_ipsec(struct pktgen_dev *pkt_dev,
2709			      struct sk_buff *skb, __be16 protocol)
2710{
2711	if (pkt_dev->flags & F_IPSEC) {
2712		struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
2713		int nhead = 0;
2714		if (x) {
2715			struct ethhdr *eth;
2716			struct iphdr *iph;
2717			int ret;
2718
2719			nhead = x->props.header_len - skb_headroom(skb);
2720			if (nhead > 0) {
2721				ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
2722				if (ret < 0) {
2723					pr_err("Error expanding ipsec packet %d\n",
2724					       ret);
2725					goto err;
2726				}
2727			}
2728
2729			/* IPsec is not expecting the link-layer header */
2730			skb_pull(skb, ETH_HLEN);
2731			ret = pktgen_output_ipsec(skb, pkt_dev);
2732			if (ret) {
2733				pr_err("Error creating ipsec packet %d\n", ret);
2734				goto err;
2735			}
2736			/* restore ll */
2737			eth = skb_push(skb, ETH_HLEN);
2738			memcpy(eth, pkt_dev->hh, 2 * ETH_ALEN);
2739			eth->h_proto = protocol;
2740
2741			/* Update IPv4 header len as well as checksum value */
2742			iph = ip_hdr(skb);
2743			iph->tot_len = htons(skb->len - ETH_HLEN);
2744			ip_send_check(iph);
2745		}
2746	}
2747	return 1;
2748err:
2749	kfree_skb(skb);
2750	return 0;
2751}
2752#endif
2753
2754static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev)
2755{
2756	unsigned int i;
2757	for (i = 0; i < pkt_dev->nr_labels; i++)
2758		*mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM;
2759
2760	mpls--;
2761	*mpls |= MPLS_STACK_BOTTOM;
2762}
2763
2764static inline __be16 build_tci(unsigned int id, unsigned int cfi,
2765			       unsigned int prio)
2766{
2767	return htons(id | (cfi << 12) | (prio << 13));
2768}
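/* The 802.1Q TCI packs, from the most significant bit down, 3 bits of
 * priority (PCP), one CFI/DEI bit and a 12-bit VLAN ID, hence the
 * shifts above.  For example, build_tci(5, 0, 3) computes the host
 * value (3 << 13) | (0 << 12) | 5 = 0x6005 before the htons().
 */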
2769
2770static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
2771				int datalen)
2772{
2773	struct timespec64 timestamp;
2774	struct pktgen_hdr *pgh;
2775
2776	pgh = skb_put(skb, sizeof(*pgh));
2777	datalen -= sizeof(*pgh);
2778
2779	if (pkt_dev->nfrags <= 0) {
2780		skb_put_zero(skb, datalen);
2781	} else {
2782		int frags = pkt_dev->nfrags;
2783		int i, len;
2784		int frag_len;
2785
2786
2787		if (frags > MAX_SKB_FRAGS)
2788			frags = MAX_SKB_FRAGS;
2789		len = datalen - frags * PAGE_SIZE;
2790		if (len > 0) {
2791			skb_put_zero(skb, len);
2792			datalen = frags * PAGE_SIZE;
2793		}
2794
2795		i = 0;
2796		frag_len = (datalen/frags) < PAGE_SIZE ?
2797			   (datalen/frags) : PAGE_SIZE;
2798		while (datalen > 0) {
2799			if (unlikely(!pkt_dev->page)) {
2800				int node = numa_node_id();
2801
2802				if (pkt_dev->node >= 0 && (pkt_dev->flags & F_NODE))
2803					node = pkt_dev->node;
2804				pkt_dev->page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
2805				if (!pkt_dev->page)
2806					break;
2807			}
2808			get_page(pkt_dev->page);
2809			get_page(pkt_dev->page);
2810			/* last fragment, fill the rest of the data */
2811			if (i == (frags - 1))
2812				skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[i],
2813							pkt_dev->page, 0,
2814							(datalen < PAGE_SIZE ?
2815							 datalen : PAGE_SIZE));
2816			else
2817				skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[i],
2818							pkt_dev->page, 0, frag_len);
2819
2820			datalen -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
2821			skb->len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
2822			skb->data_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
2823			i++;
2824			skb_shinfo(skb)->nr_frags = i;
2825		}
2826	}
2827
2828	/* Stamp the time and sequence number, converting them
2829	 * to network byte order.
2830	 */
2831	pgh->pgh_magic = htonl(PKTGEN_MAGIC);
2832	pgh->seq_num = htonl(pkt_dev->seq_num);
2833
2834	if (pkt_dev->flags & F_NO_TIMESTAMP) {
2835		pgh->tv_sec = 0;
2836		pgh->tv_usec = 0;
2837	} else {
2838		/*
2839		 * pgh->tv_sec wraps in y2106 when interpreted as unsigned
2840		 * as done by wireshark, or y2038 when interpreted as signed.
2841		 * This is probably harmless, but if anyone wants to improve
2842		 * it, we could introduce a variant that puts 64-bit nanoseconds
2843		 * into the respective header bytes.
2844		 * This would also be slightly faster to read.
2845		 */
2846		ktime_get_real_ts64(&timestamp);
2847		pgh->tv_sec = htonl(timestamp.tv_sec);
2848		pgh->tv_usec = htonl(timestamp.tv_nsec / NSEC_PER_USEC);
2849	}
2850}
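/* Receiver-side sketch (illustrative, not part of pktgen itself): the
 * payload laid down above can be decoded from the start of the UDP data
 * to measure loss, reordering and latency:
 *
 *	struct pktgen_hdr *pgh = (struct pktgen_hdr *)udp_payload;
 *
 *	if (ntohl(pgh->pgh_magic) == PKTGEN_MAGIC) {
 *		__u32 seq = ntohl(pgh->seq_num);
 *		__u64 us  = (__u64)ntohl(pgh->tv_sec) * USEC_PER_SEC +
 *			    ntohl(pgh->tv_usec);
 *		... compare seq/us against the previous packet ...
 *	}
 */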
2851
2852static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
2853					struct pktgen_dev *pkt_dev)
2854{
2855	unsigned int extralen = LL_RESERVED_SPACE(dev);
2856	struct sk_buff *skb = NULL;
2857	unsigned int size;
2858
2859	size = pkt_dev->cur_pkt_size + 64 + extralen + pkt_dev->pkt_overhead;
2860	if (pkt_dev->flags & F_NODE) {
2861		int node = pkt_dev->node >= 0 ? pkt_dev->node : numa_node_id();
2862
2863		skb = __alloc_skb(NET_SKB_PAD + size, GFP_NOWAIT, 0, node);
2864		if (likely(skb)) {
2865			skb_reserve(skb, NET_SKB_PAD);
2866			skb->dev = dev;
2867		}
2868	} else {
2869		 skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
2870	}
2871
2872	/* the caller pre-fetches from skb->data and reserves for the mac hdr */
2873	if (likely(skb))
2874		skb_reserve(skb, extralen - 16);
2875
2876	return skb;
2877}
2878
2879static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2880					struct pktgen_dev *pkt_dev)
2881{
2882	struct sk_buff *skb = NULL;
2883	__u8 *eth;
2884	struct udphdr *udph;
2885	int datalen, iplen;
2886	struct iphdr *iph;
2887	__be16 protocol = htons(ETH_P_IP);
2888	__be32 *mpls;
2889	__be16 *vlan_tci = NULL;                 /* Encapsulates priority and VLAN ID */
2890	__be16 *vlan_encapsulated_proto = NULL;  /* packet type ID field (or len) for VLAN tag */
2891	__be16 *svlan_tci = NULL;                /* Encapsulates priority and SVLAN ID */
2892	__be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */
2893	u16 queue_map;
2894
2895	if (pkt_dev->nr_labels)
2896		protocol = htons(ETH_P_MPLS_UC);
2897
2898	if (pkt_dev->vlan_id != 0xffff)
2899		protocol = htons(ETH_P_8021Q);
2900
2901	/* Update any of the values, used when we're incrementing various
2902	 * fields.
2903	 */
2904	mod_cur_headers(pkt_dev);
2905	queue_map = pkt_dev->cur_queue_map;
2906
2907	skb = pktgen_alloc_skb(odev, pkt_dev);
2908	if (!skb) {
2909		sprintf(pkt_dev->result, "No memory");
2910		return NULL;
2911	}
2912
2913	prefetchw(skb->data);
2914	skb_reserve(skb, 16);
2915
2916	/*  Reserve for ethernet and IP header  */
2917	eth = skb_push(skb, 14);
2918	mpls = skb_put(skb, pkt_dev->nr_labels * sizeof(__u32));
2919	if (pkt_dev->nr_labels)
2920		mpls_push(mpls, pkt_dev);
2921
2922	if (pkt_dev->vlan_id != 0xffff) {
2923		if (pkt_dev->svlan_id != 0xffff) {
2924			svlan_tci = skb_put(skb, sizeof(__be16));
2925			*svlan_tci = build_tci(pkt_dev->svlan_id,
2926					       pkt_dev->svlan_cfi,
2927					       pkt_dev->svlan_p);
2928			svlan_encapsulated_proto = skb_put(skb,
2929							   sizeof(__be16));
2930			*svlan_encapsulated_proto = htons(ETH_P_8021Q);
2931		}
2932		vlan_tci = skb_put(skb, sizeof(__be16));
2933		*vlan_tci = build_tci(pkt_dev->vlan_id,
2934				      pkt_dev->vlan_cfi,
2935				      pkt_dev->vlan_p);
2936		vlan_encapsulated_proto = skb_put(skb, sizeof(__be16));
2937		*vlan_encapsulated_proto = htons(ETH_P_IP);
2938	}
2939
2940	skb_reset_mac_header(skb);
2941	skb_set_network_header(skb, skb->len);
2942	iph = skb_put(skb, sizeof(struct iphdr));
2943
2944	skb_set_transport_header(skb, skb->len);
2945	udph = skb_put(skb, sizeof(struct udphdr));
2946	skb_set_queue_mapping(skb, queue_map);
2947	skb->priority = pkt_dev->skb_priority;
2948
2949	memcpy(eth, pkt_dev->hh, 12);
2950	*(__be16 *)&eth[12] = protocol;
2951
2952	/* Eth + IPh + UDPh + mpls */
2953	datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 -
2954		  pkt_dev->pkt_overhead;
2955	if (datalen < 0 || datalen < sizeof(struct pktgen_hdr))
2956		datalen = sizeof(struct pktgen_hdr);
2957
2958	udph->source = htons(pkt_dev->cur_udp_src);
2959	udph->dest = htons(pkt_dev->cur_udp_dst);
2960	udph->len = htons(datalen + 8);	/* DATA + udphdr */
2961	udph->check = 0;
2962
2963	iph->ihl = 5;
2964	iph->version = 4;
2965	iph->ttl = 32;
2966	iph->tos = pkt_dev->tos;
2967	iph->protocol = IPPROTO_UDP;	/* UDP */
2968	iph->saddr = pkt_dev->cur_saddr;
2969	iph->daddr = pkt_dev->cur_daddr;
2970	iph->id = htons(pkt_dev->ip_id);
2971	pkt_dev->ip_id++;
2972	iph->frag_off = 0;
2973	iplen = 20 + 8 + datalen;
2974	iph->tot_len = htons(iplen);
2975	ip_send_check(iph);
2976	skb->protocol = protocol;
2977	skb->dev = odev;
2978	skb->pkt_type = PACKET_HOST;
2979
2980	pktgen_finalize_skb(pkt_dev, skb, datalen);
2981
2982	if (!(pkt_dev->flags & F_UDPCSUM)) {
2983		skb->ip_summed = CHECKSUM_NONE;
2984	} else if (odev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM)) {
2985		skb->ip_summed = CHECKSUM_PARTIAL;
2986		skb->csum = 0;
2987		udp4_hwcsum(skb, iph->saddr, iph->daddr);
2988	} else {
2989		__wsum csum = skb_checksum(skb, skb_transport_offset(skb), datalen + 8, 0);
2990
2991		/* add protocol-dependent pseudo-header */
2992		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
2993						datalen + 8, IPPROTO_UDP, csum);
2994
2995		if (udph->check == 0)
2996			udph->check = CSUM_MANGLED_0;
2997	}
2998
2999#ifdef CONFIG_XFRM
3000	if (!process_ipsec(pkt_dev, skb, protocol))
3001		return NULL;
3002#endif
3003
3004	return skb;
3005}
3006
3007static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
3008					struct pktgen_dev *pkt_dev)
3009{
3010	struct sk_buff *skb = NULL;
3011	__u8 *eth;
3012	struct udphdr *udph;
3013	int datalen, udplen;
3014	struct ipv6hdr *iph;
3015	__be16 protocol = htons(ETH_P_IPV6);
3016	__be32 *mpls;
3017	__be16 *vlan_tci = NULL;                 /* Encapsulates priority and VLAN ID */
3018	__be16 *vlan_encapsulated_proto = NULL;  /* packet type ID field (or len) for VLAN tag */
3019	__be16 *svlan_tci = NULL;                /* Encapsulates priority and SVLAN ID */
3020	__be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */
3021	u16 queue_map;
3022
3023	if (pkt_dev->nr_labels)
3024		protocol = htons(ETH_P_MPLS_UC);
3025
3026	if (pkt_dev->vlan_id != 0xffff)
3027		protocol = htons(ETH_P_8021Q);
3028
3029	/* Update any of the values, used when we're incrementing various
3030	 * fields.
3031	 */
3032	mod_cur_headers(pkt_dev);
3033	queue_map = pkt_dev->cur_queue_map;
3034
3035	skb = pktgen_alloc_skb(odev, pkt_dev);
3036	if (!skb) {
3037		sprintf(pkt_dev->result, "No memory");
3038		return NULL;
3039	}
3040
3041	prefetchw(skb->data);
3042	skb_reserve(skb, 16);
3043
3044	/*  Reserve for ethernet and IP header  */
3045	eth = skb_push(skb, 14);
3046	mpls = skb_put(skb, pkt_dev->nr_labels * sizeof(__u32));
3047	if (pkt_dev->nr_labels)
3048		mpls_push(mpls, pkt_dev);
3049
3050	if (pkt_dev->vlan_id != 0xffff) {
3051		if (pkt_dev->svlan_id != 0xffff) {
3052			svlan_tci = skb_put(skb, sizeof(__be16));
3053			*svlan_tci = build_tci(pkt_dev->svlan_id,
3054					       pkt_dev->svlan_cfi,
3055					       pkt_dev->svlan_p);
3056			svlan_encapsulated_proto = skb_put(skb,
3057							   sizeof(__be16));
3058			*svlan_encapsulated_proto = htons(ETH_P_8021Q);
3059		}
3060		vlan_tci = skb_put(skb, sizeof(__be16));
3061		*vlan_tci = build_tci(pkt_dev->vlan_id,
3062				      pkt_dev->vlan_cfi,
3063				      pkt_dev->vlan_p);
3064		vlan_encapsulated_proto = skb_put(skb, sizeof(__be16));
3065		*vlan_encapsulated_proto = htons(ETH_P_IPV6);
3066	}
3067
3068	skb_reset_mac_header(skb);
3069	skb_set_network_header(skb, skb->len);
3070	iph = skb_put(skb, sizeof(struct ipv6hdr));
3071
3072	skb_set_transport_header(skb, skb->len);
3073	udph = skb_put(skb, sizeof(struct udphdr));
3074	skb_set_queue_mapping(skb, queue_map);
3075	skb->priority = pkt_dev->skb_priority;
3076
3077	memcpy(eth, pkt_dev->hh, 12);
3078	*(__be16 *)&eth[12] = protocol;
3079
3080	/* Eth + IPh + UDPh + mpls */
3081	datalen = pkt_dev->cur_pkt_size - 14 -
3082		  sizeof(struct ipv6hdr) - sizeof(struct udphdr) -
3083		  pkt_dev->pkt_overhead;
3084
3085	if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) {
3086		datalen = sizeof(struct pktgen_hdr);
3087		net_info_ratelimited("increased datalen to %d\n", datalen);
3088	}
3089
3090	udplen = datalen + sizeof(struct udphdr);
3091	udph->source = htons(pkt_dev->cur_udp_src);
3092	udph->dest = htons(pkt_dev->cur_udp_dst);
3093	udph->len = htons(udplen);
3094	udph->check = 0;
3095
3096	*(__be32 *) iph = htonl(0x60000000);	/* Version + flow */
3097
3098	if (pkt_dev->traffic_class) {
3099		/* Version + traffic class + flow (0) */
3100		*(__be32 *)iph |= htonl(0x60000000 | (pkt_dev->traffic_class << 20));
3101	}
3102
3103	iph->hop_limit = 32;
3104
3105	iph->payload_len = htons(udplen);
3106	iph->nexthdr = IPPROTO_UDP;
3107
3108	iph->daddr = pkt_dev->cur_in6_daddr;
3109	iph->saddr = pkt_dev->cur_in6_saddr;
3110
3111	skb->protocol = protocol;
3112	skb->dev = odev;
3113	skb->pkt_type = PACKET_HOST;
3114
3115	pktgen_finalize_skb(pkt_dev, skb, datalen);
3116
3117	if (!(pkt_dev->flags & F_UDPCSUM)) {
3118		skb->ip_summed = CHECKSUM_NONE;
3119	} else if (odev->features & (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM)) {
3120		skb->ip_summed = CHECKSUM_PARTIAL;
3121		skb->csum_start = skb_transport_header(skb) - skb->head;
3122		skb->csum_offset = offsetof(struct udphdr, check);
3123		udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0);
3124	} else {
3125		__wsum csum = skb_checksum(skb, skb_transport_offset(skb), udplen, 0);
3126
3127		/* add protocol-dependent pseudo-header */
3128		udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum);
3129
3130		if (udph->check == 0)
3131			udph->check = CSUM_MANGLED_0;
3132	}
3133
3134	return skb;
3135}
3136
3137static struct sk_buff *fill_packet(struct net_device *odev,
3138				   struct pktgen_dev *pkt_dev)
3139{
3140	if (pkt_dev->flags & F_IPV6)
3141		return fill_packet_ipv6(odev, pkt_dev);
3142	else
3143		return fill_packet_ipv4(odev, pkt_dev);
3144}
3145
3146static void pktgen_clear_counters(struct pktgen_dev *pkt_dev)
3147{
3148	pkt_dev->seq_num = 1;
3149	pkt_dev->idle_acc = 0;
3150	pkt_dev->sofar = 0;
3151	pkt_dev->tx_bytes = 0;
3152	pkt_dev->errors = 0;
3153}
3154
3155/* Set up structure for sending pkts, clear counters */
3156
3157static void pktgen_run(struct pktgen_thread *t)
3158{
3159	struct pktgen_dev *pkt_dev;
3160	int started = 0;
3161
3162	func_enter();
3163
3164	rcu_read_lock();
3165	list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
3166
3167		/*
3168		 * setup odev and create initial packet.
3169		 */
3170		pktgen_setup_inject(pkt_dev);
3171
3172		if (pkt_dev->odev) {
3173			pktgen_clear_counters(pkt_dev);
3174			pkt_dev->skb = NULL;
3175			pkt_dev->started_at = pkt_dev->next_tx = ktime_get();
3176
3177			set_pkt_overhead(pkt_dev);
3178
3179			strcpy(pkt_dev->result, "Starting");
3180			pkt_dev->running = 1;	/* Cranke yeself! */
3181			started++;
3182		} else
3183			strcpy(pkt_dev->result, "Error starting");
3184	}
3185	rcu_read_unlock();
3186	if (started)
3187		t->control &= ~(T_STOP);
3188}
3189
3190static void pktgen_handle_all_threads(struct pktgen_net *pn, u32 flags)
3191{
3192	struct pktgen_thread *t;
3193
3194	mutex_lock(&pktgen_thread_lock);
3195
3196	list_for_each_entry(t, &pn->pktgen_threads, th_list)
3197		t->control |= (flags);
3198
3199	mutex_unlock(&pktgen_thread_lock);
3200}
3201
3202static void pktgen_stop_all_threads(struct pktgen_net *pn)
3203{
3204	func_enter();
3205
3206	pktgen_handle_all_threads(pn, T_STOP);
3207}
3208
3209static int thread_is_running(const struct pktgen_thread *t)
3210{
3211	const struct pktgen_dev *pkt_dev;
3212
3213	rcu_read_lock();
3214	list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
3215		if (pkt_dev->running) {
3216			rcu_read_unlock();
3217			return 1;
3218		}
3219	rcu_read_unlock();
3220	return 0;
3221}
3222
3223static int pktgen_wait_thread_run(struct pktgen_thread *t)
3224{
3225	while (thread_is_running(t)) {
3226
3227		/* note: 't' will still be around even after the unlock/lock
3228		 * cycle because pktgen_thread threads are only cleared at
3229		 * net exit
3230		 */
3231		mutex_unlock(&pktgen_thread_lock);
3232		msleep_interruptible(100);
3233		mutex_lock(&pktgen_thread_lock);
3234
3235		if (signal_pending(current))
3236			goto signal;
3237	}
3238	return 1;
3239signal:
3240	return 0;
3241}
3242
3243static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
3244{
3245	struct pktgen_thread *t;
3246	int sig = 1;
3247
3248	/* prevent racing with rmmod */
3249	if (!try_module_get(THIS_MODULE))
3250		return sig;
3251
3252	mutex_lock(&pktgen_thread_lock);
3253
3254	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
3255		sig = pktgen_wait_thread_run(t);
3256		if (sig == 0)
3257			break;
3258	}
3259
3260	if (sig == 0)
3261		list_for_each_entry(t, &pn->pktgen_threads, th_list)
3262			t->control |= (T_STOP);
3263
3264	mutex_unlock(&pktgen_thread_lock);
3265	module_put(THIS_MODULE);
3266	return sig;
3267}
3268
3269static void pktgen_run_all_threads(struct pktgen_net *pn)
3270{
3271	func_enter();
3272
3273	pktgen_handle_all_threads(pn, T_RUN);
3274
3275	/* Propagate thread->control  */
3276	schedule_timeout_interruptible(msecs_to_jiffies(125));
3277
3278	pktgen_wait_all_threads_run(pn);
3279}
3280
3281static void pktgen_reset_all_threads(struct pktgen_net *pn)
3282{
3283	func_enter();
3284
3285	pktgen_handle_all_threads(pn, T_REMDEVALL);
3286
3287	/* Propagate thread->control  */
3288	schedule_timeout_interruptible(msecs_to_jiffies(125));
3289
3290	pktgen_wait_all_threads_run(pn);
3291}
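/* Usage sketch (illustrative): the global pgctrl file maps onto the
 * handlers above; each command is fanned out to every thread via
 * t->control:
 *
 *   echo "start" > /proc/net/pktgen/pgctrl   # pktgen_run_all_threads()
 *   echo "stop"  > /proc/net/pktgen/pgctrl   # pktgen_stop_all_threads()
 *   echo "reset" > /proc/net/pktgen/pgctrl   # pktgen_reset_all_threads()
 */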
3292
3293static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
3294{
3295	__u64 bps, mbps, pps;
3296	char *p = pkt_dev->result;
3297	ktime_t elapsed = ktime_sub(pkt_dev->stopped_at,
3298				    pkt_dev->started_at);
3299	ktime_t idle = ns_to_ktime(pkt_dev->idle_acc);
3300
3301	p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n",
3302		     (unsigned long long)ktime_to_us(elapsed),
3303		     (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)),
3304		     (unsigned long long)ktime_to_us(idle),
3305		     (unsigned long long)pkt_dev->sofar,
3306		     pkt_dev->cur_pkt_size, nr_frags);
3307
3308	pps = div64_u64(pkt_dev->sofar * NSEC_PER_SEC,
3309			ktime_to_ns(elapsed));
3310
3311	if (pkt_dev->n_imix_entries > 0) {
3312		int i;
3313		struct imix_pkt *entry;
3314
3315		bps = 0;
3316		for (i = 0; i < pkt_dev->n_imix_entries; i++) {
3317			entry = &pkt_dev->imix_entries[i];
3318			bps += entry->size * entry->count_so_far;
3319		}
3320		bps = div64_u64(bps * 8 * NSEC_PER_SEC, ktime_to_ns(elapsed));
3321	} else {
3322		bps = pps * 8 * pkt_dev->cur_pkt_size;
3323	}
3324
3325	mbps = bps;
3326	do_div(mbps, 1000000);
3327	p += sprintf(p, "  %llupps %lluMb/sec (%llubps) errors: %llu",
3328		     (unsigned long long)pps,
3329		     (unsigned long long)mbps,
3330		     (unsigned long long)bps,
3331		     (unsigned long long)pkt_dev->errors);
3332}
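/* The string built above is what a post-run read of the device's proc
 * file reports, e.g. (figures invented for illustration):
 *
 *   Result: OK: 5124371(c5021092+d103279) usec, 5000000 (60byte,0frags)
 *     975730pps 468Mb/sec (468350400bps) errors: 0
 */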
3333
3334/* Set stopped-at timer, remove from running list, do counters & statistics */
3335static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
3336{
3337	int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1;
3338
3339	if (!pkt_dev->running) {
3340		pr_warn("interface: %s is already stopped\n",
3341			pkt_dev->odevname);
3342		return -EINVAL;
3343	}
3344
3345	pkt_dev->running = 0;
3346	kfree_skb(pkt_dev->skb);
3347	pkt_dev->skb = NULL;
3348	pkt_dev->stopped_at = ktime_get();
3349
3350	show_results(pkt_dev, nr_frags);
3351
3352	return 0;
3353}
3354
3355static struct pktgen_dev *next_to_run(struct pktgen_thread *t)
3356{
3357	struct pktgen_dev *pkt_dev, *best = NULL;
3358
3359	rcu_read_lock();
3360	list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
3361		if (!pkt_dev->running)
3362			continue;
3363		if (best == NULL)
3364			best = pkt_dev;
3365		else if (ktime_compare(pkt_dev->next_tx, best->next_tx) < 0)
3366			best = pkt_dev;
3367	}
3368	rcu_read_unlock();
3369
3370	return best;
3371}
3372
3373static void pktgen_stop(struct pktgen_thread *t)
3374{
3375	struct pktgen_dev *pkt_dev;
3376
3377	func_enter();
3378
3379	rcu_read_lock();
3380
3381	list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
3382		pktgen_stop_device(pkt_dev);
3383	}
3384
3385	rcu_read_unlock();
3386}
3387
3388/*
3389 * one of our devices needs to be removed - find it
3390 * and remove it
3391 */
3392static void pktgen_rem_one_if(struct pktgen_thread *t)
3393{
3394	struct list_head *q, *n;
3395	struct pktgen_dev *cur;
3396
3397	func_enter();
3398
3399	list_for_each_safe(q, n, &t->if_list) {
3400		cur = list_entry(q, struct pktgen_dev, list);
3401
3402		if (!cur->removal_mark)
3403			continue;
3404
3405		kfree_skb(cur->skb);
3406		cur->skb = NULL;
3407
3408		pktgen_remove_device(t, cur);
3409
3410		break;
3411	}
3412}
3413
3414static void pktgen_rem_all_ifs(struct pktgen_thread *t)
3415{
3416	struct list_head *q, *n;
3417	struct pktgen_dev *cur;
3418
3419	func_enter();
3420
3421	/* Remove all devices, free mem */
3422
3423	list_for_each_safe(q, n, &t->if_list) {
3424		cur = list_entry(q, struct pktgen_dev, list);
3425
3426		kfree_skb(cur->skb);
3427		cur->skb = NULL;
3428
3429		pktgen_remove_device(t, cur);
3430	}
3431}
3432
3433static void pktgen_rem_thread(struct pktgen_thread *t)
3434{
3435	/* Remove from the thread list */
3436	remove_proc_entry(t->tsk->comm, t->net->proc_dir);
3437}
3438
3439static void pktgen_resched(struct pktgen_dev *pkt_dev)
3440{
3441	ktime_t idle_start = ktime_get();
3442	schedule();
3443	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start));
3444}
3445
3446static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
3447{
3448	ktime_t idle_start = ktime_get();
3449
3450	while (refcount_read(&(pkt_dev->skb->users)) != 1) {
3451		if (signal_pending(current))
3452			break;
3453
3454		if (need_resched())
3455			pktgen_resched(pkt_dev);
3456		else
3457			cpu_relax();
3458	}
3459	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start));
3460}
3461
3462static void pktgen_xmit(struct pktgen_dev *pkt_dev)
3463{
3464	bool skb_shared = !!(READ_ONCE(pkt_dev->flags) & F_SHARED);
3465	struct net_device *odev = pkt_dev->odev;
3466	struct netdev_queue *txq;
3467	unsigned int burst = 1;
3468	struct sk_buff *skb;
3469	int clone_skb = 0;
3470	int ret;
3471
3472	/* If 'skb_shared' is false, skip reading any new values for
3473	 * 'burst' and 'clone_skb' so that concurrent changes cannot
3474	 * slip in mid-run; the stabilized config is picked up on the
3475	 * next run of pktgen_xmit.
3476	 */
3477	if (skb_shared) {
3478		burst = READ_ONCE(pkt_dev->burst);
3479		clone_skb = READ_ONCE(pkt_dev->clone_skb);
3480	}
3481
3482	/* If device is offline, then don't send */
3483	if (unlikely(!netif_running(odev) || !netif_carrier_ok(odev))) {
3484		pktgen_stop_device(pkt_dev);
3485		return;
3486	}
3487
3488	/* The maximum DELAY value has the special meaning of
3489	 * "never transmit".
3490	 */
3491	if (unlikely(pkt_dev->delay == ULLONG_MAX)) {
3492		pkt_dev->next_tx = ktime_add_ns(ktime_get(), ULONG_MAX);
3493		return;
3494	}
3495
3496	/* If there is no skb, or the clone count is exhausted, get a new one */
3497	if (!pkt_dev->skb || (pkt_dev->last_ok &&
3498			      ++pkt_dev->clone_count >= clone_skb)) {
3499		/* build a new pkt */
3500		kfree_skb(pkt_dev->skb);
3501
3502		pkt_dev->skb = fill_packet(odev, pkt_dev);
3503		if (pkt_dev->skb == NULL) {
3504			pr_err("ERROR: couldn't allocate skb in fill_packet\n");
3505			schedule();
3506			pkt_dev->clone_count--;	/* back out increment, OOM */
3507			return;
3508		}
3509		pkt_dev->last_pkt_size = pkt_dev->skb->len;
3510		pkt_dev->clone_count = 0;	/* reset counter */
3511	}
3512
3513	if (pkt_dev->delay && pkt_dev->last_ok)
3514		spin(pkt_dev, pkt_dev->next_tx);
3515
3516	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) {
3517		skb = pkt_dev->skb;
3518		skb->protocol = eth_type_trans(skb, skb->dev);
3519		if (skb_shared)
3520			refcount_add(burst, &skb->users);
3521		local_bh_disable();
3522		do {
3523			ret = netif_receive_skb(skb);
3524			if (ret == NET_RX_DROP)
3525				pkt_dev->errors++;
3526			pkt_dev->sofar++;
3527			pkt_dev->seq_num++;
3528			if (unlikely(!skb_shared)) {
3529				pkt_dev->skb = NULL;
3530				break;
3531			}
3532			if (refcount_read(&skb->users) != burst) {
3533				/* skb was queued by rps/rfs or taps,
3534				 * so cannot reuse this skb
3535				 */
3536				WARN_ON(refcount_sub_and_test(burst - 1, &skb->users));
3537				/* get out of the loop and wait
3538				 * until skb is consumed
3539				 */
3540				break;
3541			}
3542			/* skb was 'freed' by the stack, so clean a few
3543			 * bits and reuse it
3544			 */
3545			skb_reset_redirect(skb);
3546		} while (--burst > 0);
3547		goto out; /* Skips xmit_mode M_START_XMIT */
3548	} else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
3549		local_bh_disable();
3550		if (skb_shared)
3551			refcount_inc(&pkt_dev->skb->users);
3552
3553		ret = dev_queue_xmit(pkt_dev->skb);
3554
3555		if (!skb_shared && dev_xmit_complete(ret))
3556			pkt_dev->skb = NULL;
3557
3558		switch (ret) {
3559		case NET_XMIT_SUCCESS:
3560			pkt_dev->sofar++;
3561			pkt_dev->seq_num++;
3562			pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
3563			break;
3564		case NET_XMIT_DROP:
3565		case NET_XMIT_CN:
3566		/* These are all valid return codes for a qdisc but
3567		 * indicate packets are being dropped or will likely
3568		 * be dropped soon.
3569		 */
3570		case NETDEV_TX_BUSY:
3571		/* qdisc may call dev_hard_start_xmit directly in cases
3572		 * where no queues exist e.g. loopback device, virtual
3573		 * devices, etc. In this case we need to handle
3574		 * NETDEV_TX_ codes.
3575		 */
3576		default:
3577			pkt_dev->errors++;
3578			net_info_ratelimited("%s xmit error: %d\n",
3579					     pkt_dev->odevname, ret);
3580			break;
3581		}
3582		goto out;
3583	}
3584
3585	txq = skb_get_tx_queue(odev, pkt_dev->skb);
3586
3587	local_bh_disable();
3588
3589	HARD_TX_LOCK(odev, txq, smp_processor_id());
3590
3591	if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) {
3592		pkt_dev->last_ok = 0;
3593		goto unlock;
3594	}
3595	if (skb_shared)
3596		refcount_add(burst, &pkt_dev->skb->users);
3597
3598xmit_more:
3599	ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0);
3600
3601	if (!skb_shared && dev_xmit_complete(ret))
3602		pkt_dev->skb = NULL;
3603
3604	switch (ret) {
3605	case NETDEV_TX_OK:
3606		pkt_dev->last_ok = 1;
3607		pkt_dev->sofar++;
3608		pkt_dev->seq_num++;
3609		pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
3610		if (burst > 0 && !netif_xmit_frozen_or_drv_stopped(txq))
3611			goto xmit_more;
3612		break;
3613	case NET_XMIT_DROP:
3614	case NET_XMIT_CN:
3615		/* skb has been consumed */
3616		pkt_dev->errors++;
3617		break;
3618	default: /* Drivers are not supposed to return other values! */
3619		net_info_ratelimited("%s xmit error: %d\n",
3620				     pkt_dev->odevname, ret);
3621		pkt_dev->errors++;
3622		fallthrough;
3623	case NETDEV_TX_BUSY:
3624		/* Retry it next time */
3625		if (skb_shared)
3626			refcount_dec(&pkt_dev->skb->users);
3627		pkt_dev->last_ok = 0;
3628	}
3629	if (unlikely(burst))
3630		WARN_ON(refcount_sub_and_test(burst, &pkt_dev->skb->users));
3631unlock:
3632	HARD_TX_UNLOCK(odev, txq);
3633
3634out:
3635	local_bh_enable();
3636
3637	/* If pkt_dev->count is zero, then run forever */
3638	if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
3639		if (pkt_dev->skb)
3640			pktgen_wait_for_skb(pkt_dev);
3641
3642		/* Done with this */
3643		pktgen_stop_device(pkt_dev);
3644	}
3645}
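/* Usage sketch (illustrative): the three transmit paths above are
 * selected with the "xmit_mode" parameter; "burst" and "clone_skb"
 * only take effect while the skb is shared (the default F_SHARED flag):
 *
 *   echo "xmit_mode start_xmit"    > /proc/net/pktgen/eth0  # default, drives the driver queue
 *   echo "xmit_mode netif_receive" > /proc/net/pktgen/eth0  # inject into the RX stack
 *   echo "xmit_mode queue_xmit"    > /proc/net/pktgen/eth0  # full dev_queue_xmit() path
 *   echo "burst 8"                 > /proc/net/pktgen/eth0
 */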
3646
3647/*
3648 * Main loop of the thread goes here
3649 */
3650
3651static int pktgen_thread_worker(void *arg)
3652{
3653	struct pktgen_thread *t = arg;
3654	struct pktgen_dev *pkt_dev = NULL;
3655	int cpu = t->cpu;
3656
3657	WARN_ON_ONCE(smp_processor_id() != cpu);
3658
3659	init_waitqueue_head(&t->queue);
3660	complete(&t->start_done);
3661
3662	pr_debug("starting pktgen/%d:  pid=%d\n", cpu, task_pid_nr(current));
3663
3664	set_freezable();
3665
3666	while (!kthread_should_stop()) {
3667		pkt_dev = next_to_run(t);
3668
3669		if (unlikely(!pkt_dev && t->control == 0)) {
3670			if (t->net->pktgen_exiting)
3671				break;
3672			wait_event_freezable_timeout(t->queue,
3673						     t->control != 0, HZ / 10);
3674			continue;
3675		}
3676
3677		if (likely(pkt_dev)) {
3678			pktgen_xmit(pkt_dev);
3679
3680			if (need_resched())
3681				pktgen_resched(pkt_dev);
3682			else
3683				cpu_relax();
3684		}
3685
3686		if (t->control & T_STOP) {
3687			pktgen_stop(t);
3688			t->control &= ~(T_STOP);
3689		}
3690
3691		if (t->control & T_RUN) {
3692			pktgen_run(t);
3693			t->control &= ~(T_RUN);
3694		}
3695
3696		if (t->control & T_REMDEVALL) {
3697			pktgen_rem_all_ifs(t);
3698			t->control &= ~(T_REMDEVALL);
3699		}
3700
3701		if (t->control & T_REMDEV) {
3702			pktgen_rem_one_if(t);
3703			t->control &= ~(T_REMDEV);
3704		}
3705
3706		try_to_freeze();
3707	}
3708
3709	pr_debug("%s stopping all devices\n", t->tsk->comm);
3710	pktgen_stop(t);
3711
3712	pr_debug("%s removing all devices\n", t->tsk->comm);
3713	pktgen_rem_all_ifs(t);
3714
3715	pr_debug("%s removing thread\n", t->tsk->comm);
3716	pktgen_rem_thread(t);
3717
3718	return 0;
3719}
3720
3721static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
3722					  const char *ifname, bool exact)
3723{
3724	struct pktgen_dev *p, *pkt_dev = NULL;
3725	size_t len = strlen(ifname);
3726
3727	rcu_read_lock();
3728	list_for_each_entry_rcu(p, &t->if_list, list)
3729		if (strncmp(p->odevname, ifname, len) == 0) {
3730			if (p->odevname[len]) {
3731				if (exact || p->odevname[len] != '@')
3732					continue;
3733			}
3734			pkt_dev = p;
3735			break;
3736		}
3737
3738	rcu_read_unlock();
3739	pr_debug("find_dev(%s) returning %p\n", ifname, pkt_dev);
3740	return pkt_dev;
3741}
3742
3743/*
3744 * Adds a dev at front of if_list.
3745 */
3746
3747static int add_dev_to_thread(struct pktgen_thread *t,
3748			     struct pktgen_dev *pkt_dev)
3749{
3750	int rv = 0;
3751
3752	/* This function cannot be called concurrently, as it is called
3753	 * under the pktgen_thread_lock mutex, but it can run from
3754	 * userspace on another CPU than the kthread.  The if_lock()
3755	 * is used here to sync with concurrent instances of
3756	 * _rem_dev_from_if_list() invoked via the kthread, which also
3757	 * updates the if_list. */
3758	if_lock(t);
3759
3760	if (pkt_dev->pg_thread) {
3761		pr_err("ERROR: already assigned to a thread\n");
3762		rv = -EBUSY;
3763		goto out;
3764	}
3765
3766	pkt_dev->running = 0;
3767	pkt_dev->pg_thread = t;
3768	list_add_rcu(&pkt_dev->list, &t->if_list);
3769
3770out:
3771	if_unlock(t);
3772	return rv;
3773}
3774
3775/* Called under thread lock */
3776
3777static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3778{
3779	struct pktgen_dev *pkt_dev;
3780	int err;
3781	int node = cpu_to_node(t->cpu);
3782
3783	/* We don't allow a device to be on several threads */
3784
3785	pkt_dev = __pktgen_NN_threads(t->net, ifname, FIND);
3786	if (pkt_dev) {
3787		pr_err("ERROR: interface already used\n");
3788		return -EBUSY;
3789	}
3790
3791	pkt_dev = kzalloc_node(sizeof(struct pktgen_dev), GFP_KERNEL, node);
3792	if (!pkt_dev)
3793		return -ENOMEM;
3794
3795	strcpy(pkt_dev->odevname, ifname);
3796	pkt_dev->flows = vzalloc_node(array_size(MAX_CFLOWS,
3797						 sizeof(struct flow_state)),
3798				      node);
3799	if (pkt_dev->flows == NULL) {
3800		kfree(pkt_dev);
3801		return -ENOMEM;
3802	}
3803
3804	pkt_dev->removal_mark = 0;
3805	pkt_dev->nfrags = 0;
3806	pkt_dev->delay = pg_delay_d;
3807	pkt_dev->count = pg_count_d;
3808	pkt_dev->sofar = 0;
3809	pkt_dev->udp_src_min = 9;	/* sink port */
3810	pkt_dev->udp_src_max = 9;
3811	pkt_dev->udp_dst_min = 9;
3812	pkt_dev->udp_dst_max = 9;
3813	pkt_dev->vlan_p = 0;
3814	pkt_dev->vlan_cfi = 0;
3815	pkt_dev->vlan_id = 0xffff;
3816	pkt_dev->svlan_p = 0;
3817	pkt_dev->svlan_cfi = 0;
3818	pkt_dev->svlan_id = 0xffff;
3819	pkt_dev->burst = 1;
3820	pkt_dev->node = NUMA_NO_NODE;
3821	pkt_dev->flags = F_SHARED;	/* SKB shared by default */
3822
3823	err = pktgen_setup_dev(t->net, pkt_dev, ifname);
3824	if (err)
3825		goto out1;
3826	if (pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)
3827		pkt_dev->clone_skb = pg_clone_skb_d;
3828
3829	pkt_dev->entry = proc_create_data(ifname, 0600, t->net->proc_dir,
3830					  &pktgen_if_proc_ops, pkt_dev);
3831	if (!pkt_dev->entry) {
3832		pr_err("cannot create %s/%s procfs entry\n",
3833		       PG_PROC_DIR, ifname);
3834		err = -EINVAL;
3835		goto out2;
3836	}
3837#ifdef CONFIG_XFRM
3838	pkt_dev->ipsmode = XFRM_MODE_TRANSPORT;
3839	pkt_dev->ipsproto = IPPROTO_ESP;
3840
3841	/* xfrm tunnel mode needs an additional dst to extract the outer
3842	 * IP header protocol/ttl/id fields, so create a phony one here
3843	 * instead of looking up a valid rt, which would definitely hurt
3844	 * performance in this situation.
3845	 */
3846	pkt_dev->dstops.family = AF_INET;
3847	pkt_dev->xdst.u.dst.dev = pkt_dev->odev;
3848	dst_init_metrics(&pkt_dev->xdst.u.dst, pktgen_dst_metrics, false);
3849	pkt_dev->xdst.child = &pkt_dev->xdst.u.dst;
3850	pkt_dev->xdst.u.dst.ops = &pkt_dev->dstops;
3851#endif
3852
3853	return add_dev_to_thread(t, pkt_dev);
3854out2:
3855	netdev_put(pkt_dev->odev, &pkt_dev->dev_tracker);
3856out1:
3857#ifdef CONFIG_XFRM
3858	free_SAs(pkt_dev);
3859#endif
3860	vfree(pkt_dev->flows);
3861	kfree(pkt_dev);
3862	return err;
3863}
3864
3865static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
3866{
3867	struct pktgen_thread *t;
3868	struct proc_dir_entry *pe;
3869	struct task_struct *p;
3870
3871	t = kzalloc_node(sizeof(struct pktgen_thread), GFP_KERNEL,
3872			 cpu_to_node(cpu));
3873	if (!t) {
3874		pr_err("ERROR: out of memory, can't create new thread\n");
3875		return -ENOMEM;
3876	}
3877
3878	mutex_init(&t->if_lock);
3879	t->cpu = cpu;
3880
3881	INIT_LIST_HEAD(&t->if_list);
3882
3883	list_add_tail(&t->th_list, &pn->pktgen_threads);
3884	init_completion(&t->start_done);
3885
3886	p = kthread_create_on_node(pktgen_thread_worker,
3887				   t,
3888				   cpu_to_node(cpu),
3889				   "kpktgend_%d", cpu);
3890	if (IS_ERR(p)) {
3891		pr_err("kthread_create_on_node() failed for cpu %d\n", t->cpu);
3892		list_del(&t->th_list);
3893		kfree(t);
3894		return PTR_ERR(p);
3895	}
3896	kthread_bind(p, cpu);
3897	t->tsk = p;
3898
3899	pe = proc_create_data(t->tsk->comm, 0600, pn->proc_dir,
3900			      &pktgen_thread_proc_ops, t);
3901	if (!pe) {
3902		pr_err("cannot create %s/%s procfs entry\n",
3903		       PG_PROC_DIR, t->tsk->comm);
3904		kthread_stop(p);
3905		list_del(&t->th_list);
3906		kfree(t);
3907		return -EINVAL;
3908	}
3909
3910	t->net = pn;
3911	get_task_struct(p);
3912	wake_up_process(p);
3913	wait_for_completion(&t->start_done);
3914
3915	return 0;
3916}
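
/* One worker is created per online CPU, so e.g. a 4-CPU system ends up
 * with kpktgend_0 .. kpktgend_3, each with a matching control file under
 * /proc/net/pktgen/.
 */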
3917
3918/*
3919 * Removes a device from the thread if_list.
3920 */
3921static void _rem_dev_from_if_list(struct pktgen_thread *t,
3922				  struct pktgen_dev *pkt_dev)
3923{
3924	struct list_head *q, *n;
3925	struct pktgen_dev *p;
3926
3927	if_lock(t);
3928	list_for_each_safe(q, n, &t->if_list) {
3929		p = list_entry(q, struct pktgen_dev, list);
3930		if (p == pkt_dev)
3931			list_del_rcu(&p->list);
3932	}
3933	if_unlock(t);
3934}
3935
3936static int pktgen_remove_device(struct pktgen_thread *t,
3937				struct pktgen_dev *pkt_dev)
3938{
3939	pr_debug("remove_device pkt_dev=%p\n", pkt_dev);
3940
3941	if (pkt_dev->running) {
3942		pr_warn("WARNING: trying to remove a running interface, stopping it now\n");
3943		pktgen_stop_device(pkt_dev);
3944	}
3945
3946	/* Disassociate from the interface */
3947
3948	if (pkt_dev->odev) {
3949		netdev_put(pkt_dev->odev, &pkt_dev->dev_tracker);
3950		pkt_dev->odev = NULL;
3951	}
3952
3953	/* Remove proc before the if_list entry, because add_device uses
3954	 * the list to determine if the interface already exists; avoid a
3955	 * race with proc_create_data() */
3956	proc_remove(pkt_dev->entry);
3957
3958	/* And update the thread if_list */
3959	_rem_dev_from_if_list(t, pkt_dev);
3960
3961#ifdef CONFIG_XFRM
3962	free_SAs(pkt_dev);
3963#endif
3964	vfree(pkt_dev->flows);
3965	if (pkt_dev->page)
3966		put_page(pkt_dev->page);
3967	kfree_rcu(pkt_dev, rcu);
3968	return 0;
3969}
3970
3971static int __net_init pg_net_init(struct net *net)
3972{
3973	struct pktgen_net *pn = net_generic(net, pg_net_id);
3974	struct proc_dir_entry *pe;
3975	int cpu, ret = 0;
3976
3977	pn->net = net;
3978	INIT_LIST_HEAD(&pn->pktgen_threads);
3979	pn->pktgen_exiting = false;
3980	pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
3981	if (!pn->proc_dir) {
3982		pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
3983		return -ENODEV;
3984	}
3985	pe = proc_create(PGCTRL, 0600, pn->proc_dir, &pktgen_proc_ops);
3986	if (pe == NULL) {
3987		pr_err("cannot create %s procfs entry\n", PGCTRL);
3988		ret = -EINVAL;
3989		goto remove;
3990	}
3991
3992	cpus_read_lock();
3993	for_each_online_cpu(cpu) {
3994		int err;
3995
3996		err = pktgen_create_thread(cpu, pn);
3997		if (err)
3998			pr_warn("Cannot create thread for cpu %d (%d)\n",
3999				   cpu, err);
4000	}
4001	cpus_read_unlock();
4002
4003	if (list_empty(&pn->pktgen_threads)) {
4004		pr_err("Initialization failed for all threads\n");
4005		ret = -ENODEV;
4006		goto remove_entry;
4007	}
4008
4009	return 0;
4010
4011remove_entry:
4012	remove_proc_entry(PGCTRL, pn->proc_dir);
4013remove:
4014	remove_proc_entry(PG_PROC_DIR, pn->net->proc_net);
4015	return ret;
4016}
4017
4018static void __net_exit pg_net_exit(struct net *net)
4019{
4020	struct pktgen_net *pn = net_generic(net, pg_net_id);
4021	struct pktgen_thread *t;
4022	struct list_head *q, *n;
4023	LIST_HEAD(list);
4024
4025	/* Stop all interfaces & threads */
4026	pn->pktgen_exiting = true;
4027
4028	mutex_lock(&pktgen_thread_lock);
4029	list_splice_init(&pn->pktgen_threads, &list);
4030	mutex_unlock(&pktgen_thread_lock);
4031
4032	list_for_each_safe(q, n, &list) {
4033		t = list_entry(q, struct pktgen_thread, th_list);
4034		list_del(&t->th_list);
4035		kthread_stop_put(t->tsk);
4036		kfree(t);
4037	}
4038
4039	remove_proc_entry(PGCTRL, pn->proc_dir);
4040	remove_proc_entry(PG_PROC_DIR, pn->net->proc_net);
4041}
4042
4043static struct pernet_operations pg_net_ops = {
4044	.init = pg_net_init,
4045	.exit = pg_net_exit,
4046	.id   = &pg_net_id,
4047	.size = sizeof(struct pktgen_net),
4048};
4049
4050static int __init pg_init(void)
4051{
4052	int ret = 0;
4053
4054	pr_info("%s", version);
4055	ret = register_pernet_subsys(&pg_net_ops);
4056	if (ret)
4057		return ret;
4058	ret = register_netdevice_notifier(&pktgen_notifier_block);
4059	if (ret)
4060		unregister_pernet_subsys(&pg_net_ops);
4061
4062	return ret;
4063}
4064
4065static void __exit pg_cleanup(void)
4066{
4067	unregister_netdevice_notifier(&pktgen_notifier_block);
4068	unregister_pernet_subsys(&pg_net_ops);
4069	/* Don't need rcu_barrier() due to use of kfree_rcu() */
4070}
4071
4072module_init(pg_init);
4073module_exit(pg_cleanup);
4074
4075MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se>");
4076MODULE_DESCRIPTION("Packet Generator tool");
4077MODULE_LICENSE("GPL");
4078MODULE_VERSION(VERSION);
4079module_param(pg_count_d, int, 0);
4080MODULE_PARM_DESC(pg_count_d, "Default number of packets to inject");
4081module_param(pg_delay_d, int, 0);
4082MODULE_PARM_DESC(pg_delay_d, "Default delay between packets (nanoseconds)");
4083module_param(pg_clone_skb_d, int, 0);
4084MODULE_PARM_DESC(pg_clone_skb_d, "Default number of copies of the same packet");
4085module_param(debug, int, 0);
4086MODULE_PARM_DESC(debug, "Enable debugging of pktgen module");
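
/* Example load with overridden defaults (a sketch; the values are
 * illustrative only):
 *
 *   modprobe pktgen pg_count_d=100000 pg_delay_d=1000 debug=1
 *
 * See Documentation/networking/pktgen.rst for the full /proc interface.
 */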
v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Authors:
   4 * Copyright 2001, 2002 by Robert Olsson <robert.olsson@its.uu.se>
   5 *                             Uppsala University and
   6 *                             Swedish University of Agricultural Sciences
   7 *
   8 * Alexey Kuznetsov  <kuznet@ms2.inr.ac.ru>
   9 * Ben Greear <greearb@candelatech.com>
  10 * Jens Låås <jens.laas@data.slu.se>
  11 *
  12 * A tool for loading the network with preconfigurated packets.
  13 * The tool is implemented as a linux module.  Parameters are output
  14 * device, delay (to hard_xmit), number of packets, and whether
  15 * to use multiple SKBs or just the same one.
  16 * pktgen uses the installed interface's output routine.
  17 *
  18 * Additional hacking by:
  19 *
  20 * Jens.Laas@data.slu.se
  21 * Improved by ANK. 010120.
  22 * Improved by ANK even more. 010212.
  23 * MAC address typo fixed. 010417 --ro
  24 * Integrated.  020301 --DaveM
  25 * Added multiskb option 020301 --DaveM
  26 * Scaling of results. 020417--sigurdur@linpro.no
  27 * Significant re-work of the module:
  28 *   *  Convert to threaded model to more efficiently be able to transmit
  29 *       and receive on multiple interfaces at once.
  30 *   *  Converted many counters to __u64 to allow longer runs.
  31 *   *  Allow configuration of ranges, like min/max IP address, MACs,
  32 *       and UDP-ports, for both source and destination, and can
  33 *       set to use a random distribution or sequentially walk the range.
  34 *   *  Can now change most values after starting.
  35 *   *  Place 12-byte packet in UDP payload with magic number,
  36 *       sequence number, and timestamp.
  37 *   *  Add receiver code that detects dropped pkts, re-ordered pkts, and
  38 *       latencies (with micro-second) precision.
  39 *   *  Add IOCTL interface to easily get counters & configuration.
  40 *   --Ben Greear <greearb@candelatech.com>
  41 *
  42 * Renamed multiskb to clone_skb and cleaned up sending core for two distinct
  43 * skb modes. A clone_skb=0 mode for Ben "ranges" work and a clone_skb != 0
  44 * as a "fastpath" with a configurable number of clones after alloc's.
  45 * clone_skb=0 means all packets are allocated this also means ranges time
  46 * stamps etc can be used. clone_skb=100 means 1 malloc is followed by 100
  47 * clones.
  48 *
  49 * Also moved to /proc/net/pktgen/
  50 * --ro
  51 *
  52 * Sept 10:  Fixed threading/locking.  Lots of bone-headed and more clever
  53 *    mistakes.  Also merged in DaveM's patch in the -pre6 patch.
  54 * --Ben Greear <greearb@candelatech.com>
  55 *
  56 * Integrated to 2.5.x 021029 --Lucio Maciel (luciomaciel@zipmail.com.br)
  57 *
  58 * 021124 Finished major redesign and rewrite for new functionality.
  59 * See Documentation/networking/pktgen.rst for how to use this.
  60 *
  61 * The new operation:
  62 * For each CPU one thread/process is created at start. This process checks
  63 * for running devices in the if_list and sends packets until count is 0 it
  64 * also the thread checks the thread->control which is used for inter-process
  65 * communication. controlling process "posts" operations to the threads this
  66 * way.
  67 * The if_list is RCU protected, and the if_lock remains to protect updating
  68 * of if_list, from "add_device" as it invoked from userspace (via proc write).
  69 *
  70 * By design there should only be *one* "controlling" process. In practice
  71 * multiple write accesses gives unpredictable result. Understood by "write"
  72 * to /proc gives a result code that should be read by the "writer".
  73 * For practical use this should be no problem.
  74 *
  75 * Note: when adding devices to a specific CPU, it is a good idea to also
  76 * set /proc/irq/XX/smp_affinity so TX interrupts get bound to the same CPU.
  77 * --ro
  78 *
  79 * Fix refcount off by one if first packet fails, potential null deref,
  80 * memleak 030710- KJP
  81 *
  82 * First "ranges" functionality for ipv6 030726 --ro
  83 *
  84 * Included flow support. 030802 ANK.
  85 *
  86 * Fixed unaligned access on IA-64 Grant Grundler <grundler@parisc-linux.org>
  87 *
  88 * Remove if fix from added Harald Welte <laforge@netfilter.org> 040419
  89 * ia64 compilation fix from  Aron Griffis <aron@hp.com> 040604
  90 *
  91 * New xmit() return, do_div and misc clean up by Stephen Hemminger
  92 * <shemminger@osdl.org> 040923
  93 *
  94 * Randy Dunlap fixed u64 printk compiler warning
  95 *
  96 * Remove FCS from BW calculation.  Lennert Buytenhek <buytenh@wantstofly.org>
  97 * New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213
  98 *
  99 * Corrections from Nikolai Malykh (nmalykh@bilim.com)
 100 * Removed unused flags F_SET_SRCMAC & F_SET_SRCIP 041230
 101 *
 102 * interruptible_sleep_on_timeout() replaced Nishanth Aravamudan <nacc@us.ibm.com>
 103 * 050103
 104 *
 105 * MPLS support by Steven Whitehouse <steve@chygwyn.com>
 106 *
 107 * 802.1Q/Q-in-Q support by Francesco Fondelli (FF) <francesco.fondelli@gmail.com>
 108 *
 109 * Fixed src_mac command to set source mac of packet to value specified in
 110 * command by Adit Ranadive <adit.262@gmail.com>
 111 */
 112
 113#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 114
 115#include <linux/sys.h>
 116#include <linux/types.h>
 117#include <linux/module.h>
 118#include <linux/moduleparam.h>
 119#include <linux/kernel.h>
 120#include <linux/mutex.h>
 121#include <linux/sched.h>
 122#include <linux/slab.h>
 123#include <linux/vmalloc.h>
 124#include <linux/unistd.h>
 125#include <linux/string.h>
 126#include <linux/ptrace.h>
 127#include <linux/errno.h>
 128#include <linux/ioport.h>
 129#include <linux/interrupt.h>
 130#include <linux/capability.h>
 131#include <linux/hrtimer.h>
 132#include <linux/freezer.h>
 133#include <linux/delay.h>
 134#include <linux/timer.h>
 135#include <linux/list.h>
 136#include <linux/init.h>
 137#include <linux/skbuff.h>
 138#include <linux/netdevice.h>
 139#include <linux/inet.h>
 140#include <linux/inetdevice.h>
 141#include <linux/rtnetlink.h>
 142#include <linux/if_arp.h>
 143#include <linux/if_vlan.h>
 144#include <linux/in.h>
 145#include <linux/ip.h>
 146#include <linux/ipv6.h>
 147#include <linux/udp.h>
 148#include <linux/proc_fs.h>
 149#include <linux/seq_file.h>
 150#include <linux/wait.h>
 151#include <linux/etherdevice.h>
 152#include <linux/kthread.h>
 153#include <linux/prefetch.h>
 154#include <linux/mmzone.h>
 155#include <net/net_namespace.h>
 156#include <net/checksum.h>
 157#include <net/ipv6.h>
 158#include <net/udp.h>
 159#include <net/ip6_checksum.h>
 160#include <net/addrconf.h>
 161#ifdef CONFIG_XFRM
 162#include <net/xfrm.h>
 163#endif
 164#include <net/netns/generic.h>
 165#include <asm/byteorder.h>
 166#include <linux/rcupdate.h>
 167#include <linux/bitops.h>
 168#include <linux/io.h>
 169#include <linux/timex.h>
 170#include <linux/uaccess.h>
 171#include <asm/dma.h>
 172#include <asm/div64.h>		/* do_div */
 173
 174#define VERSION	"2.75"
 175#define IP_NAME_SZ 32
 176#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
 177#define MPLS_STACK_BOTTOM htonl(0x00000100)
 178/* Max number of internet mix entries that can be specified in imix_weights. */
 179#define MAX_IMIX_ENTRIES 20
 180#define IMIX_PRECISION 100 /* Precision of IMIX distribution */
 181
 182#define func_enter() pr_debug("entering %s\n", __func__);
 183
 184#define PKT_FLAGS							\
 185	pf(IPV6)		/* Interface in IPV6 Mode */		\
 186	pf(IPSRC_RND)		/* IP-Src Random  */			\
 187	pf(IPDST_RND)		/* IP-Dst Random  */			\
 188	pf(TXSIZE_RND)		/* Transmit size is random */		\
 189	pf(UDPSRC_RND)		/* UDP-Src Random */			\
 190	pf(UDPDST_RND)		/* UDP-Dst Random */			\
 191	pf(UDPCSUM)		/* Include UDP checksum */		\
 192	pf(NO_TIMESTAMP)	/* Don't timestamp packets (default TS) */ \
 193	pf(MPLS_RND)		/* Random MPLS labels */		\
 194	pf(QUEUE_MAP_RND)	/* queue map Random */			\
 195	pf(QUEUE_MAP_CPU)	/* queue map mirrors smp_processor_id() */ \
 196	pf(FLOW_SEQ)		/* Sequential flows */			\
 197	pf(IPSEC)		/* ipsec on for flows */		\
 198	pf(MACSRC_RND)		/* MAC-Src Random */			\
 199	pf(MACDST_RND)		/* MAC-Dst Random */			\
 200	pf(VID_RND)		/* Random VLAN ID */			\
 201	pf(SVID_RND)		/* Random SVLAN ID */			\
 202	pf(NODE)		/* Node memory alloc*/			\
 203
 204#define pf(flag)		flag##_SHIFT,
 205enum pkt_flags {
 206	PKT_FLAGS
 207};
 208#undef pf
 209
 210/* Device flag bits */
 211#define pf(flag)		static const __u32 F_##flag = (1<<flag##_SHIFT);
 212PKT_FLAGS
 213#undef pf
 214
 215#define pf(flag)		__stringify(flag),
 216static char *pkt_flag_names[] = {
 217	PKT_FLAGS
 218};
 219#undef pf
 220
 221#define NR_PKT_FLAGS		ARRAY_SIZE(pkt_flag_names)
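
/* Each pf(X) entry above thus expands three ways: to X_SHIFT in enum
 * pkt_flags, to the bit mask F_X == (1 << X_SHIFT), and to the string "X"
 * in pkt_flag_names[], keeping all three views of the flag list in sync.
 */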
 222
 223/* Thread control flag bits */
 224#define T_STOP        (1<<0)	/* Stop run */
 225#define T_RUN         (1<<1)	/* Start run */
 226#define T_REMDEVALL   (1<<2)	/* Remove all devs */
 227#define T_REMDEV      (1<<3)	/* Remove one dev */
 228
 229/* Xmit modes */
 230#define M_START_XMIT		0	/* Default normal TX */
 231#define M_NETIF_RECEIVE 	1	/* Inject packets into stack */
 232#define M_QUEUE_XMIT		2	/* Inject packet into qdisc */
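
/* The mode is selected per device through its proc file, e.g. (a sketch;
 * "eth0" is illustrative):
 *
 *   echo "xmit_mode netif_receive" > /proc/net/pktgen/eth0
 */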
 233
 234/* If lock -- protects updating of if_list */
 235#define   if_lock(t)           mutex_lock(&(t->if_lock));
 236#define   if_unlock(t)           mutex_unlock(&(t->if_lock));
 237
 238/* Used to help with determining the pkts on receive */
 239#define PKTGEN_MAGIC 0xbe9be955
 240#define PG_PROC_DIR "pktgen"
 241#define PGCTRL	    "pgctrl"
 242
 243#define MAX_CFLOWS  65536
 244
 245#define VLAN_TAG_SIZE(x) ((x)->vlan_id == 0xffff ? 0 : 4)
 246#define SVLAN_TAG_SIZE(x) ((x)->svlan_id == 0xffff ? 0 : 4)
 247
 248struct imix_pkt {
 249	u64 size;
 250	u64 weight;
 251	u64 count_so_far;
 252};
 253
 254struct flow_state {
 255	__be32 cur_daddr;
 256	int count;
 257#ifdef CONFIG_XFRM
 258	struct xfrm_state *x;
 259#endif
 260	__u32 flags;
 261};
 262
 263/* flow flag bits */
 264#define F_INIT   (1<<0)		/* flow has been initialized */
 265
 266struct pktgen_dev {
 267	/*
  268	 * Try to keep frequently/infrequently used vars separated.
 269	 */
 270	struct proc_dir_entry *entry;	/* proc file */
 271	struct pktgen_thread *pg_thread;/* the owner */
 272	struct list_head list;		/* chaining in the thread's run-queue */
 273	struct rcu_head	 rcu;		/* freed by RCU */
 274
 275	int running;		/* if false, the test will stop */
 276
 277	/* If min != max, then we will either do a linear iteration, or
 278	 * we will do a random selection from within the range.
 279	 */
 280	__u32 flags;
 281	int xmit_mode;
 282	int min_pkt_size;
 283	int max_pkt_size;
 284	int pkt_overhead;	/* overhead for MPLS, VLANs, IPSEC etc */
 285	int nfrags;
 286	int removal_mark;	/* non-zero => the device is marked for
 287				 * removal by worker thread */
 288
 289	struct page *page;
 290	u64 delay;		/* nano-seconds */
 291
  292	__u64 count;		/* Default number of packets to send */
 293	__u64 sofar;		/* How many pkts we've sent so far */
 294	__u64 tx_bytes;		/* How many bytes we've transmitted */
  295	__u64 errors;		/* Errors when trying to transmit */
 296
 297	/* runtime counters relating to clone_skb */
 298
 299	__u32 clone_count;
 300	int last_ok;		/* Was last skb sent?
 301				 * Or a failed transmit of some sort?
 302				 * This will keep sequence numbers in order
 303				 */
 304	ktime_t next_tx;
 305	ktime_t started_at;
 306	ktime_t stopped_at;
 307	u64	idle_acc;	/* nano-seconds */
 308
 309	__u32 seq_num;
 310
 311	int clone_skb;		/*
 312				 * Use multiple SKBs during packet gen.
 313				 * If this number is greater than 1, then
 314				 * that many copies of the same packet will be
 315				 * sent before a new packet is allocated.
 316				 * If you want to send 1024 identical packets
 317				 * before creating a new packet,
 318				 * set clone_skb to 1024.
 319				 */
 320
 321	char dst_min[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
 322	char dst_max[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
 323	char src_min[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
 324	char src_max[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
 325
 326	struct in6_addr in6_saddr;
 327	struct in6_addr in6_daddr;
 328	struct in6_addr cur_in6_daddr;
 329	struct in6_addr cur_in6_saddr;
 330	/* For ranges */
 331	struct in6_addr min_in6_daddr;
 332	struct in6_addr max_in6_daddr;
 333	struct in6_addr min_in6_saddr;
 334	struct in6_addr max_in6_saddr;
 335
 336	/* If we're doing ranges, random or incremental, then this
 337	 * defines the min/max for those ranges.
 338	 */
 339	__be32 saddr_min;	/* inclusive, source IP address */
 340	__be32 saddr_max;	/* exclusive, source IP address */
 341	__be32 daddr_min;	/* inclusive, dest IP address */
 342	__be32 daddr_max;	/* exclusive, dest IP address */
 343
 344	__u16 udp_src_min;	/* inclusive, source UDP port */
 345	__u16 udp_src_max;	/* exclusive, source UDP port */
 346	__u16 udp_dst_min;	/* inclusive, dest UDP port */
 347	__u16 udp_dst_max;	/* exclusive, dest UDP port */
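	/* e.g. udp_src_min = 9, udp_src_max = 19 walks (or, with the
	 * UDPSRC_RND flag, randomly draws) source ports 9..18.
	 */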
 348
 349	/* DSCP + ECN */
 350	__u8 tos;            /* six MSB of (former) IPv4 TOS
 351				are for dscp codepoint */
 352	__u8 traffic_class;  /* ditto for the (former) Traffic Class in IPv6
 353				(see RFC 3260, sec. 4) */
 354
 355	/* IMIX */
 356	unsigned int n_imix_entries;
 357	struct imix_pkt imix_entries[MAX_IMIX_ENTRIES];
 358	/* Maps 0-IMIX_PRECISION range to imix_entry based on probability*/
 359	__u8 imix_distribution[IMIX_PRECISION];
 360
 361	/* MPLS */
 362	unsigned int nr_labels;	/* Depth of stack, 0 = no MPLS */
 363	__be32 labels[MAX_MPLS_LABELS];
 364
 365	/* VLAN/SVLAN (802.1Q/Q-in-Q) */
 366	__u8  vlan_p;
 367	__u8  vlan_cfi;
 368	__u16 vlan_id;  /* 0xffff means no vlan tag */
 369
 370	__u8  svlan_p;
 371	__u8  svlan_cfi;
 372	__u16 svlan_id; /* 0xffff means no svlan tag */
 373
 374	__u32 src_mac_count;	/* How many MACs to iterate through */
 375	__u32 dst_mac_count;	/* How many MACs to iterate through */
 376
 377	unsigned char dst_mac[ETH_ALEN];
 378	unsigned char src_mac[ETH_ALEN];
 379
 380	__u32 cur_dst_mac_offset;
 381	__u32 cur_src_mac_offset;
 382	__be32 cur_saddr;
 383	__be32 cur_daddr;
 384	__u16 ip_id;
 385	__u16 cur_udp_dst;
 386	__u16 cur_udp_src;
 387	__u16 cur_queue_map;
 388	__u32 cur_pkt_size;
 389	__u32 last_pkt_size;
 390
 391	__u8 hh[14];
 392	/* = {
 393	   0x00, 0x80, 0xC8, 0x79, 0xB3, 0xCB,
 394
 395	   We fill in SRC address later
 396	   0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 397	   0x08, 0x00
 398	   };
 399	 */
 400	__u16 pad;		/* pad out the hh struct to an even 16 bytes */
 401
 402	struct sk_buff *skb;	/* skb we are to transmit next, used for when we
 403				 * are transmitting the same one multiple times
 404				 */
  405	struct net_device *odev; /* The outgoing device.
  406				  * Note that the device should have its
  407				  * pg_info pointer pointing back to this
  408				  * device.
  409				  * Set when the user specifies the outgoing
  410				  * device name (not when the injection is
  411				  * started, as it used to be).
  412				  */
 413	netdevice_tracker dev_tracker;
 414	char odevname[32];
 415	struct flow_state *flows;
 416	unsigned int cflows;	/* Concurrent flows (config) */
 417	unsigned int lflow;		/* Flow length  (config) */
 418	unsigned int nflows;	/* accumulated flows (stats) */
 419	unsigned int curfl;		/* current sequenced flow (state)*/
 420
 421	u16 queue_map_min;
 422	u16 queue_map_max;
 423	__u32 skb_priority;	/* skb priority field */
 424	unsigned int burst;	/* number of duplicated packets to burst */
 425	int node;               /* Memory node */
 426
 427#ifdef CONFIG_XFRM
 428	__u8	ipsmode;		/* IPSEC mode (config) */
 429	__u8	ipsproto;		/* IPSEC type (config) */
 430	__u32	spi;
 431	struct xfrm_dst xdst;
 432	struct dst_ops dstops;
 433#endif
 434	char result[512];
 435};
 436
 437struct pktgen_hdr {
 438	__be32 pgh_magic;
 439	__be32 seq_num;
 440	__be32 tv_sec;
 441	__be32 tv_usec;
 442};
 443
 444
 445static unsigned int pg_net_id __read_mostly;
 446
 447struct pktgen_net {
 448	struct net		*net;
 449	struct proc_dir_entry	*proc_dir;
 450	struct list_head	pktgen_threads;
 451	bool			pktgen_exiting;
 452};
 453
 454struct pktgen_thread {
 455	struct mutex if_lock;		/* for list of devices */
  456	struct list_head if_list;	/* All devices here */
 457	struct list_head th_list;
 458	struct task_struct *tsk;
 459	char result[512];
 460
  461	/* Field for the thread to receive "posted" events like terminate,
  462	   stop ifs, etc. */
 463
 464	u32 control;
 465	int cpu;
 466
 467	wait_queue_head_t queue;
 468	struct completion start_done;
 469	struct pktgen_net *net;
 470};
 471
 472#define REMOVE 1
 473#define FIND   0
 474
 475static const char version[] =
 476	"Packet Generator for packet performance testing. "
 477	"Version: " VERSION "\n";
 478
 479static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
 480static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
 481static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
 482					  const char *ifname, bool exact);
 483static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
 484static void pktgen_run_all_threads(struct pktgen_net *pn);
 485static void pktgen_reset_all_threads(struct pktgen_net *pn);
 486static void pktgen_stop_all_threads(struct pktgen_net *pn);
 487
 488static void pktgen_stop(struct pktgen_thread *t);
 489static void pktgen_clear_counters(struct pktgen_dev *pkt_dev);
 490static void fill_imix_distribution(struct pktgen_dev *pkt_dev);
 491
 492/* Module parameters, defaults. */
 493static int pg_count_d __read_mostly = 1000;
 494static int pg_delay_d __read_mostly;
 495static int pg_clone_skb_d  __read_mostly;
 496static int debug  __read_mostly;
 497
 498static DEFINE_MUTEX(pktgen_thread_lock);
 499
 500static struct notifier_block pktgen_notifier_block = {
 501	.notifier_call = pktgen_device_event,
 502};
 503
 504/*
 505 * /proc handling functions
 506 *
 507 */
 508
 509static int pgctrl_show(struct seq_file *seq, void *v)
 510{
 511	seq_puts(seq, version);
 512	return 0;
 513}
 514
 515static ssize_t pgctrl_write(struct file *file, const char __user *buf,
 516			    size_t count, loff_t *ppos)
 517{
 518	char data[128];
 519	struct pktgen_net *pn = net_generic(current->nsproxy->net_ns, pg_net_id);
 520
 521	if (!capable(CAP_NET_ADMIN))
 522		return -EPERM;
 523
 524	if (count == 0)
 525		return -EINVAL;
 526
 527	if (count > sizeof(data))
 528		count = sizeof(data);
 529
 530	if (copy_from_user(data, buf, count))
 531		return -EFAULT;
 532
 533	data[count - 1] = 0;	/* Strip trailing '\n' and terminate string */
 534
 535	if (!strcmp(data, "stop"))
 536		pktgen_stop_all_threads(pn);
 537	else if (!strcmp(data, "start"))
 538		pktgen_run_all_threads(pn);
 539	else if (!strcmp(data, "reset"))
 540		pktgen_reset_all_threads(pn);
 541	else
 542		return -EINVAL;
 543
 544	return count;
 545}
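
/* The control file accepts exactly the three commands above, e.g.:
 *
 *   echo "start" > /proc/net/pktgen/pgctrl
 *   echo "stop"  > /proc/net/pktgen/pgctrl
 *   echo "reset" > /proc/net/pktgen/pgctrl
 */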
 546
 547static int pgctrl_open(struct inode *inode, struct file *file)
 548{
 549	return single_open(file, pgctrl_show, pde_data(inode));
 550}
 551
 552static const struct proc_ops pktgen_proc_ops = {
 553	.proc_open	= pgctrl_open,
 554	.proc_read	= seq_read,
 555	.proc_lseek	= seq_lseek,
 556	.proc_write	= pgctrl_write,
 557	.proc_release	= single_release,
 558};
 559
 560static int pktgen_if_show(struct seq_file *seq, void *v)
 561{
 562	const struct pktgen_dev *pkt_dev = seq->private;
 563	ktime_t stopped;
 564	unsigned int i;
 565	u64 idle;
 566
 567	seq_printf(seq,
 568		   "Params: count %llu  min_pkt_size: %u  max_pkt_size: %u\n",
 569		   (unsigned long long)pkt_dev->count, pkt_dev->min_pkt_size,
 570		   pkt_dev->max_pkt_size);
 571
 572	if (pkt_dev->n_imix_entries > 0) {
 573		seq_puts(seq, "     imix_weights: ");
 574		for (i = 0; i < pkt_dev->n_imix_entries; i++) {
 575			seq_printf(seq, "%llu,%llu ",
 576				   pkt_dev->imix_entries[i].size,
 577				   pkt_dev->imix_entries[i].weight);
 578		}
 579		seq_puts(seq, "\n");
 580	}
 581
 582	seq_printf(seq,
 583		   "     frags: %d  delay: %llu  clone_skb: %d  ifname: %s\n",
 584		   pkt_dev->nfrags, (unsigned long long) pkt_dev->delay,
 585		   pkt_dev->clone_skb, pkt_dev->odevname);
 586
 587	seq_printf(seq, "     flows: %u flowlen: %u\n", pkt_dev->cflows,
 588		   pkt_dev->lflow);
 589
 590	seq_printf(seq,
 591		   "     queue_map_min: %u  queue_map_max: %u\n",
 592		   pkt_dev->queue_map_min,
 593		   pkt_dev->queue_map_max);
 594
 595	if (pkt_dev->skb_priority)
 596		seq_printf(seq, "     skb_priority: %u\n",
 597			   pkt_dev->skb_priority);
 598
 599	if (pkt_dev->flags & F_IPV6) {
 600		seq_printf(seq,
 601			   "     saddr: %pI6c  min_saddr: %pI6c  max_saddr: %pI6c\n"
 602			   "     daddr: %pI6c  min_daddr: %pI6c  max_daddr: %pI6c\n",
 603			   &pkt_dev->in6_saddr,
 604			   &pkt_dev->min_in6_saddr, &pkt_dev->max_in6_saddr,
 605			   &pkt_dev->in6_daddr,
 606			   &pkt_dev->min_in6_daddr, &pkt_dev->max_in6_daddr);
 607	} else {
 608		seq_printf(seq,
 609			   "     dst_min: %s  dst_max: %s\n",
 610			   pkt_dev->dst_min, pkt_dev->dst_max);
 611		seq_printf(seq,
 612			   "     src_min: %s  src_max: %s\n",
 613			   pkt_dev->src_min, pkt_dev->src_max);
 614	}
 615
 616	seq_puts(seq, "     src_mac: ");
 617
 618	seq_printf(seq, "%pM ",
 619		   is_zero_ether_addr(pkt_dev->src_mac) ?
 620			     pkt_dev->odev->dev_addr : pkt_dev->src_mac);
 621
 622	seq_puts(seq, "dst_mac: ");
 623	seq_printf(seq, "%pM\n", pkt_dev->dst_mac);
 624
 625	seq_printf(seq,
 626		   "     udp_src_min: %d  udp_src_max: %d"
 627		   "  udp_dst_min: %d  udp_dst_max: %d\n",
 628		   pkt_dev->udp_src_min, pkt_dev->udp_src_max,
 629		   pkt_dev->udp_dst_min, pkt_dev->udp_dst_max);
 630
 631	seq_printf(seq,
 632		   "     src_mac_count: %d  dst_mac_count: %d\n",
 633		   pkt_dev->src_mac_count, pkt_dev->dst_mac_count);
 634
 635	if (pkt_dev->nr_labels) {
 636		seq_puts(seq, "     mpls: ");
 637		for (i = 0; i < pkt_dev->nr_labels; i++)
 638			seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
 639				   i == pkt_dev->nr_labels-1 ? "\n" : ", ");
 640	}
 641
 642	if (pkt_dev->vlan_id != 0xffff)
 643		seq_printf(seq, "     vlan_id: %u  vlan_p: %u  vlan_cfi: %u\n",
 644			   pkt_dev->vlan_id, pkt_dev->vlan_p,
 645			   pkt_dev->vlan_cfi);
 646
 647	if (pkt_dev->svlan_id != 0xffff)
 648		seq_printf(seq, "     svlan_id: %u  vlan_p: %u  vlan_cfi: %u\n",
 649			   pkt_dev->svlan_id, pkt_dev->svlan_p,
 650			   pkt_dev->svlan_cfi);
 651
 652	if (pkt_dev->tos)
 653		seq_printf(seq, "     tos: 0x%02x\n", pkt_dev->tos);
 654
 655	if (pkt_dev->traffic_class)
 656		seq_printf(seq, "     traffic_class: 0x%02x\n", pkt_dev->traffic_class);
 657
 658	if (pkt_dev->burst > 1)
 659		seq_printf(seq, "     burst: %d\n", pkt_dev->burst);
 660
 661	if (pkt_dev->node >= 0)
 662		seq_printf(seq, "     node: %d\n", pkt_dev->node);
 663
 664	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE)
 665		seq_puts(seq, "     xmit_mode: netif_receive\n");
 666	else if (pkt_dev->xmit_mode == M_QUEUE_XMIT)
 667		seq_puts(seq, "     xmit_mode: xmit_queue\n");
 668
 669	seq_puts(seq, "     Flags: ");
 670
 671	for (i = 0; i < NR_PKT_FLAGS; i++) {
 672		if (i == F_FLOW_SEQ)
 673			if (!pkt_dev->cflows)
 674				continue;
 675
 676		if (pkt_dev->flags & (1 << i))
 677			seq_printf(seq, "%s  ", pkt_flag_names[i]);
 678		else if (i == F_FLOW_SEQ)
 679			seq_puts(seq, "FLOW_RND  ");
 680
 681#ifdef CONFIG_XFRM
 682		if (i == F_IPSEC && pkt_dev->spi)
 683			seq_printf(seq, "spi:%u", pkt_dev->spi);
 684#endif
 685	}
 686
 687	seq_puts(seq, "\n");
 688
 689	/* not really stopped, more like last-running-at */
 690	stopped = pkt_dev->running ? ktime_get() : pkt_dev->stopped_at;
 691	idle = pkt_dev->idle_acc;
 692	do_div(idle, NSEC_PER_USEC);
 693
 694	seq_printf(seq,
 695		   "Current:\n     pkts-sofar: %llu  errors: %llu\n",
 696		   (unsigned long long)pkt_dev->sofar,
 697		   (unsigned long long)pkt_dev->errors);
 698
 699	if (pkt_dev->n_imix_entries > 0) {
 700		int i;
 701
 702		seq_puts(seq, "     imix_size_counts: ");
 703		for (i = 0; i < pkt_dev->n_imix_entries; i++) {
 704			seq_printf(seq, "%llu,%llu ",
 705				   pkt_dev->imix_entries[i].size,
 706				   pkt_dev->imix_entries[i].count_so_far);
 707		}
 708		seq_puts(seq, "\n");
 709	}
 710
 711	seq_printf(seq,
 712		   "     started: %lluus  stopped: %lluus idle: %lluus\n",
 713		   (unsigned long long) ktime_to_us(pkt_dev->started_at),
 714		   (unsigned long long) ktime_to_us(stopped),
 715		   (unsigned long long) idle);
 716
 717	seq_printf(seq,
 718		   "     seq_num: %d  cur_dst_mac_offset: %d  cur_src_mac_offset: %d\n",
 719		   pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset,
 720		   pkt_dev->cur_src_mac_offset);
 721
 722	if (pkt_dev->flags & F_IPV6) {
 723		seq_printf(seq, "     cur_saddr: %pI6c  cur_daddr: %pI6c\n",
 724				&pkt_dev->cur_in6_saddr,
 725				&pkt_dev->cur_in6_daddr);
 726	} else
 727		seq_printf(seq, "     cur_saddr: %pI4  cur_daddr: %pI4\n",
 728			   &pkt_dev->cur_saddr, &pkt_dev->cur_daddr);
 729
 730	seq_printf(seq, "     cur_udp_dst: %d  cur_udp_src: %d\n",
 731		   pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src);
 732
 733	seq_printf(seq, "     cur_queue_map: %u\n", pkt_dev->cur_queue_map);
 734
 735	seq_printf(seq, "     flows: %u\n", pkt_dev->nflows);
 736
 737	if (pkt_dev->result[0])
 738		seq_printf(seq, "Result: %s\n", pkt_dev->result);
 739	else
 740		seq_puts(seq, "Result: Idle\n");
 741
 742	return 0;
 743}
 744
 745
 746static int hex32_arg(const char __user *user_buffer, unsigned long maxlen,
 747		     __u32 *num)
 748{
 749	int i = 0;
 750	*num = 0;
 751
 752	for (; i < maxlen; i++) {
 753		int value;
 754		char c;
 755		*num <<= 4;
 756		if (get_user(c, &user_buffer[i]))
 757			return -EFAULT;
 758		value = hex_to_bin(c);
 759		if (value >= 0)
 760			*num |= value;
 761		else
 762			break;
 763	}
 764	return i;
 765}
 766
 767static int count_trail_chars(const char __user * user_buffer,
 768			     unsigned int maxlen)
 769{
 770	int i;
 771
 772	for (i = 0; i < maxlen; i++) {
 773		char c;
 774		if (get_user(c, &user_buffer[i]))
 775			return -EFAULT;
 776		switch (c) {
 777		case '\"':
 778		case '\n':
 779		case '\r':
 780		case '\t':
 781		case ' ':
 782		case '=':
 783			break;
 784		default:
 785			goto done;
 786		}
 787	}
 788done:
 789	return i;
 790}
 791
 792static long num_arg(const char __user *user_buffer, unsigned long maxlen,
 793				unsigned long *num)
 794{
 795	int i;
 796	*num = 0;
 797
 798	for (i = 0; i < maxlen; i++) {
 799		char c;
 800		if (get_user(c, &user_buffer[i]))
 801			return -EFAULT;
 802		if ((c >= '0') && (c <= '9')) {
 803			*num *= 10;
 804			*num += c - '0';
 805		} else
 806			break;
 807	}
 808	return i;
 809}
 810
 811static int strn_len(const char __user * user_buffer, unsigned int maxlen)
 812{
 813	int i;
 814
 815	for (i = 0; i < maxlen; i++) {
 816		char c;
 817		if (get_user(c, &user_buffer[i]))
 818			return -EFAULT;
 819		switch (c) {
 820		case '\"':
 821		case '\n':
 822		case '\r':
 823		case '\t':
 824		case ' ':
 825			goto done_str;
 826		default:
 827			break;
 828		}
 829	}
 830done_str:
 831	return i;
 832}
 833
 834/* Parses imix entries from user buffer.
 835 * The user buffer should consist of imix entries separated by spaces
 836 * where each entry consists of size and weight delimited by commas.
 837 * "size1,weight_1 size2,weight_2 ... size_n,weight_n" for example.
 838 */
 839static ssize_t get_imix_entries(const char __user *buffer,
 840				struct pktgen_dev *pkt_dev)
 841{
 842	const int max_digits = 10;
 843	int i = 0;
 844	long len;
 845	char c;
 846
 847	pkt_dev->n_imix_entries = 0;
 848
 849	do {
 850		unsigned long weight;
 851		unsigned long size;
 852
 853		len = num_arg(&buffer[i], max_digits, &size);
 854		if (len < 0)
 855			return len;
 856		i += len;
 857		if (get_user(c, &buffer[i]))
 858			return -EFAULT;
 859		/* Check for comma between size_i and weight_i */
 860		if (c != ',')
 861			return -EINVAL;
 862		i++;
 863
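		/* clamp to the minimum Ethernet(14) + IPv4(20) + UDP(8) size */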
 864		if (size < 14 + 20 + 8)
 865			size = 14 + 20 + 8;
 866
 867		len = num_arg(&buffer[i], max_digits, &weight);
 868		if (len < 0)
 869			return len;
 870		if (weight <= 0)
 871			return -EINVAL;
 872
 873		/* bounds-check before writing to avoid overrunning the array */
 874		if (pkt_dev->n_imix_entries >= MAX_IMIX_ENTRIES)
 875			return -E2BIG;
 876
 877		pkt_dev->imix_entries[pkt_dev->n_imix_entries].size = size;
 878		pkt_dev->imix_entries[pkt_dev->n_imix_entries].weight = weight;
 879
 880		i += len;
 881		if (get_user(c, &buffer[i]))
 882			return -EFAULT;
 883
 884		i++;
 885		pkt_dev->n_imix_entries++;
 886	} while (c == ' ');
 886
 887	return i;
 888}
 889
 890static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev)
 891{
 892	unsigned int n = 0;
 893	char c;
 894	ssize_t i = 0;
 895	int len;
 896
 897	pkt_dev->nr_labels = 0;
 898	do {
 899		__u32 tmp;
 900		len = hex32_arg(&buffer[i], 8, &tmp);
 901		if (len <= 0)
 902			return len;
 903		pkt_dev->labels[n] = htonl(tmp);
 904		if (pkt_dev->labels[n] & MPLS_STACK_BOTTOM)
 905			pkt_dev->flags |= F_MPLS_RND;
 906		i += len;
 907		if (get_user(c, &buffer[i]))
 908			return -EFAULT;
 909		i++;
 910		n++;
 911		if (n >= MAX_MPLS_LABELS)
 912			return -E2BIG;
 913	} while (c == ',');
 914
 915	pkt_dev->nr_labels = n;
 916	return i;
 917}
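
/* Example (a sketch; the label values are illustrative):
 *
 *   echo "mpls 0001000a,0002000a,0000000a" > /proc/net/pktgen/eth0
 *
 * pushes a three-entry MPLS label stack onto every packet.
 */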
 918
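/* Flag names match pkt_flag_names[]; a leading '!' clears a flag, e.g.
 * "flag UDPSRC_RND" enables random source ports and "flag !UDPSRC_RND"
 * turns them off again.
 */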
 919static __u32 pktgen_read_flag(const char *f, bool *disable)
 920{
 921	__u32 i;
 922
 923	if (f[0] == '!') {
 924		*disable = true;
 925		f++;
 926	}
 927
 928	for (i = 0; i < NR_PKT_FLAGS; i++) {
 929		if (!IS_ENABLED(CONFIG_XFRM) && i == IPSEC_SHIFT)
 930			continue;
 931
 932		/* allow only disabling ipv6 flag */
 933		if (!*disable && i == IPV6_SHIFT)
 934			continue;
 935
 936		if (strcmp(f, pkt_flag_names[i]) == 0)
 937			return 1 << i;
 938	}
 939
 940	if (strcmp(f, "FLOW_RND") == 0) {
 941		*disable = !*disable;
 942		return F_FLOW_SEQ;
 943	}
 944
 945	return 0;
 946}
 947
 948static ssize_t pktgen_if_write(struct file *file,
 949			       const char __user * user_buffer, size_t count,
 950			       loff_t * offset)
 951{
 952	struct seq_file *seq = file->private_data;
 953	struct pktgen_dev *pkt_dev = seq->private;
 954	int i, max, len;
 955	char name[16], valstr[32];
 956	unsigned long value = 0;
 957	char *pg_result = NULL;
 958	int tmp = 0;
 959	char buf[128];
 960
 961	pg_result = &(pkt_dev->result[0]);
 962
 963	if (count < 1) {
 964		pr_warn("wrong command format\n");
 965		return -EINVAL;
 966	}
 967
 968	max = count;
 969	tmp = count_trail_chars(user_buffer, max);
 970	if (tmp < 0) {
 971		pr_warn("illegal format\n");
 972		return tmp;
 973	}
 974	i = tmp;
 975
 976	/* Read variable name */
 977
 978	len = strn_len(&user_buffer[i], sizeof(name) - 1);
 979	if (len < 0)
 980		return len;
 981
 982	memset(name, 0, sizeof(name));
 983	if (copy_from_user(name, &user_buffer[i], len))
 984		return -EFAULT;
 985	i += len;
 986
 987	max = count - i;
 988	len = count_trail_chars(&user_buffer[i], max);
 989	if (len < 0)
 990		return len;
 991
 992	i += len;
 993
 994	if (debug) {
 995		size_t copy = min_t(size_t, count + 1, 1024);
 996		char *tp = strndup_user(user_buffer, copy);
 997
 998		if (IS_ERR(tp))
 999			return PTR_ERR(tp);
1000
1001		pr_debug("%s,%zu  buffer -:%s:-\n", name, count, tp);
1002		kfree(tp);
1003	}
1004
1005	if (!strcmp(name, "min_pkt_size")) {
1006		len = num_arg(&user_buffer[i], 10, &value);
1007		if (len < 0)
1008			return len;
1009
1010		i += len;
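		/* never drop below the Ethernet(14) + IPv4(20) + UDP(8) minimum */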
1011		if (value < 14 + 20 + 8)
1012			value = 14 + 20 + 8;
1013		if (value != pkt_dev->min_pkt_size) {
1014			pkt_dev->min_pkt_size = value;
1015			pkt_dev->cur_pkt_size = value;
1016		}
1017		sprintf(pg_result, "OK: min_pkt_size=%d",
1018			pkt_dev->min_pkt_size);
1019		return count;
1020	}
1021
1022	if (!strcmp(name, "max_pkt_size")) {
1023		len = num_arg(&user_buffer[i], 10, &value);
1024		if (len < 0)
1025			return len;
1026
1027		i += len;
1028		if (value < 14 + 20 + 8)
1029			value = 14 + 20 + 8;
1030		if (value != pkt_dev->max_pkt_size) {
1031			pkt_dev->max_pkt_size = value;
1032			pkt_dev->cur_pkt_size = value;
1033		}
1034		sprintf(pg_result, "OK: max_pkt_size=%d",
1035			pkt_dev->max_pkt_size);
1036		return count;
1037	}
1038
1039	/* Shortcut for min = max */
1040
1041	if (!strcmp(name, "pkt_size")) {
1042		len = num_arg(&user_buffer[i], 10, &value);
1043		if (len < 0)
1044			return len;
1045
1046		i += len;
1047		if (value < 14 + 20 + 8)
1048			value = 14 + 20 + 8;
1049		if (value != pkt_dev->min_pkt_size) {
1050			pkt_dev->min_pkt_size = value;
1051			pkt_dev->max_pkt_size = value;
1052			pkt_dev->cur_pkt_size = value;
1053		}
1054		sprintf(pg_result, "OK: pkt_size=%d", pkt_dev->min_pkt_size);
1055		return count;
1056	}
1057
1058	if (!strcmp(name, "imix_weights")) {
1059		if (pkt_dev->clone_skb > 0)
1060			return -EINVAL;
1061
1062		len = get_imix_entries(&user_buffer[i], pkt_dev);
1063		if (len < 0)
1064			return len;
1065
1066		fill_imix_distribution(pkt_dev);
1067
1068		i += len;
1069		return count;
1070	}
1071
1072	if (!strcmp(name, "debug")) {
1073		len = num_arg(&user_buffer[i], 10, &value);
1074		if (len < 0)
1075			return len;
1076
1077		i += len;
1078		debug = value;
1079		sprintf(pg_result, "OK: debug=%u", debug);
1080		return count;
1081	}
1082
1083	if (!strcmp(name, "frags")) {
1084		len = num_arg(&user_buffer[i], 10, &value);
1085		if (len < 0)
1086			return len;
1087
1088		i += len;
1089		pkt_dev->nfrags = value;
1090		sprintf(pg_result, "OK: frags=%d", pkt_dev->nfrags);
1091		return count;
1092	}
1093	if (!strcmp(name, "delay")) {
1094		len = num_arg(&user_buffer[i], 10, &value);
1095		if (len < 0)
1096			return len;
1097
1098		i += len;
1099		if (value == 0x7FFFFFFF)
1100			pkt_dev->delay = ULLONG_MAX;
1101		else
1102			pkt_dev->delay = (u64)value;
1103
1104		sprintf(pg_result, "OK: delay=%llu",
1105			(unsigned long long) pkt_dev->delay);
1106		return count;
1107	}
1108	if (!strcmp(name, "rate")) {
1109		len = num_arg(&user_buffer[i], 10, &value);
1110		if (len < 0)
1111			return len;
1112
1113		i += len;
1114		if (!value)
1115			return len;
1116		pkt_dev->delay = pkt_dev->min_pkt_size*8*NSEC_PER_USEC/value;
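		/* value is in Mb/s: e.g. min_pkt_size = 60 and "rate 480"
		 * yield a delay of 60 * 8 * 1000 / 480 = 1000 ns per packet.
		 */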
1117		if (debug)
1118			pr_info("Delay set at: %llu ns\n", pkt_dev->delay);
1119
1120		sprintf(pg_result, "OK: rate=%lu", value);
1121		return count;
1122	}
1123	if (!strcmp(name, "ratep")) {
1124		len = num_arg(&user_buffer[i], 10, &value);
1125		if (len < 0)
1126			return len;
1127
1128		i += len;
1129		if (!value)
1130			return len;
1131		pkt_dev->delay = NSEC_PER_SEC/value;
1132		if (debug)
1133			pr_info("Delay set at: %llu ns\n", pkt_dev->delay);
1134
 1135		sprintf(pg_result, "OK: ratep=%lu", value);
1136		return count;
1137	}
1138	if (!strcmp(name, "udp_src_min")) {
1139		len = num_arg(&user_buffer[i], 10, &value);
1140		if (len < 0)
1141			return len;
1142
1143		i += len;
1144		if (value != pkt_dev->udp_src_min) {
1145			pkt_dev->udp_src_min = value;
1146			pkt_dev->cur_udp_src = value;
1147		}
1148		sprintf(pg_result, "OK: udp_src_min=%u", pkt_dev->udp_src_min);
1149		return count;
1150	}
1151	if (!strcmp(name, "udp_dst_min")) {
1152		len = num_arg(&user_buffer[i], 10, &value);
1153		if (len < 0)
1154			return len;
1155
1156		i += len;
1157		if (value != pkt_dev->udp_dst_min) {
1158			pkt_dev->udp_dst_min = value;
1159			pkt_dev->cur_udp_dst = value;
1160		}
1161		sprintf(pg_result, "OK: udp_dst_min=%u", pkt_dev->udp_dst_min);
1162		return count;
1163	}
1164	if (!strcmp(name, "udp_src_max")) {
1165		len = num_arg(&user_buffer[i], 10, &value);
1166		if (len < 0)
1167			return len;
1168
1169		i += len;
1170		if (value != pkt_dev->udp_src_max) {
1171			pkt_dev->udp_src_max = value;
1172			pkt_dev->cur_udp_src = value;
1173		}
1174		sprintf(pg_result, "OK: udp_src_max=%u", pkt_dev->udp_src_max);
1175		return count;
1176	}
1177	if (!strcmp(name, "udp_dst_max")) {
1178		len = num_arg(&user_buffer[i], 10, &value);
1179		if (len < 0)
1180			return len;
1181
1182		i += len;
1183		if (value != pkt_dev->udp_dst_max) {
1184			pkt_dev->udp_dst_max = value;
1185			pkt_dev->cur_udp_dst = value;
1186		}
1187		sprintf(pg_result, "OK: udp_dst_max=%u", pkt_dev->udp_dst_max);
1188		return count;
1189	}
1190	if (!strcmp(name, "clone_skb")) {
1191		len = num_arg(&user_buffer[i], 10, &value);
1192		if (len < 0)
1193			return len;
1194		/* clone_skb is not supported for netif_receive xmit_mode and
1195		 * IMIX mode.
1196		 */
1197		if ((value > 0) &&
1198		    ((pkt_dev->xmit_mode == M_NETIF_RECEIVE) ||
1199		     !(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
1200			return -ENOTSUPP;
1201		if (value > 0 && pkt_dev->n_imix_entries > 0)
1202			return -EINVAL;
1203
1204		i += len;
1205		pkt_dev->clone_skb = value;
1206
1207		sprintf(pg_result, "OK: clone_skb=%d", pkt_dev->clone_skb);
1208		return count;
1209	}
1210	if (!strcmp(name, "count")) {
1211		len = num_arg(&user_buffer[i], 10, &value);
1212		if (len < 0)
1213			return len;
1214
1215		i += len;
1216		pkt_dev->count = value;
1217		sprintf(pg_result, "OK: count=%llu",
1218			(unsigned long long)pkt_dev->count);
1219		return count;
1220	}
1221	if (!strcmp(name, "src_mac_count")) {
1222		len = num_arg(&user_buffer[i], 10, &value);
1223		if (len < 0)
1224			return len;
1225
1226		i += len;
1227		if (pkt_dev->src_mac_count != value) {
1228			pkt_dev->src_mac_count = value;
1229			pkt_dev->cur_src_mac_offset = 0;
1230		}
1231		sprintf(pg_result, "OK: src_mac_count=%d",
1232			pkt_dev->src_mac_count);
1233		return count;
1234	}
1235	if (!strcmp(name, "dst_mac_count")) {
1236		len = num_arg(&user_buffer[i], 10, &value);
1237		if (len < 0)
1238			return len;
1239
1240		i += len;
1241		if (pkt_dev->dst_mac_count != value) {
1242			pkt_dev->dst_mac_count = value;
1243			pkt_dev->cur_dst_mac_offset = 0;
1244		}
1245		sprintf(pg_result, "OK: dst_mac_count=%d",
1246			pkt_dev->dst_mac_count);
1247		return count;
1248	}
1249	if (!strcmp(name, "burst")) {
1250		len = num_arg(&user_buffer[i], 10, &value);
1251		if (len < 0)
1252			return len;
1253
1254		i += len;
1255		if ((value > 1) &&
1256		    ((pkt_dev->xmit_mode == M_QUEUE_XMIT) ||
1257		     ((pkt_dev->xmit_mode == M_START_XMIT) &&
1258		     (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))))
1259			return -ENOTSUPP;
1260		pkt_dev->burst = value < 1 ? 1 : value;
1261		sprintf(pg_result, "OK: burst=%u", pkt_dev->burst);
1262		return count;
1263	}
1264	if (!strcmp(name, "node")) {
1265		len = num_arg(&user_buffer[i], 10, &value);
1266		if (len < 0)
1267			return len;
1268
1269		i += len;
1270
1271		if (node_possible(value)) {
1272			pkt_dev->node = value;
1273			sprintf(pg_result, "OK: node=%d", pkt_dev->node);
1274			if (pkt_dev->page) {
1275				put_page(pkt_dev->page);
1276				pkt_dev->page = NULL;
1277			}
1278		}
1279		else
1280			sprintf(pg_result, "ERROR: node not possible");
1281		return count;
1282	}
1283	if (!strcmp(name, "xmit_mode")) {
1284		char f[32];
1285
1286		memset(f, 0, 32);
1287		len = strn_len(&user_buffer[i], sizeof(f) - 1);
1288		if (len < 0)
1289			return len;
1290
1291		if (copy_from_user(f, &user_buffer[i], len))
1292			return -EFAULT;
1293		i += len;
1294
1295		if (strcmp(f, "start_xmit") == 0) {
1296			pkt_dev->xmit_mode = M_START_XMIT;
1297		} else if (strcmp(f, "netif_receive") == 0) {
1298			/* clone_skb set earlier, not supported in this mode */
1299			if (pkt_dev->clone_skb > 0)
1300				return -ENOTSUPP;
1301
1302			pkt_dev->xmit_mode = M_NETIF_RECEIVE;
1303
1304			/* make sure new packet is allocated every time
1305			 * pktgen_xmit() is called
1306			 */
1307			pkt_dev->last_ok = 1;
1308		} else if (strcmp(f, "queue_xmit") == 0) {
1309			pkt_dev->xmit_mode = M_QUEUE_XMIT;
1310			pkt_dev->last_ok = 1;
1311		} else {
1312			sprintf(pg_result,
1313				"xmit_mode -:%s:- unknown\nAvailable modes: %s",
1314				f, "start_xmit, netif_receive\n");
1315			return count;
1316		}
1317		sprintf(pg_result, "OK: xmit_mode=%s", f);
1318		return count;
1319	}
1320	if (!strcmp(name, "flag")) {
1321		__u32 flag;
1322		char f[32];
1323		bool disable = false;
1324
1325		memset(f, 0, 32);
1326		len = strn_len(&user_buffer[i], sizeof(f) - 1);
1327		if (len < 0)
1328			return len;
1329
1330		if (copy_from_user(f, &user_buffer[i], len))
1331			return -EFAULT;
1332		i += len;
1333
1334		flag = pktgen_read_flag(f, &disable);
1335
1336		if (flag) {
1337			if (disable)
1338				pkt_dev->flags &= ~flag;
1339			else
1340				pkt_dev->flags |= flag;
1341		} else {
1342			sprintf(pg_result,
1343				"Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
1344				f,
1345				"IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, "
1346				"MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, "
1347				"MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, "
1348				"QUEUE_MAP_RND, QUEUE_MAP_CPU, UDPCSUM, "
1349				"NO_TIMESTAMP, "
1350#ifdef CONFIG_XFRM
1351				"IPSEC, "
1352#endif
1353				"NODE_ALLOC\n");
1354			return count;
1355		}
1356		sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags);
1357		return count;
1358	}
1359	if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) {
1360		len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_min) - 1);
1361		if (len < 0)
1362			return len;
1363
1364		if (copy_from_user(buf, &user_buffer[i], len))
1365			return -EFAULT;
1366		buf[len] = 0;
1367		if (strcmp(buf, pkt_dev->dst_min) != 0) {
1368			memset(pkt_dev->dst_min, 0, sizeof(pkt_dev->dst_min));
1369			strcpy(pkt_dev->dst_min, buf);
1370			pkt_dev->daddr_min = in_aton(pkt_dev->dst_min);
1371			pkt_dev->cur_daddr = pkt_dev->daddr_min;
1372		}
1373		if (debug)
1374			pr_debug("dst_min set to: %s\n", pkt_dev->dst_min);
1375		i += len;
1376		sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min);
1377		return count;
1378	}
1379	if (!strcmp(name, "dst_max")) {
1380		len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_max) - 1);
1381		if (len < 0)
1382			return len;
1383
1384		if (copy_from_user(buf, &user_buffer[i], len))
1385			return -EFAULT;
1386		buf[len] = 0;
1387		if (strcmp(buf, pkt_dev->dst_max) != 0) {
1388			memset(pkt_dev->dst_max, 0, sizeof(pkt_dev->dst_max));
1389			strcpy(pkt_dev->dst_max, buf);
1390			pkt_dev->daddr_max = in_aton(pkt_dev->dst_max);
1391			pkt_dev->cur_daddr = pkt_dev->daddr_max;
1392		}
1393		if (debug)
1394			pr_debug("dst_max set to: %s\n", pkt_dev->dst_max);
1395		i += len;
1396		sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max);
1397		return count;
1398	}
1399	if (!strcmp(name, "dst6")) {
1400		len = strn_len(&user_buffer[i], sizeof(buf) - 1);
1401		if (len < 0)
1402			return len;
1403
1404		pkt_dev->flags |= F_IPV6;
1405
1406		if (copy_from_user(buf, &user_buffer[i], len))
1407			return -EFAULT;
1408		buf[len] = 0;
1409
1410		in6_pton(buf, -1, pkt_dev->in6_daddr.s6_addr, -1, NULL);
1411		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_daddr);
1412
1413		pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr;
1414
1415		if (debug)
1416			pr_debug("dst6 set to: %s\n", buf);
1417
1418		i += len;
1419		sprintf(pg_result, "OK: dst6=%s", buf);
1420		return count;
1421	}
1422	if (!strcmp(name, "dst6_min")) {
1423		len = strn_len(&user_buffer[i], sizeof(buf) - 1);
1424		if (len < 0)
1425			return len;
1426
1427		pkt_dev->flags |= F_IPV6;
1428
1429		if (copy_from_user(buf, &user_buffer[i], len))
1430			return -EFAULT;
1431		buf[len] = 0;
1432
1433		in6_pton(buf, -1, pkt_dev->min_in6_daddr.s6_addr, -1, NULL);
1434		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_daddr);
1435
1436		pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr;
1437		if (debug)
1438			pr_debug("dst6_min set to: %s\n", buf);
1439
1440		i += len;
1441		sprintf(pg_result, "OK: dst6_min=%s", buf);
1442		return count;
1443	}
1444	if (!strcmp(name, "dst6_max")) {
1445		len = strn_len(&user_buffer[i], sizeof(buf) - 1);
1446		if (len < 0)
1447			return len;
1448
1449		pkt_dev->flags |= F_IPV6;
1450
1451		if (copy_from_user(buf, &user_buffer[i], len))
1452			return -EFAULT;
1453		buf[len] = 0;
1454
1455		in6_pton(buf, -1, pkt_dev->max_in6_daddr.s6_addr, -1, NULL);
1456		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_daddr);
1457
1458		if (debug)
1459			pr_debug("dst6_max set to: %s\n", buf);
1460
1461		i += len;
1462		sprintf(pg_result, "OK: dst6_max=%s", buf);
1463		return count;
1464	}
1465	if (!strcmp(name, "src6")) {
1466		len = strn_len(&user_buffer[i], sizeof(buf) - 1);
1467		if (len < 0)
1468			return len;
1469
1470		pkt_dev->flags |= F_IPV6;
1471
1472		if (copy_from_user(buf, &user_buffer[i], len))
1473			return -EFAULT;
1474		buf[len] = 0;
1475
1476		in6_pton(buf, -1, pkt_dev->in6_saddr.s6_addr, -1, NULL);
1477		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_saddr);
1478
1479		pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr;
1480
1481		if (debug)
1482			pr_debug("src6 set to: %s\n", buf);
1483
1484		i += len;
1485		sprintf(pg_result, "OK: src6=%s", buf);
1486		return count;
1487	}
1488	if (!strcmp(name, "src_min")) {
1489		len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_min) - 1);
1490		if (len < 0)
1491			return len;
1492
1493		if (copy_from_user(buf, &user_buffer[i], len))
1494			return -EFAULT;
1495		buf[len] = 0;
1496		if (strcmp(buf, pkt_dev->src_min) != 0) {
1497			memset(pkt_dev->src_min, 0, sizeof(pkt_dev->src_min));
1498			strcpy(pkt_dev->src_min, buf);
1499			pkt_dev->saddr_min = in_aton(pkt_dev->src_min);
1500			pkt_dev->cur_saddr = pkt_dev->saddr_min;
1501		}
1502		if (debug)
1503			pr_debug("src_min set to: %s\n", pkt_dev->src_min);
1504		i += len;
1505		sprintf(pg_result, "OK: src_min=%s", pkt_dev->src_min);
1506		return count;
1507	}
1508	if (!strcmp(name, "src_max")) {
1509		len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_max) - 1);
1510		if (len < 0)
1511			return len;
1512
1513		if (copy_from_user(buf, &user_buffer[i], len))
1514			return -EFAULT;
1515		buf[len] = 0;
1516		if (strcmp(buf, pkt_dev->src_max) != 0) {
1517			memset(pkt_dev->src_max, 0, sizeof(pkt_dev->src_max));
1518			strcpy(pkt_dev->src_max, buf);
1519			pkt_dev->saddr_max = in_aton(pkt_dev->src_max);
1520			pkt_dev->cur_saddr = pkt_dev->saddr_max;
1521		}
1522		if (debug)
1523			pr_debug("src_max set to: %s\n", pkt_dev->src_max);
1524		i += len;
1525		sprintf(pg_result, "OK: src_max=%s", pkt_dev->src_max);
1526		return count;
1527	}
1528	if (!strcmp(name, "dst_mac")) {
1529		len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
1530		if (len < 0)
1531			return len;
1532
1533		memset(valstr, 0, sizeof(valstr));
1534		if (copy_from_user(valstr, &user_buffer[i], len))
1535			return -EFAULT;
1536
1537		if (!mac_pton(valstr, pkt_dev->dst_mac))
1538			return -EINVAL;
1539		/* Set up Dest MAC */
1540		ether_addr_copy(&pkt_dev->hh[0], pkt_dev->dst_mac);
1541
1542		sprintf(pg_result, "OK: dstmac %pM", pkt_dev->dst_mac);
1543		return count;
1544	}
1545	if (!strcmp(name, "src_mac")) {
1546		len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
1547		if (len < 0)
1548			return len;
1549
1550		memset(valstr, 0, sizeof(valstr));
1551		if (copy_from_user(valstr, &user_buffer[i], len))
1552			return -EFAULT;
1553
1554		if (!mac_pton(valstr, pkt_dev->src_mac))
1555			return -EINVAL;
1556		/* Set up Src MAC */
1557		ether_addr_copy(&pkt_dev->hh[6], pkt_dev->src_mac);
1558
1559		sprintf(pg_result, "OK: srcmac %pM", pkt_dev->src_mac);
1560		return count;
1561	}
1562
1563	if (!strcmp(name, "clear_counters")) {
1564		pktgen_clear_counters(pkt_dev);
1565		sprintf(pg_result, "OK: Clearing counters.\n");
1566		return count;
1567	}
1568
1569	if (!strcmp(name, "flows")) {
1570		len = num_arg(&user_buffer[i], 10, &value);
1571		if (len < 0)
1572			return len;
1573
1574		i += len;
1575		if (value > MAX_CFLOWS)
1576			value = MAX_CFLOWS;
1577
1578		pkt_dev->cflows = value;
1579		sprintf(pg_result, "OK: flows=%u", pkt_dev->cflows);
1580		return count;
1581	}
1582#ifdef CONFIG_XFRM
1583	if (!strcmp(name, "spi")) {
1584		len = num_arg(&user_buffer[i], 10, &value);
1585		if (len < 0)
1586			return len;
1587
1588		i += len;
1589		pkt_dev->spi = value;
1590		sprintf(pg_result, "OK: spi=%u", pkt_dev->spi);
1591		return count;
1592	}
1593#endif
1594	if (!strcmp(name, "flowlen")) {
1595		len = num_arg(&user_buffer[i], 10, &value);
1596		if (len < 0)
1597			return len;
1598
1599		i += len;
1600		pkt_dev->lflow = value;
1601		sprintf(pg_result, "OK: flowlen=%u", pkt_dev->lflow);
1602		return count;
1603	}
1604
1605	if (!strcmp(name, "queue_map_min")) {
1606		len = num_arg(&user_buffer[i], 5, &value);
1607		if (len < 0)
1608			return len;
1609
1610		i += len;
1611		pkt_dev->queue_map_min = value;
1612		sprintf(pg_result, "OK: queue_map_min=%u", pkt_dev->queue_map_min);
1613		return count;
1614	}
1615
1616	if (!strcmp(name, "queue_map_max")) {
1617		len = num_arg(&user_buffer[i], 5, &value);
1618		if (len < 0)
1619			return len;
1620
1621		i += len;
1622		pkt_dev->queue_map_max = value;
1623		sprintf(pg_result, "OK: queue_map_max=%u", pkt_dev->queue_map_max);
1624		return count;
1625	}
1626
1627	if (!strcmp(name, "mpls")) {
1628		unsigned int n, cnt;
1629
1630		len = get_labels(&user_buffer[i], pkt_dev);
1631		if (len < 0)
1632			return len;
1633		i += len;
1634		cnt = sprintf(pg_result, "OK: mpls=");
1635		for (n = 0; n < pkt_dev->nr_labels; n++)
1636			cnt += sprintf(pg_result + cnt,
1637				       "%08x%s", ntohl(pkt_dev->labels[n]),
1638				       n == pkt_dev->nr_labels-1 ? "" : ",");
1639
1640		if (pkt_dev->nr_labels && pkt_dev->vlan_id != 0xffff) {
1641			pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */
1642			pkt_dev->svlan_id = 0xffff;
1643
1644			if (debug)
1645				pr_debug("VLAN/SVLAN auto turned off\n");
1646		}
1647		return count;
1648	}
1649
1650	if (!strcmp(name, "vlan_id")) {
1651		len = num_arg(&user_buffer[i], 4, &value);
1652		if (len < 0)
1653			return len;
1654
1655		i += len;
1656		if (value <= 4095) {
1657			pkt_dev->vlan_id = value;  /* turn on VLAN */
1658
1659			if (debug)
1660				pr_debug("VLAN turned on\n");
1661
1662			if (debug && pkt_dev->nr_labels)
1663				pr_debug("MPLS auto turned off\n");
1664
1665			pkt_dev->nr_labels = 0;    /* turn off MPLS */
1666			sprintf(pg_result, "OK: vlan_id=%u", pkt_dev->vlan_id);
1667		} else {
1668			pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */
1669			pkt_dev->svlan_id = 0xffff;
1670
1671			if (debug)
1672				pr_debug("VLAN/SVLAN turned off\n");
1673		}
1674		return count;
1675	}
1676
1677	if (!strcmp(name, "vlan_p")) {
1678		len = num_arg(&user_buffer[i], 1, &value);
1679		if (len < 0)
1680			return len;
1681
1682		i += len;
1683		if ((value <= 7) && (pkt_dev->vlan_id != 0xffff)) {
1684			pkt_dev->vlan_p = value;
1685			sprintf(pg_result, "OK: vlan_p=%u", pkt_dev->vlan_p);
1686		} else {
1687			sprintf(pg_result, "ERROR: vlan_p must be 0-7");
1688		}
1689		return count;
1690	}
1691
1692	if (!strcmp(name, "vlan_cfi")) {
1693		len = num_arg(&user_buffer[i], 1, &value);
1694		if (len < 0)
1695			return len;
1696
1697		i += len;
1698		if ((value <= 1) && (pkt_dev->vlan_id != 0xffff)) {
1699			pkt_dev->vlan_cfi = value;
1700			sprintf(pg_result, "OK: vlan_cfi=%u", pkt_dev->vlan_cfi);
1701		} else {
1702			sprintf(pg_result, "ERROR: vlan_cfi must be 0-1");
1703		}
1704		return count;
1705	}
1706
1707	if (!strcmp(name, "svlan_id")) {
1708		len = num_arg(&user_buffer[i], 4, &value);
1709		if (len < 0)
1710			return len;
1711
1712		i += len;
1713		if ((value <= 4095) && ((pkt_dev->vlan_id != 0xffff))) {
1714			pkt_dev->svlan_id = value;  /* turn on SVLAN */
1715
1716			if (debug)
1717				pr_debug("SVLAN turned on\n");
1718
1719			if (debug && pkt_dev->nr_labels)
1720				pr_debug("MPLS auto turned off\n");
1721
1722			pkt_dev->nr_labels = 0;    /* turn off MPLS */
1723			sprintf(pg_result, "OK: svlan_id=%u", pkt_dev->svlan_id);
1724		} else {
1725			pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */
1726			pkt_dev->svlan_id = 0xffff;
1727
1728			if (debug)
1729				pr_debug("VLAN/SVLAN turned off\n");
1730		}
1731		return count;
1732	}
1733
1734	if (!strcmp(name, "svlan_p")) {
1735		len = num_arg(&user_buffer[i], 1, &value);
1736		if (len < 0)
1737			return len;
1738
1739		i += len;
1740		if ((value <= 7) && (pkt_dev->svlan_id != 0xffff)) {
1741			pkt_dev->svlan_p = value;
1742			sprintf(pg_result, "OK: svlan_p=%u", pkt_dev->svlan_p);
1743		} else {
1744			sprintf(pg_result, "ERROR: svlan_p must be 0-7");
1745		}
1746		return count;
1747	}
1748
1749	if (!strcmp(name, "svlan_cfi")) {
1750		len = num_arg(&user_buffer[i], 1, &value);
1751		if (len < 0)
1752			return len;
1753
1754		i += len;
1755		if ((value <= 1) && (pkt_dev->svlan_id != 0xffff)) {
1756			pkt_dev->svlan_cfi = value;
1757			sprintf(pg_result, "OK: svlan_cfi=%u", pkt_dev->svlan_cfi);
1758		} else {
1759			sprintf(pg_result, "ERROR: svlan_cfi must be 0-1");
1760		}
1761		return count;
1762	}
1763
1764	if (!strcmp(name, "tos")) {
1765		__u32 tmp_value = 0;
1766		len = hex32_arg(&user_buffer[i], 2, &tmp_value);
1767		if (len < 0)
1768			return len;
1769
1770		i += len;
1771		if (len == 2) {
1772			pkt_dev->tos = tmp_value;
1773			sprintf(pg_result, "OK: tos=0x%02x", pkt_dev->tos);
1774		} else {
1775			sprintf(pg_result, "ERROR: tos must be 00-ff");
1776		}
1777		return count;
1778	}
1779
1780	if (!strcmp(name, "traffic_class")) {
1781		__u32 tmp_value = 0;
1782		len = hex32_arg(&user_buffer[i], 2, &tmp_value);
1783		if (len < 0)
1784			return len;
1785
1786		i += len;
1787		if (len == 2) {
1788			pkt_dev->traffic_class = tmp_value;
1789			sprintf(pg_result, "OK: traffic_class=0x%02x", pkt_dev->traffic_class);
1790		} else {
1791			sprintf(pg_result, "ERROR: traffic_class must be 00-ff");
1792		}
1793		return count;
1794	}
1795
1796	if (!strcmp(name, "skb_priority")) {
1797		len = num_arg(&user_buffer[i], 9, &value);
1798		if (len < 0)
1799			return len;
1800
1801		i += len;
1802		pkt_dev->skb_priority = value;
1803		sprintf(pg_result, "OK: skb_priority=%i",
1804			pkt_dev->skb_priority);
1805		return count;
1806	}
1807
1808	sprintf(pkt_dev->result, "No such parameter \"%s\"", name);
1809	return -EINVAL;
1810}
1811
1812static int pktgen_if_open(struct inode *inode, struct file *file)
1813{
1814	return single_open(file, pktgen_if_show, pde_data(inode));
1815}
1816
1817static const struct proc_ops pktgen_if_proc_ops = {
1818	.proc_open	= pktgen_if_open,
1819	.proc_read	= seq_read,
1820	.proc_lseek	= seq_lseek,
1821	.proc_write	= pktgen_if_write,
1822	.proc_release	= single_release,
1823};
1824
1825static int pktgen_thread_show(struct seq_file *seq, void *v)
1826{
1827	struct pktgen_thread *t = seq->private;
1828	const struct pktgen_dev *pkt_dev;
1829
1830	BUG_ON(!t);
1831
1832	seq_puts(seq, "Running: ");
1833
1834	rcu_read_lock();
1835	list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
1836		if (pkt_dev->running)
1837			seq_printf(seq, "%s ", pkt_dev->odevname);
1838
1839	seq_puts(seq, "\nStopped: ");
1840
1841	list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
1842		if (!pkt_dev->running)
1843			seq_printf(seq, "%s ", pkt_dev->odevname);
1844
1845	if (t->result[0])
1846		seq_printf(seq, "\nResult: %s\n", t->result);
1847	else
1848		seq_puts(seq, "\nResult: NA\n");
1849
1850	rcu_read_unlock();
1851
1852	return 0;
1853}
1854
1855static ssize_t pktgen_thread_write(struct file *file,
1856				   const char __user * user_buffer,
1857				   size_t count, loff_t * offset)
1858{
1859	struct seq_file *seq = file->private_data;
1860	struct pktgen_thread *t = seq->private;
1861	int i, max, len, ret;
1862	char name[40];
1863	char *pg_result;
1864
1865	if (count < 1) {
1867		return -EINVAL;
1868	}
1869
1870	max = count;
1871	len = count_trail_chars(user_buffer, max);
1872	if (len < 0)
1873		return len;
1874
1875	i = len;
1876
1877	/* Read variable name */
1878
1879	len = strn_len(&user_buffer[i], sizeof(name) - 1);
1880	if (len < 0)
1881		return len;
1882
1883	memset(name, 0, sizeof(name));
1884	if (copy_from_user(name, &user_buffer[i], len))
1885		return -EFAULT;
1886	i += len;
1887
1888	max = count - i;
1889	len = count_trail_chars(&user_buffer[i], max);
1890	if (len < 0)
1891		return len;
1892
1893	i += len;
1894
1895	if (debug)
1896		pr_debug("t=%s, count=%lu\n", name, (unsigned long)count);
1897
1898	if (!t) {
1899		pr_err("ERROR: No thread\n");
1900		ret = -EINVAL;
1901		goto out;
1902	}
1903
1904	pg_result = &(t->result[0]);
1905
1906	if (!strcmp(name, "add_device")) {
1907		char f[32];
1908		memset(f, 0, 32);
1909		len = strn_len(&user_buffer[i], sizeof(f) - 1);
1910		if (len < 0) {
1911			ret = len;
1912			goto out;
1913		}
1914		if (copy_from_user(f, &user_buffer[i], len))
1915			return -EFAULT;
1916		i += len;
1917		mutex_lock(&pktgen_thread_lock);
1918		ret = pktgen_add_device(t, f);
1919		mutex_unlock(&pktgen_thread_lock);
1920		if (!ret) {
1921			ret = count;
1922			sprintf(pg_result, "OK: add_device=%s", f);
1923		} else
1924			sprintf(pg_result, "ERROR: cannot add device %s", f);
1925		goto out;
1926	}
1927
1928	if (!strcmp(name, "rem_device_all")) {
1929		mutex_lock(&pktgen_thread_lock);
1930		t->control |= T_REMDEVALL;
1931		mutex_unlock(&pktgen_thread_lock);
1932		schedule_timeout_interruptible(msecs_to_jiffies(125));	/* Propagate thread->control  */
1933		ret = count;
1934		sprintf(pg_result, "OK: rem_device_all");
1935		goto out;
1936	}
1937
1938	if (!strcmp(name, "max_before_softirq")) {
1939		sprintf(pg_result, "OK: Note! max_before_softirq is obsolete -- do not use");
1940		ret = count;
1941		goto out;
1942	}
1943
1944	ret = -EINVAL;
1945out:
1946	return ret;
1947}
1948
1949static int pktgen_thread_open(struct inode *inode, struct file *file)
1950{
1951	return single_open(file, pktgen_thread_show, pde_data(inode));
1952}
1953
1954static const struct proc_ops pktgen_thread_proc_ops = {
1955	.proc_open	= pktgen_thread_open,
1956	.proc_read	= seq_read,
1957	.proc_lseek	= seq_lseek,
1958	.proc_write	= pktgen_thread_write,
1959	.proc_release	= single_release,
1960};
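/*
 * A minimal usage sketch (assuming a thread on CPU 0 and a netdev named
 * eth0; see Documentation/networking/pktgen.rst):
 *   echo "add_device eth0" > /proc/net/pktgen/kpktgend_0
 *   echo "rem_device_all" > /proc/net/pktgen/kpktgend_0
 * Reading the same file lists the running/stopped devices and the last
 * command's result.
 */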
1961
1962/* Find a device by name across all threads; optionally mark it for removal */
1963static struct pktgen_dev *__pktgen_NN_threads(const struct pktgen_net *pn,
1964					      const char *ifname, int remove)
1965{
1966	struct pktgen_thread *t;
1967	struct pktgen_dev *pkt_dev = NULL;
1968	bool exact = (remove == FIND);
1969
1970	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
1971		pkt_dev = pktgen_find_dev(t, ifname, exact);
1972		if (pkt_dev) {
1973			if (remove) {
1974				pkt_dev->removal_mark = 1;
1975				t->control |= T_REMDEV;
1976			}
1977			break;
1978		}
1979	}
1980	return pkt_dev;
1981}
1982
1983/*
1984 * mark a device for removal
1985 */
1986static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname)
1987{
1988	struct pktgen_dev *pkt_dev = NULL;
1989	const int max_tries = 10, msec_per_try = 125;
1990	int i = 0;
1991
1992	mutex_lock(&pktgen_thread_lock);
1993	pr_debug("%s: marking %s for removal\n", __func__, ifname);
1994
1995	while (1) {
1996
1997		pkt_dev = __pktgen_NN_threads(pn, ifname, REMOVE);
1998		if (pkt_dev == NULL)
1999			break;	/* success */
2000
2001		mutex_unlock(&pktgen_thread_lock);
2002		pr_debug("%s: waiting for %s to disappear....\n",
2003			 __func__, ifname);
2004		schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
2005		mutex_lock(&pktgen_thread_lock);
2006
2007		if (++i >= max_tries) {
2008			pr_err("%s: timed out after waiting %d msec for device %s to be removed\n",
2009			       __func__, msec_per_try * i, ifname);
2010			break;
2011		}
2012
2013	}
2014
2015	mutex_unlock(&pktgen_thread_lock);
2016}
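/*
 * Note: in the worst case pktgen_mark_device() above polls for
 * max_tries * msec_per_try = 10 * 125 = 1250 ms before reporting a
 * timeout.
 */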
2017
2018static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *dev)
2019{
2020	struct pktgen_thread *t;
2021
2022	mutex_lock(&pktgen_thread_lock);
2023
2024	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
2025		struct pktgen_dev *pkt_dev;
2026
2027		if_lock(t);
2028		list_for_each_entry(pkt_dev, &t->if_list, list) {
2029			if (pkt_dev->odev != dev)
2030				continue;
2031
2032			proc_remove(pkt_dev->entry);
2033
2034			pkt_dev->entry = proc_create_data(dev->name, 0600,
2035							  pn->proc_dir,
2036							  &pktgen_if_proc_ops,
2037							  pkt_dev);
2038			if (!pkt_dev->entry)
2039				pr_err("can't move proc entry for '%s'\n",
2040				       dev->name);
2041			break;
2042		}
2043		if_unlock(t);
2044	}
2045	mutex_unlock(&pktgen_thread_lock);
2046}
2047
2048static int pktgen_device_event(struct notifier_block *unused,
2049			       unsigned long event, void *ptr)
2050{
2051	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2052	struct pktgen_net *pn = net_generic(dev_net(dev), pg_net_id);
2053
2054	if (pn->pktgen_exiting)
2055		return NOTIFY_DONE;
2056
2057	/* It is OK that we do not hold the group lock right now,
2058	 * as we run under the RTNL lock.
2059	 */
2060
2061	switch (event) {
2062	case NETDEV_CHANGENAME:
2063		pktgen_change_name(pn, dev);
2064		break;
2065
2066	case NETDEV_UNREGISTER:
2067		pktgen_mark_device(pn, dev->name);
2068		break;
2069	}
2070
2071	return NOTIFY_DONE;
2072}
2073
2074static struct net_device *pktgen_dev_get_by_name(const struct pktgen_net *pn,
2075						 struct pktgen_dev *pkt_dev,
2076						 const char *ifname)
2077{
2078	char b[IFNAMSIZ+5];
2079	int i;
2080
2081	for (i = 0; ifname[i] != '@'; i++) {
2082		if (i == IFNAMSIZ)
2083			break;
2084
2085		b[i] = ifname[i];
2086	}
2087	b[i] = 0;
2088
2089	return dev_get_by_name(pn->net, b);
2090}
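/*
 * Example: an ifname of "eth0@0" resolves to the netdev "eth0"; the
 * "@<suffix>" part only distinguishes multiple pktgen devices bound to
 * the same underlying interface.
 */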
2091
2092
2093/* Associate pktgen_dev with a device. */
2094
2095static int pktgen_setup_dev(const struct pktgen_net *pn,
2096			    struct pktgen_dev *pkt_dev, const char *ifname)
2097{
2098	struct net_device *odev;
2099	int err;
2100
2101	/* Clean old setups */
2102	if (pkt_dev->odev) {
2103		netdev_put(pkt_dev->odev, &pkt_dev->dev_tracker);
2104		pkt_dev->odev = NULL;
2105	}
2106
2107	odev = pktgen_dev_get_by_name(pn, pkt_dev, ifname);
2108	if (!odev) {
2109		pr_err("no such netdevice: \"%s\"\n", ifname);
2110		return -ENODEV;
2111	}
2112
2113	if (odev->type != ARPHRD_ETHER && odev->type != ARPHRD_LOOPBACK) {
2114		pr_err("not an ethernet or loopback device: \"%s\"\n", ifname);
2115		err = -EINVAL;
2116	} else if (!netif_running(odev)) {
2117		pr_err("device is down: \"%s\"\n", ifname);
2118		err = -ENETDOWN;
2119	} else {
2120		pkt_dev->odev = odev;
2121		netdev_tracker_alloc(odev, &pkt_dev->dev_tracker, GFP_KERNEL);
2122		return 0;
2123	}
2124
2125	dev_put(odev);
2126	return err;
2127}
2128
2129/* Read pkt_dev from the interface and set up internal pktgen_dev
2130 * structure to have the right information to create/send packets
2131 */
2132static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
2133{
2134	int ntxq;
2135
2136	if (!pkt_dev->odev) {
2137		pr_err("ERROR: pkt_dev->odev == NULL in setup_inject\n");
2138		sprintf(pkt_dev->result,
2139			"ERROR: pkt_dev->odev == NULL in setup_inject.\n");
2140		return;
2141	}
2142
2143	/* make sure that we don't pick a non-existing transmit queue */
2144	ntxq = pkt_dev->odev->real_num_tx_queues;
2145
2146	if (ntxq <= pkt_dev->queue_map_min) {
2147		pr_warn("WARNING: Requested queue_map_min (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
2148			pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
2149			pkt_dev->odevname);
2150		pkt_dev->queue_map_min = (ntxq ?: 1) - 1;
2151	}
2152	if (pkt_dev->queue_map_max >= ntxq) {
2153		pr_warn("WARNING: Requested queue_map_max (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
2154			pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
2155			pkt_dev->odevname);
2156		pkt_dev->queue_map_max = (ntxq ?: 1) - 1;
2157	}
2158
2159	/* Default to the interface's mac if not explicitly set. */
2160
2161	if (is_zero_ether_addr(pkt_dev->src_mac))
2162		ether_addr_copy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr);
2163
2164	/* Set up Dest MAC */
2165	ether_addr_copy(&(pkt_dev->hh[0]), pkt_dev->dst_mac);
2166
2167	if (pkt_dev->flags & F_IPV6) {
2168		int i, set = 0, err = 1;
2169		struct inet6_dev *idev;
2170
2171		if (pkt_dev->min_pkt_size == 0) {
2172			pkt_dev->min_pkt_size = 14 + sizeof(struct ipv6hdr)
2173						+ sizeof(struct udphdr)
2174						+ sizeof(struct pktgen_hdr)
2175						+ pkt_dev->pkt_overhead;
2176		}
2177
2178		for (i = 0; i < sizeof(struct in6_addr); i++)
2179			if (pkt_dev->cur_in6_saddr.s6_addr[i]) {
2180				set = 1;
2181				break;
2182			}
2183
2184		if (!set) {
2185
2186			/*
2187			 * Use the link-level address if unconfigured.
2188			 *
2189			 * use ipv6_get_lladdr if/when it gets exported
2190			 */
2191
2192			rcu_read_lock();
2193			idev = __in6_dev_get(pkt_dev->odev);
2194			if (idev) {
2195				struct inet6_ifaddr *ifp;
2196
2197				read_lock_bh(&idev->lock);
2198				list_for_each_entry(ifp, &idev->addr_list, if_list) {
2199					if ((ifp->scope & IFA_LINK) &&
2200					    !(ifp->flags & IFA_F_TENTATIVE)) {
2201						pkt_dev->cur_in6_saddr = ifp->addr;
2202						err = 0;
2203						break;
2204					}
2205				}
2206				read_unlock_bh(&idev->lock);
2207			}
2208			rcu_read_unlock();
2209			if (err)
2210				pr_err("ERROR: IPv6 link address not available\n");
2211		}
2212	} else {
2213		if (pkt_dev->min_pkt_size == 0) {
2214			pkt_dev->min_pkt_size = 14 + sizeof(struct iphdr)
2215						+ sizeof(struct udphdr)
2216						+ sizeof(struct pktgen_hdr)
2217						+ pkt_dev->pkt_overhead;
2218		}
2219
2220		pkt_dev->saddr_min = 0;
2221		pkt_dev->saddr_max = 0;
2222		if (strlen(pkt_dev->src_min) == 0) {
2223
2224			struct in_device *in_dev;
2225
2226			rcu_read_lock();
2227			in_dev = __in_dev_get_rcu(pkt_dev->odev);
2228			if (in_dev) {
2229				const struct in_ifaddr *ifa;
2230
2231				ifa = rcu_dereference(in_dev->ifa_list);
2232				if (ifa) {
2233					pkt_dev->saddr_min = ifa->ifa_address;
2234					pkt_dev->saddr_max = pkt_dev->saddr_min;
2235				}
2236			}
2237			rcu_read_unlock();
2238		} else {
2239			pkt_dev->saddr_min = in_aton(pkt_dev->src_min);
2240			pkt_dev->saddr_max = in_aton(pkt_dev->src_max);
2241		}
2242
2243		pkt_dev->daddr_min = in_aton(pkt_dev->dst_min);
2244		pkt_dev->daddr_max = in_aton(pkt_dev->dst_max);
2245	}
2246	/* Initialize current values. */
2247	pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size;
2248	if (pkt_dev->min_pkt_size > pkt_dev->max_pkt_size)
2249		pkt_dev->max_pkt_size = pkt_dev->min_pkt_size;
2250
2251	pkt_dev->cur_dst_mac_offset = 0;
2252	pkt_dev->cur_src_mac_offset = 0;
2253	pkt_dev->cur_saddr = pkt_dev->saddr_min;
2254	pkt_dev->cur_daddr = pkt_dev->daddr_min;
2255	pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min;
2256	pkt_dev->cur_udp_src = pkt_dev->udp_src_min;
2257	pkt_dev->nflows = 0;
2258}
2259
2260
2261static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
2262{
2263	ktime_t start_time, end_time;
2264	s64 remaining;
2265	struct hrtimer_sleeper t;
2266
2267	hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2268	hrtimer_set_expires(&t.timer, spin_until);
2269
2270	remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
2271	if (remaining <= 0)
2272		goto out;
2273
2274	start_time = ktime_get();
2275	if (remaining < 100000) {
2276		/* for small delays (<100us), just loop until limit is reached */
2277		do {
2278			end_time = ktime_get();
2279		} while (ktime_compare(end_time, spin_until) < 0);
2280	} else {
2281		do {
2282			set_current_state(TASK_INTERRUPTIBLE);
2283			hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_ABS);
2284
2285			if (likely(t.task))
2286				schedule();
2287
2288			hrtimer_cancel(&t.timer);
2289		} while (t.task && pkt_dev->running && !signal_pending(current));
2290		__set_current_state(TASK_RUNNING);
2291		end_time = ktime_get();
2292	}
2293
2294	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
2295out:
2296	pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
2297	destroy_hrtimer_on_stack(&t.timer);
2298}
2299
2300static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
2301{
2302	pkt_dev->pkt_overhead = 0;
2303	pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
2304	pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
2305	pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
2306}
2307
2308static inline int f_seen(const struct pktgen_dev *pkt_dev, int flow)
2309{
2310	return !!(pkt_dev->flows[flow].flags & F_INIT);
2311}
2312
2313static inline int f_pick(struct pktgen_dev *pkt_dev)
2314{
2315	int flow = pkt_dev->curfl;
2316
2317	if (pkt_dev->flags & F_FLOW_SEQ) {
2318		if (pkt_dev->flows[flow].count >= pkt_dev->lflow) {
2319			/* flow exhausted: reset it */
2320			pkt_dev->flows[flow].count = 0;
2321			pkt_dev->flows[flow].flags = 0;
2322			pkt_dev->curfl += 1;
2323			if (pkt_dev->curfl >= pkt_dev->cflows)
2324				pkt_dev->curfl = 0; /* wrap around */
2325		}
2326	} else {
2327		flow = get_random_u32_below(pkt_dev->cflows);
2328		pkt_dev->curfl = flow;
2329
2330		if (pkt_dev->flows[flow].count > pkt_dev->lflow) {
2331			pkt_dev->flows[flow].count = 0;
2332			pkt_dev->flows[flow].flags = 0;
2333		}
2334	}
2335
2336	return pkt_dev->curfl;
2337}
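/*
 * In short: with F_FLOW_SEQ each flow emits lflow packets before
 * f_pick() advances to the next one; otherwise a flow is chosen
 * uniformly at random and is reset once its count exceeds lflow.
 */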
2338
2339
2340#ifdef CONFIG_XFRM
2341/* If there was already an IPsec SA, we keep it as is; otherwise
2342 * we go look for it ...
2343 */
2344#define DUMMY_MARK 0
2345static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
2346{
2347	struct xfrm_state *x = pkt_dev->flows[flow].x;
2348	struct pktgen_net *pn = net_generic(dev_net(pkt_dev->odev), pg_net_id);
2349	if (!x) {
2350
2351		if (pkt_dev->spi) {
2352			/* We need to find the right SA as quickly as possible.
2353			 * Search with minimum criteria to achieve this.
2354			 */
2355			x = xfrm_state_lookup_byspi(pn->net, htonl(pkt_dev->spi), AF_INET);
2356		} else {
2357			/* slow path: we don't already have an xfrm_state */
2358			x = xfrm_stateonly_find(pn->net, DUMMY_MARK, 0,
2359						(xfrm_address_t *)&pkt_dev->cur_daddr,
2360						(xfrm_address_t *)&pkt_dev->cur_saddr,
2361						AF_INET,
2362						pkt_dev->ipsmode,
2363						pkt_dev->ipsproto, 0);
2364		}
2365		if (x) {
2366			pkt_dev->flows[flow].x = x;
2367			set_pkt_overhead(pkt_dev);
2368			pkt_dev->pkt_overhead += x->props.header_len;
2369		}
2370
2371	}
2372}
2373#endif
2374static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
2375{
2376
2377	if (pkt_dev->flags & F_QUEUE_MAP_CPU)
2378		pkt_dev->cur_queue_map = smp_processor_id();
2379
2380	else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) {
2381		__u16 t;
2382		if (pkt_dev->flags & F_QUEUE_MAP_RND) {
2383			t = get_random_u32_inclusive(pkt_dev->queue_map_min,
2384						     pkt_dev->queue_map_max);
2385		} else {
2386			t = pkt_dev->cur_queue_map + 1;
2387			if (t > pkt_dev->queue_map_max)
2388				t = pkt_dev->queue_map_min;
2389		}
2390		pkt_dev->cur_queue_map = t;
2391	}
2392	pkt_dev->cur_queue_map  = pkt_dev->cur_queue_map % pkt_dev->odev->real_num_tx_queues;
2393}
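/*
 * Example: with queue_map_min=2, queue_map_max=4 and neither
 * F_QUEUE_MAP_CPU nor F_QUEUE_MAP_RND set, the queue map cycles
 * 2, 3, 4, 2, ... in steady state; the final modulo clamps the value
 * when the device has fewer real TX queues.
 */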
2394
2395/* Increment/randomize headers according to flags and current values
2396 * for IP src/dest, UDP src/dst port, MAC-Addr src/dst
2397 */
2398static void mod_cur_headers(struct pktgen_dev *pkt_dev)
2399{
2400	__u32 imn;
2401	__u32 imx;
2402	int flow = 0;
2403
2404	if (pkt_dev->cflows)
2405		flow = f_pick(pkt_dev);
2406
2407	/*  Deal with source MAC */
2408	if (pkt_dev->src_mac_count > 1) {
2409		__u32 mc;
2410		__u32 tmp;
2411
2412		if (pkt_dev->flags & F_MACSRC_RND)
2413			mc = get_random_u32_below(pkt_dev->src_mac_count);
2414		else {
2415			mc = pkt_dev->cur_src_mac_offset++;
2416			if (pkt_dev->cur_src_mac_offset >=
2417			    pkt_dev->src_mac_count)
2418				pkt_dev->cur_src_mac_offset = 0;
2419		}
2420
2421		tmp = pkt_dev->src_mac[5] + (mc & 0xFF);
2422		pkt_dev->hh[11] = tmp;
2423		tmp = (pkt_dev->src_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8));
2424		pkt_dev->hh[10] = tmp;
2425		tmp = (pkt_dev->src_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8));
2426		pkt_dev->hh[9] = tmp;
2427		tmp = (pkt_dev->src_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8));
2428		pkt_dev->hh[8] = tmp;
2429		tmp = (pkt_dev->src_mac[1] + (tmp >> 8));
2430		pkt_dev->hh[7] = tmp;
2431	}
2432
2433	/*  Deal with Destination MAC */
2434	if (pkt_dev->dst_mac_count > 1) {
2435		__u32 mc;
2436		__u32 tmp;
2437
2438		if (pkt_dev->flags & F_MACDST_RND)
2439			mc = get_random_u32_below(pkt_dev->dst_mac_count);
2440
2441		else {
2442			mc = pkt_dev->cur_dst_mac_offset++;
2443			if (pkt_dev->cur_dst_mac_offset >=
2444			    pkt_dev->dst_mac_count) {
2445				pkt_dev->cur_dst_mac_offset = 0;
2446			}
2447		}
2448
2449		tmp = pkt_dev->dst_mac[5] + (mc & 0xFF);
2450		pkt_dev->hh[5] = tmp;
2451		tmp = (pkt_dev->dst_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8));
2452		pkt_dev->hh[4] = tmp;
2453		tmp = (pkt_dev->dst_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8));
2454		pkt_dev->hh[3] = tmp;
2455		tmp = (pkt_dev->dst_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8));
2456		pkt_dev->hh[2] = tmp;
2457		tmp = (pkt_dev->dst_mac[1] + (tmp >> 8));
2458		pkt_dev->hh[1] = tmp;
2459	}
2460
2461	if (pkt_dev->flags & F_MPLS_RND) {
2462		unsigned int i;
2463		for (i = 0; i < pkt_dev->nr_labels; i++)
2464			if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM)
2465				pkt_dev->labels[i] = MPLS_STACK_BOTTOM |
2466					     ((__force __be32)get_random_u32() &
2467						      htonl(0x000fffff));
2468	}
2469
2470	if ((pkt_dev->flags & F_VID_RND) && (pkt_dev->vlan_id != 0xffff)) {
2471		pkt_dev->vlan_id = get_random_u32_below(4096);
2472	}
2473
2474	if ((pkt_dev->flags & F_SVID_RND) && (pkt_dev->svlan_id != 0xffff)) {
2475		pkt_dev->svlan_id = get_random_u32_below(4096);
2476	}
2477
2478	if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) {
2479		if (pkt_dev->flags & F_UDPSRC_RND)
2480			pkt_dev->cur_udp_src = get_random_u32_inclusive(pkt_dev->udp_src_min,
2481									pkt_dev->udp_src_max - 1);
2482
2483		else {
2484			pkt_dev->cur_udp_src++;
2485			if (pkt_dev->cur_udp_src >= pkt_dev->udp_src_max)
2486				pkt_dev->cur_udp_src = pkt_dev->udp_src_min;
2487		}
2488	}
2489
2490	if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) {
2491		if (pkt_dev->flags & F_UDPDST_RND) {
2492			pkt_dev->cur_udp_dst = get_random_u32_inclusive(pkt_dev->udp_dst_min,
2493									pkt_dev->udp_dst_max - 1);
2494		} else {
2495			pkt_dev->cur_udp_dst++;
2496			if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max)
2497				pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min;
2498		}
2499	}
2500
2501	if (!(pkt_dev->flags & F_IPV6)) {
2502
2503		imn = ntohl(pkt_dev->saddr_min);
2504		imx = ntohl(pkt_dev->saddr_max);
2505		if (imn < imx) {
2506			__u32 t;
2507			if (pkt_dev->flags & F_IPSRC_RND)
2508				t = get_random_u32_inclusive(imn, imx - 1);
2509			else {
2510				t = ntohl(pkt_dev->cur_saddr);
2511				t++;
2512				if (t > imx)
2513					t = imn;
2514
2515			}
2516			pkt_dev->cur_saddr = htonl(t);
2517		}
2518
2519		if (pkt_dev->cflows && f_seen(pkt_dev, flow)) {
2520			pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr;
2521		} else {
2522			imn = ntohl(pkt_dev->daddr_min);
2523			imx = ntohl(pkt_dev->daddr_max);
2524			if (imn < imx) {
2525				__u32 t;
2526				__be32 s;
2527				if (pkt_dev->flags & F_IPDST_RND) {
2528
2529					do {
2530						t = get_random_u32_inclusive(imn, imx - 1);
2531						s = htonl(t);
2532					} while (ipv4_is_loopback(s) ||
2533						ipv4_is_multicast(s) ||
2534						ipv4_is_lbcast(s) ||
2535						ipv4_is_zeronet(s) ||
2536						ipv4_is_local_multicast(s));
2537					pkt_dev->cur_daddr = s;
2538				} else {
2539					t = ntohl(pkt_dev->cur_daddr);
2540					t++;
2541					if (t > imx) {
2542						t = imn;
2543					}
2544					pkt_dev->cur_daddr = htonl(t);
2545				}
2546			}
2547			if (pkt_dev->cflows) {
2548				pkt_dev->flows[flow].flags |= F_INIT;
2549				pkt_dev->flows[flow].cur_daddr =
2550				    pkt_dev->cur_daddr;
2551#ifdef CONFIG_XFRM
2552				if (pkt_dev->flags & F_IPSEC)
2553					get_ipsec_sa(pkt_dev, flow);
2554#endif
2555				pkt_dev->nflows++;
2556			}
2557		}
2558	} else {		/* IPV6 * */
2559
2560		if (!ipv6_addr_any(&pkt_dev->min_in6_daddr)) {
2561			int i;
2562
2563			/* Only random destinations yet */
2564
2565			for (i = 0; i < 4; i++) {
2566				pkt_dev->cur_in6_daddr.s6_addr32[i] =
2567				    (((__force __be32)get_random_u32() |
2568				      pkt_dev->min_in6_daddr.s6_addr32[i]) &
2569				     pkt_dev->max_in6_daddr.s6_addr32[i]);
2570			}
2571		}
2572	}
2573
2574	if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) {
2575		__u32 t;
2576		if (pkt_dev->flags & F_TXSIZE_RND) {
2577			t = get_random_u32_inclusive(pkt_dev->min_pkt_size,
2578						     pkt_dev->max_pkt_size - 1);
2579		} else {
2580			t = pkt_dev->cur_pkt_size + 1;
2581			if (t > pkt_dev->max_pkt_size)
2582				t = pkt_dev->min_pkt_size;
2583		}
2584		pkt_dev->cur_pkt_size = t;
2585	} else if (pkt_dev->n_imix_entries > 0) {
2586		struct imix_pkt *entry;
2587		__u32 t = get_random_u32_below(IMIX_PRECISION);
2588		__u8 entry_index = pkt_dev->imix_distribution[t];
2589
2590		entry = &pkt_dev->imix_entries[entry_index];
2591		entry->count_so_far++;
2592		pkt_dev->cur_pkt_size = entry->size;
2593	}
2594
2595	set_cur_queue_map(pkt_dev);
2596
2597	pkt_dev->flows[flow].count++;
2598}
2599
2600static void fill_imix_distribution(struct pktgen_dev *pkt_dev)
2601{
2602	int cumulative_probabilities[MAX_IMIX_ENTRIES];
2603	int j = 0;
2604	__u64 cumulative_prob = 0;
2605	__u64 total_weight = 0;
2606	int i = 0;
2607
2608	for (i = 0; i < pkt_dev->n_imix_entries; i++)
2609		total_weight += pkt_dev->imix_entries[i].weight;
2610
2611	/* Fill cumulative_probabilities with sums of normalized probabilities */
2612	for (i = 0; i < pkt_dev->n_imix_entries - 1; i++) {
2613		cumulative_prob += div64_u64(pkt_dev->imix_entries[i].weight *
2614						     IMIX_PRECISION,
2615					     total_weight);
2616		cumulative_probabilities[i] = cumulative_prob;
2617	}
2618	cumulative_probabilities[pkt_dev->n_imix_entries - 1] = 100;
2619
2620	for (i = 0; i < IMIX_PRECISION; i++) {
2621		if (i == cumulative_probabilities[j])
2622			j++;
2623		pkt_dev->imix_distribution[i] = j;
2624	}
2625}
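/*
 * Worked example, assuming IMIX_PRECISION == 100 (as the hard-coded
 * 100 above implies): two entries with sizes 64/1500 and weights 1/3
 * give cumulative_probabilities[0] = 25, so imix_distribution[0..24]
 * maps to entry 0 and imix_distribution[25..99] to entry 1, i.e.
 * roughly 25% of packets are 64 bytes and 75% are 1500 bytes.
 */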
2626
2627#ifdef CONFIG_XFRM
2628static u32 pktgen_dst_metrics[RTAX_MAX + 1] = {
2629
2630	[RTAX_HOPLIMIT] = 0x5, /* Set a static hoplimit */
2631};
2632
2633static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
2634{
2635	struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
2636	int err = 0;
2637	struct net *net = dev_net(pkt_dev->odev);
2638
2639	if (!x)
2640		return 0;
2641	/* XXX: we don't support tunnel mode for now until
2642	 * we resolve the dst issue */
2643	if ((x->props.mode != XFRM_MODE_TRANSPORT) && (pkt_dev->spi == 0))
2644		return 0;
2645
2646	/* But when the user specifies a valid SPI, the transformation
2647	 * supports both transport/tunnel mode and ESP/AH types.
2648	 */
2649	if ((x->props.mode == XFRM_MODE_TUNNEL) && (pkt_dev->spi != 0))
2650		skb->_skb_refdst = (unsigned long)&pkt_dev->xdst.u.dst | SKB_DST_NOREF;
2651
2652	rcu_read_lock_bh();
2653	err = pktgen_xfrm_outer_mode_output(x, skb);
2654	rcu_read_unlock_bh();
2655	if (err) {
2656		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
2657		goto error;
2658	}
2659	err = x->type->output(x, skb);
2660	if (err) {
2661		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
2662		goto error;
2663	}
2664	spin_lock_bh(&x->lock);
2665	x->curlft.bytes += skb->len;
2666	x->curlft.packets++;
2667	spin_unlock_bh(&x->lock);
2668error:
2669	return err;
2670}
2671
2672static void free_SAs(struct pktgen_dev *pkt_dev)
2673{
2674	if (pkt_dev->cflows) {
2675		/* let go of the SAs if we have them */
2676		int i;
2677		for (i = 0; i < pkt_dev->cflows; i++) {
2678			struct xfrm_state *x = pkt_dev->flows[i].x;
2679			if (x) {
2680				xfrm_state_put(x);
2681				pkt_dev->flows[i].x = NULL;
2682			}
2683		}
2684	}
2685}
2686
2687static int process_ipsec(struct pktgen_dev *pkt_dev,
2688			      struct sk_buff *skb, __be16 protocol)
2689{
2690	if (pkt_dev->flags & F_IPSEC) {
2691		struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
2692		int nhead = 0;
2693		if (x) {
2694			struct ethhdr *eth;
2695			struct iphdr *iph;
2696			int ret;
2697
2698			nhead = x->props.header_len - skb_headroom(skb);
2699			if (nhead > 0) {
2700				ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
2701				if (ret < 0) {
2702					pr_err("Error expanding ipsec packet %d\n",
2703					       ret);
2704					goto err;
2705				}
2706			}
2707
2708			/* ipsec does not expect a link-layer header */
2709			skb_pull(skb, ETH_HLEN);
2710			ret = pktgen_output_ipsec(skb, pkt_dev);
2711			if (ret) {
2712				pr_err("Error creating ipsec packet %d\n", ret);
2713				goto err;
2714			}
2715			/* restore the link-layer header */
2716			eth = skb_push(skb, ETH_HLEN);
2717			memcpy(eth, pkt_dev->hh, 2 * ETH_ALEN);
2718			eth->h_proto = protocol;
2719
2720			/* Update IPv4 header len as well as checksum value */
2721			iph = ip_hdr(skb);
2722			iph->tot_len = htons(skb->len - ETH_HLEN);
2723			ip_send_check(iph);
2724		}
2725	}
2726	return 1;
2727err:
2728	kfree_skb(skb);
2729	return 0;
2730}
2731#endif
2732
2733static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev)
2734{
2735	unsigned int i;
2736	for (i = 0; i < pkt_dev->nr_labels; i++)
2737		*mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM;
2738
2739	mpls--;
2740	*mpls |= MPLS_STACK_BOTTOM;
2741}
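/*
 * mpls_push() writes every label with the bottom-of-stack bit cleared,
 * then sets MPLS_STACK_BOTTOM on the last one only, as required for
 * the innermost label of an MPLS label stack.
 */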
2742
2743static inline __be16 build_tci(unsigned int id, unsigned int cfi,
2744			       unsigned int prio)
2745{
2746	return htons(id | (cfi << 12) | (prio << 13));
2747}
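/*
 * Example: build_tci(5, 0, 3) yields htons(0x6005) -- VLAN ID 5 in
 * bits 0-11, CFI/DEI 0 in bit 12, priority 3 in bits 13-15.
 */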
2748
2749static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
2750				int datalen)
2751{
2752	struct timespec64 timestamp;
2753	struct pktgen_hdr *pgh;
2754
2755	pgh = skb_put(skb, sizeof(*pgh));
2756	datalen -= sizeof(*pgh);
2757
2758	if (pkt_dev->nfrags <= 0) {
2759		skb_put_zero(skb, datalen);
2760	} else {
2761		int frags = pkt_dev->nfrags;
2762		int i, len;
2763		int frag_len;
2764
2765
2766		if (frags > MAX_SKB_FRAGS)
2767			frags = MAX_SKB_FRAGS;
2768		len = datalen - frags * PAGE_SIZE;
2769		if (len > 0) {
2770			skb_put_zero(skb, len);
2771			datalen = frags * PAGE_SIZE;
2772		}
2773
2774		i = 0;
2775		frag_len = (datalen/frags) < PAGE_SIZE ?
2776			   (datalen/frags) : PAGE_SIZE;
2777		while (datalen > 0) {
2778			if (unlikely(!pkt_dev->page)) {
2779				int node = numa_node_id();
2780
2781				if (pkt_dev->node >= 0 && (pkt_dev->flags & F_NODE))
2782					node = pkt_dev->node;
2783				pkt_dev->page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
2784				if (!pkt_dev->page)
2785					break;
2786			}
2787			get_page(pkt_dev->page);
2788			skb_frag_set_page(skb, i, pkt_dev->page);
2789			skb_frag_off_set(&skb_shinfo(skb)->frags[i], 0);
2790			/* last fragment, fill rest of data */
2791			if (i == (frags - 1))
2792				skb_frag_size_set(&skb_shinfo(skb)->frags[i],
2793				    (datalen < PAGE_SIZE ? datalen : PAGE_SIZE));
2794			else
2795				skb_frag_size_set(&skb_shinfo(skb)->frags[i], frag_len);
2796			datalen -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
2797			skb->len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
2798			skb->data_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
2799			i++;
2800			skb_shinfo(skb)->nr_frags = i;
2801		}
2802	}
2803
2804	/* Stamp the time, and sequence number,
2805	 * convert them to network byte order
2806	 */
2807	pgh->pgh_magic = htonl(PKTGEN_MAGIC);
2808	pgh->seq_num = htonl(pkt_dev->seq_num);
2809
2810	if (pkt_dev->flags & F_NO_TIMESTAMP) {
2811		pgh->tv_sec = 0;
2812		pgh->tv_usec = 0;
2813	} else {
2814		/*
2815		 * pgh->tv_sec wraps in y2106 when interpreted as unsigned
2816		 * as done by wireshark, or y2038 when interpreted as signed.
2817		 * This is probably harmless, but if anyone wants to improve
2818		 * it, we could introduce a variant that puts 64-bit nanoseconds
2819		 * into the respective header bytes.
2820		 * This would also be slightly faster to read.
2821		 */
2822		ktime_get_real_ts64(&timestamp);
2823		pgh->tv_sec = htonl(timestamp.tv_sec);
2824		pgh->tv_usec = htonl(timestamp.tv_nsec / NSEC_PER_USEC);
2825	}
2826}
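/*
 * The UDP payload built above therefore starts with four 32-bit
 * pktgen_hdr fields in network byte order: pgh_magic, seq_num, tv_sec
 * and tv_usec (the latter two are zero when F_NO_TIMESTAMP is set).
 */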
2827
2828static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
2829					struct pktgen_dev *pkt_dev)
2830{
2831	unsigned int extralen = LL_RESERVED_SPACE(dev);
2832	struct sk_buff *skb = NULL;
2833	unsigned int size;
2834
2835	size = pkt_dev->cur_pkt_size + 64 + extralen + pkt_dev->pkt_overhead;
2836	if (pkt_dev->flags & F_NODE) {
2837		int node = pkt_dev->node >= 0 ? pkt_dev->node : numa_node_id();
2838
2839		skb = __alloc_skb(NET_SKB_PAD + size, GFP_NOWAIT, 0, node);
2840		if (likely(skb)) {
2841			skb_reserve(skb, NET_SKB_PAD);
2842			skb->dev = dev;
2843		}
2844	} else {
2845		 skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
2846	}
2847
2848	/* the caller pre-fetches from skb->data and reserves for the mac hdr */
2849	if (likely(skb))
2850		skb_reserve(skb, extralen - 16);
2851
2852	return skb;
2853}
2854
2855static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2856					struct pktgen_dev *pkt_dev)
2857{
2858	struct sk_buff *skb = NULL;
2859	__u8 *eth;
2860	struct udphdr *udph;
2861	int datalen, iplen;
2862	struct iphdr *iph;
2863	__be16 protocol = htons(ETH_P_IP);
2864	__be32 *mpls;
2865	__be16 *vlan_tci = NULL;                 /* Encapsulates priority and VLAN ID */
2866	__be16 *vlan_encapsulated_proto = NULL;  /* packet type ID field (or len) for VLAN tag */
2867	__be16 *svlan_tci = NULL;                /* Encapsulates priority and SVLAN ID */
2868	__be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */
2869	u16 queue_map;
2870
2871	if (pkt_dev->nr_labels)
2872		protocol = htons(ETH_P_MPLS_UC);
2873
2874	if (pkt_dev->vlan_id != 0xffff)
2875		protocol = htons(ETH_P_8021Q);
2876
2877	/* Update any of the values that change when we're incrementing
2878	 * various fields.
2879	 */
2880	mod_cur_headers(pkt_dev);
2881	queue_map = pkt_dev->cur_queue_map;
2882
2883	skb = pktgen_alloc_skb(odev, pkt_dev);
2884	if (!skb) {
2885		sprintf(pkt_dev->result, "No memory");
2886		return NULL;
2887	}
2888
2889	prefetchw(skb->data);
2890	skb_reserve(skb, 16);
2891
2892	/*  Reserve for ethernet and IP header  */
2893	eth = skb_push(skb, 14);
2894	mpls = skb_put(skb, pkt_dev->nr_labels * sizeof(__u32));
2895	if (pkt_dev->nr_labels)
2896		mpls_push(mpls, pkt_dev);
2897
2898	if (pkt_dev->vlan_id != 0xffff) {
2899		if (pkt_dev->svlan_id != 0xffff) {
2900			svlan_tci = skb_put(skb, sizeof(__be16));
2901			*svlan_tci = build_tci(pkt_dev->svlan_id,
2902					       pkt_dev->svlan_cfi,
2903					       pkt_dev->svlan_p);
2904			svlan_encapsulated_proto = skb_put(skb,
2905							   sizeof(__be16));
2906			*svlan_encapsulated_proto = htons(ETH_P_8021Q);
2907		}
2908		vlan_tci = skb_put(skb, sizeof(__be16));
2909		*vlan_tci = build_tci(pkt_dev->vlan_id,
2910				      pkt_dev->vlan_cfi,
2911				      pkt_dev->vlan_p);
2912		vlan_encapsulated_proto = skb_put(skb, sizeof(__be16));
2913		*vlan_encapsulated_proto = htons(ETH_P_IP);
2914	}
2915
2916	skb_reset_mac_header(skb);
2917	skb_set_network_header(skb, skb->len);
2918	iph = skb_put(skb, sizeof(struct iphdr));
2919
2920	skb_set_transport_header(skb, skb->len);
2921	udph = skb_put(skb, sizeof(struct udphdr));
2922	skb_set_queue_mapping(skb, queue_map);
2923	skb->priority = pkt_dev->skb_priority;
2924
2925	memcpy(eth, pkt_dev->hh, 12);
2926	*(__be16 *)&eth[12] = protocol;
2927
2928	/* Eth + IPh + UDPh + mpls */
2929	datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 -
2930		  pkt_dev->pkt_overhead;
2931	if (datalen < 0 || datalen < sizeof(struct pktgen_hdr))
2932		datalen = sizeof(struct pktgen_hdr);
2933
2934	udph->source = htons(pkt_dev->cur_udp_src);
2935	udph->dest = htons(pkt_dev->cur_udp_dst);
2936	udph->len = htons(datalen + 8);	/* DATA + udphdr */
2937	udph->check = 0;
2938
2939	iph->ihl = 5;
2940	iph->version = 4;
2941	iph->ttl = 32;
2942	iph->tos = pkt_dev->tos;
2943	iph->protocol = IPPROTO_UDP;	/* UDP */
2944	iph->saddr = pkt_dev->cur_saddr;
2945	iph->daddr = pkt_dev->cur_daddr;
2946	iph->id = htons(pkt_dev->ip_id);
2947	pkt_dev->ip_id++;
2948	iph->frag_off = 0;
2949	iplen = 20 + 8 + datalen;
2950	iph->tot_len = htons(iplen);
2951	ip_send_check(iph);
2952	skb->protocol = protocol;
2953	skb->dev = odev;
2954	skb->pkt_type = PACKET_HOST;
2955
2956	pktgen_finalize_skb(pkt_dev, skb, datalen);
2957
2958	if (!(pkt_dev->flags & F_UDPCSUM)) {
2959		skb->ip_summed = CHECKSUM_NONE;
2960	} else if (odev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM)) {
2961		skb->ip_summed = CHECKSUM_PARTIAL;
2962		skb->csum = 0;
2963		udp4_hwcsum(skb, iph->saddr, iph->daddr);
2964	} else {
2965		__wsum csum = skb_checksum(skb, skb_transport_offset(skb), datalen + 8, 0);
2966
2967		/* add protocol-dependent pseudo-header */
2968		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
2969						datalen + 8, IPPROTO_UDP, csum);
2970
2971		if (udph->check == 0)
2972			udph->check = CSUM_MANGLED_0;
2973	}
2974
2975#ifdef CONFIG_XFRM
2976	if (!process_ipsec(pkt_dev, skb, protocol))
2977		return NULL;
2978#endif
2979
2980	return skb;
2981}
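/*
 * Sizing example for fill_packet_ipv4(): without MPLS/VLAN overhead
 * the frame is 14 (Eth) + 20 (IP) + 8 (UDP) + datalen bytes, so
 * cur_pkt_size = 60 leaves an 18-byte UDP payload -- just enough for
 * the pktgen header fields above.
 */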
2982
2983static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2984					struct pktgen_dev *pkt_dev)
2985{
2986	struct sk_buff *skb = NULL;
2987	__u8 *eth;
2988	struct udphdr *udph;
2989	int datalen, udplen;
2990	struct ipv6hdr *iph;
2991	__be16 protocol = htons(ETH_P_IPV6);
2992	__be32 *mpls;
2993	__be16 *vlan_tci = NULL;                 /* Encapsulates priority and VLAN ID */
2994	__be16 *vlan_encapsulated_proto = NULL;  /* packet type ID field (or len) for VLAN tag */
2995	__be16 *svlan_tci = NULL;                /* Encapsulates priority and SVLAN ID */
2996	__be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */
2997	u16 queue_map;
2998
2999	if (pkt_dev->nr_labels)
3000		protocol = htons(ETH_P_MPLS_UC);
3001
3002	if (pkt_dev->vlan_id != 0xffff)
3003		protocol = htons(ETH_P_8021Q);
3004
3005	/* Update any of the values that change when we're incrementing
3006	 * various fields.
3007	 */
3008	mod_cur_headers(pkt_dev);
3009	queue_map = pkt_dev->cur_queue_map;
3010
3011	skb = pktgen_alloc_skb(odev, pkt_dev);
3012	if (!skb) {
3013		sprintf(pkt_dev->result, "No memory");
3014		return NULL;
3015	}
3016
3017	prefetchw(skb->data);
3018	skb_reserve(skb, 16);
3019
3020	/*  Reserve for ethernet and IP header  */
3021	eth = skb_push(skb, 14);
3022	mpls = skb_put(skb, pkt_dev->nr_labels * sizeof(__u32));
3023	if (pkt_dev->nr_labels)
3024		mpls_push(mpls, pkt_dev);
3025
3026	if (pkt_dev->vlan_id != 0xffff) {
3027		if (pkt_dev->svlan_id != 0xffff) {
3028			svlan_tci = skb_put(skb, sizeof(__be16));
3029			*svlan_tci = build_tci(pkt_dev->svlan_id,
3030					       pkt_dev->svlan_cfi,
3031					       pkt_dev->svlan_p);
3032			svlan_encapsulated_proto = skb_put(skb,
3033							   sizeof(__be16));
3034			*svlan_encapsulated_proto = htons(ETH_P_8021Q);
3035		}
3036		vlan_tci = skb_put(skb, sizeof(__be16));
3037		*vlan_tci = build_tci(pkt_dev->vlan_id,
3038				      pkt_dev->vlan_cfi,
3039				      pkt_dev->vlan_p);
3040		vlan_encapsulated_proto = skb_put(skb, sizeof(__be16));
3041		*vlan_encapsulated_proto = htons(ETH_P_IPV6);
3042	}
3043
3044	skb_reset_mac_header(skb);
3045	skb_set_network_header(skb, skb->len);
3046	iph = skb_put(skb, sizeof(struct ipv6hdr));
3047
3048	skb_set_transport_header(skb, skb->len);
3049	udph = skb_put(skb, sizeof(struct udphdr));
3050	skb_set_queue_mapping(skb, queue_map);
3051	skb->priority = pkt_dev->skb_priority;
3052
3053	memcpy(eth, pkt_dev->hh, 12);
3054	*(__be16 *)&eth[12] = protocol;
3055
3056	/* Eth + IPh + UDPh + mpls */
3057	datalen = pkt_dev->cur_pkt_size - 14 -
3058		  sizeof(struct ipv6hdr) - sizeof(struct udphdr) -
3059		  pkt_dev->pkt_overhead;
3060
3061	if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) {
3062		datalen = sizeof(struct pktgen_hdr);
3063		net_info_ratelimited("increased datalen to %d\n", datalen);
3064	}
3065
3066	udplen = datalen + sizeof(struct udphdr);
3067	udph->source = htons(pkt_dev->cur_udp_src);
3068	udph->dest = htons(pkt_dev->cur_udp_dst);
3069	udph->len = htons(udplen);
3070	udph->check = 0;
3071
3072	*(__be32 *) iph = htonl(0x60000000);	/* Version + flow */
3073
3074	if (pkt_dev->traffic_class) {
3075		/* Version + traffic class + flow (0) */
3076		*(__be32 *)iph |= htonl(0x60000000 | (pkt_dev->traffic_class << 20));
3077	}
3078
3079	iph->hop_limit = 32;
3080
3081	iph->payload_len = htons(udplen);
3082	iph->nexthdr = IPPROTO_UDP;
3083
3084	iph->daddr = pkt_dev->cur_in6_daddr;
3085	iph->saddr = pkt_dev->cur_in6_saddr;
3086
3087	skb->protocol = protocol;
3088	skb->dev = odev;
3089	skb->pkt_type = PACKET_HOST;
3090
3091	pktgen_finalize_skb(pkt_dev, skb, datalen);
3092
3093	if (!(pkt_dev->flags & F_UDPCSUM)) {
3094		skb->ip_summed = CHECKSUM_NONE;
3095	} else if (odev->features & (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM)) {
3096		skb->ip_summed = CHECKSUM_PARTIAL;
3097		skb->csum_start = skb_transport_header(skb) - skb->head;
3098		skb->csum_offset = offsetof(struct udphdr, check);
3099		udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0);
3100	} else {
3101		__wsum csum = skb_checksum(skb, skb_transport_offset(skb), udplen, 0);
3102
3103		/* add protocol-dependent pseudo-header */
3104		udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum);
3105
3106		if (udph->check == 0)
3107			udph->check = CSUM_MANGLED_0;
3108	}
3109
3110	return skb;
3111}
3112
3113static struct sk_buff *fill_packet(struct net_device *odev,
3114				   struct pktgen_dev *pkt_dev)
3115{
3116	if (pkt_dev->flags & F_IPV6)
3117		return fill_packet_ipv6(odev, pkt_dev);
3118	else
3119		return fill_packet_ipv4(odev, pkt_dev);
3120}
3121
3122static void pktgen_clear_counters(struct pktgen_dev *pkt_dev)
3123{
3124	pkt_dev->seq_num = 1;
3125	pkt_dev->idle_acc = 0;
3126	pkt_dev->sofar = 0;
3127	pkt_dev->tx_bytes = 0;
3128	pkt_dev->errors = 0;
3129}
3130
3131/* Set up structure for sending pkts, clear counters */
3132
3133static void pktgen_run(struct pktgen_thread *t)
3134{
3135	struct pktgen_dev *pkt_dev;
3136	int started = 0;
3137
3138	func_enter();
3139
3140	rcu_read_lock();
3141	list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
3142
3143		/*
3144		 * setup odev and create initial packet.
3145		 */
3146		pktgen_setup_inject(pkt_dev);
3147
3148		if (pkt_dev->odev) {
3149			pktgen_clear_counters(pkt_dev);
3150			pkt_dev->skb = NULL;
3151			pkt_dev->started_at = pkt_dev->next_tx = ktime_get();
3152
3153			set_pkt_overhead(pkt_dev);
3154
3155			strcpy(pkt_dev->result, "Starting");
3156			pkt_dev->running = 1;	/* Cranke yeself! */
3157			started++;
3158		} else
3159			strcpy(pkt_dev->result, "Error starting");
3160	}
3161	rcu_read_unlock();
3162	if (started)
3163		t->control &= ~(T_STOP);
3164}
3165
3166static void pktgen_handle_all_threads(struct pktgen_net *pn, u32 flags)
3167{
3168	struct pktgen_thread *t;
3169
3170	mutex_lock(&pktgen_thread_lock);
3171
3172	list_for_each_entry(t, &pn->pktgen_threads, th_list)
3173		t->control |= (flags);
3174
3175	mutex_unlock(&pktgen_thread_lock);
3176}
3177
3178static void pktgen_stop_all_threads(struct pktgen_net *pn)
3179{
3180	func_enter();
3181
3182	pktgen_handle_all_threads(pn, T_STOP);
3183}
3184
3185static int thread_is_running(const struct pktgen_thread *t)
3186{
3187	const struct pktgen_dev *pkt_dev;
3188
3189	rcu_read_lock();
3190	list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
3191		if (pkt_dev->running) {
3192			rcu_read_unlock();
3193			return 1;
3194		}
3195	rcu_read_unlock();
3196	return 0;
3197}
3198
3199static int pktgen_wait_thread_run(struct pktgen_thread *t)
3200{
3201	while (thread_is_running(t)) {
3202
3203		/* note: 't' will still be around even after the unlock/lock
3204		 * cycle because pktgen_thread threads are only cleared at
3205		 * net exit
3206		 */
3207		mutex_unlock(&pktgen_thread_lock);
3208		msleep_interruptible(100);
3209		mutex_lock(&pktgen_thread_lock);
3210
3211		if (signal_pending(current))
3212			goto signal;
3213	}
3214	return 1;
3215signal:
3216	return 0;
3217}
3218
3219static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
3220{
3221	struct pktgen_thread *t;
3222	int sig = 1;
3223
3224	/* prevent from racing with rmmod */
3225	if (!try_module_get(THIS_MODULE))
3226		return sig;
3227
3228	mutex_lock(&pktgen_thread_lock);
3229
3230	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
3231		sig = pktgen_wait_thread_run(t);
3232		if (sig == 0)
3233			break;
3234	}
3235
3236	if (sig == 0)
3237		list_for_each_entry(t, &pn->pktgen_threads, th_list)
3238			t->control |= (T_STOP);
3239
3240	mutex_unlock(&pktgen_thread_lock);
3241	module_put(THIS_MODULE);
3242	return sig;
3243}
3244
3245static void pktgen_run_all_threads(struct pktgen_net *pn)
3246{
3247	func_enter();
3248
3249	pktgen_handle_all_threads(pn, T_RUN);
3250
3251	/* Propagate thread->control  */
3252	schedule_timeout_interruptible(msecs_to_jiffies(125));
3253
3254	pktgen_wait_all_threads_run(pn);
3255}
3256
3257static void pktgen_reset_all_threads(struct pktgen_net *pn)
3258{
3259	func_enter();
3260
3261	pktgen_handle_all_threads(pn, T_REMDEVALL);
3262
3263	/* Propagate thread->control  */
3264	schedule_timeout_interruptible(msecs_to_jiffies(125));
3265
3266	pktgen_wait_all_threads_run(pn);
3267}
3268
3269static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
3270{
3271	__u64 bps, mbps, pps;
3272	char *p = pkt_dev->result;
3273	ktime_t elapsed = ktime_sub(pkt_dev->stopped_at,
3274				    pkt_dev->started_at);
3275	ktime_t idle = ns_to_ktime(pkt_dev->idle_acc);
3276
3277	p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n",
3278		     (unsigned long long)ktime_to_us(elapsed),
3279		     (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)),
3280		     (unsigned long long)ktime_to_us(idle),
3281		     (unsigned long long)pkt_dev->sofar,
3282		     pkt_dev->cur_pkt_size, nr_frags);
3283
3284	pps = div64_u64(pkt_dev->sofar * NSEC_PER_SEC,
3285			ktime_to_ns(elapsed));
3286
3287	if (pkt_dev->n_imix_entries > 0) {
3288		int i;
3289		struct imix_pkt *entry;
3290
3291		bps = 0;
3292		for (i = 0; i < pkt_dev->n_imix_entries; i++) {
3293			entry = &pkt_dev->imix_entries[i];
3294			bps += entry->size * entry->count_so_far;
3295		}
3296		bps = div64_u64(bps * 8 * NSEC_PER_SEC, ktime_to_ns(elapsed));
3297	} else {
3298		bps = pps * 8 * pkt_dev->cur_pkt_size;
3299	}
3300
3301	mbps = bps;
3302	do_div(mbps, 1000000);
3303	p += sprintf(p, "  %llupps %lluMb/sec (%llubps) errors: %llu",
3304		     (unsigned long long)pps,
3305		     (unsigned long long)mbps,
3306		     (unsigned long long)bps,
3307		     (unsigned long long)pkt_dev->errors);
3308}
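/*
 * Example result line (illustrative values only):
 *   OK: 5000000(c4000000+d1000000) usec, 1000000 (60byte,0frags)
 *     200000pps 96Mb/sec (96000000bps) errors: 0
 */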
3309
3310/* Set stopped-at timer, remove from running list, do counters & statistics */
3311static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
3312{
3313	int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1;
3314
3315	if (!pkt_dev->running) {
3316		pr_warn("interface: %s is already stopped\n",
3317			pkt_dev->odevname);
3318		return -EINVAL;
3319	}
3320
3321	pkt_dev->running = 0;
3322	kfree_skb(pkt_dev->skb);
3323	pkt_dev->skb = NULL;
3324	pkt_dev->stopped_at = ktime_get();
3325
3326	show_results(pkt_dev, nr_frags);
3327
3328	return 0;
3329}
3330
3331static struct pktgen_dev *next_to_run(struct pktgen_thread *t)
3332{
3333	struct pktgen_dev *pkt_dev, *best = NULL;
3334
3335	rcu_read_lock();
3336	list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
3337		if (!pkt_dev->running)
3338			continue;
3339		if (best == NULL)
3340			best = pkt_dev;
3341		else if (ktime_compare(pkt_dev->next_tx, best->next_tx) < 0)
3342			best = pkt_dev;
3343	}
3344	rcu_read_unlock();
3345
3346	return best;
3347}
3348
3349static void pktgen_stop(struct pktgen_thread *t)
3350{
3351	struct pktgen_dev *pkt_dev;
3352
3353	func_enter();
3354
3355	rcu_read_lock();
3356
3357	list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
3358		pktgen_stop_device(pkt_dev);
3359	}
3360
3361	rcu_read_unlock();
3362}
3363
3364/*
3365 * one of our devices needs to be removed - find it
3366 * and remove it
3367 */
3368static void pktgen_rem_one_if(struct pktgen_thread *t)
3369{
3370	struct list_head *q, *n;
3371	struct pktgen_dev *cur;
3372
3373	func_enter();
3374
3375	list_for_each_safe(q, n, &t->if_list) {
3376		cur = list_entry(q, struct pktgen_dev, list);
3377
3378		if (!cur->removal_mark)
3379			continue;
3380
3381		kfree_skb(cur->skb);
3382		cur->skb = NULL;
3383
3384		pktgen_remove_device(t, cur);
3385
3386		break;
3387	}
3388}
3389
3390static void pktgen_rem_all_ifs(struct pktgen_thread *t)
3391{
3392	struct list_head *q, *n;
3393	struct pktgen_dev *cur;
3394
3395	func_enter();
3396
3397	/* Remove all devices, free mem */
3398
3399	list_for_each_safe(q, n, &t->if_list) {
3400		cur = list_entry(q, struct pktgen_dev, list);
3401
3402		kfree_skb(cur->skb);
3403		cur->skb = NULL;
3404
3405		pktgen_remove_device(t, cur);
3406	}
3407}
3408
3409static void pktgen_rem_thread(struct pktgen_thread *t)
3410{
3411	/* Remove from the thread list */
3412	remove_proc_entry(t->tsk->comm, t->net->proc_dir);
3413}
3414
3415static void pktgen_resched(struct pktgen_dev *pkt_dev)
3416{
3417	ktime_t idle_start = ktime_get();
3418	schedule();
3419	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start));
3420}
3421
3422static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
3423{
3424	ktime_t idle_start = ktime_get();
3425
3426	while (refcount_read(&(pkt_dev->skb->users)) != 1) {
3427		if (signal_pending(current))
3428			break;
3429
3430		if (need_resched())
3431			pktgen_resched(pkt_dev);
3432		else
3433			cpu_relax();
3434	}
3435	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start));
3436}
3437
3438static void pktgen_xmit(struct pktgen_dev *pkt_dev)
3439{
3440	unsigned int burst = READ_ONCE(pkt_dev->burst);
3441	struct net_device *odev = pkt_dev->odev;
3442	struct netdev_queue *txq;
3443	struct sk_buff *skb;
3444	int ret;
3445
3446	/* If device is offline, then don't send */
3447	if (unlikely(!netif_running(odev) || !netif_carrier_ok(odev))) {
3448		pktgen_stop_device(pkt_dev);
3449		return;
3450	}
3451
3452	/* This is the max DELAY; it has the special meaning of
3453	 * "never transmit".
3454	 */
3455	if (unlikely(pkt_dev->delay == ULLONG_MAX)) {
3456		pkt_dev->next_tx = ktime_add_ns(ktime_get(), ULONG_MAX);
3457		return;
3458	}
3459
3460	/* If no skb or clone count exhausted then get new one */
3461	if (!pkt_dev->skb || (pkt_dev->last_ok &&
3462			      ++pkt_dev->clone_count >= pkt_dev->clone_skb)) {
3463		/* build a new pkt */
3464		kfree_skb(pkt_dev->skb);
3465
3466		pkt_dev->skb = fill_packet(odev, pkt_dev);
3467		if (pkt_dev->skb == NULL) {
3468			pr_err("ERROR: couldn't allocate skb in fill_packet\n");
3469			schedule();
3470			pkt_dev->clone_count--;	/* back out increment, OOM */
3471			return;
3472		}
3473		pkt_dev->last_pkt_size = pkt_dev->skb->len;
3474		pkt_dev->clone_count = 0;	/* reset counter */
3475	}
3476
3477	if (pkt_dev->delay && pkt_dev->last_ok)
3478		spin(pkt_dev, pkt_dev->next_tx);
3479
3480	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) {
3481		skb = pkt_dev->skb;
3482		skb->protocol = eth_type_trans(skb, skb->dev);
3483		refcount_add(burst, &skb->users);
3484		local_bh_disable();
3485		do {
3486			ret = netif_receive_skb(skb);
3487			if (ret == NET_RX_DROP)
3488				pkt_dev->errors++;
3489			pkt_dev->sofar++;
3490			pkt_dev->seq_num++;
3491			if (refcount_read(&skb->users) != burst) {
3492				/* skb was queued by rps/rfs or taps,
3493				 * so cannot reuse this skb
3494				 */
3495				WARN_ON(refcount_sub_and_test(burst - 1, &skb->users));
3496				/* get out of the loop and wait
3497				 * until skb is consumed
3498				 */
3499				break;
3500			}
3501			/* skb was 'freed' by the stack, so clean a few
3502			 * bits and reuse it
3503			 */
3504			skb_reset_redirect(skb);
3505		} while (--burst > 0);
3506		goto out; /* Skips xmit_mode M_START_XMIT */
3507	} else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
3508		local_bh_disable();
3509		refcount_inc(&pkt_dev->skb->users);
3510
3511		ret = dev_queue_xmit(pkt_dev->skb);
3512		switch (ret) {
3513		case NET_XMIT_SUCCESS:
3514			pkt_dev->sofar++;
3515			pkt_dev->seq_num++;
3516			pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
3517			break;
3518		case NET_XMIT_DROP:
3519		case NET_XMIT_CN:
3520		/* These are all valid return codes for a qdisc but
3521		 * indicate packets are being dropped or will likely
3522		 * be dropped soon.
3523		 */
3524		case NETDEV_TX_BUSY:
3525		/* qdisc may call dev_hard_start_xmit directly in cases
3526		 * where no queues exist e.g. loopback device, virtual
3527		 * devices, etc. In this case we need to handle
3528		 * NETDEV_TX_ codes.
3529		 */
3530		default:
3531			pkt_dev->errors++;
3532			net_info_ratelimited("%s xmit error: %d\n",
3533					     pkt_dev->odevname, ret);
3534			break;
3535		}
3536		goto out;
3537	}
3538
3539	txq = skb_get_tx_queue(odev, pkt_dev->skb);
3540
3541	local_bh_disable();
3542
3543	HARD_TX_LOCK(odev, txq, smp_processor_id());
3544
3545	if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) {
3546		pkt_dev->last_ok = 0;
3547		goto unlock;
3548	}
3549	refcount_add(burst, &pkt_dev->skb->users);
3550
3551xmit_more:
3552	ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0);
3553
3554	switch (ret) {
3555	case NETDEV_TX_OK:
3556		pkt_dev->last_ok = 1;
3557		pkt_dev->sofar++;
3558		pkt_dev->seq_num++;
3559		pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
3560		if (burst > 0 && !netif_xmit_frozen_or_drv_stopped(txq))
3561			goto xmit_more;
3562		break;
3563	case NET_XMIT_DROP:
3564	case NET_XMIT_CN:
3565		/* skb has been consumed */
3566		pkt_dev->errors++;
3567		break;
3568	default: /* Drivers are not supposed to return other values! */
3569		net_info_ratelimited("%s xmit error: %d\n",
3570				     pkt_dev->odevname, ret);
3571		pkt_dev->errors++;
3572		fallthrough;
3573	case NETDEV_TX_BUSY:
3574		/* Retry it next time */
3575		refcount_dec(&(pkt_dev->skb->users));
3576		pkt_dev->last_ok = 0;
3577	}
3578	if (unlikely(burst))
3579		WARN_ON(refcount_sub_and_test(burst, &pkt_dev->skb->users));
3580unlock:
3581	HARD_TX_UNLOCK(odev, txq);
3582
3583out:
3584	local_bh_enable();
3585
3586	/* If pkt_dev->count is zero, then run forever */
3587	if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
3588		pktgen_wait_for_skb(pkt_dev);
3589
3590		/* Done with this */
3591		pktgen_stop_device(pkt_dev);
3592	}
3593}
3594
3595/*
3596 * Main loop of the thread goes here
3597 */
3598
3599static int pktgen_thread_worker(void *arg)
3600{
3601	struct pktgen_thread *t = arg;
3602	struct pktgen_dev *pkt_dev = NULL;
3603	int cpu = t->cpu;
3604
3605	WARN_ON(smp_processor_id() != cpu);
3606
3607	init_waitqueue_head(&t->queue);
3608	complete(&t->start_done);
3609
3610	pr_debug("starting pktgen/%d:  pid=%d\n", cpu, task_pid_nr(current));
3611
3612	set_freezable();
3613
3614	while (!kthread_should_stop()) {
3615		pkt_dev = next_to_run(t);
3616
3617		if (unlikely(!pkt_dev && t->control == 0)) {
3618			if (t->net->pktgen_exiting)
3619				break;
3620			wait_event_interruptible_timeout(t->queue,
3621							 t->control != 0,
3622							 HZ/10);
3623			try_to_freeze();
3624			continue;
3625		}
3626
3627		if (likely(pkt_dev)) {
3628			pktgen_xmit(pkt_dev);
3629
3630			if (need_resched())
3631				pktgen_resched(pkt_dev);
3632			else
3633				cpu_relax();
3634		}
3635
3636		if (t->control & T_STOP) {
3637			pktgen_stop(t);
3638			t->control &= ~(T_STOP);
3639		}
3640
3641		if (t->control & T_RUN) {
3642			pktgen_run(t);
3643			t->control &= ~(T_RUN);
3644		}
3645
3646		if (t->control & T_REMDEVALL) {
3647			pktgen_rem_all_ifs(t);
3648			t->control &= ~(T_REMDEVALL);
3649		}
3650
3651		if (t->control & T_REMDEV) {
3652			pktgen_rem_one_if(t);
3653			t->control &= ~(T_REMDEV);
3654		}
3655
3656		try_to_freeze();
3657	}
3658
3659	pr_debug("%s stopping all devices\n", t->tsk->comm);
3660	pktgen_stop(t);
3661
3662	pr_debug("%s removing all devices\n", t->tsk->comm);
3663	pktgen_rem_all_ifs(t);
3664
3665	pr_debug("%s removing thread\n", t->tsk->comm);
3666	pktgen_rem_thread(t);
3667
3668	return 0;
3669}
3670
3671static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
3672					  const char *ifname, bool exact)
3673{
3674	struct pktgen_dev *p, *pkt_dev = NULL;
3675	size_t len = strlen(ifname);
3676
3677	rcu_read_lock();
3678	list_for_each_entry_rcu(p, &t->if_list, list)
3679		if (strncmp(p->odevname, ifname, len) == 0) {
3680			if (p->odevname[len]) {
3681				if (exact || p->odevname[len] != '@')
3682					continue;
3683			}
3684			pkt_dev = p;
3685			break;
3686		}
3687
3688	rcu_read_unlock();
3689	pr_debug("find_dev(%s) returning %p\n", ifname, pkt_dev);
3690	return pkt_dev;
3691}
3692
3693/*
3694 * Adds a dev at front of if_list.
3695 */
3696
3697static int add_dev_to_thread(struct pktgen_thread *t,
3698			     struct pktgen_dev *pkt_dev)
3699{
3700	int rv = 0;
3701
3702	/* This function cannot be called concurrently, as it is called
3703	 * under the pktgen_thread_lock mutex, but it can run from
3704	 * userspace on a different CPU than the kthread.  The if_lock()
3705	 * is used here to sync with concurrent instances of
3706	 * _rem_dev_from_if_list() invoked via the kthread, which also
3707	 * updates the if_list */
3708	if_lock(t);
3709
3710	if (pkt_dev->pg_thread) {
3711		pr_err("ERROR: already assigned to a thread\n");
3712		rv = -EBUSY;
3713		goto out;
3714	}
3715
3716	pkt_dev->running = 0;
3717	pkt_dev->pg_thread = t;
3718	list_add_rcu(&pkt_dev->list, &t->if_list);
3719
3720out:
3721	if_unlock(t);
3722	return rv;
3723}
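/* The list_add_rcu() above pairs with the rcu_read_lock() traversal in
 * pktgen_find_dev() and with list_del_rcu()/kfree_rcu() on the removal
 * path, so lockless readers never observe a half-updated if_list.
 */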
3724
3725/* Called under thread lock */
3726
3727static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3728{
3729	struct pktgen_dev *pkt_dev;
3730	int err;
3731	int node = cpu_to_node(t->cpu);
3732
3733	/* We don't allow a device to be on several threads */
3734
3735	pkt_dev = __pktgen_NN_threads(t->net, ifname, FIND);
3736	if (pkt_dev) {
3737		pr_err("ERROR: interface already used\n");
3738		return -EBUSY;
3739	}
3740
3741	pkt_dev = kzalloc_node(sizeof(struct pktgen_dev), GFP_KERNEL, node);
3742	if (!pkt_dev)
3743		return -ENOMEM;
3744
3745	strcpy(pkt_dev->odevname, ifname);
3746	pkt_dev->flows = vzalloc_node(array_size(MAX_CFLOWS,
3747						 sizeof(struct flow_state)),
3748				      node);
3749	if (!pkt_dev->flows) {
3750		kfree(pkt_dev);
3751		return -ENOMEM;
3752	}
3753
3754	pkt_dev->removal_mark = 0;
3755	pkt_dev->nfrags = 0;
3756	pkt_dev->delay = pg_delay_d;
3757	pkt_dev->count = pg_count_d;
3758	pkt_dev->sofar = 0;
3759	pkt_dev->udp_src_min = 9;	/* sink port */
3760	pkt_dev->udp_src_max = 9;
3761	pkt_dev->udp_dst_min = 9;
3762	pkt_dev->udp_dst_max = 9;
3763	pkt_dev->vlan_p = 0;
3764	pkt_dev->vlan_cfi = 0;
3765	pkt_dev->vlan_id = 0xffff;
3766	pkt_dev->svlan_p = 0;
3767	pkt_dev->svlan_cfi = 0;
3768	pkt_dev->svlan_id = 0xffff;
3769	pkt_dev->burst = 1;
3770	pkt_dev->node = NUMA_NO_NODE;
3771
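/* Notes on the defaults above: UDP port 9 is the well-known "discard"
 * service, so a standards-compliant sink silently drops the generated
 * traffic; a vlan_id/svlan_id of 0xffff means "no VLAN tagging"; and
 * NUMA_NO_NODE leaves skb allocation unpinned.
 */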
3772	err = pktgen_setup_dev(t->net, pkt_dev, ifname);
3773	if (err)
3774		goto out1;
3775	if (pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)
3776		pkt_dev->clone_skb = pg_clone_skb_d;
3777
3778	pkt_dev->entry = proc_create_data(ifname, 0600, t->net->proc_dir,
3779					  &pktgen_if_proc_ops, pkt_dev);
3780	if (!pkt_dev->entry) {
3781		pr_err("cannot create %s/%s procfs entry\n",
3782		       PG_PROC_DIR, ifname);
3783		err = -EINVAL;
3784		goto out2;
3785	}
3786#ifdef CONFIG_XFRM
3787	pkt_dev->ipsmode = XFRM_MODE_TRANSPORT;
3788	pkt_dev->ipsproto = IPPROTO_ESP;
3789
3790	/* xfrm tunnel mode needs an additional dst to extract the outer
3791	 * IP header protocol/ttl/id fields, so create a phony one here
3792	 * instead of looking up a valid rt, which would definitely hurt
3793	 * performance in this case.
3794	 */
3795	pkt_dev->dstops.family = AF_INET;
3796	pkt_dev->xdst.u.dst.dev = pkt_dev->odev;
3797	dst_init_metrics(&pkt_dev->xdst.u.dst, pktgen_dst_metrics, false);
3798	pkt_dev->xdst.child = &pkt_dev->xdst.u.dst;
3799	pkt_dev->xdst.u.dst.ops = &pkt_dev->dstops;
3800#endif
3801
3802	return add_dev_to_thread(t, pkt_dev);
3803out2:
3804	netdev_put(pkt_dev->odev, &pkt_dev->dev_tracker);
3805out1:
3806#ifdef CONFIG_XFRM
3807	free_SAs(pkt_dev);
3808#endif
3809	vfree(pkt_dev->flows);
3810	kfree(pkt_dev);
3811	return err;
3812}
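/* The error unwind above runs in reverse order of setup: out2 drops
 * the device reference taken by pktgen_setup_dev(), and out1 releases
 * the XFRM SAs and the flow table before freeing pkt_dev itself.
 */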
3813
3814static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
3815{
3816	struct pktgen_thread *t;
3817	struct proc_dir_entry *pe;
3818	struct task_struct *p;
3819
3820	t = kzalloc_node(sizeof(struct pktgen_thread), GFP_KERNEL,
3821			 cpu_to_node(cpu));
3822	if (!t) {
3823		pr_err("ERROR: out of memory, can't create new thread\n");
3824		return -ENOMEM;
3825	}
3826
3827	mutex_init(&t->if_lock);
3828	t->cpu = cpu;
3829
3830	INIT_LIST_HEAD(&t->if_list);
3831
3832	list_add_tail(&t->th_list, &pn->pktgen_threads);
3833	init_completion(&t->start_done);
3834
3835	p = kthread_create_on_node(pktgen_thread_worker,
3836				   t,
3837				   cpu_to_node(cpu),
3838				   "kpktgend_%d", cpu);
3839	if (IS_ERR(p)) {
3840		pr_err("kthread_create_on_node() failed for cpu %d\n", t->cpu);
3841		list_del(&t->th_list);
3842		kfree(t);
3843		return PTR_ERR(p);
3844	}
3845	kthread_bind(p, cpu);
3846	t->tsk = p;
3847
3848	pe = proc_create_data(t->tsk->comm, 0600, pn->proc_dir,
3849			      &pktgen_thread_proc_ops, t);
3850	if (!pe) {
3851		pr_err("cannot create %s/%s procfs entry\n",
3852		       PG_PROC_DIR, t->tsk->comm);
3853		kthread_stop(p);
3854		list_del(&t->th_list);
3855		kfree(t);
3856		return -EINVAL;
3857	}
3858
3859	t->net = pn;
3860	get_task_struct(p);
3861	wake_up_process(p);
3862	wait_for_completion(&t->start_done);
3863
3864	return 0;
3865}
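/* One such kthread, named "kpktgend_<cpu>", is created per online CPU
 * by pg_net_init() below and pinned with kthread_bind(), so each
 * thread's if_list is effectively per-CPU.  The final
 * wait_for_completion() guarantees the worker has initialized t->queue
 * before anyone can post control operations to it.
 */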
3866
3867/*
3868 * Removes a device from the thread's if_list.
3869 */
3870static void _rem_dev_from_if_list(struct pktgen_thread *t,
3871				  struct pktgen_dev *pkt_dev)
3872{
3873	struct list_head *q, *n;
3874	struct pktgen_dev *p;
3875
3876	if_lock(t);
3877	list_for_each_safe(q, n, &t->if_list) {
3878		p = list_entry(q, struct pktgen_dev, list);
3879		if (p == pkt_dev)
3880			list_del_rcu(&p->list);
3881	}
3882	if_unlock(t);
3883}
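/* list_del_rcu() only unlinks the entry; the memory stays valid for
 * concurrent RCU readers until the kfree_rcu() in
 * pktgen_remove_device() lets a grace period elapse.
 */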
3884
3885static int pktgen_remove_device(struct pktgen_thread *t,
3886				struct pktgen_dev *pkt_dev)
3887{
3888	pr_debug("remove_device pkt_dev=%p\n", pkt_dev);
3889
3890	if (pkt_dev->running) {
3891		pr_warn("WARNING: trying to remove a running interface, stopping it now\n");
3892		pktgen_stop_device(pkt_dev);
3893	}
3894
3895	/* Dis-associate from the interface */
3896
3897	if (pkt_dev->odev) {
3898		netdev_put(pkt_dev->odev, &pkt_dev->dev_tracker);
3899		pkt_dev->odev = NULL;
3900	}
3901
3902	/* Remove the proc entry before the if_list entry, because
3903	 * add_device uses the list to determine whether an interface
3904	 * already exists, avoiding a race with proc_create_data() */
3905	proc_remove(pkt_dev->entry);
3906
3907	/* And update the thread if_list */
3908	_rem_dev_from_if_list(t, pkt_dev);
3909
3910#ifdef CONFIG_XFRM
3911	free_SAs(pkt_dev);
3912#endif
3913	vfree(pkt_dev->flows);
3914	if (pkt_dev->page)
3915		put_page(pkt_dev->page);
3916	kfree_rcu(pkt_dev, rcu);
3917	return 0;
3918}
3919
3920static int __net_init pg_net_init(struct net *net)
3921{
3922	struct pktgen_net *pn = net_generic(net, pg_net_id);
3923	struct proc_dir_entry *pe;
3924	int cpu, ret = 0;
3925
3926	pn->net = net;
3927	INIT_LIST_HEAD(&pn->pktgen_threads);
3928	pn->pktgen_exiting = false;
3929	pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
3930	if (!pn->proc_dir) {
3931		pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
3932		return -ENODEV;
3933	}
3934	pe = proc_create(PGCTRL, 0600, pn->proc_dir, &pktgen_proc_ops);
3935	if (!pe) {
3936		pr_err("cannot create %s procfs entry\n", PGCTRL);
3937		ret = -EINVAL;
3938		goto remove;
3939	}
3940
3941	for_each_online_cpu(cpu) {
3942		int err;
3943
3944		err = pktgen_create_thread(cpu, pn);
3945		if (err)
3946			pr_warn("Cannot create thread for cpu %d (%d)\n",
3947				   cpu, err);
3948	}
3949
3950	if (list_empty(&pn->pktgen_threads)) {
3951		pr_err("Initialization failed for all threads\n");
3952		ret = -ENODEV;
3953		goto remove_entry;
3954	}
3955
3956	return 0;
3957
3958remove_entry:
3959	remove_proc_entry(PGCTRL, pn->proc_dir);
3960remove:
3961	remove_proc_entry(PG_PROC_DIR, pn->net->proc_net);
3962	return ret;
3963}
3964
3965static void __net_exit pg_net_exit(struct net *net)
3966{
3967	struct pktgen_net *pn = net_generic(net, pg_net_id);
3968	struct pktgen_thread *t;
3969	struct list_head *q, *n;
3970	LIST_HEAD(list);
3971
3972	/* Stop all interfaces & threads */
3973	pn->pktgen_exiting = true;
3974
3975	mutex_lock(&pktgen_thread_lock);
3976	list_splice_init(&pn->pktgen_threads, &list);
3977	mutex_unlock(&pktgen_thread_lock);
3978
3979	list_for_each_safe(q, n, &list) {
3980		t = list_entry(q, struct pktgen_thread, th_list);
3981		list_del(&t->th_list);
3982		kthread_stop(t->tsk);
3983		put_task_struct(t->tsk);
3984		kfree(t);
3985	}
3986
3987	remove_proc_entry(PGCTRL, pn->proc_dir);
3988	remove_proc_entry(PG_PROC_DIR, pn->net->proc_net);
3989}
3990
3991static struct pernet_operations pg_net_ops = {
3992	.init = pg_net_init,
3993	.exit = pg_net_exit,
3994	.id   = &pg_net_id,
3995	.size = sizeof(struct pktgen_net),
3996};
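/* Registering these pernet_operations gives every network namespace
 * its own /proc/net/pktgen directory and thread set; .id/.size let the
 * core allocate one struct pktgen_net per namespace, retrievable with
 * net_generic(net, pg_net_id) as done in pg_net_init()/pg_net_exit()
 * above.
 */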
3997
3998static int __init pg_init(void)
3999{
4000	int ret = 0;
4001
4002	pr_info("%s", version);
4003	ret = register_pernet_subsys(&pg_net_ops);
4004	if (ret)
4005		return ret;
4006	ret = register_netdevice_notifier(&pktgen_notifier_block);
4007	if (ret)
4008		unregister_pernet_subsys(&pg_net_ops);
4009
4010	return ret;
4011}
4012
4013static void __exit pg_cleanup(void)
4014{
4015	unregister_netdevice_notifier(&pktgen_notifier_block);
4016	unregister_pernet_subsys(&pg_net_ops);
4017	/* Don't need rcu_barrier() due to use of kfree_rcu() */
4018}
4019
4020module_init(pg_init);
4021module_exit(pg_cleanup);
4022
4023MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se>");
4024MODULE_DESCRIPTION("Packet Generator tool");
4025MODULE_LICENSE("GPL");
4026MODULE_VERSION(VERSION);
4027module_param(pg_count_d, int, 0);
4028MODULE_PARM_DESC(pg_count_d, "Default number of packets to inject");
4029module_param(pg_delay_d, int, 0);
4030MODULE_PARM_DESC(pg_delay_d, "Default delay between packets (nanoseconds)");
4031module_param(pg_clone_skb_d, int, 0);
4032MODULE_PARM_DESC(pg_clone_skb_d, "Default number of copies of the same packet");
4033module_param(debug, int, 0);
4034MODULE_PARM_DESC(debug, "Enable debugging of pktgen module");
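/* Example module load using the parameters above (values are
 * illustrative):
 *
 *   modprobe pktgen pg_count_d=100000 pg_delay_d=0 pg_clone_skb_d=0
 *
 * These only seed the defaults copied into each new pktgen_dev by
 * pktgen_add_device(); per-device values can still be changed at any
 * time through /proc/net/pktgen/<ifname>.
 */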