/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: pv-drivers@vmware.com
 *
 */

#ifndef _VMXNET3_INT_H
#define _VMXNET3_INT_H

#include <linux/bitops.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/ioport.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/uaccess.h>
#include <asm/dma.h>
#include <asm/page.h>

#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <asm/checksum.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/log2.h>
#include <linux/bpf.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>

#include "vmxnet3_defs.h"

#ifdef DEBUG
# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI(debug)"
#else
# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI"
#endif


/*
 * Version numbers
 */
#define VMXNET3_DRIVER_VERSION_STRING	"1.9.0.0-k"

/* Each byte of this 32-bit integer encodes a version number in
 * VMXNET3_DRIVER_VERSION_STRING.
 */
#define VMXNET3_DRIVER_VERSION_NUM	0x01090000

#if defined(CONFIG_PCI_MSI)
	/* RSS only makes sense if MSI-X is supported. */
	#define VMXNET3_RSS
#endif

#define VMXNET3_REV_9	8	/* Vmxnet3 Rev. 9 */
#define VMXNET3_REV_8	7	/* Vmxnet3 Rev. 8 */
#define VMXNET3_REV_7	6	/* Vmxnet3 Rev. 7 */
#define VMXNET3_REV_6	5	/* Vmxnet3 Rev. 6 */
#define VMXNET3_REV_5	4	/* Vmxnet3 Rev. 5 */
#define VMXNET3_REV_4	3	/* Vmxnet3 Rev. 4 */
#define VMXNET3_REV_3	2	/* Vmxnet3 Rev. 3 */
#define VMXNET3_REV_2	1	/* Vmxnet3 Rev. 2 */
#define VMXNET3_REV_1	0	/* Vmxnet3 Rev. 1 */

/*
 * Capabilities
 */

enum {
	VMNET_CAP_SG            = 0x0001, /* Can do scatter-gather transmits. */
	VMNET_CAP_IP4_CSUM      = 0x0002, /* Can checksum only TCP/UDP over
					   * IPv4 */
	VMNET_CAP_HW_CSUM       = 0x0004, /* Can checksum all packets. */
	VMNET_CAP_HIGH_DMA      = 0x0008, /* Can DMA to high memory. */
	VMNET_CAP_TOE           = 0x0010, /* Supports TCP/IP offload. */
	VMNET_CAP_TSO           = 0x0020, /* Supports TCP Segmentation
					   * offload */
	VMNET_CAP_SW_TSO        = 0x0040, /* Supports SW TCP Segmentation */
	VMNET_CAP_VMXNET_APROM  = 0x0080, /* Vmxnet APROM support */
	VMNET_CAP_HW_TX_VLAN    = 0x0100, /* Can we do VLAN tagging in HW */
	VMNET_CAP_HW_RX_VLAN    = 0x0200, /* Can we do VLAN untagging in HW */
	VMNET_CAP_SW_VLAN       = 0x0400, /* VLAN tagging/untagging in SW */
	VMNET_CAP_WAKE_PCKT_RCV = 0x0800, /* Can wake on network packet recv? */
	VMNET_CAP_ENABLE_INT_INLINE = 0x1000,	/* Enable Interrupt Inline */
	VMNET_CAP_ENABLE_HEADER_COPY = 0x2000,	/* copy header for vmkernel */
	VMNET_CAP_TX_CHAIN      = 0x4000, /* Guest can use multiple tx entries
					   * for a pkt */
	VMNET_CAP_RX_CHAIN      = 0x8000, /* pkt can span multiple rx entries */
	VMNET_CAP_LPD           = 0x10000, /* large pkt delivery */
	VMNET_CAP_BPF           = 0x20000, /* BPF Support in VMXNET Virtual HW*/
	VMNET_CAP_SG_SPAN_PAGES = 0x40000, /* Scatter-gather can span multiple*/
					   /* pages transmits */
	VMNET_CAP_IP6_CSUM      = 0x80000, /* Can do IPv6 csum offload. */
	VMNET_CAP_TSO6         = 0x100000, /* TSO seg. offload for IPv6 pkts. */
	VMNET_CAP_TSO256k      = 0x200000, /* Can do TSO seg offload for */
					   /* pkts up to 256kB. */
	VMNET_CAP_UPT          = 0x400000  /* Support UPT */
};

/*
 * Maximum devices supported.
 */
#define MAX_ETHERNET_CARDS		10
#define MAX_PCI_PASSTHRU_DEVICE		6

struct vmxnet3_cmd_ring {
	union Vmxnet3_GenericDesc *base;
	u32		size;
	u32		next2fill;
	u32		next2comp;
	u8		gen;
	u8		isOutOfOrder;
	dma_addr_t	basePA;
};

static inline void
vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring)
{
	ring->next2fill++;
	if (unlikely(ring->next2fill == ring->size)) {
		ring->next2fill = 0;
		VMXNET3_FLIP_RING_GEN(ring->gen);
	}
}

static inline void
vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring)
{
	VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
}

static inline int
vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
{
	return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
		ring->next2comp - ring->next2fill - 1;
}
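/*
 * How the command-ring indices are used (illustrative sketch, not the
 * driver's exact code): next2fill is the slot the driver populates next,
 * next2comp is the oldest slot still owned by the device, and gen flips on
 * every wrap so the device can tell freshly written descriptors from stale
 * ones. vmxnet3_cmd_ring_desc_avail() reports the free slots while always
 * leaving one descriptor unused, hence the trailing "- 1". A typical
 * producer step, with "gd" as a hypothetical local:
 *
 *	gd = ring->base + ring->next2fill;
 *	// fill gd, writing its generation bit last
 *	vmxnet3_cmd_ring_adv_next2fill(ring);
 */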

struct vmxnet3_comp_ring {
	union Vmxnet3_GenericDesc *base;
	u32		size;
	u32		next2proc;
	u8		gen;
	u8		intr_idx;
	dma_addr_t	basePA;
};

static inline void
vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
{
	ring->next2proc++;
	if (unlikely(ring->next2proc == ring->size)) {
		ring->next2proc = 0;
		VMXNET3_FLIP_RING_GEN(ring->gen);
	}
}
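/*
 * Completion rings are consumed rather than filled: the descriptor at
 * next2proc is new only if its generation bit matches ring->gen. A hedged
 * consumer sketch, with descriptor_gen() standing in for the endian-aware
 * accessor the driver proper uses instead of touching bitfields directly:
 *
 *	gdesc = ring->base + ring->next2proc;
 *	while (descriptor_gen(gdesc) == ring->gen) {
 *		// process the completion
 *		vmxnet3_comp_ring_adv_next2proc(ring);
 *		gdesc = ring->base + ring->next2proc;
 *	}
 */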

struct vmxnet3_tx_data_ring {
	struct Vmxnet3_TxDataDesc *base;
	u32		size;
	dma_addr_t	basePA;
};

struct vmxnet3_tx_ts_ring {
	struct Vmxnet3_TxTSDesc *base;
	dma_addr_t	basePA;
};

#define VMXNET3_MAP_NONE	0
#define VMXNET3_MAP_SINGLE	BIT(0)
#define VMXNET3_MAP_PAGE	BIT(1)
#define VMXNET3_MAP_XDP		BIT(2)

struct vmxnet3_tx_buf_info {
	u32		map_type;
	u16		len;
	u16		sop_idx;
	dma_addr_t	dma_addr;
	union {
		struct sk_buff *skb;
		struct xdp_frame *xdpf;
	};
};
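/*
 * map_type in struct vmxnet3_tx_buf_info above records how dma_addr was
 * obtained so the completion path can release it the same way: a
 * VMXNET3_MAP_SINGLE mapping pairs with dma_unmap_single(), a
 * VMXNET3_MAP_PAGE mapping with dma_unmap_page(), and VMXNET3_MAP_XDP
 * marks a buffer that belongs to an xdp_frame rather than an skb.
 */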

struct vmxnet3_tq_driver_stats {
	u64 drop_total;     /* # of pkts dropped by the driver, the
			     * counters below track droppings due to
			     * different reasons
			     */
	u64 drop_too_many_frags;
	u64 drop_oversized_hdr;
	u64 drop_hdr_inspect_err;
	u64 drop_tso;

	u64 tx_ring_full;
	u64 linearized;         /* # of pkts linearized */
	u64 copy_skb_header;    /* # of times we have to copy skb header */
	u64 oversized_hdr;

	u64 xdp_xmit;
	u64 xdp_xmit_err;
};

struct vmxnet3_tx_ctx {
	bool	ipv4;
	bool	ipv6;
	u16	mss;
	u32	l4_offset;	/* only valid for pkts requesting tso or csum
				 * offloading. For encap offload, it refers to
				 * inner L4 offset i.e. it includes outer header
				 * encap header and inner eth and ip header size
				 */

	u32	l4_hdr_size;	/* only valid if mss != 0
				 * Refers to inner L4 hdr size for encap
				 * offload
				 */
	u32	copy_size;	/* # of bytes copied into the data ring */
	union Vmxnet3_GenericDesc *sop_txd;
	union Vmxnet3_GenericDesc *eop_txd;
	struct Vmxnet3_TxTSDesc *ts_txd;
};

struct vmxnet3_tx_queue {
	char			name[IFNAMSIZ+8]; /* To identify interrupt */
	struct vmxnet3_adapter		*adapter;
	spinlock_t			tx_lock;
	struct vmxnet3_cmd_ring		tx_ring;
	struct vmxnet3_tx_buf_info	*buf_info;
	struct vmxnet3_tx_data_ring	data_ring;
	struct vmxnet3_tx_ts_ring	ts_ring;
	struct vmxnet3_comp_ring	comp_ring;
	struct Vmxnet3_TxQueueCtrl	*shared;
	struct vmxnet3_tq_driver_stats	stats;
	bool				stopped;
	int				num_stop;  /* # of times the queue is
						    * stopped */
	int				qid;
	u16				txdata_desc_size;
	u16				tx_ts_desc_size;
	u16				tsPktCount;
} ____cacheline_aligned;

enum vmxnet3_rx_buf_type {
	VMXNET3_RX_BUF_NONE = 0,
	VMXNET3_RX_BUF_SKB = 1,
	VMXNET3_RX_BUF_PAGE = 2,
	VMXNET3_RX_BUF_XDP = 3,
};

#define VMXNET3_RXD_COMP_PENDING	0
#define VMXNET3_RXD_COMP_DONE		1

struct vmxnet3_rx_buf_info {
	enum vmxnet3_rx_buf_type buf_type;
	u16	len;
	u8	comp_state;
	union {
		struct sk_buff *skb;
		struct page    *page;
	};
	dma_addr_t dma_addr;
};

struct vmxnet3_rx_ctx {
	struct sk_buff *skb;
	u32 sop_idx;
};

struct vmxnet3_rq_driver_stats {
	u64 drop_total;
	u64 drop_err;
	u64 drop_fcs;
	u64 rx_buf_alloc_failure;

	u64 xdp_packets;	/* Total packets processed by XDP. */
	u64 xdp_tx;
	u64 xdp_redirects;
	u64 xdp_drops;
	u64 xdp_aborted;
};

struct vmxnet3_rx_data_ring {
	Vmxnet3_RxDataDesc *base;
	dma_addr_t basePA;
	u16 desc_size;
};

struct vmxnet3_rx_ts_ring {
	struct Vmxnet3_RxTSDesc *base;
	dma_addr_t basePA;
};

struct vmxnet3_rx_queue {
	char			name[IFNAMSIZ + 8]; /* To identify interrupt */
	struct vmxnet3_adapter	*adapter;
	struct napi_struct	napi;
	struct vmxnet3_cmd_ring	rx_ring[2];
	struct vmxnet3_rx_data_ring data_ring;
	struct vmxnet3_comp_ring comp_ring;
	struct vmxnet3_rx_ts_ring ts_ring;
	struct vmxnet3_rx_ctx	rx_ctx;
	u32 qid;		/* rqID in RCD for buffer from 1st ring */
	u32 qid2;		/* rqID in RCD for buffer from 2nd ring */
	u32 dataRingQid;	/* rqID in RCD for buffer from data ring */
	struct vmxnet3_rx_buf_info	*buf_info[2];
	struct Vmxnet3_RxQueueCtrl	*shared;
	struct vmxnet3_rq_driver_stats	stats;
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;
	u16 rx_ts_desc_size;
} ____cacheline_aligned;
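/*
 * Each RX queue above is backed by two command rings plus an optional
 * data ring: rx_ring[0] supplies the first (head) buffer of a packet and
 * rx_ring[1] supplies the additional page buffers used when a packet
 * spans several descriptors. qid, qid2 and dataRingQid are the ring
 * identifiers the device echoes back in receive completions so the driver
 * can tell which ring a completed buffer came from.
 */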

#define VMXNET3_DEVICE_MAX_TX_QUEUES 32
#define VMXNET3_DEVICE_MAX_RX_QUEUES 32   /* Keep this value as a power of 2 */

#define VMXNET3_DEVICE_DEFAULT_TX_QUEUES 8
#define VMXNET3_DEVICE_DEFAULT_RX_QUEUES 8   /* Keep this value as a power of 2 */

/* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */
#define VMXNET3_RSS_IND_TABLE_SIZE  (VMXNET3_DEVICE_MAX_RX_QUEUES * 4)

#define VMXNET3_LINUX_MAX_MSIX_VECT     (VMXNET3_DEVICE_MAX_TX_QUEUES + \
					 VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
#define VMXNET3_LINUX_MIN_MSIX_VECT     3    /* 1 for tx, 1 for rx, and 1 for event */


struct vmxnet3_intr {
	enum vmxnet3_intr_mask_mode  mask_mode;
	enum vmxnet3_intr_type       type;	/* MSI-X, MSI, or INTx? */
	u8  num_intrs;			/* # of intr vectors */
	u8  event_intr_idx;		/* idx of the intr vector for event */
	u8  mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
	char	event_msi_vector_name[IFNAMSIZ+17];
#ifdef CONFIG_PCI_MSI
	struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
#endif
};

/* Interrupt sharing schemes, share_intr */
#define VMXNET3_INTR_BUDDYSHARE 0    /* Corresponding tx,rx queues share irq */
#define VMXNET3_INTR_TXSHARE 1	     /* All tx queues share one irq */
#define VMXNET3_INTR_DONTSHARE 2     /* each queue has its own irq */


#define VMXNET3_STATE_BIT_RESETTING   0
#define VMXNET3_STATE_BIT_QUIESCED    1
struct vmxnet3_adapter {
	struct vmxnet3_tx_queue		tx_queue[VMXNET3_DEVICE_MAX_TX_QUEUES];
	struct vmxnet3_rx_queue		rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
	unsigned long			active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct vmxnet3_intr		intr;
	spinlock_t			cmd_lock;
	struct Vmxnet3_DriverShared	*shared;
	struct Vmxnet3_PMConf		*pm_conf;
	struct Vmxnet3_TxQueueDesc	*tqd_start;	/* all tx queue desc */
	struct Vmxnet3_RxQueueDesc	*rqd_start;	/* all rx queue desc */
	struct net_device		*netdev;
	struct pci_dev			*pdev;

	u8			__iomem *hw_addr0; /* for BAR 0 */
	u8			__iomem *hw_addr1; /* for BAR 1 */
	u8				version;

#ifdef VMXNET3_RSS
	struct UPT1_RSSConf		*rss_conf;
	bool				rss;
#endif
	u32				num_rx_queues;
	u32				num_tx_queues;

	/* rx buffer related */
	unsigned			skb_buf_size;
	int				rx_buf_per_pkt;  /* only apply to the 1st ring */
	dma_addr_t			shared_pa;
	dma_addr_t			queue_desc_pa;
	dma_addr_t			coal_conf_pa;

	/* Wake-on-LAN */
	u32				wol;

	/* Link speed */
	u32				link_speed; /* in mbps */

	u64				tx_timeout_count;

	/* Ring sizes */
	u32				tx_ring_size;
	u32				rx_ring_size;
	u32				rx_ring2_size;

	/* Size of buffer in the data ring */
	u16				txdata_desc_size;
	u16				rxdata_desc_size;

	bool				rxdataring_enabled;
	bool				default_rss_fields;
	enum Vmxnet3_RSSField		rss_fields;

	struct work_struct		work;

	unsigned long			state;    /* VMXNET3_STATE_BIT_xxx */

	int				share_intr;

	struct Vmxnet3_CoalesceScheme	*coal_conf;
	bool				default_coal_mode;

	dma_addr_t			adapter_pa;
	dma_addr_t			pm_conf_pa;
	dma_addr_t			rss_conf_pa;
	bool				queuesExtEnabled;
	struct Vmxnet3_RingBufferSize	ringBufSize;
	u32				devcap_supported[8];
	u32				ptcap_supported[8];
	u32				dev_caps[8];
	u16				tx_prod_offset;
	u16				rx_prod_offset;
	u16				rx_prod2_offset;
	struct bpf_prog __rcu		*xdp_bpf_prog;
	struct Vmxnet3_LatencyConf	*latencyConf;
	/* Size of buffer in the ts ring */
	u16				tx_ts_desc_size;
	u16				rx_ts_desc_size;
	u32				disabledOffloads;
};

#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val)  \
	writel((val), (adapter)->hw_addr0 + (reg))
#define VMXNET3_READ_BAR0_REG(adapter, reg)        \
	readl((adapter)->hw_addr0 + (reg))

#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val)  \
	writel((val), (adapter)->hw_addr1 + (reg))
#define VMXNET3_READ_BAR1_REG(adapter, reg)        \
	readl((adapter)->hw_addr1 + (reg))
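/*
 * Usage sketch for the BAR access macros above (assumes the conventional
 * split where BAR 1 holds the configuration/command registers such as
 * VMXNET3_REG_CMD from vmxnet3_defs.h and BAR 0 holds the fast-path
 * producer registers). Issuing a device command typically looks like:
 *
 *	spin_lock_irqsave(&adapter->cmd_lock, flags);
 *	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
 *	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 */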

#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq)  (5)
#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
	((rq)->rx_ring[ring_idx].size >> 3)

#define VMXNET3_GET_ADDR_LO(dma)   ((u32)(dma))
#define VMXNET3_GET_ADDR_HI(dma)   ((u32)(((u64)(dma)) >> 32))
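/*
 * Worked example: a 64-bit DMA address such as 0x0000001234567000 is
 * handed to the device as two 32-bit halves, VMXNET3_GET_ADDR_LO() ->
 * 0x34567000 and VMXNET3_GET_ADDR_HI() -> 0x12, matching the lo/hi
 * halves of the address fields in the shared structures.
 */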

#define VMXNET3_VERSION_GE_2(adapter) \
	(adapter->version >= VMXNET3_REV_2 + 1)
#define VMXNET3_VERSION_GE_3(adapter) \
	(adapter->version >= VMXNET3_REV_3 + 1)
#define VMXNET3_VERSION_GE_4(adapter) \
	(adapter->version >= VMXNET3_REV_4 + 1)
#define VMXNET3_VERSION_GE_5(adapter) \
	(adapter->version >= VMXNET3_REV_5 + 1)
#define VMXNET3_VERSION_GE_6(adapter) \
	(adapter->version >= VMXNET3_REV_6 + 1)
#define VMXNET3_VERSION_GE_7(adapter) \
	(adapter->version >= VMXNET3_REV_7 + 1)
#define VMXNET3_VERSION_GE_8(adapter) \
	(adapter->version >= VMXNET3_REV_8 + 1)
#define VMXNET3_VERSION_GE_9(adapter) \
	(adapter->version >= VMXNET3_REV_9 + 1)
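/*
 * adapter->version stores the negotiated device revision as a 1-based
 * number, while the VMXNET3_REV_* constants are 0-based; hence the "+ 1"
 * in the VMXNET3_VERSION_GE_*() checks above (a rev. 7 device has
 * adapter->version == 7 and VMXNET3_REV_7 == 6).
 */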

/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE    512
#define VMXNET3_DEF_RX_RING_SIZE    1024
#define VMXNET3_DEF_RX_RING2_SIZE   512

#define VMXNET3_DEF_RXDATA_DESC_SIZE 128

#define VMXNET3_MAX_ETH_HDR_SIZE    22
#define VMXNET3_MAX_SKB_BUF_SIZE    (3*1024)

#define VMXNET3_GET_RING_IDX(adapter, rqID)		\
	((rqID >= adapter->num_rx_queues &&		\
	 rqID < 2 * adapter->num_rx_queues) ? 1 : 0)	\

#define VMXNET3_RX_DATA_RING(adapter, rqID)		\
	(rqID >= 2 * adapter->num_rx_queues &&		\
	rqID < 3 * adapter->num_rx_queues)		\

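/*
 * rqID numbering behind VMXNET3_GET_RING_IDX() and VMXNET3_RX_DATA_RING()
 * above: with N RX queues, receive completions carry rqID 0..N-1 for
 * buffers from rx_ring[0], N..2N-1 for rx_ring[1], and 2N..3N-1 for the
 * RX data ring.
 */
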
#define VMXNET3_COAL_STATIC_DEFAULT_DEPTH	64

#define VMXNET3_COAL_RBC_RATE(usecs) (1000000 / usecs)
#define VMXNET3_COAL_RBC_USECS(rbc_rate) (1000000 / rbc_rate)
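/*
 * VMXNET3_COAL_RBC_RATE() and VMXNET3_COAL_RBC_USECS() above convert
 * between a rate-based-coalescing event rate (events per second) and its
 * period in microseconds; the mapping is a simple reciprocal, e.g.
 * VMXNET3_COAL_RBC_RATE(64) == 15625 and VMXNET3_COAL_RBC_USECS(15625) == 64.
 * Integer division means the round trip is only exact when the value
 * divides 1000000 evenly.
 */
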
#define VMXNET3_RSS_FIELDS_DEFAULT (VMXNET3_RSS_FIELDS_TCPIP4 | \
				    VMXNET3_RSS_FIELDS_TCPIP6)

int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);

int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter);

void
vmxnet3_force_close(struct vmxnet3_adapter *adapter);

void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);

void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);

void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);

int
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter);

void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter);

netdev_features_t
vmxnet3_fix_features(struct net_device *netdev, netdev_features_t features);

netdev_features_t
vmxnet3_features_check(struct sk_buff *skb,
		       struct net_device *netdev, netdev_features_t features);

int
vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);

int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
		      u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size,
		      u16 txdata_desc_size, u16 rxdata_desc_size);

void vmxnet3_set_ethtool_ops(struct net_device *netdev);

void vmxnet3_get_stats64(struct net_device *dev,
			 struct rtnl_link_stats64 *stats);
bool vmxnet3_check_ptcapability(u32 cap_supported, u32 cap);

extern char vmxnet3_driver_name[];
#endif