/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */

/* Header file for Gigabit Ethernet driver for Mellanox BlueField SoC
 * - this file contains software data structures and any chip-specific
 * data structures (e.g. TX WQE format) that are memory resident.
 *
 * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
 */

#ifndef __MLXBF_GIGE_H__
#define __MLXBF_GIGE_H__

#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/irqreturn.h>
#include <linux/netdevice.h>
#include <linux/irq.h>
#include <linux/phy.h>

/* The silicon design supports a maximum RX ring size of
 * 32K entries. Based on current testing this maximum size
 * is not required to be supported. Instead the RX ring
 * will be capped at a realistic value of 1024 entries.
 */
#define MLXBF_GIGE_MIN_RXQ_SZ 32
#define MLXBF_GIGE_MAX_RXQ_SZ 1024
#define MLXBF_GIGE_DEFAULT_RXQ_SZ 128

#define MLXBF_GIGE_MIN_TXQ_SZ 4
#define MLXBF_GIGE_MAX_TXQ_SZ 256
#define MLXBF_GIGE_DEFAULT_TXQ_SZ 128

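/* Illustrative sketch, not part of the driver API: a hypothetical helper
 * showing how a requested RX ring size could be checked against the bounds
 * defined above before being programmed.
 */
static inline bool mlxbf_gige_rxq_sz_in_range(unsigned int sz)
{
	return sz >= MLXBF_GIGE_MIN_RXQ_SZ && sz <= MLXBF_GIGE_MAX_RXQ_SZ;
}
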
#define MLXBF_GIGE_DEFAULT_BUF_SZ 2048

#define MLXBF_GIGE_DMA_PAGE_SZ 4096
#define MLXBF_GIGE_DMA_PAGE_SHIFT 12

/* There are four individual MAC RX filters. Currently
 * two of them are being used: one for the broadcast MAC
 * (index 0) and one for local MAC (index 1)
 */
#define MLXBF_GIGE_BCAST_MAC_FILTER_IDX 0
#define MLXBF_GIGE_LOCAL_MAC_FILTER_IDX 1
#define MLXBF_GIGE_MAX_FILTER_IDX 3

/* Define for broadcast MAC literal */
#define BCAST_MAC_ADDR 0xFFFFFFFFFFFF
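
/* A minimal usage sketch (assumption, not copied from the driver): the two
 * filter slots described above would be programmed through the helper
 * declared later in this file, where local_mac stands for the port's own
 * unicast address:
 *
 *	mlxbf_gige_set_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX,
 *				     BCAST_MAC_ADDR);
 *	mlxbf_gige_set_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX,
 *				     local_mac);
 */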

/* There are three individual interrupts:
 * 1) Errors, "OOB" interrupt line
 * 2) Receive Packet, "OOB_LLU" interrupt line
 * 3) LLU and PLU Events, "OOB_PLU" interrupt line
 */
#define MLXBF_GIGE_ERROR_INTR_IDX 0
#define MLXBF_GIGE_RECEIVE_PKT_INTR_IDX 1
#define MLXBF_GIGE_LLU_PLU_INTR_IDX 2
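
/* A minimal usage sketch (assumption): these indices are intended to be
 * used with platform_get_irq() when looking up the three interrupt lines:
 *
 *	priv->error_irq = platform_get_irq(pdev, MLXBF_GIGE_ERROR_INTR_IDX);
 *	priv->rx_irq = platform_get_irq(pdev, MLXBF_GIGE_RECEIVE_PKT_INTR_IDX);
 *	priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX);
 */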

struct mlxbf_gige_stats {
	u64 hw_access_errors;
	u64 tx_invalid_checksums;
	u64 tx_small_frames;
	u64 tx_index_errors;
	u64 sw_config_errors;
	u64 sw_access_errors;
	u64 rx_truncate_errors;
	u64 rx_mac_errors;
	u64 rx_din_dropped_pkts;
	u64 tx_fifo_full;
	u64 rx_filter_passed_pkts;
	u64 rx_filter_discard_pkts;
};

struct mlxbf_gige_reg_param {
	u32 mask;
	u32 shift;
};

struct mlxbf_gige_mdio_gw {
	u32 gw_address;
	u32 read_data_address;
	struct mlxbf_gige_reg_param busy;
	struct mlxbf_gige_reg_param write_data;
	struct mlxbf_gige_reg_param read_data;
	struct mlxbf_gige_reg_param devad;
	struct mlxbf_gige_reg_param partad;
	struct mlxbf_gige_reg_param opcode;
	struct mlxbf_gige_reg_param st1;
};
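
/* Illustrative sketch, not part of the driver API: a hypothetical helper
 * showing one way a mask/shift pair from struct mlxbf_gige_reg_param could
 * be used to place a field into an MDIO gateway command word. This assumes
 * the mask describes the field in its unshifted (right-aligned) position;
 * the actual register access code may use a different convention.
 */
static inline u32 mlxbf_gige_reg_param_prep(const struct mlxbf_gige_reg_param *p,
					    u32 val)
{
	return (val & p->mask) << p->shift;
}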

struct mlxbf_gige_link_cfg {
	void (*set_phy_link_mode)(struct phy_device *phydev);
	void (*adjust_link)(struct net_device *netdev);
	phy_interface_t phy_mode;
};

struct mlxbf_gige {
	void __iomem *base;
	void __iomem *llu_base;
	void __iomem *plu_base;
	struct device *dev;
	struct net_device *netdev;
	struct platform_device *pdev;
	void __iomem *mdio_io;
	void __iomem *clk_io;
	struct mii_bus *mdiobus;
	spinlock_t lock; /* for packet processing indices */
	u16 rx_q_entries;
	u16 tx_q_entries;
	u64 *tx_wqe_base;
	dma_addr_t tx_wqe_base_dma;
	u64 *tx_wqe_next;
	u64 *tx_cc;
	dma_addr_t tx_cc_dma;
	dma_addr_t *rx_wqe_base;
	dma_addr_t rx_wqe_base_dma;
	u64 *rx_cqe_base;
	dma_addr_t rx_cqe_base_dma;
	u16 tx_pi;
	u16 prev_tx_ci;
	struct sk_buff *rx_skb[MLXBF_GIGE_MAX_RXQ_SZ];
	struct sk_buff *tx_skb[MLXBF_GIGE_MAX_TXQ_SZ];
	int error_irq;
	int rx_irq;
	int llu_plu_irq;
	int phy_irq;
	int hw_phy_irq;
	bool promisc_enabled;
	u8 valid_polarity;
	struct napi_struct napi;
	struct mlxbf_gige_stats stats;
	u8 hw_version;
	struct mlxbf_gige_mdio_gw *mdio_gw;
	int prev_speed;
};

/* Rx Work Queue Element definitions */
#define MLXBF_GIGE_RX_WQE_SZ 8

/* Rx Completion Queue Element definitions */
#define MLXBF_GIGE_RX_CQE_SZ 8
#define MLXBF_GIGE_RX_CQE_PKT_LEN_MASK GENMASK(10, 0)
#define MLXBF_GIGE_RX_CQE_VALID_MASK GENMASK(11, 11)
#define MLXBF_GIGE_RX_CQE_PKT_STATUS_MASK GENMASK(15, 12)
#define MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR GENMASK(12, 12)
#define MLXBF_GIGE_RX_CQE_PKT_STATUS_TRUNCATED GENMASK(13, 13)
#define MLXBF_GIGE_RX_CQE_CHKSUM_MASK GENMASK(31, 16)
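
/* Illustrative sketch, not part of the driver API: hypothetical helpers
 * showing how the RX CQE masks above can be applied to a completed CQE to
 * recover the packet length and the valid bit.
 */
static inline u32 mlxbf_gige_rx_cqe_pkt_len(u64 cqe)
{
	return cqe & MLXBF_GIGE_RX_CQE_PKT_LEN_MASK;
}

static inline u8 mlxbf_gige_rx_cqe_valid_bit(u64 cqe)
{
	return !!(cqe & MLXBF_GIGE_RX_CQE_VALID_MASK);
}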

/* Tx Work Queue Element definitions */
#define MLXBF_GIGE_TX_WQE_SZ_QWORDS 2
#define MLXBF_GIGE_TX_WQE_SZ 16
#define MLXBF_GIGE_TX_WQE_PKT_LEN_MASK GENMASK(10, 0)
#define MLXBF_GIGE_TX_WQE_UPDATE_MASK GENMASK(31, 31)
#define MLXBF_GIGE_TX_WQE_CHKSUM_LEN_MASK GENMASK(42, 32)
#define MLXBF_GIGE_TX_WQE_CHKSUM_START_MASK GENMASK(55, 48)
#define MLXBF_GIGE_TX_WQE_CHKSUM_OFFSET_MASK GENMASK(63, 56)

/* Macro to return packet length of specified TX WQE */
#define MLXBF_GIGE_TX_WQE_PKT_LEN(tx_wqe_addr) \
	(*((tx_wqe_addr) + 1) & MLXBF_GIGE_TX_WQE_PKT_LEN_MASK)
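
/* A minimal sketch (assumption, not the driver's transmit path) of how the
 * control qword of a TX WQE might be composed from the masks above using
 * FIELD_PREP() from <linux/bitfield.h>; csum_start and csum_offset are
 * hypothetical local variables:
 *
 *	word2 = FIELD_PREP(MLXBF_GIGE_TX_WQE_PKT_LEN_MASK, skb->len) |
 *		FIELD_PREP(MLXBF_GIGE_TX_WQE_CHKSUM_START_MASK, csum_start) |
 *		FIELD_PREP(MLXBF_GIGE_TX_WQE_CHKSUM_OFFSET_MASK, csum_offset);
 */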

/* Tx Completion Count */
#define MLXBF_GIGE_TX_CC_SZ 8

/* List of resources in ACPI table */
enum mlxbf_gige_res {
	MLXBF_GIGE_RES_MAC,
	MLXBF_GIGE_RES_MDIO9,
	MLXBF_GIGE_RES_GPIO0,
	MLXBF_GIGE_RES_LLU,
	MLXBF_GIGE_RES_PLU,
	MLXBF_GIGE_RES_CLK
};
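
/* A minimal usage sketch (assumption): these enum values are meant to be
 * used as indices into the ACPI-described memory resources, e.g.
 *
 *	priv->base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC);
 *	priv->llu_base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_LLU);
 */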

/* Version of register data returned by mlxbf_gige_get_regs() */
#define MLXBF_GIGE_REGS_VERSION 1

int mlxbf_gige_mdio_probe(struct platform_device *pdev,
			  struct mlxbf_gige *priv);
void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv);

void mlxbf_gige_enable_multicast_rx(struct mlxbf_gige *priv);
void mlxbf_gige_disable_multicast_rx(struct mlxbf_gige *priv);
void mlxbf_gige_enable_mac_rx_filter(struct mlxbf_gige *priv,
				     unsigned int index);
void mlxbf_gige_disable_mac_rx_filter(struct mlxbf_gige *priv,
				      unsigned int index);
void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
				  unsigned int index, u64 dmac);
void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
				  unsigned int index, u64 *dmac);
void mlxbf_gige_enable_promisc(struct mlxbf_gige *priv);
void mlxbf_gige_disable_promisc(struct mlxbf_gige *priv);
int mlxbf_gige_rx_init(struct mlxbf_gige *priv);
void mlxbf_gige_rx_deinit(struct mlxbf_gige *priv);
int mlxbf_gige_tx_init(struct mlxbf_gige *priv);
void mlxbf_gige_tx_deinit(struct mlxbf_gige *priv);
bool mlxbf_gige_handle_tx_complete(struct mlxbf_gige *priv);
netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
				  struct net_device *netdev);
struct sk_buff *mlxbf_gige_alloc_skb(struct mlxbf_gige *priv,
				     unsigned int map_len,
				     dma_addr_t *buf_dma,
				     enum dma_data_direction dir);
int mlxbf_gige_request_irqs(struct mlxbf_gige *priv);
void mlxbf_gige_free_irqs(struct mlxbf_gige *priv);
int mlxbf_gige_poll(struct napi_struct *napi, int budget);
extern const struct ethtool_ops mlxbf_gige_ethtool_ops;
void mlxbf_gige_update_tx_wqe_next(struct mlxbf_gige *priv);

#endif /* !defined(__MLXBF_GIGE_H__) */