/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
30
#ifndef __DPAA_H
#define __DPAA_H

#include <linux/netdevice.h>
#include <soc/fsl/qman.h>
#include <soc/fsl/bman.h>

#include "fman.h"
#include "mac.h"
#include "dpaa_eth_trace.h"

/* Number of prioritised traffic classes */
#define DPAA_TC_NUM		4
/* Number of Tx queues per traffic class */
#define DPAA_TC_TXQ_NUM		NR_CPUS
/* Total number of Tx queues */
#define DPAA_ETH_TXQ_NUM	(DPAA_TC_NUM * DPAA_TC_TXQ_NUM)

#define DPAA_BPS_NUM 3 /* number of bpools per interface */
50
/* More detailed FQ types - used for fine-grained WQ assignments.
 * Values start at 1; presumably 0 is reserved to mean "unset" - confirm.
 */
enum dpaa_fq_type {
	FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
	FQ_TYPE_RX_ERROR,	/* Rx Error FQs */
	FQ_TYPE_RX_PCD,		/* Rx Parse Classify Distribute FQs */
	FQ_TYPE_TX,		/* "Real" Tx FQs */
	FQ_TYPE_TX_CONFIRM,	/* Tx default Conf FQ (actually an Rx FQ) */
	FQ_TYPE_TX_CONF_MQ,	/* Tx conf FQs (one for each Tx FQ) */
	FQ_TYPE_TX_ERROR,	/* Tx Error FQs (these are actually Rx FQs) */
};
61
/* Driver-private descriptor for one frame queue, wrapping the QMan FQ
 * object with the state needed to set it up and track it.
 */
struct dpaa_fq {
	/* Embedded QMan FQ. NOTE(review): kept as first member, presumably so
	 * a qman_fq pointer can be converted back to a dpaa_fq - confirm.
	 */
	struct qman_fq fq_base;
	/* list node (see dpaa_priv.dpaa_fq_list) */
	struct list_head list;
	/* net device this FQ belongs to */
	struct net_device *net_dev;
	/* true once the FQ has been initialized */
	bool init;
	/* frame queue id */
	u32 fqid;
	/* FQ setup flags */
	u32 flags;
	/* QMan channel the FQ is scheduled to */
	u16 channel;
	/* work queue priority within the channel */
	u8 wq;
	/* role of this FQ (see enum dpaa_fq_type) */
	enum dpaa_fq_type fq_type;
};
73
/* One template QMan FQ object per FQ role; NOTE(review): presumably these
 * carry the driver callbacks copied into each dpaa_fq at init - confirm.
 */
struct dpaa_fq_cbs {
	struct qman_fq rx_defq;		/* Rx default */
	struct qman_fq tx_defq;		/* Tx confirmation (default) */
	struct qman_fq rx_errq;		/* Rx error */
	struct qman_fq tx_errq;		/* Tx error */
	struct qman_fq egress_ern;	/* egress enqueue rejection (ERN) */
};
81
/* Bookkeeping for one BMan buffer pool used by the interface */
struct dpaa_bp {
	/* device used in the DMA mapping operations */
	struct device *dev;
	/* current number of buffers in the buffer pool alloted to each CPU */
	int __percpu *percpu_count;
	/* all buffers allocated for this pool have this raw size */
	size_t raw_size;
	/* all buffers in this pool have this same usable size */
	size_t size;
	/* the buffer pools are initialized with config_count buffers for each
	 * CPU; at runtime the number of buffers per CPU is constantly brought
	 * back to this level
	 */
	u16 config_count;
	/* hardware buffer pool id */
	u8 bpid;
	/* BMan pool handle */
	struct bman_pool *pool;
	/* bpool can be seeded before use by this cb */
	int (*seed_cb)(struct dpaa_bp *);
	/* bpool can be emptied before freeing by this cb */
	void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *);
	/* reference count on the pool */
	atomic_t refs;
};
104
/* Counters for Rx error frames, one per hardware-reported error type */
struct dpaa_rx_errors {
	u64 dme; /* DMA Error */
	u64 fpe; /* Frame Physical Error */
	u64 fse; /* Frame Size Error */
	u64 phe; /* Header Error */
};
111
/* Counters for QMan ERN (enqueue rejection notification) frames -
 * one counter per rejection code
 */
struct dpaa_ern_cnt {
	u64 cg_tdrop;		/* Congestion group taildrop */
	u64 wred;		/* WRED congestion */
	u64 err_cond;		/* Error condition */
	u64 early_window;	/* Order restoration, frame too early */
	u64 late_window;	/* Order restoration, frame too late */
	u64 fq_tdrop;		/* FQ taildrop */
	u64 fq_retired;		/* FQ is retired */
	u64 orp_zero;		/* ORP disabled */
};
123
/* NAPI context tied to one QMan portal */
struct dpaa_napi_portal {
	struct napi_struct napi;
	/* portal serviced by this NAPI instance */
	struct qman_portal *p;
	/* NOTE(review): presumably set while the interface is down so the
	 * poll loop can bail out - confirm against dpaa_eth.c
	 */
	bool down;
};
129
/* Per-CPU driver state and statistics */
struct dpaa_percpu_priv {
	struct net_device *net_dev;
	/* NAPI/portal context for this CPU */
	struct dpaa_napi_portal np;
	/* counter: times the portal ISR/poll ran on this CPU - confirm */
	u64 in_interrupt;
	/* counter: Tx confirmation frames processed */
	u64 tx_confirm;
	/* fragmented (non-linear) skbuffs received from the stack */
	u64 tx_frag_skbuffs;
	/* standard netdev stats, accumulated per CPU */
	struct rtnl_link_stats64 stats;
	struct dpaa_rx_errors rx_errors;
	struct dpaa_ern_cnt ern_cnt;
};
141
/* Frame buffer layout parameters; priv_data_size is the amount of
 * headroom reserved for driver-private data - presumably feeding the
 * tx_headroom/rx_headroom computation in dpaa_priv - confirm.
 */
struct dpaa_buffer_layout {
	u16 priv_data_size;
};
145
/* Driver-private data for one DPAA Ethernet interface (one per net_device) */
struct dpaa_priv {
	struct dpaa_percpu_priv __percpu *percpu_priv;
	/* buffer pools backing this interface */
	struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM];
	/* Store here the needed Tx headroom for convenience and speed
	 * (even though it can be computed based on the fields of buf_layout)
	 */
	u16 tx_headroom;
	struct net_device *net_dev;
	struct mac_device *mac_dev;
	/* one egress FQ and one conf FQ per Tx queue */
	struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
	struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];

	/* QMan channel for this interface - confirm usage in dpaa_eth.c */
	u16 channel;
	/* list of all dpaa_fq instances belonging to this interface */
	struct list_head dpaa_fq_list;

	/* number of traffic classes in use (at most DPAA_TC_NUM) */
	u8 num_tc;
	bool keygen_in_use;
	u32 msg_enable;	/* net_device message level */

	struct {
		/* All egress queues to a given net device belong to one
		 * (and the same) congestion group.
		 */
		struct qman_cgr cgr;
		/* If congested, when it began. Used for performance stats. */
		u32 congestion_start_jiffies;
		/* Number of jiffies the Tx port was congested. */
		u32 congested_jiffies;
		/* Counter for the number of times the CGR
		 * entered congestion state
		 */
		u32 cgr_congested_count;
	} cgr_data;
	/* Use a per-port CGR for ingress traffic. */
	bool use_ingress_cgr;
	struct qman_cgr ingress_cgr;

	/* NOTE(review): two entries, presumably [Rx] and [Tx] - confirm
	 * which index is which against dpaa_eth.c
	 */
	struct dpaa_buffer_layout buf_layout[2];
	u16 rx_headroom;
};
186
/* from dpaa_ethtool.c */
extern const struct ethtool_ops dpaa_ethtool_ops;

/* from dpaa_eth_sysfs.c */
void dpaa_eth_sysfs_remove(struct device *dev);
void dpaa_eth_sysfs_init(struct device *dev);
#endif	/* __DPAA_H */
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
 * Copyright 2008 - 2016 Freescale Semiconductor Inc.
 *
 * NOTE(review): everything from here to the end of the file is a second,
 * newer copy of this header appended onto the first; the include guard
 * opened above means this copy preprocesses to nothing. The two copies
 * should be reconciled into a single header.
 */
5
#ifndef __DPAA_H
#define __DPAA_H

#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <net/xdp.h>
#include <soc/fsl/qman.h>
#include <soc/fsl/bman.h>

#include "fman.h"
#include "mac.h"
#include "dpaa_eth_trace.h"

/* Number of prioritised traffic classes */
#define DPAA_TC_NUM		4
/* More detailed FQ types - used for fine-grained WQ assignments.
 * Values start at 1; presumably 0 is reserved to mean "unset" - confirm.
 */
enum dpaa_fq_type {
	FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
	FQ_TYPE_RX_ERROR,	/* Rx Error FQs */
	FQ_TYPE_RX_PCD,		/* Rx Parse Classify Distribute FQs */
	FQ_TYPE_TX,		/* "Real" Tx FQs */
	FQ_TYPE_TX_CONFIRM,	/* Tx default Conf FQ (actually an Rx FQ) */
	FQ_TYPE_TX_CONF_MQ,	/* Tx conf FQs (one for each Tx FQ) */
	FQ_TYPE_TX_ERROR,	/* Tx Error FQs (these are actually Rx FQs) */
};
32
/* Driver-private descriptor for one frame queue, wrapping the QMan FQ
 * object with the state needed to set it up and track it.
 */
struct dpaa_fq {
	/* Embedded QMan FQ. NOTE(review): kept as first member, presumably so
	 * a qman_fq pointer can be converted back to a dpaa_fq - confirm.
	 */
	struct qman_fq fq_base;
	/* list node (see dpaa_priv.dpaa_fq_list) */
	struct list_head list;
	/* net device this FQ belongs to */
	struct net_device *net_dev;
	/* true once the FQ has been initialized */
	bool init;
	/* frame queue id */
	u32 fqid;
	/* FQ setup flags */
	u32 flags;
	/* QMan channel the FQ is scheduled to */
	u16 channel;
	/* work queue priority within the channel */
	u8 wq;
	/* role of this FQ (see enum dpaa_fq_type) */
	enum dpaa_fq_type fq_type;
	/* XDP Rx queue info, for Rx FQs with an XDP program attached */
	struct xdp_rxq_info xdp_rxq;
};
45
/* One template QMan FQ object per FQ role; NOTE(review): presumably these
 * carry the driver callbacks copied into each dpaa_fq at init - confirm.
 */
struct dpaa_fq_cbs {
	struct qman_fq rx_defq;		/* Rx default */
	struct qman_fq tx_defq;		/* Tx confirmation (default) */
	struct qman_fq rx_errq;		/* Rx error */
	struct qman_fq tx_errq;		/* Tx error */
	struct qman_fq egress_ern;	/* egress enqueue rejection (ERN) */
};
53
/* forward declaration: dpaa_bp below holds a back-pointer to it */
struct dpaa_priv;

/* Bookkeeping for one BMan buffer pool used by the interface */
struct dpaa_bp {
	/* used in the DMA mapping operations */
	struct dpaa_priv *priv;
	/* current number of buffers in the buffer pool alloted to each CPU */
	int __percpu *percpu_count;
	/* all buffers allocated for this pool have this raw size */
	size_t raw_size;
	/* all buffers in this pool have this same usable size */
	size_t size;
	/* the buffer pools are initialized with config_count buffers for each
	 * CPU; at runtime the number of buffers per CPU is constantly brought
	 * back to this level
	 */
	u16 config_count;
	/* hardware buffer pool id */
	u8 bpid;
	/* BMan pool handle */
	struct bman_pool *pool;
	/* bpool can be seeded before use by this cb */
	int (*seed_cb)(struct dpaa_bp *);
	/* bpool can be emptied before freeing by this cb */
	void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *);
	/* reference count on the pool (saturating, overflow-safe) */
	refcount_t refs;
};
78
/* Counters for Rx error frames, one per hardware-reported error type */
struct dpaa_rx_errors {
	u64 dme; /* DMA Error */
	u64 fpe; /* Frame Physical Error */
	u64 fse; /* Frame Size Error */
	u64 phe; /* Header Error */
};
85
/* Counters for QMan ERN (enqueue rejection notification) frames -
 * one counter per rejection code
 */
struct dpaa_ern_cnt {
	u64 cg_tdrop;		/* Congestion group taildrop */
	u64 wred;		/* WRED congestion */
	u64 err_cond;		/* Error condition */
	u64 early_window;	/* Order restoration, frame too early */
	u64 late_window;	/* Order restoration, frame too late */
	u64 fq_tdrop;		/* FQ taildrop */
	u64 fq_retired;		/* FQ is retired */
	u64 orp_zero;		/* ORP disabled */
};
97
/* NAPI context tied to one QMan portal */
struct dpaa_napi_portal {
	struct napi_struct napi;
	/* portal serviced by this NAPI instance */
	struct qman_portal *p;
	/* NOTE(review): presumably set while the interface is down so the
	 * poll loop can bail out - confirm against dpaa_eth.c
	 */
	bool down;
	/* XDP actions seen during the current poll cycle - confirm */
	int xdp_act;
};
104
/* Per-CPU driver state and statistics */
struct dpaa_percpu_priv {
	struct net_device *net_dev;
	/* NAPI/portal context for this CPU */
	struct dpaa_napi_portal np;
	/* counter: times the portal ISR/poll ran on this CPU - confirm */
	u64 in_interrupt;
	/* counter: Tx confirmation frames processed */
	u64 tx_confirm;
	/* fragmented (non-linear) skbuffs received from the stack */
	u64 tx_frag_skbuffs;
	/* standard netdev stats, accumulated per CPU */
	struct rtnl_link_stats64 stats;
	struct dpaa_rx_errors rx_errors;
	struct dpaa_ern_cnt ern_cnt;
};
116
/* Frame buffer layout parameters; priv_data_size is the amount of
 * headroom reserved for driver-private data - presumably feeding the
 * tx_headroom/rx_headroom computation in dpaa_priv - confirm.
 */
struct dpaa_buffer_layout {
	u16 priv_data_size;
};
120
/* Information to be used on the Tx confirmation path. Stored just
 * before the start of the transmit buffer. Maximum size allowed
 * is DPAA_TX_PRIV_DATA_SIZE bytes.
 */
struct dpaa_eth_swbp {
	/* skb being transmitted; NOTE(review): presumably NULL for frames
	 * originating from XDP rather than the stack - confirm
	 */
	struct sk_buff *skb;
	/* XDP frame being transmitted, for the XDP Tx paths */
	struct xdp_frame *xdpf;
};
129
/* Driver-private data for one DPAA Ethernet interface (one per net_device) */
struct dpaa_priv {
	struct dpaa_percpu_priv __percpu *percpu_priv;
	/* single buffer pool backing this interface */
	struct dpaa_bp *dpaa_bp;
	/* Store here the needed Tx headroom for convenience and speed
	 * (even though it can be computed based on the fields of buf_layout)
	 */
	u16 tx_headroom;
	struct net_device *net_dev;
	struct mac_device *mac_dev;
	/* separate devices used for Rx and Tx DMA mappings */
	struct device *rx_dma_dev;
	struct device *tx_dma_dev;
	/* dynamically allocated arrays of egress/confirmation FQ pointers;
	 * NOTE(review): presumably sized via dpaa_max_num_txqs() - confirm
	 */
	struct qman_fq **egress_fqs;
	struct qman_fq **conf_fqs;

	/* QMan channel for this interface - confirm usage in dpaa_eth.c */
	u16 channel;
	/* list of all dpaa_fq instances belonging to this interface */
	struct list_head dpaa_fq_list;

	/* number of traffic classes in use (at most DPAA_TC_NUM) */
	u8 num_tc;
	bool keygen_in_use;
	u32 msg_enable;	/* net_device message level */

	struct {
		/* All egress queues to a given net device belong to one
		 * (and the same) congestion group.
		 */
		struct qman_cgr cgr;
		/* If congested, when it began. Used for performance stats. */
		u32 congestion_start_jiffies;
		/* Number of jiffies the Tx port was congested. */
		u32 congested_jiffies;
		/* Counter for the number of times the CGR
		 * entered congestion state
		 */
		u32 cgr_congested_count;
	} cgr_data;
	/* Use a per-port CGR for ingress traffic. */
	bool use_ingress_cgr;
	struct qman_cgr ingress_cgr;

	/* NOTE(review): two entries, presumably [Rx] and [Tx] - confirm
	 * which index is which against dpaa_eth.c
	 */
	struct dpaa_buffer_layout buf_layout[2];
	u16 rx_headroom;

	bool tx_tstamp;	/* Tx timestamping enabled */
	bool rx_tstamp;	/* Rx timestamping enabled */

	/* attached XDP program, or NULL when XDP is not in use */
	struct bpf_prog *xdp_prog;
};
177
/* from dpaa_ethtool.c */
extern const struct ethtool_ops dpaa_ethtool_ops;

/* from dpaa_eth_sysfs.c */
void dpaa_eth_sysfs_remove(struct device *dev);
void dpaa_eth_sysfs_init(struct device *dev);

185static inline size_t dpaa_num_txqs_per_tc(void)
186{
187 return num_possible_cpus();
188}
189
190/* Total number of Tx queues */
191static inline size_t dpaa_max_num_txqs(void)
192{
193 return DPAA_TC_NUM * dpaa_num_txqs_per_tc();
194}
195
#endif /* __DPAA_H */