v5.14.15
  1/* Copyright 2008-2016 Freescale Semiconductor, Inc.
  2 *
  3 * Redistribution and use in source and binary forms, with or without
  4 * modification, are permitted provided that the following conditions are met:
  5 *     * Redistributions of source code must retain the above copyright
  6 *	 notice, this list of conditions and the following disclaimer.
  7 *     * Redistributions in binary form must reproduce the above copyright
  8 *	 notice, this list of conditions and the following disclaimer in the
  9 *	 documentation and/or other materials provided with the distribution.
 10 *     * Neither the name of Freescale Semiconductor nor the
 11 *	 names of its contributors may be used to endorse or promote products
 12 *	 derived from this software without specific prior written permission.
 13 *
 14 *
 15 * ALTERNATIVELY, this software may be distributed under the terms of the
 16 * GNU General Public License ("GPL") as published by the Free Software
 17 * Foundation, either version 2 of that License or (at your option) any
 18 * later version.
 19 *
 20 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 21 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 22 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 23 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 24 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 25 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 27 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 29 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 30 */
 31
 32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 33
 34#include <linux/string.h>
 35#include <linux/of_platform.h>
 36#include <linux/net_tstamp.h>
 37#include <linux/fsl/ptp_qoriq.h>
 38
 39#include "dpaa_eth.h"
 40#include "mac.h"
 41
 42static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
 43	"interrupts",
 44	"rx packets",
 45	"tx packets",
 46	"tx confirm",
 47	"tx S/G",
 48	"tx error",
 49	"rx error",
 50	"rx dropped",
 51	"tx dropped",
 52};
 53
 54static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
 55	/* dpa rx errors */
 56	"rx dma error",
 57	"rx frame physical error",
 58	"rx frame size error",
 59	"rx header error",
 60
 61	/* demultiplexing errors */
 62	"qman cg_tdrop",
 63	"qman wred",
 64	"qman error cond",
 65	"qman early window",
 66	"qman late window",
 67	"qman fq tdrop",
 68	"qman fq retired",
 69	"qman orp disabled",
 70
 71	/* congestion related stats */
 72	"congestion time (ms)",
 73	"entered congestion",
 74	"congested (0/1)"
 75};
 76
 77#define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu)
 78#define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global)
 79
 80static int dpaa_get_link_ksettings(struct net_device *net_dev,
 81				   struct ethtool_link_ksettings *cmd)
 82{
 83	if (!net_dev->phydev)
 84		return 0;
 85
 86	phy_ethtool_ksettings_get(net_dev->phydev, cmd);
 87
 88	return 0;
 89}
 90
 91static int dpaa_set_link_ksettings(struct net_device *net_dev,
 92				   const struct ethtool_link_ksettings *cmd)
 93{
 94	int err;
 95
 96	if (!net_dev->phydev)
 97		return -ENODEV;
 98
 99	err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
100	if (err < 0)
101		netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err);
102
103	return err;
104}
105
106static void dpaa_get_drvinfo(struct net_device *net_dev,
107			     struct ethtool_drvinfo *drvinfo)
108{
109	strlcpy(drvinfo->driver, KBUILD_MODNAME,
110		sizeof(drvinfo->driver));
111	strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
112		sizeof(drvinfo->bus_info));
113}
114
115static u32 dpaa_get_msglevel(struct net_device *net_dev)
116{
117	return ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable;
118}
119
120static void dpaa_set_msglevel(struct net_device *net_dev,
121			      u32 msg_enable)
122{
123	((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable = msg_enable;
124}
125
126static int dpaa_nway_reset(struct net_device *net_dev)
127{
128	int err;
129
130	if (!net_dev->phydev)
131		return -ENODEV;
132
133	err = 0;
134	if (net_dev->phydev->autoneg) {
135		err = phy_start_aneg(net_dev->phydev);
136		if (err < 0)
137			netdev_err(net_dev, "phy_start_aneg() = %d\n",
138				   err);
139	}
140
141	return err;
142}
143
144static void dpaa_get_pauseparam(struct net_device *net_dev,
145				struct ethtool_pauseparam *epause)
146{
147	struct mac_device *mac_dev;
148	struct dpaa_priv *priv;
149
150	priv = netdev_priv(net_dev);
151	mac_dev = priv->mac_dev;
152
153	if (!net_dev->phydev)
154		return;
155
156	epause->autoneg = mac_dev->autoneg_pause;
157	epause->rx_pause = mac_dev->rx_pause_active;
158	epause->tx_pause = mac_dev->tx_pause_active;
159}
160
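/* Pause frames: dpaa_set_pauseparam() validates the request against the
 * PHY's pause capabilities, records it in mac_dev (autoneg_pause,
 * rx/tx_pause_req), updates the PHY advertisement through
 * phy_set_asym_pause(), then reads back the resolved settings with
 * fman_get_pause_cfg() and programs them into the MAC via
 * fman_set_mac_active_pause().
 */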
161static int dpaa_set_pauseparam(struct net_device *net_dev,
162			       struct ethtool_pauseparam *epause)
163{
164	struct mac_device *mac_dev;
165	struct phy_device *phydev;
166	bool rx_pause, tx_pause;
167	struct dpaa_priv *priv;
168	int err;
169
170	priv = netdev_priv(net_dev);
171	mac_dev = priv->mac_dev;
172
173	phydev = net_dev->phydev;
174	if (!phydev) {
175		netdev_err(net_dev, "phy device not initialized\n");
176		return -ENODEV;
177	}
178
179	if (!phy_validate_pause(phydev, epause))
180		return -EINVAL;
181
182	/* The MAC should know how to handle PAUSE frame autonegotiation before
183	 * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
184	 * settings.
185	 */
186	mac_dev->autoneg_pause = !!epause->autoneg;
187	mac_dev->rx_pause_req = !!epause->rx_pause;
188	mac_dev->tx_pause_req = !!epause->tx_pause;
189
190	/* Determine the sym/asym advertised PAUSE capabilities from the desired
191	 * rx/tx pause settings.
192	 */
193
194	phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
195
196	fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
197	err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
198	if (err < 0)
199		netdev_err(net_dev, "set_mac_active_pause() = %d\n", err);
200
201	return err;
202}
203
204static int dpaa_get_sset_count(struct net_device *net_dev, int type)
205{
206	unsigned int total_stats, num_stats;
207
208	num_stats   = num_online_cpus() + 1;
209	total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + 1) +
210			DPAA_STATS_GLOBAL_LEN;
211
212	switch (type) {
213	case ETH_SS_STATS:
214		return total_stats;
215	default:
216		return -EOPNOTSUPP;
217	}
218}
219
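/* Layout of the u64 data array filled in below: each per-CPU counter, plus
 * one extra row for the buffer pool count, takes num_online_cpus() + 1
 * consecutive slots (one value per online CPU followed by a running
 * [TOTAL]); the global statistics (rx errors, QMan ERN counters and the
 * congestion figures) follow at the end. This must stay in sync with
 * dpaa_get_sset_count() and dpaa_get_strings().
 */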
220static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
221		       int crr_cpu, u64 bp_count, u64 *data)
222{
223	int num_values = num_cpus + 1;
224	int crr = 0;
225
226	/* update current CPU's stats and also add them to the total values */
227	data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
228	data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;
229
230	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
231	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;
232
233	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
234	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;
235
236	data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
237	data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;
238
239	data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
240	data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
241
242	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
243	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;
244
245	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
246	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
247
248	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_dropped;
249	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_dropped;
250
251	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_dropped;
252	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_dropped;
253
254	data[crr * num_values + crr_cpu] = bp_count;
255	data[crr++ * num_values + num_cpus] += bp_count;
256}
257
258static void dpaa_get_ethtool_stats(struct net_device *net_dev,
259				   struct ethtool_stats *stats, u64 *data)
260{
261	struct dpaa_percpu_priv *percpu_priv;
262	struct dpaa_rx_errors rx_errors;
263	unsigned int num_cpus, offset;
264	u64 bp_count, cg_time, cg_num;
265	struct dpaa_ern_cnt ern_cnt;
266	struct dpaa_bp *dpaa_bp;
267	struct dpaa_priv *priv;
268	int total_stats, i;
269	bool cg_status;
270
271	total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
272	priv     = netdev_priv(net_dev);
273	num_cpus = num_online_cpus();
274
275	memset(&bp_count, 0, sizeof(bp_count));
276	memset(&rx_errors, 0, sizeof(struct dpaa_rx_errors));
277	memset(&ern_cnt, 0, sizeof(struct dpaa_ern_cnt));
278	memset(data, 0, total_stats * sizeof(u64));
279
280	for_each_online_cpu(i) {
281		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
282		dpaa_bp = priv->dpaa_bp;
283		if (!dpaa_bp->percpu_count)
284			continue;
285		bp_count = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
286		rx_errors.dme += percpu_priv->rx_errors.dme;
287		rx_errors.fpe += percpu_priv->rx_errors.fpe;
288		rx_errors.fse += percpu_priv->rx_errors.fse;
289		rx_errors.phe += percpu_priv->rx_errors.phe;
290
291		ern_cnt.cg_tdrop     += percpu_priv->ern_cnt.cg_tdrop;
292		ern_cnt.wred         += percpu_priv->ern_cnt.wred;
293		ern_cnt.err_cond     += percpu_priv->ern_cnt.err_cond;
294		ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
295		ern_cnt.late_window  += percpu_priv->ern_cnt.late_window;
296		ern_cnt.fq_tdrop     += percpu_priv->ern_cnt.fq_tdrop;
297		ern_cnt.fq_retired   += percpu_priv->ern_cnt.fq_retired;
298		ern_cnt.orp_zero     += percpu_priv->ern_cnt.orp_zero;
299
300		copy_stats(percpu_priv, num_cpus, i, bp_count, data);
301	}
302
303	offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + 1);
304	memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));
305
306	offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
307	memcpy(data + offset, &ern_cnt, sizeof(struct dpaa_ern_cnt));
308
309	/* gather congestion related counters */
310	cg_num    = 0;
311	cg_status = false;
312	cg_time   = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
313	if (qman_query_cgr_congested(&priv->cgr_data.cgr, &cg_status) == 0) {
314		cg_num    = priv->cgr_data.cgr_congested_count;
315
316		/* reset congestion stats (like the QMan API does) */
317		priv->cgr_data.congested_jiffies   = 0;
318		priv->cgr_data.cgr_congested_count = 0;
319	}
320
321	offset += sizeof(struct dpaa_ern_cnt) / sizeof(u64);
322	data[offset++] = cg_time;
323	data[offset++] = cg_num;
324	data[offset++] = cg_status;
325}
326
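/* The string table must be emitted in exactly the order in which
 * dpaa_get_ethtool_stats() fills the data array: each per-CPU counter
 * followed by its [TOTAL] column, then the per-CPU buffer pool counts and
 * their total, then the global statistics names.
 */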
327static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
328			     u8 *data)
329{
330	unsigned int i, j, num_cpus, size;
331	char string_cpu[ETH_GSTRING_LEN];
332	u8 *strings;
333
334	memset(string_cpu, 0, sizeof(string_cpu));
335	strings   = data;
336	num_cpus  = num_online_cpus();
337	size      = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
338
339	for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
340		for (j = 0; j < num_cpus; j++) {
341			snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
342				 dpaa_stats_percpu[i], j);
343			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
344			strings += ETH_GSTRING_LEN;
345		}
346		snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
347			 dpaa_stats_percpu[i]);
348		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
349		strings += ETH_GSTRING_LEN;
350	}
351	for (j = 0; j < num_cpus; j++) {
352		snprintf(string_cpu, ETH_GSTRING_LEN,
353			 "bpool [CPU %d]", j);
354		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
355		strings += ETH_GSTRING_LEN;
356	}
357	snprintf(string_cpu, ETH_GSTRING_LEN, "bpool [TOTAL]");
358	memcpy(strings, string_cpu, ETH_GSTRING_LEN);
359	strings += ETH_GSTRING_LEN;
360
361	memcpy(strings, dpaa_stats_global, size);
362}
363
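/* RX flow hashing is handled by the FMan KeyGen scheme on the Rx port and
 * is exposed as a single on/off switch (priv->keygen_in_use): while it is
 * enabled, frames are hashed on the IPv4/IPv6 source/destination addresses
 * and, for TCP/UDP, also on the L4 source/destination ports.
 */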
364static int dpaa_get_hash_opts(struct net_device *dev,
365			      struct ethtool_rxnfc *cmd)
366{
367	struct dpaa_priv *priv = netdev_priv(dev);
368
369	cmd->data = 0;
370
371	switch (cmd->flow_type) {
372	case TCP_V4_FLOW:
373	case TCP_V6_FLOW:
374	case UDP_V4_FLOW:
375	case UDP_V6_FLOW:
376		if (priv->keygen_in_use)
377			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
378		fallthrough;
379	case IPV4_FLOW:
380	case IPV6_FLOW:
381	case SCTP_V4_FLOW:
382	case SCTP_V6_FLOW:
383	case AH_ESP_V4_FLOW:
384	case AH_ESP_V6_FLOW:
385	case AH_V4_FLOW:
386	case AH_V6_FLOW:
387	case ESP_V4_FLOW:
388	case ESP_V6_FLOW:
389		if (priv->keygen_in_use)
390			cmd->data |= RXH_IP_SRC | RXH_IP_DST;
391		break;
392	default:
393		cmd->data = 0;
394		break;
395	}
396
397	return 0;
398}
399
400static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
401			  u32 *unused)
402{
403	int ret = -EOPNOTSUPP;
404
405	switch (cmd->cmd) {
406	case ETHTOOL_GRXFH:
407		ret = dpaa_get_hash_opts(dev, cmd);
408		break;
409	default:
410		break;
411	}
412
413	return ret;
414}
415
416static void dpaa_set_hash(struct net_device *net_dev, bool enable)
417{
418	struct mac_device *mac_dev;
419	struct fman_port *rxport;
420	struct dpaa_priv *priv;
421
422	priv = netdev_priv(net_dev);
423	mac_dev = priv->mac_dev;
424	rxport = mac_dev->port[0];
425
426	fman_port_use_kg_hash(rxport, enable);
427	priv->keygen_in_use = enable;
428}
429
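/* The hash fields cannot be selected individually: any request limited to
 * the supported IP/L4 fields simply turns KeyGen hashing on (non-zero
 * data) or off (zero data) for the whole Rx port, and anything outside
 * that set is rejected with -EINVAL.
 */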
430static int dpaa_set_hash_opts(struct net_device *dev,
431			      struct ethtool_rxnfc *nfc)
432{
433	int ret = -EINVAL;
434
435	/* we support hashing on IPv4/v6 src/dest IP and L4 src/dest port */
436	if (nfc->data &
437	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
438		return -EINVAL;
439
440	switch (nfc->flow_type) {
441	case TCP_V4_FLOW:
442	case TCP_V6_FLOW:
443	case UDP_V4_FLOW:
444	case UDP_V6_FLOW:
445	case IPV4_FLOW:
446	case IPV6_FLOW:
447	case SCTP_V4_FLOW:
448	case SCTP_V6_FLOW:
449	case AH_ESP_V4_FLOW:
450	case AH_ESP_V6_FLOW:
451	case AH_V4_FLOW:
452	case AH_V6_FLOW:
453	case ESP_V4_FLOW:
454	case ESP_V6_FLOW:
455		dpaa_set_hash(dev, !!nfc->data);
456		ret = 0;
457		break;
458	default:
459		break;
460	}
461
462	return ret;
463}
464
465static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
466{
467	int ret = -EOPNOTSUPP;
468
469	switch (cmd->cmd) {
470	case ETHTOOL_SRXFH:
471		ret = dpaa_set_hash_opts(dev, cmd);
472		break;
473	default:
474		break;
475	}
476
477	return ret;
478}
479
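/* The PTP hardware clock index is discovered by walking the device tree:
 * the parent of the MAC's OF node is the FMan node, whose "ptimer-handle"
 * phandle points at the QorIQ PTP timer node; phc_index is then read from
 * that platform device's ptp_qoriq driver data. If any step fails,
 * phc_index stays -1 and only the timestamping capabilities are reported.
 */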
480static int dpaa_get_ts_info(struct net_device *net_dev,
481			    struct ethtool_ts_info *info)
482{
483	struct device *dev = net_dev->dev.parent;
484	struct device_node *mac_node = dev->of_node;
485	struct device_node *fman_node = NULL, *ptp_node = NULL;
486	struct platform_device *ptp_dev = NULL;
487	struct ptp_qoriq *ptp = NULL;
488
489	info->phc_index = -1;
490
491	fman_node = of_get_parent(mac_node);
492	if (fman_node)
493		ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
494
495	if (ptp_node)
496		ptp_dev = of_find_device_by_node(ptp_node);
497
498	if (ptp_dev)
499		ptp = platform_get_drvdata(ptp_dev);
500
501	if (ptp)
502		info->phc_index = ptp->phc_index;
503
504	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
505				SOF_TIMESTAMPING_RX_HARDWARE |
506				SOF_TIMESTAMPING_RAW_HARDWARE;
507	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
508			 (1 << HWTSTAMP_TX_ON);
509	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
510			   (1 << HWTSTAMP_FILTER_ALL);
511
512	return 0;
513}
514
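/* Interrupt coalescing maps onto the QMan portal's DQRR interrupt
 * parameters: rx_coalesce_usecs reports the portal interrupt period and
 * rx_max_coalesced_frames the DQRR interrupt threshold, both read from the
 * current CPU's affine portal.
 */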
515static int dpaa_get_coalesce(struct net_device *dev,
516			     struct ethtool_coalesce *c)
517{
518	struct qman_portal *portal;
519	u32 period;
520	u8 thresh;
521
522	portal = qman_get_affine_portal(smp_processor_id());
523	qman_portal_get_iperiod(portal, &period);
524	qman_dqrr_get_ithresh(portal, &thresh);
525
526	c->rx_coalesce_usecs = period;
527	c->rx_max_coalesced_frames = thresh;
528
529	return 0;
530}
531
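/* Setting coalescing is all-or-nothing: the new period/threshold are
 * applied to every online CPU's affine portal and, if any portal rejects
 * them, the portals already updated (tracked in needs_revert[]) are rolled
 * back to the previous values before the error is returned.
 */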
532static int dpaa_set_coalesce(struct net_device *dev,
533			     struct ethtool_coalesce *c)
534{
535	const cpumask_t *cpus = qman_affine_cpus();
536	bool needs_revert[NR_CPUS] = {false};
537	struct qman_portal *portal;
538	u32 period, prev_period;
539	u8 thresh, prev_thresh;
540	int cpu, res;
541
542	period = c->rx_coalesce_usecs;
543	thresh = c->rx_max_coalesced_frames;
544
545	/* save previous values */
546	portal = qman_get_affine_portal(smp_processor_id());
547	qman_portal_get_iperiod(portal, &prev_period);
548	qman_dqrr_get_ithresh(portal, &prev_thresh);
549
550	/* set new values */
551	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
552		portal = qman_get_affine_portal(cpu);
553		res = qman_portal_set_iperiod(portal, period);
554		if (res)
555			goto revert_values;
556		res = qman_dqrr_set_ithresh(portal, thresh);
557		if (res) {
558			qman_portal_set_iperiod(portal, prev_period);
559			goto revert_values;
560		}
561		needs_revert[cpu] = true;
562	}
563
564	return 0;
565
566revert_values:
567	/* restore previous values */
568	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
569		if (!needs_revert[cpu])
570			continue;
571		portal = qman_get_affine_portal(cpu);
572		/* previous values will not fail, ignore return value */
573		qman_portal_set_iperiod(portal, prev_period);
574		qman_dqrr_set_ithresh(portal, prev_thresh);
575	}
576
577	return res;
578}
579
580const struct ethtool_ops dpaa_ethtool_ops = {
581	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
582				     ETHTOOL_COALESCE_RX_MAX_FRAMES,
583	.get_drvinfo = dpaa_get_drvinfo,
584	.get_msglevel = dpaa_get_msglevel,
585	.set_msglevel = dpaa_set_msglevel,
586	.nway_reset = dpaa_nway_reset,
587	.get_pauseparam = dpaa_get_pauseparam,
588	.set_pauseparam = dpaa_set_pauseparam,
589	.get_link = ethtool_op_get_link,
590	.get_sset_count = dpaa_get_sset_count,
591	.get_ethtool_stats = dpaa_get_ethtool_stats,
592	.get_strings = dpaa_get_strings,
593	.get_link_ksettings = dpaa_get_link_ksettings,
594	.set_link_ksettings = dpaa_set_link_ksettings,
595	.get_rxnfc = dpaa_get_rxnfc,
596	.set_rxnfc = dpaa_set_rxnfc,
597	.get_ts_info = dpaa_get_ts_info,
598	.get_coalesce = dpaa_get_coalesce,
599	.set_coalesce = dpaa_set_coalesce,
600};
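/* These operations back the usual ethtool commands, e.g. "ethtool -i"
 * (drvinfo), "ethtool -S" (strings/stats), "ethtool -a"/"-A" (pause),
 * "ethtool -c"/"-C" (coalescing), "ethtool -n"/"-N ... rx-flow-hash"
 * (rxnfc), "ethtool -T" (timestamping info) and "ethtool -r" (nway reset).
 */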
v4.17
  1/* Copyright 2008-2016 Freescale Semiconductor, Inc.
  2 *
  3 * Redistribution and use in source and binary forms, with or without
  4 * modification, are permitted provided that the following conditions are met:
  5 *     * Redistributions of source code must retain the above copyright
  6 *	 notice, this list of conditions and the following disclaimer.
  7 *     * Redistributions in binary form must reproduce the above copyright
  8 *	 notice, this list of conditions and the following disclaimer in the
  9 *	 documentation and/or other materials provided with the distribution.
 10 *     * Neither the name of Freescale Semiconductor nor the
 11 *	 names of its contributors may be used to endorse or promote products
 12 *	 derived from this software without specific prior written permission.
 13 *
 14 *
 15 * ALTERNATIVELY, this software may be distributed under the terms of the
 16 * GNU General Public License ("GPL") as published by the Free Software
 17 * Foundation, either version 2 of that License or (at your option) any
 18 * later version.
 19 *
 20 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 21 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 22 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 23 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 24 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 25 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 27 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 29 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 30 */
 31
 32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 33
 34#include <linux/string.h>
 35
 36#include "dpaa_eth.h"
 37#include "mac.h"
 38
 39static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
 40	"interrupts",
 41	"rx packets",
 42	"tx packets",
 43	"tx confirm",
 44	"tx S/G",
 45	"tx error",
 46	"rx error",
 47};
 48
 49static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
 50	/* dpa rx errors */
 51	"rx dma error",
 52	"rx frame physical error",
 53	"rx frame size error",
 54	"rx header error",
 55
 56	/* demultiplexing errors */
 57	"qman cg_tdrop",
 58	"qman wred",
 59	"qman error cond",
 60	"qman early window",
 61	"qman late window",
 62	"qman fq tdrop",
 63	"qman fq retired",
 64	"qman orp disabled",
 65
 66	/* congestion related stats */
 67	"congestion time (ms)",
 68	"entered congestion",
 69	"congested (0/1)"
 70};
 71
 72#define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu)
 73#define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global)
 74
 75static int dpaa_get_link_ksettings(struct net_device *net_dev,
 76				   struct ethtool_link_ksettings *cmd)
 77{
 78	if (!net_dev->phydev) {
 79		netdev_dbg(net_dev, "phy device not initialized\n");
 80		return 0;
 81	}
 82
 83	phy_ethtool_ksettings_get(net_dev->phydev, cmd);
 84
 85	return 0;
 86}
 87
 88static int dpaa_set_link_ksettings(struct net_device *net_dev,
 89				   const struct ethtool_link_ksettings *cmd)
 90{
 91	int err;
 92
 93	if (!net_dev->phydev) {
 94		netdev_err(net_dev, "phy device not initialized\n");
 95		return -ENODEV;
 96	}
 97
 98	err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
 99	if (err < 0)
100		netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err);
101
102	return err;
103}
104
105static void dpaa_get_drvinfo(struct net_device *net_dev,
106			     struct ethtool_drvinfo *drvinfo)
107{
108	int len;
109
110	strlcpy(drvinfo->driver, KBUILD_MODNAME,
111		sizeof(drvinfo->driver));
112	len = snprintf(drvinfo->version, sizeof(drvinfo->version),
113		       "%X", 0);
114	len = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
115		       "%X", 0);
116
117	if (len >= sizeof(drvinfo->fw_version)) {
118		/* Truncated output */
119		netdev_notice(net_dev, "snprintf() = %d\n", len);
120	}
121	strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
122		sizeof(drvinfo->bus_info));
123}
124
125static u32 dpaa_get_msglevel(struct net_device *net_dev)
126{
127	return ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable;
128}
129
130static void dpaa_set_msglevel(struct net_device *net_dev,
131			      u32 msg_enable)
132{
133	((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable = msg_enable;
134}
135
136static int dpaa_nway_reset(struct net_device *net_dev)
137{
138	int err;
139
140	if (!net_dev->phydev) {
141		netdev_err(net_dev, "phy device not initialized\n");
142		return -ENODEV;
143	}
144
145	err = 0;
146	if (net_dev->phydev->autoneg) {
147		err = phy_start_aneg(net_dev->phydev);
148		if (err < 0)
149			netdev_err(net_dev, "phy_start_aneg() = %d\n",
150				   err);
151	}
152
153	return err;
154}
155
156static void dpaa_get_pauseparam(struct net_device *net_dev,
157				struct ethtool_pauseparam *epause)
158{
159	struct mac_device *mac_dev;
160	struct dpaa_priv *priv;
161
162	priv = netdev_priv(net_dev);
163	mac_dev = priv->mac_dev;
164
165	if (!net_dev->phydev) {
166		netdev_err(net_dev, "phy device not initialized\n");
167		return;
168	}
169
170	epause->autoneg = mac_dev->autoneg_pause;
171	epause->rx_pause = mac_dev->rx_pause_active;
172	epause->tx_pause = mac_dev->tx_pause_active;
173}
174
175static int dpaa_set_pauseparam(struct net_device *net_dev,
176			       struct ethtool_pauseparam *epause)
177{
178	struct mac_device *mac_dev;
179	struct phy_device *phydev;
180	bool rx_pause, tx_pause;
181	struct dpaa_priv *priv;
182	u32 newadv, oldadv;
183	int err;
184
185	priv = netdev_priv(net_dev);
186	mac_dev = priv->mac_dev;
187
188	phydev = net_dev->phydev;
189	if (!phydev) {
190		netdev_err(net_dev, "phy device not initialized\n");
191		return -ENODEV;
192	}
193
194	if (!(phydev->supported & SUPPORTED_Pause) ||
195	    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
196	    (epause->rx_pause != epause->tx_pause)))
197		return -EINVAL;
198
199	/* The MAC should know how to handle PAUSE frame autonegotiation before
200	 * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
201	 * settings.
202	 */
203	mac_dev->autoneg_pause = !!epause->autoneg;
204	mac_dev->rx_pause_req = !!epause->rx_pause;
205	mac_dev->tx_pause_req = !!epause->tx_pause;
206
207	/* Determine the sym/asym advertised PAUSE capabilities from the desired
208	 * rx/tx pause settings.
209	 */
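	/* Standard pause resolution: rx only advertises Pause | Asym_Pause,
	 * rx + tx advertises Pause, tx only advertises Asym_Pause.
	 */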
210	newadv = 0;
211	if (epause->rx_pause)
212		newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
213	if (epause->tx_pause)
214		newadv ^= ADVERTISED_Asym_Pause;
215
216	oldadv = phydev->advertising &
217			(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
218
219	/* If there are differences between the old and the new advertised
220	 * values, restart PHY autonegotiation and advertise the new values.
221	 */
222	if (oldadv != newadv) {
223		phydev->advertising &= ~(ADVERTISED_Pause
224				| ADVERTISED_Asym_Pause);
225		phydev->advertising |= newadv;
226		if (phydev->autoneg) {
227			err = phy_start_aneg(phydev);
228			if (err < 0)
229				netdev_err(net_dev, "phy_start_aneg() = %d\n",
230					   err);
231		}
232	}
233
234	fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
235	err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
236	if (err < 0)
237		netdev_err(net_dev, "set_mac_active_pause() = %d\n", err);
238
239	return err;
240}
241
242static int dpaa_get_sset_count(struct net_device *net_dev, int type)
243{
244	unsigned int total_stats, num_stats;
245
246	num_stats   = num_online_cpus() + 1;
247	total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM) +
248			DPAA_STATS_GLOBAL_LEN;
249
250	switch (type) {
251	case ETH_SS_STATS:
252		return total_stats;
253	default:
254		return -EOPNOTSUPP;
255	}
256}
257
258static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
259		       int crr_cpu, u64 *bp_count, u64 *data)
260{
261	int num_values = num_cpus + 1;
262	int crr = 0, j;
263
264	/* update current CPU's stats and also add them to the total values */
265	data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
266	data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;
267
268	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
269	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;
270
271	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
272	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;
273
274	data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
275	data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;
276
277	data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
278	data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
279
280	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
281	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;
282
283	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
284	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
285
286	for (j = 0; j < DPAA_BPS_NUM; j++) {
287		data[crr * num_values + crr_cpu] = bp_count[j];
288		data[crr++ * num_values + num_cpus] += bp_count[j];
289	}
290}
291
292static void dpaa_get_ethtool_stats(struct net_device *net_dev,
293				   struct ethtool_stats *stats, u64 *data)
294{
295	u64 bp_count[DPAA_BPS_NUM], cg_time, cg_num;
296	struct dpaa_percpu_priv *percpu_priv;
297	struct dpaa_rx_errors rx_errors;
298	unsigned int num_cpus, offset;
299	struct dpaa_ern_cnt ern_cnt;
300	struct dpaa_bp *dpaa_bp;
301	struct dpaa_priv *priv;
302	int total_stats, i, j;
303	bool cg_status;
304
305	total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
306	priv     = netdev_priv(net_dev);
307	num_cpus = num_online_cpus();
308
309	memset(&bp_count, 0, sizeof(bp_count));
310	memset(&rx_errors, 0, sizeof(struct dpaa_rx_errors));
311	memset(&ern_cnt, 0, sizeof(struct dpaa_ern_cnt));
312	memset(data, 0, total_stats * sizeof(u64));
313
314	for_each_online_cpu(i) {
315		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
316		for (j = 0; j < DPAA_BPS_NUM; j++) {
317			dpaa_bp = priv->dpaa_bps[j];
318			if (!dpaa_bp->percpu_count)
319				continue;
320			bp_count[j] = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
321		}
322		rx_errors.dme += percpu_priv->rx_errors.dme;
323		rx_errors.fpe += percpu_priv->rx_errors.fpe;
324		rx_errors.fse += percpu_priv->rx_errors.fse;
325		rx_errors.phe += percpu_priv->rx_errors.phe;
326
327		ern_cnt.cg_tdrop     += percpu_priv->ern_cnt.cg_tdrop;
328		ern_cnt.wred         += percpu_priv->ern_cnt.wred;
329		ern_cnt.err_cond     += percpu_priv->ern_cnt.err_cond;
330		ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
331		ern_cnt.late_window  += percpu_priv->ern_cnt.late_window;
332		ern_cnt.fq_tdrop     += percpu_priv->ern_cnt.fq_tdrop;
333		ern_cnt.fq_retired   += percpu_priv->ern_cnt.fq_retired;
334		ern_cnt.orp_zero     += percpu_priv->ern_cnt.orp_zero;
335
336		copy_stats(percpu_priv, num_cpus, i, bp_count, data);
337	}
338
339	offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM);
340	memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));
341
342	offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
343	memcpy(data + offset, &ern_cnt, sizeof(struct dpaa_ern_cnt));
344
345	/* gather congestion related counters */
346	cg_num    = 0;
347	cg_status = false;
348	cg_time   = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
349	if (qman_query_cgr_congested(&priv->cgr_data.cgr, &cg_status) == 0) {
350		cg_num    = priv->cgr_data.cgr_congested_count;
351
352		/* reset congestion stats (like the QMan API does) */
353		priv->cgr_data.congested_jiffies   = 0;
354		priv->cgr_data.cgr_congested_count = 0;
355	}
356
357	offset += sizeof(struct dpaa_ern_cnt) / sizeof(u64);
358	data[offset++] = cg_time;
359	data[offset++] = cg_num;
360	data[offset++] = cg_status;
361}
362
363static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
364			     u8 *data)
365{
366	unsigned int i, j, num_cpus, size;
367	char string_cpu[ETH_GSTRING_LEN];
368	u8 *strings;
369
370	memset(string_cpu, 0, sizeof(string_cpu));
371	strings   = data;
372	num_cpus  = num_online_cpus();
373	size      = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
374
375	for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
376		for (j = 0; j < num_cpus; j++) {
377			snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
378				 dpaa_stats_percpu[i], j);
379			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
380			strings += ETH_GSTRING_LEN;
381		}
382		snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
383			 dpaa_stats_percpu[i]);
384		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
385		strings += ETH_GSTRING_LEN;
386	}
387	for (i = 0; i < DPAA_BPS_NUM; i++) {
388		for (j = 0; j < num_cpus; j++) {
389			snprintf(string_cpu, ETH_GSTRING_LEN,
390				 "bpool %c [CPU %d]", 'a' + i, j);
391			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
392			strings += ETH_GSTRING_LEN;
393		}
394		snprintf(string_cpu, ETH_GSTRING_LEN, "bpool %c [TOTAL]",
395			 'a' + i);
396		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
397		strings += ETH_GSTRING_LEN;
398	}
399	memcpy(strings, dpaa_stats_global, size);
400}
401
402static int dpaa_get_hash_opts(struct net_device *dev,
403			      struct ethtool_rxnfc *cmd)
404{
405	struct dpaa_priv *priv = netdev_priv(dev);
406
407	cmd->data = 0;
408
409	switch (cmd->flow_type) {
410	case TCP_V4_FLOW:
411	case TCP_V6_FLOW:
412	case UDP_V4_FLOW:
413	case UDP_V6_FLOW:
414		if (priv->keygen_in_use)
415			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
416		/* Fall through */
417	case IPV4_FLOW:
418	case IPV6_FLOW:
419	case SCTP_V4_FLOW:
420	case SCTP_V6_FLOW:
421	case AH_ESP_V4_FLOW:
422	case AH_ESP_V6_FLOW:
423	case AH_V4_FLOW:
424	case AH_V6_FLOW:
425	case ESP_V4_FLOW:
426	case ESP_V6_FLOW:
427		if (priv->keygen_in_use)
428			cmd->data |= RXH_IP_SRC | RXH_IP_DST;
429		break;
430	default:
431		cmd->data = 0;
432		break;
433	}
434
435	return 0;
436}
437
438static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
439			  u32 *unused)
440{
441	int ret = -EOPNOTSUPP;
442
443	switch (cmd->cmd) {
444	case ETHTOOL_GRXFH:
445		ret = dpaa_get_hash_opts(dev, cmd);
446		break;
447	default:
448		break;
449	}
450
451	return ret;
452}
453
454static void dpaa_set_hash(struct net_device *net_dev, bool enable)
455{
456	struct mac_device *mac_dev;
457	struct fman_port *rxport;
458	struct dpaa_priv *priv;
459
460	priv = netdev_priv(net_dev);
461	mac_dev = priv->mac_dev;
462	rxport = mac_dev->port[0];
463
464	fman_port_use_kg_hash(rxport, enable);
465	priv->keygen_in_use = enable;
466}
467
468static int dpaa_set_hash_opts(struct net_device *dev,
469			      struct ethtool_rxnfc *nfc)
470{
471	int ret = -EINVAL;
472
473	/* we support hashing on IPv4/v6 src/dest IP and L4 src/dest port */
474	if (nfc->data &
475	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
476		return -EINVAL;
477
478	switch (nfc->flow_type) {
479	case TCP_V4_FLOW:
480	case TCP_V6_FLOW:
481	case UDP_V4_FLOW:
482	case UDP_V6_FLOW:
483	case IPV4_FLOW:
484	case IPV6_FLOW:
485	case SCTP_V4_FLOW:
486	case SCTP_V6_FLOW:
487	case AH_ESP_V4_FLOW:
488	case AH_ESP_V6_FLOW:
489	case AH_V4_FLOW:
490	case AH_V6_FLOW:
491	case ESP_V4_FLOW:
492	case ESP_V6_FLOW:
493		dpaa_set_hash(dev, !!nfc->data);
494		ret = 0;
495		break;
496	default:
497		break;
498	}
499
500	return ret;
501}
502
503static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
504{
505	int ret = -EOPNOTSUPP;
506
507	switch (cmd->cmd) {
508	case ETHTOOL_SRXFH:
509		ret = dpaa_set_hash_opts(dev, cmd);
510		break;
511	default:
512		break;
513	}
514
515	return ret;
516}
517
518const struct ethtool_ops dpaa_ethtool_ops = {
519	.get_drvinfo = dpaa_get_drvinfo,
520	.get_msglevel = dpaa_get_msglevel,
521	.set_msglevel = dpaa_set_msglevel,
522	.nway_reset = dpaa_nway_reset,
523	.get_pauseparam = dpaa_get_pauseparam,
524	.set_pauseparam = dpaa_set_pauseparam,
525	.get_link = ethtool_op_get_link,
526	.get_sset_count = dpaa_get_sset_count,
527	.get_ethtool_stats = dpaa_get_ethtool_stats,
528	.get_strings = dpaa_get_strings,
529	.get_link_ksettings = dpaa_get_link_ksettings,
530	.set_link_ksettings = dpaa_set_link_ksettings,
531	.get_rxnfc = dpaa_get_rxnfc,
532	.set_rxnfc = dpaa_set_rxnfc,
533};