   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2019 Synopsys, Inc. and/or its affiliates.
   4 * stmmac Selftests Support
   5 *
   6 * Author: Jose Abreu <joabreu@synopsys.com>
   7 */
   8
   9#include <linux/bitrev.h>
  10#include <linux/completion.h>
  11#include <linux/crc32.h>
  12#include <linux/ethtool.h>
  13#include <linux/ip.h>
  14#include <linux/phy.h>
  15#include <linux/udp.h>
  16#include <net/pkt_cls.h>
  17#include <net/pkt_sched.h>
  18#include <net/tcp.h>
  19#include <net/udp.h>
  20#include <net/tc_act/tc_gact.h>
  21#include "stmmac.h"
  22
  23struct stmmachdr {
  24	__be32 version;
  25	__be64 magic;
  26	u8 id;
  27} __packed;
  28
  29#define STMMAC_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
  30			      sizeof(struct stmmachdr))
  31#define STMMAC_TEST_PKT_MAGIC	0xdeadcafecafedeadULL
  32#define STMMAC_LB_TIMEOUT	msecs_to_jiffies(200)
  33
  34struct stmmac_packet_attrs {
  35	int vlan;
  36	int vlan_id_in;
  37	int vlan_id_out;
  38	unsigned char *src;
  39	const unsigned char *dst;
  40	u32 ip_src;
  41	u32 ip_dst;
  42	int tcp;
  43	int sport;
  44	int dport;
  45	u32 exp_hash;
  46	int dont_wait;
  47	int timeout;
  48	int size;
  49	int max_size;
  50	int remove_sa;
  51	u8 id;
  52	int sarc;
  53	u16 queue_mapping;
  54	u64 timestamp;
  55};
  56
  57static u8 stmmac_test_next_id;
  58
  59static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
  60					       struct stmmac_packet_attrs *attr)
  61{
  62	struct sk_buff *skb = NULL;
  63	struct udphdr *uhdr = NULL;
  64	struct tcphdr *thdr = NULL;
  65	struct stmmachdr *shdr;
  66	struct ethhdr *ehdr;
  67	struct iphdr *ihdr;
  68	int iplen, size;
  69
  70	size = attr->size + STMMAC_TEST_PKT_SIZE;
  71	if (attr->vlan) {
  72		size += 4;
  73		if (attr->vlan > 1)
  74			size += 4;
  75	}
  76
  77	if (attr->tcp)
  78		size += sizeof(struct tcphdr);
  79	else
  80		size += sizeof(struct udphdr);
  81
  82	if (attr->max_size && (attr->max_size > size))
  83		size = attr->max_size;
  84
  85	skb = netdev_alloc_skb(priv->dev, size);
  86	if (!skb)
  87		return NULL;
  88
  89	prefetchw(skb->data);
  90
  91	if (attr->vlan > 1)
  92		ehdr = skb_push(skb, ETH_HLEN + 8);
  93	else if (attr->vlan)
  94		ehdr = skb_push(skb, ETH_HLEN + 4);
  95	else if (attr->remove_sa)
  96		ehdr = skb_push(skb, ETH_HLEN - 6);
  97	else
  98		ehdr = skb_push(skb, ETH_HLEN);
  99	skb_reset_mac_header(skb);
 100
 101	skb_set_network_header(skb, skb->len);
 102	ihdr = skb_put(skb, sizeof(*ihdr));
 103
 104	skb_set_transport_header(skb, skb->len);
 105	if (attr->tcp)
 106		thdr = skb_put(skb, sizeof(*thdr));
 107	else
 108		uhdr = skb_put(skb, sizeof(*uhdr));
 109
 110	if (!attr->remove_sa)
 111		eth_zero_addr(ehdr->h_source);
 112	eth_zero_addr(ehdr->h_dest);
 113	if (attr->src && !attr->remove_sa)
 114		ether_addr_copy(ehdr->h_source, attr->src);
 115	if (attr->dst)
 116		ether_addr_copy(ehdr->h_dest, attr->dst);
 117
 118	if (!attr->remove_sa) {
 119		ehdr->h_proto = htons(ETH_P_IP);
 120	} else {
 121		__be16 *ptr = (__be16 *)ehdr;
 122
 123		/* HACK */
 124		ptr[3] = htons(ETH_P_IP);
 125	}
 126
 127	if (attr->vlan) {
 128		__be16 *tag, *proto;
 129
 130		if (!attr->remove_sa) {
 131			tag = (void *)ehdr + ETH_HLEN;
 132			proto = (void *)ehdr + (2 * ETH_ALEN);
 133		} else {
 134			tag = (void *)ehdr + ETH_HLEN - 6;
 135			proto = (void *)ehdr + ETH_ALEN;
 136		}
 137
 138		proto[0] = htons(ETH_P_8021Q);
 139		tag[0] = htons(attr->vlan_id_out);
 140		tag[1] = htons(ETH_P_IP);
 141		if (attr->vlan > 1) {
 142			proto[0] = htons(ETH_P_8021AD);
 143			tag[1] = htons(ETH_P_8021Q);
 144			tag[2] = htons(attr->vlan_id_in);
 145			tag[3] = htons(ETH_P_IP);
 146		}
 147	}
 148
 149	if (attr->tcp) {
 150		thdr->source = htons(attr->sport);
 151		thdr->dest = htons(attr->dport);
 152		thdr->doff = sizeof(struct tcphdr) / 4;
 153		thdr->check = 0;
 154	} else {
 155		uhdr->source = htons(attr->sport);
 156		uhdr->dest = htons(attr->dport);
 157		uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size);
 158		if (attr->max_size)
 159			uhdr->len = htons(attr->max_size -
 160					  (sizeof(*ihdr) + sizeof(*ehdr)));
 161		uhdr->check = 0;
 162	}
 163
 164	ihdr->ihl = 5;
 165	ihdr->ttl = 32;
 166	ihdr->version = 4;
 167	if (attr->tcp)
 168		ihdr->protocol = IPPROTO_TCP;
 169	else
 170		ihdr->protocol = IPPROTO_UDP;
 171	iplen = sizeof(*ihdr) + sizeof(*shdr) + attr->size;
 172	if (attr->tcp)
 173		iplen += sizeof(*thdr);
 174	else
 175		iplen += sizeof(*uhdr);
 176
 177	if (attr->max_size)
 178		iplen = attr->max_size - sizeof(*ehdr);
 179
 180	ihdr->tot_len = htons(iplen);
 181	ihdr->frag_off = 0;
 182	ihdr->saddr = htonl(attr->ip_src);
 183	ihdr->daddr = htonl(attr->ip_dst);
 184	ihdr->tos = 0;
 185	ihdr->id = 0;
 186	ip_send_check(ihdr);
 187
 188	shdr = skb_put(skb, sizeof(*shdr));
 189	shdr->version = 0;
 190	shdr->magic = cpu_to_be64(STMMAC_TEST_PKT_MAGIC);
 191	attr->id = stmmac_test_next_id;
 192	shdr->id = stmmac_test_next_id++;
 193
 194	if (attr->size)
 195		skb_put(skb, attr->size);
 196	if (attr->max_size && (attr->max_size > skb->len))
 197		skb_put(skb, attr->max_size - skb->len);
 198
 199	skb->csum = 0;
 200	skb->ip_summed = CHECKSUM_PARTIAL;
 201	if (attr->tcp) {
 202		thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, ihdr->daddr, 0);
 203		skb->csum_start = skb_transport_header(skb) - skb->head;
 204		skb->csum_offset = offsetof(struct tcphdr, check);
 205	} else {
 206		udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
 207	}
 208
 209	skb->protocol = htons(ETH_P_IP);
 210	skb->pkt_type = PACKET_HOST;
 211	skb->dev = priv->dev;
 212
 213	if (attr->timestamp)
 214		skb->tstamp = ns_to_ktime(attr->timestamp);
 215
 216	return skb;
 217}
 218
 219static struct sk_buff *stmmac_test_get_arp_skb(struct stmmac_priv *priv,
 220					       struct stmmac_packet_attrs *attr)
 221{
 222	__be32 ip_src = htonl(attr->ip_src);
 223	__be32 ip_dst = htonl(attr->ip_dst);
 224	struct sk_buff *skb = NULL;
 225
 226	skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, ip_dst, priv->dev, ip_src,
 227			 NULL, attr->src, attr->dst);
 228	if (!skb)
 229		return NULL;
 230
 231	skb->pkt_type = PACKET_HOST;
 232	skb->dev = priv->dev;
 233
 234	return skb;
 235}
 236
 237struct stmmac_test_priv {
 238	struct stmmac_packet_attrs *packet;
 239	struct packet_type pt;
 240	struct completion comp;
 241	int double_vlan;
 242	int vlan_id;
 243	int ok;
 244};
 245
 246static int stmmac_test_loopback_validate(struct sk_buff *skb,
 247					 struct net_device *ndev,
 248					 struct packet_type *pt,
 249					 struct net_device *orig_ndev)
 250{
 251	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
 252	const unsigned char *dst = tpriv->packet->dst;
 253	unsigned char *src = tpriv->packet->src;
 254	struct stmmachdr *shdr;
 255	struct ethhdr *ehdr;
 256	struct udphdr *uhdr;
 257	struct tcphdr *thdr;
 258	struct iphdr *ihdr;
 259
 260	skb = skb_unshare(skb, GFP_ATOMIC);
 261	if (!skb)
 262		goto out;
 263
 264	if (skb_linearize(skb))
 265		goto out;
 266	if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
 267		goto out;
 268
 269	ehdr = (struct ethhdr *)skb_mac_header(skb);
 270	if (dst) {
 271		if (!ether_addr_equal_unaligned(ehdr->h_dest, dst))
 272			goto out;
 273	}
 274	if (tpriv->packet->sarc) {
 275		if (!ether_addr_equal_unaligned(ehdr->h_source, ehdr->h_dest))
 276			goto out;
 277	} else if (src) {
 278		if (!ether_addr_equal_unaligned(ehdr->h_source, src))
 279			goto out;
 280	}
 281
 282	ihdr = ip_hdr(skb);
 283	if (tpriv->double_vlan)
 284		ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
 285
 286	if (tpriv->packet->tcp) {
 287		if (ihdr->protocol != IPPROTO_TCP)
 288			goto out;
 289
 290		thdr = (struct tcphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
 291		if (thdr->dest != htons(tpriv->packet->dport))
 292			goto out;
 293
 294		shdr = (struct stmmachdr *)((u8 *)thdr + sizeof(*thdr));
 295	} else {
 296		if (ihdr->protocol != IPPROTO_UDP)
 297			goto out;
 298
 299		uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
 300		if (uhdr->dest != htons(tpriv->packet->dport))
 301			goto out;
 302
 303		shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
 304	}
 305
 306	if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
 307		goto out;
 308	if (tpriv->packet->exp_hash && !skb->hash)
 309		goto out;
 310	if (tpriv->packet->id != shdr->id)
 311		goto out;
 312
 313	tpriv->ok = true;
 314	complete(&tpriv->comp);
 315out:
 316	kfree_skb(skb);
 317	return 0;
 318}
 319
 320static int __stmmac_test_loopback(struct stmmac_priv *priv,
 321				  struct stmmac_packet_attrs *attr)
 322{
 323	struct stmmac_test_priv *tpriv;
 324	struct sk_buff *skb = NULL;
 325	int ret = 0;
 326
 327	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
 328	if (!tpriv)
 329		return -ENOMEM;
 330
 331	tpriv->ok = false;
 332	init_completion(&tpriv->comp);
 333
 334	tpriv->pt.type = htons(ETH_P_IP);
 335	tpriv->pt.func = stmmac_test_loopback_validate;
 336	tpriv->pt.dev = priv->dev;
 337	tpriv->pt.af_packet_priv = tpriv;
 338	tpriv->packet = attr;
 339
 340	if (!attr->dont_wait)
 341		dev_add_pack(&tpriv->pt);
 342
 343	skb = stmmac_test_get_udp_skb(priv, attr);
 344	if (!skb) {
 345		ret = -ENOMEM;
 346		goto cleanup;
 347	}
 348
 349	ret = dev_direct_xmit(skb, attr->queue_mapping);
 350	if (ret)
 351		goto cleanup;
 352
 353	if (attr->dont_wait)
 354		goto cleanup;
 355
 356	if (!attr->timeout)
 357		attr->timeout = STMMAC_LB_TIMEOUT;
 358
 359	wait_for_completion_timeout(&tpriv->comp, attr->timeout);
 360	ret = tpriv->ok ? 0 : -ETIMEDOUT;
 361
 362cleanup:
 363	if (!attr->dont_wait)
 364		dev_remove_pack(&tpriv->pt);
 365	kfree(tpriv);
 366	return ret;
 367}
 368
 369static int stmmac_test_mac_loopback(struct stmmac_priv *priv)
 370{
 371	struct stmmac_packet_attrs attr = { };
 372
 373	attr.dst = priv->dev->dev_addr;
 374	return __stmmac_test_loopback(priv, &attr);
 375}
 376
 377static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
 378{
 379	struct stmmac_packet_attrs attr = { };
 380	int ret;
 381
 382	if (!priv->dev->phydev)
 383		return -EOPNOTSUPP;
 384
 385	ret = phy_loopback(priv->dev->phydev, true);
 386	if (ret)
 387		return ret;
 388
 389	attr.dst = priv->dev->dev_addr;
 390	ret = __stmmac_test_loopback(priv, &attr);
 391
 392	phy_loopback(priv->dev->phydev, false);
 393	return ret;
 394}
 395
 396static int stmmac_test_mmc(struct stmmac_priv *priv)
 397{
 398	struct stmmac_counters initial, final;
 399	int ret;
 400
 401	memset(&initial, 0, sizeof(initial));
 402	memset(&final, 0, sizeof(final));
 403
 404	if (!priv->dma_cap.rmon)
 405		return -EOPNOTSUPP;
 406
 407	/* Save previous results into internal struct */
 408	stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);
 409
 410	ret = stmmac_test_mac_loopback(priv);
 411	if (ret)
 412		return ret;
 413
 414	/* These will be loopback results so no need to save them */
 415	stmmac_mmc_read(priv, priv->mmcaddr, &final);
 416
 417	/*
 418	 * The number of MMC counters available depends on HW configuration
 419	 * so we just use this one to validate the feature. I hope there is
 420	 * not a version without this counter.
 421	 */
 422	if (final.mmc_tx_framecount_g <= initial.mmc_tx_framecount_g)
 423		return -EINVAL;
 424
 425	return 0;
 426}
 427
 428static int stmmac_test_eee(struct stmmac_priv *priv)
 429{
 430	struct stmmac_extra_stats *initial, *final;
 431	int retries = 10;
 432	int ret;
 433
 434	if (!priv->dma_cap.eee || !priv->eee_active)
 435		return -EOPNOTSUPP;
 436
 437	initial = kzalloc(sizeof(*initial), GFP_KERNEL);
 438	if (!initial)
 439		return -ENOMEM;
 440
 441	final = kzalloc(sizeof(*final), GFP_KERNEL);
 442	if (!final) {
 443		ret = -ENOMEM;
 444		goto out_free_initial;
 445	}
 446
 447	memcpy(initial, &priv->xstats, sizeof(*initial));
 448
 449	ret = stmmac_test_mac_loopback(priv);
 450	if (ret)
 451		goto out_free_final;
 452
 453	/* No traffic on the line, so sooner or later it will enter LPI */
 454	while (--retries) {
 455		memcpy(final, &priv->xstats, sizeof(*final));
 456
 457		if (final->irq_tx_path_in_lpi_mode_n >
 458		    initial->irq_tx_path_in_lpi_mode_n)
 459			break;
 460		msleep(100);
 461	}
 462
 463	if (!retries) {
 464		ret = -ETIMEDOUT;
 465		goto out_free_final;
 466	}
 467
 468	if (final->irq_tx_path_in_lpi_mode_n <=
 469	    initial->irq_tx_path_in_lpi_mode_n) {
 470		ret = -EINVAL;
 471		goto out_free_final;
 472	}
 473
 474	if (final->irq_tx_path_exit_lpi_mode_n <=
 475	    initial->irq_tx_path_exit_lpi_mode_n) {
 476		ret = -EINVAL;
 477		goto out_free_final;
 478	}
 479
 480out_free_final:
 481	kfree(final);
 482out_free_initial:
 483	kfree(initial);
 484	return ret;
 485}
 486
 487static int stmmac_filter_check(struct stmmac_priv *priv)
 488{
 489	if (!(priv->dev->flags & IFF_PROMISC))
 490		return 0;
 491
 492	netdev_warn(priv->dev, "Test can't be run in promiscuous mode!\n");
 493	return -EOPNOTSUPP;
 494}
 495
 496static bool stmmac_hash_check(struct stmmac_priv *priv, unsigned char *addr)
 497{
 498	int mc_offset = 32 - priv->hw->mcast_bits_log2;
 499	struct netdev_hw_addr *ha;
 500	u32 hash, hash_nr;
 501
 502	/* First compute the hash for desired addr */
 503	hash = bitrev32(~crc32_le(~0, addr, 6)) >> mc_offset;
 504	hash_nr = hash >> 5;
 505	hash = 1 << (hash & 0x1f);
 506
 507	/* Now, check if it collides with any existing one */
 508	netdev_for_each_mc_addr(ha, priv->dev) {
 509		u32 nr = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)) >> mc_offset;
 510		if (((nr >> 5) == hash_nr) && ((1 << (nr & 0x1f)) == hash))
 511			return false;
 512	}
 513
 514	/* No collisions, address is good to go */
 515	return true;
 516}
 517
 518static bool stmmac_perfect_check(struct stmmac_priv *priv, unsigned char *addr)
 519{
 520	struct netdev_hw_addr *ha;
 521
 522	/* Check if it collides with any existing one */
 523	netdev_for_each_uc_addr(ha, priv->dev) {
 524		if (!memcmp(ha->addr, addr, ETH_ALEN))
 525			return false;
 526	}
 527
 528	/* No collisions, address is good to go */
 529	return true;
 530}
 531
 532static int stmmac_test_hfilt(struct stmmac_priv *priv)
 533{
 534	unsigned char gd_addr[ETH_ALEN] = {0xf1, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
 535	unsigned char bd_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
 536	struct stmmac_packet_attrs attr = { };
 537	int ret, tries = 256;
 538
 539	ret = stmmac_filter_check(priv);
 540	if (ret)
 541		return ret;
 542
 543	if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
 544		return -EOPNOTSUPP;
 545
 546	while (--tries) {
 547		/* We only need to check the bd_addr for collisions */
 548		bd_addr[ETH_ALEN - 1] = tries;
 549		if (stmmac_hash_check(priv, bd_addr))
 550			break;
 551	}
 552
 553	if (!tries)
 554		return -EOPNOTSUPP;
 555
 556	ret = dev_mc_add(priv->dev, gd_addr);
 557	if (ret)
 558		return ret;
 559
 560	attr.dst = gd_addr;
 561
 562	/* Shall receive packet */
 563	ret = __stmmac_test_loopback(priv, &attr);
 564	if (ret)
 565		goto cleanup;
 566
 567	attr.dst = bd_addr;
 568
 569	/* Shall NOT receive packet */
 570	ret = __stmmac_test_loopback(priv, &attr);
 571	ret = ret ? 0 : -EINVAL;
 572
 573cleanup:
 574	dev_mc_del(priv->dev, gd_addr);
 575	return ret;
 576}
 577
 578static int stmmac_test_pfilt(struct stmmac_priv *priv)
 579{
 580	unsigned char gd_addr[ETH_ALEN] = {0xf0, 0x01, 0x44, 0x55, 0x66, 0x77};
 581	unsigned char bd_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
 582	struct stmmac_packet_attrs attr = { };
 583	int ret, tries = 256;
 584
 585	if (stmmac_filter_check(priv))
 586		return -EOPNOTSUPP;
 587	if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
 588		return -EOPNOTSUPP;
 589
 590	while (--tries) {
 591		/* We only need to check the bd_addr for collisions */
 592		bd_addr[ETH_ALEN - 1] = tries;
 593		if (stmmac_perfect_check(priv, bd_addr))
 594			break;
 595	}
 596
 597	if (!tries)
 598		return -EOPNOTSUPP;
 599
 600	ret = dev_uc_add(priv->dev, gd_addr);
 601	if (ret)
 602		return ret;
 603
 604	attr.dst = gd_addr;
 605
 606	/* Shall receive packet */
 607	ret = __stmmac_test_loopback(priv, &attr);
 608	if (ret)
 609		goto cleanup;
 610
 611	attr.dst = bd_addr;
 612
 613	/* Shall NOT receive packet */
 614	ret = __stmmac_test_loopback(priv, &attr);
 615	ret = ret ? 0 : -EINVAL;
 616
 617cleanup:
 618	dev_uc_del(priv->dev, gd_addr);
 619	return ret;
 620}
 621
 622static int stmmac_test_mcfilt(struct stmmac_priv *priv)
 623{
 624	unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
 625	unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
 626	struct stmmac_packet_attrs attr = { };
 627	int ret, tries = 256;
 628
 629	if (stmmac_filter_check(priv))
 630		return -EOPNOTSUPP;
 631	if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
 632		return -EOPNOTSUPP;
 633	if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
 634		return -EOPNOTSUPP;
 635
 636	while (--tries) {
 637		/* We only need to check the mc_addr for collisions */
 638		mc_addr[ETH_ALEN - 1] = tries;
 639		if (stmmac_hash_check(priv, mc_addr))
 640			break;
 641	}
 642
 643	if (!tries)
 644		return -EOPNOTSUPP;
 645
 646	ret = dev_uc_add(priv->dev, uc_addr);
 647	if (ret)
 648		return ret;
 649
 650	attr.dst = uc_addr;
 651
 652	/* Shall receive packet */
 653	ret = __stmmac_test_loopback(priv, &attr);
 654	if (ret)
 655		goto cleanup;
 656
 657	attr.dst = mc_addr;
 658
 659	/* Shall NOT receive packet */
 660	ret = __stmmac_test_loopback(priv, &attr);
 661	ret = ret ? 0 : -EINVAL;
 662
 663cleanup:
 664	dev_uc_del(priv->dev, uc_addr);
 665	return ret;
 666}
 667
 668static int stmmac_test_ucfilt(struct stmmac_priv *priv)
 669{
 670	unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
 671	unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
 672	struct stmmac_packet_attrs attr = { };
 673	int ret, tries = 256;
 674
 675	if (stmmac_filter_check(priv))
 676		return -EOPNOTSUPP;
 677	if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
 678		return -EOPNOTSUPP;
 679	if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
 680		return -EOPNOTSUPP;
 681
 682	while (--tries) {
 683		/* We only need to check the uc_addr for collisions */
 684		uc_addr[ETH_ALEN - 1] = tries;
 685		if (stmmac_perfect_check(priv, uc_addr))
 686			break;
 687	}
 688
 689	if (!tries)
 690		return -EOPNOTSUPP;
 691
 692	ret = dev_mc_add(priv->dev, mc_addr);
 693	if (ret)
 694		return ret;
 695
 696	attr.dst = mc_addr;
 697
 698	/* Shall receive packet */
 699	ret = __stmmac_test_loopback(priv, &attr);
 700	if (ret)
 701		goto cleanup;
 702
 703	attr.dst = uc_addr;
 704
 705	/* Shall NOT receive packet */
 706	ret = __stmmac_test_loopback(priv, &attr);
 707	ret = ret ? 0 : -EINVAL;
 708
 709cleanup:
 710	dev_mc_del(priv->dev, mc_addr);
 711	return ret;
 712}
 713
 714static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
 715					 struct net_device *ndev,
 716					 struct packet_type *pt,
 717					 struct net_device *orig_ndev)
 718{
 719	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
 720	struct ethhdr *ehdr;
 721
 722	ehdr = (struct ethhdr *)skb_mac_header(skb);
 723	if (!ether_addr_equal_unaligned(ehdr->h_source, orig_ndev->dev_addr))
 724		goto out;
 725	if (ehdr->h_proto != htons(ETH_P_PAUSE))
 726		goto out;
 727
 728	tpriv->ok = true;
 729	complete(&tpriv->comp);
 730out:
 731	kfree_skb(skb);
 732	return 0;
 733}
 734
 735static int stmmac_test_flowctrl(struct stmmac_priv *priv)
 736{
 737	unsigned char paddr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x01};
 738	struct phy_device *phydev = priv->dev->phydev;
 739	u32 rx_cnt = priv->plat->rx_queues_to_use;
 740	struct stmmac_test_priv *tpriv;
 741	unsigned int pkt_count;
 742	int i, ret = 0;
 743
 744	if (!phydev || (!phydev->pause && !phydev->asym_pause))
 745		return -EOPNOTSUPP;
 746
 747	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
 748	if (!tpriv)
 749		return -ENOMEM;
 750
 751	tpriv->ok = false;
 752	init_completion(&tpriv->comp);
 753	tpriv->pt.type = htons(ETH_P_PAUSE);
 754	tpriv->pt.func = stmmac_test_flowctrl_validate;
 755	tpriv->pt.dev = priv->dev;
 756	tpriv->pt.af_packet_priv = tpriv;
 757	dev_add_pack(&tpriv->pt);
 758
 759	/* Compute minimum number of packets to make FIFO full */
 760	pkt_count = priv->plat->rx_fifo_size;
 761	if (!pkt_count)
 762		pkt_count = priv->dma_cap.rx_fifo_size;
 763	pkt_count /= 1400;
 764	pkt_count *= 2;
 765
 766	for (i = 0; i < rx_cnt; i++)
 767		stmmac_stop_rx(priv, priv->ioaddr, i);
 768
 769	ret = dev_set_promiscuity(priv->dev, 1);
 770	if (ret)
 771		goto cleanup;
 772
 773	ret = dev_mc_add(priv->dev, paddr);
 774	if (ret)
 775		goto cleanup;
 776
 777	for (i = 0; i < pkt_count; i++) {
 778		struct stmmac_packet_attrs attr = { };
 779
 780		attr.dst = priv->dev->dev_addr;
 781		attr.dont_wait = true;
 782		attr.size = 1400;
 783
 784		ret = __stmmac_test_loopback(priv, &attr);
 785		if (ret)
 786			goto cleanup;
 787		if (tpriv->ok)
 788			break;
 789	}
 790
 791	/* Wait for some time in case RX Watchdog is enabled */
 792	msleep(200);
 793
 794	for (i = 0; i < rx_cnt; i++) {
 795		struct stmmac_channel *ch = &priv->channel[i];
 796		u32 tail;
 797
 798		tail = priv->dma_conf.rx_queue[i].dma_rx_phy +
 799			(priv->dma_conf.dma_rx_size * sizeof(struct dma_desc));
 800
 801		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
 802		stmmac_start_rx(priv, priv->ioaddr, i);
 803
 804		local_bh_disable();
 805		napi_schedule(&ch->rx_napi);
 806		local_bh_enable();
 807	}
 808
 809	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
 810	ret = tpriv->ok ? 0 : -ETIMEDOUT;
 811
 812cleanup:
 813	dev_mc_del(priv->dev, paddr);
 814	dev_set_promiscuity(priv->dev, -1);
 815	dev_remove_pack(&tpriv->pt);
 816	kfree(tpriv);
 817	return ret;
 818}
 819
 820static int stmmac_test_rss(struct stmmac_priv *priv)
 821{
 822	struct stmmac_packet_attrs attr = { };
 823
 824	if (!priv->dma_cap.rssen || !priv->rss.enable)
 825		return -EOPNOTSUPP;
 826
 827	attr.dst = priv->dev->dev_addr;
 828	attr.exp_hash = true;
 829	attr.sport = 0x321;
 830	attr.dport = 0x123;
 831
 832	return __stmmac_test_loopback(priv, &attr);
 833}
 834
 835static int stmmac_test_vlan_validate(struct sk_buff *skb,
 836				     struct net_device *ndev,
 837				     struct packet_type *pt,
 838				     struct net_device *orig_ndev)
 839{
 840	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
 841	struct stmmachdr *shdr;
 842	struct ethhdr *ehdr;
 843	struct udphdr *uhdr;
 844	struct iphdr *ihdr;
 845	u16 proto;
 846
 847	proto = tpriv->double_vlan ? ETH_P_8021AD : ETH_P_8021Q;
 848
 849	skb = skb_unshare(skb, GFP_ATOMIC);
 850	if (!skb)
 851		goto out;
 852
 853	if (skb_linearize(skb))
 854		goto out;
 855	if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
 856		goto out;
 857	if (tpriv->vlan_id) {
 858		if (skb->vlan_proto != htons(proto))
 859			goto out;
 860		if (skb->vlan_tci != tpriv->vlan_id) {
 861			/* Means filter did not work. */
 862			tpriv->ok = false;
 863			complete(&tpriv->comp);
 864			goto out;
 865		}
 866	}
 867
 868	ehdr = (struct ethhdr *)skb_mac_header(skb);
 869	if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->dst))
 870		goto out;
 871
 872	ihdr = ip_hdr(skb);
 873	if (tpriv->double_vlan)
 874		ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
 875	if (ihdr->protocol != IPPROTO_UDP)
 876		goto out;
 877
 878	uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
 879	if (uhdr->dest != htons(tpriv->packet->dport))
 880		goto out;
 881
 882	shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
 883	if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
 884		goto out;
 885
 886	tpriv->ok = true;
 887	complete(&tpriv->comp);
 888
 889out:
 890	kfree_skb(skb);
 891	return 0;
 892}
 893
 894static int __stmmac_test_vlanfilt(struct stmmac_priv *priv)
 895{
 896	struct stmmac_packet_attrs attr = { };
 897	struct stmmac_test_priv *tpriv;
 898	struct sk_buff *skb = NULL;
 899	int ret = 0, i;
 900
 901	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
 902	if (!tpriv)
 903		return -ENOMEM;
 904
 905	tpriv->ok = false;
 906	init_completion(&tpriv->comp);
 907
 908	tpriv->pt.type = htons(ETH_P_IP);
 909	tpriv->pt.func = stmmac_test_vlan_validate;
 910	tpriv->pt.dev = priv->dev;
 911	tpriv->pt.af_packet_priv = tpriv;
 912	tpriv->packet = &attr;
 913
 914	/*
 915	 * As we use HASH filtering, false positives may appear. This is a
 916	 * specially chosen ID so that adjacent IDs (+4) have different
 917	 * HASH values.
 918	 */
 919	tpriv->vlan_id = 0x123;
 920	dev_add_pack(&tpriv->pt);
 921
 922	ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
 923	if (ret)
 924		goto cleanup;
 925
 926	for (i = 0; i < 4; i++) {
 927		attr.vlan = 1;
 928		attr.vlan_id_out = tpriv->vlan_id + i;
 929		attr.dst = priv->dev->dev_addr;
 930		attr.sport = 9;
 931		attr.dport = 9;
 932
 933		skb = stmmac_test_get_udp_skb(priv, &attr);
 934		if (!skb) {
 935			ret = -ENOMEM;
 936			goto vlan_del;
 937		}
 938
 939		ret = dev_direct_xmit(skb, 0);
 940		if (ret)
 941			goto vlan_del;
 942
 943		wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
 944		ret = tpriv->ok ? 0 : -ETIMEDOUT;
 945		if (ret && !i) {
 946			goto vlan_del;
 947		} else if (!ret && i) {
 948			ret = -EINVAL;
 949			goto vlan_del;
 950		} else {
 951			ret = 0;
 952		}
 953
 954		tpriv->ok = false;
 955	}
 956
 957vlan_del:
 958	vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
 959cleanup:
 960	dev_remove_pack(&tpriv->pt);
 961	kfree(tpriv);
 962	return ret;
 963}
 964
 965static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
 966{
 967	if (!priv->dma_cap.vlhash)
 968		return -EOPNOTSUPP;
 969
 970	return __stmmac_test_vlanfilt(priv);
 971}
 972
 973static int stmmac_test_vlanfilt_perfect(struct stmmac_priv *priv)
 974{
 975	int ret, prev_cap = priv->dma_cap.vlhash;
 976
 977	if (!(priv->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
 978		return -EOPNOTSUPP;
 979
 980	priv->dma_cap.vlhash = 0;
 981	ret = __stmmac_test_vlanfilt(priv);
 982	priv->dma_cap.vlhash = prev_cap;
 983
 984	return ret;
 985}
 986
 987static int __stmmac_test_dvlanfilt(struct stmmac_priv *priv)
 988{
 989	struct stmmac_packet_attrs attr = { };
 990	struct stmmac_test_priv *tpriv;
 991	struct sk_buff *skb = NULL;
 992	int ret = 0, i;
 993
 994	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
 995	if (!tpriv)
 996		return -ENOMEM;
 997
 998	tpriv->ok = false;
 999	tpriv->double_vlan = true;
1000	init_completion(&tpriv->comp);
1001
1002	tpriv->pt.type = htons(ETH_P_8021Q);
1003	tpriv->pt.func = stmmac_test_vlan_validate;
1004	tpriv->pt.dev = priv->dev;
1005	tpriv->pt.af_packet_priv = tpriv;
1006	tpriv->packet = &attr;
1007
1008	/*
1009	 * As we use HASH filtering, false positives may appear. This is a
1010	 * specially chosen ID so that adjacent IDs (+4) have different
1011	 * HASH values.
1012	 */
1013	tpriv->vlan_id = 0x123;
1014	dev_add_pack(&tpriv->pt);
1015
1016	ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
1017	if (ret)
1018		goto cleanup;
1019
1020	for (i = 0; i < 4; i++) {
1021		attr.vlan = 2;
1022		attr.vlan_id_out = tpriv->vlan_id + i;
1023		attr.dst = priv->dev->dev_addr;
1024		attr.sport = 9;
1025		attr.dport = 9;
1026
1027		skb = stmmac_test_get_udp_skb(priv, &attr);
1028		if (!skb) {
1029			ret = -ENOMEM;
1030			goto vlan_del;
1031		}
1032
1033		ret = dev_direct_xmit(skb, 0);
1034		if (ret)
1035			goto vlan_del;
1036
1037		wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
1038		ret = tpriv->ok ? 0 : -ETIMEDOUT;
1039		if (ret && !i) {
1040			goto vlan_del;
1041		} else if (!ret && i) {
1042			ret = -EINVAL;
1043			goto vlan_del;
1044		} else {
1045			ret = 0;
1046		}
1047
1048		tpriv->ok = false;
1049	}
1050
1051vlan_del:
1052	vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
1053cleanup:
1054	dev_remove_pack(&tpriv->pt);
1055	kfree(tpriv);
1056	return ret;
1057}
1058
1059static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
1060{
1061	if (!priv->dma_cap.vlhash)
1062		return -EOPNOTSUPP;
1063
1064	return __stmmac_test_dvlanfilt(priv);
1065}
1066
1067static int stmmac_test_dvlanfilt_perfect(struct stmmac_priv *priv)
1068{
1069	int ret, prev_cap = priv->dma_cap.vlhash;
1070
1071	if (!(priv->dev->features & NETIF_F_HW_VLAN_STAG_FILTER))
1072		return -EOPNOTSUPP;
1073
1074	priv->dma_cap.vlhash = 0;
1075	ret = __stmmac_test_dvlanfilt(priv);
1076	priv->dma_cap.vlhash = prev_cap;
1077
1078	return ret;
1079}
1080
1081#ifdef CONFIG_NET_CLS_ACT
1082static int stmmac_test_rxp(struct stmmac_priv *priv)
1083{
1084	unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
1085	struct tc_cls_u32_offload cls_u32 = { };
1086	struct stmmac_packet_attrs attr = { };
1087	struct tc_action **actions;
1088	struct tc_u32_sel *sel;
1089	struct tcf_gact *gact;
1090	struct tcf_exts *exts;
1091	int ret, i, nk = 1;
1092
1093	if (!tc_can_offload(priv->dev))
1094		return -EOPNOTSUPP;
1095	if (!priv->dma_cap.frpsel)
1096		return -EOPNOTSUPP;
1097
1098	sel = kzalloc(struct_size(sel, keys, nk), GFP_KERNEL);
1099	if (!sel)
1100		return -ENOMEM;
1101
1102	exts = kzalloc(sizeof(*exts), GFP_KERNEL);
1103	if (!exts) {
1104		ret = -ENOMEM;
1105		goto cleanup_sel;
1106	}
1107
1108	actions = kcalloc(nk, sizeof(*actions), GFP_KERNEL);
1109	if (!actions) {
1110		ret = -ENOMEM;
1111		goto cleanup_exts;
1112	}
1113
1114	gact = kcalloc(nk, sizeof(*gact), GFP_KERNEL);
1115	if (!gact) {
1116		ret = -ENOMEM;
1117		goto cleanup_actions;
1118	}
1119
1120	cls_u32.command = TC_CLSU32_NEW_KNODE;
1121	cls_u32.common.chain_index = 0;
1122	cls_u32.common.protocol = htons(ETH_P_ALL);
1123	cls_u32.knode.exts = exts;
1124	cls_u32.knode.sel = sel;
1125	cls_u32.knode.handle = 0x123;
1126
1127	exts->nr_actions = nk;
1128	exts->actions = actions;
1129	for (i = 0; i < nk; i++) {
1130		actions[i] = (struct tc_action *)&gact[i];
1131		gact->tcf_action = TC_ACT_SHOT;
1132	}
1133
1134	sel->nkeys = nk;
1135	sel->offshift = 0;
1136	sel->keys[0].off = 6;
1137	sel->keys[0].val = htonl(0xdeadbeef);
1138	sel->keys[0].mask = ~0x0;
1139
1140	ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
1141	if (ret)
1142		goto cleanup_act;
1143
1144	attr.dst = priv->dev->dev_addr;
1145	attr.src = addr;
1146
1147	ret = __stmmac_test_loopback(priv, &attr);
1148	ret = ret ? 0 : -EINVAL; /* Shall NOT receive packet */
1149
1150	cls_u32.command = TC_CLSU32_DELETE_KNODE;
1151	stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
1152
1153cleanup_act:
1154	kfree(gact);
1155cleanup_actions:
1156	kfree(actions);
1157cleanup_exts:
1158	kfree(exts);
1159cleanup_sel:
1160	kfree(sel);
1161	return ret;
1162}
1163#else
1164static int stmmac_test_rxp(struct stmmac_priv *priv)
1165{
1166	return -EOPNOTSUPP;
1167}
1168#endif
1169
1170static int stmmac_test_desc_sai(struct stmmac_priv *priv)
1171{
1172	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1173	struct stmmac_packet_attrs attr = { };
1174	int ret;
1175
1176	if (!priv->dma_cap.vlins)
1177		return -EOPNOTSUPP;
1178
1179	attr.remove_sa = true;
1180	attr.sarc = true;
1181	attr.src = src;
1182	attr.dst = priv->dev->dev_addr;
1183
1184	priv->sarc_type = 0x1;
1185
1186	ret = __stmmac_test_loopback(priv, &attr);
1187
1188	priv->sarc_type = 0x0;
1189	return ret;
1190}
1191
1192static int stmmac_test_desc_sar(struct stmmac_priv *priv)
1193{
1194	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1195	struct stmmac_packet_attrs attr = { };
1196	int ret;
1197
1198	if (!priv->dma_cap.vlins)
1199		return -EOPNOTSUPP;
1200
1201	attr.sarc = true;
1202	attr.src = src;
1203	attr.dst = priv->dev->dev_addr;
1204
1205	priv->sarc_type = 0x2;
1206
1207	ret = __stmmac_test_loopback(priv, &attr);
1208
1209	priv->sarc_type = 0x0;
1210	return ret;
1211}
1212
1213static int stmmac_test_reg_sai(struct stmmac_priv *priv)
1214{
1215	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1216	struct stmmac_packet_attrs attr = { };
1217	int ret;
1218
1219	if (!priv->dma_cap.vlins)
1220		return -EOPNOTSUPP;
1221
1222	attr.remove_sa = true;
1223	attr.sarc = true;
1224	attr.src = src;
1225	attr.dst = priv->dev->dev_addr;
1226
1227	if (stmmac_sarc_configure(priv, priv->ioaddr, 0x2))
1228		return -EOPNOTSUPP;
1229
1230	ret = __stmmac_test_loopback(priv, &attr);
1231
1232	stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
1233	return ret;
1234}
1235
1236static int stmmac_test_reg_sar(struct stmmac_priv *priv)
1237{
1238	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1239	struct stmmac_packet_attrs attr = { };
1240	int ret;
1241
1242	if (!priv->dma_cap.vlins)
1243		return -EOPNOTSUPP;
1244
1245	attr.sarc = true;
1246	attr.src = src;
1247	attr.dst = priv->dev->dev_addr;
1248
1249	if (stmmac_sarc_configure(priv, priv->ioaddr, 0x3))
1250		return -EOPNOTSUPP;
1251
1252	ret = __stmmac_test_loopback(priv, &attr);
1253
1254	stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
1255	return ret;
1256}
1257
1258static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
1259{
1260	struct stmmac_packet_attrs attr = { };
1261	struct stmmac_test_priv *tpriv;
1262	struct sk_buff *skb = NULL;
1263	int ret = 0;
1264	u16 proto;
1265
1266	if (!priv->dma_cap.vlins)
1267		return -EOPNOTSUPP;
1268
1269	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
1270	if (!tpriv)
1271		return -ENOMEM;
1272
1273	proto = svlan ? ETH_P_8021AD : ETH_P_8021Q;
1274
1275	tpriv->ok = false;
1276	tpriv->double_vlan = svlan;
1277	init_completion(&tpriv->comp);
1278
1279	tpriv->pt.type = svlan ? htons(ETH_P_8021Q) : htons(ETH_P_IP);
1280	tpriv->pt.func = stmmac_test_vlan_validate;
1281	tpriv->pt.dev = priv->dev;
1282	tpriv->pt.af_packet_priv = tpriv;
1283	tpriv->packet = &attr;
1284	tpriv->vlan_id = 0x123;
1285	dev_add_pack(&tpriv->pt);
1286
1287	ret = vlan_vid_add(priv->dev, htons(proto), tpriv->vlan_id);
1288	if (ret)
1289		goto cleanup;
1290
1291	attr.dst = priv->dev->dev_addr;
1292
1293	skb = stmmac_test_get_udp_skb(priv, &attr);
1294	if (!skb) {
1295		ret = -ENOMEM;
1296		goto vlan_del;
1297	}
1298
1299	__vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
1300	skb->protocol = htons(proto);
1301
1302	ret = dev_direct_xmit(skb, 0);
1303	if (ret)
1304		goto vlan_del;
1305
1306	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
1307	ret = tpriv->ok ? 0 : -ETIMEDOUT;
1308
1309vlan_del:
1310	vlan_vid_del(priv->dev, htons(proto), tpriv->vlan_id);
1311cleanup:
1312	dev_remove_pack(&tpriv->pt);
1313	kfree(tpriv);
1314	return ret;
1315}
1316
1317static int stmmac_test_vlanoff(struct stmmac_priv *priv)
1318{
1319	return stmmac_test_vlanoff_common(priv, false);
1320}
1321
1322static int stmmac_test_svlanoff(struct stmmac_priv *priv)
1323{
1324	if (!priv->dma_cap.dvlan)
1325		return -EOPNOTSUPP;
1326	return stmmac_test_vlanoff_common(priv, true);
1327}
1328
1329#ifdef CONFIG_NET_CLS_ACT
1330static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
1331				u32 dst_mask, u32 src_mask)
1332{
1333	struct flow_dissector_key_ipv4_addrs key, mask;
1334	unsigned long dummy_cookie = 0xdeadbeef;
1335	struct stmmac_packet_attrs attr = { };
1336	struct flow_dissector *dissector;
1337	struct flow_cls_offload *cls;
1338	int ret, old_enable = 0;
1339	struct flow_rule *rule;
1340
1341	if (!tc_can_offload(priv->dev))
1342		return -EOPNOTSUPP;
1343	if (!priv->dma_cap.l3l4fnum)
1344		return -EOPNOTSUPP;
1345	if (priv->rss.enable) {
1346		old_enable = priv->rss.enable;
1347		priv->rss.enable = false;
1348		stmmac_rss_configure(priv, priv->hw, NULL,
1349				     priv->plat->rx_queues_to_use);
1350	}
1351
1352	dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
1353	if (!dissector) {
1354		ret = -ENOMEM;
1355		goto cleanup_rss;
1356	}
1357
1358	dissector->used_keys |= (1ULL << FLOW_DISSECTOR_KEY_IPV4_ADDRS);
1359	dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0;
1360
1361	cls = kzalloc(sizeof(*cls), GFP_KERNEL);
1362	if (!cls) {
1363		ret = -ENOMEM;
1364		goto cleanup_dissector;
1365	}
1366
1367	cls->common.chain_index = 0;
1368	cls->command = FLOW_CLS_REPLACE;
1369	cls->cookie = dummy_cookie;
1370
1371	rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
1372	if (!rule) {
1373		ret = -ENOMEM;
1374		goto cleanup_cls;
1375	}
1376
1377	rule->match.dissector = dissector;
1378	rule->match.key = (void *)&key;
1379	rule->match.mask = (void *)&mask;
1380
1381	key.src = htonl(src);
1382	key.dst = htonl(dst);
1383	mask.src = src_mask;
1384	mask.dst = dst_mask;
1385
1386	cls->rule = rule;
1387
1388	rule->action.entries[0].id = FLOW_ACTION_DROP;
1389	rule->action.entries[0].hw_stats = FLOW_ACTION_HW_STATS_ANY;
1390	rule->action.num_entries = 1;
1391
1392	attr.dst = priv->dev->dev_addr;
1393	attr.ip_dst = dst;
1394	attr.ip_src = src;
1395
1396	/* Shall receive packet */
1397	ret = __stmmac_test_loopback(priv, &attr);
1398	if (ret)
1399		goto cleanup_rule;
1400
1401	ret = stmmac_tc_setup_cls(priv, priv, cls);
1402	if (ret)
1403		goto cleanup_rule;
1404
1405	/* Shall NOT receive packet */
1406	ret = __stmmac_test_loopback(priv, &attr);
1407	ret = ret ? 0 : -EINVAL;
1408
1409	cls->command = FLOW_CLS_DESTROY;
1410	stmmac_tc_setup_cls(priv, priv, cls);
1411cleanup_rule:
1412	kfree(rule);
1413cleanup_cls:
1414	kfree(cls);
1415cleanup_dissector:
1416	kfree(dissector);
1417cleanup_rss:
1418	if (old_enable) {
1419		priv->rss.enable = old_enable;
1420		stmmac_rss_configure(priv, priv->hw, &priv->rss,
1421				     priv->plat->rx_queues_to_use);
1422	}
1423
1424	return ret;
1425}
1426#else
1427static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
1428				u32 dst_mask, u32 src_mask)
1429{
1430	return -EOPNOTSUPP;
1431}
1432#endif
1433
1434static int stmmac_test_l3filt_da(struct stmmac_priv *priv)
1435{
1436	u32 addr = 0x10203040;
1437
1438	return __stmmac_test_l3filt(priv, addr, 0, ~0, 0);
1439}
1440
1441static int stmmac_test_l3filt_sa(struct stmmac_priv *priv)
1442{
1443	u32 addr = 0x10203040;
1444
1445	return __stmmac_test_l3filt(priv, 0, addr, 0, ~0);
1446}
1447
1448#ifdef CONFIG_NET_CLS_ACT
1449static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
1450				u32 dst_mask, u32 src_mask, bool udp)
1451{
1452	struct {
1453		struct flow_dissector_key_basic bkey;
1454		struct flow_dissector_key_ports key;
1455	} __aligned(BITS_PER_LONG / 8) keys;
1456	struct {
1457		struct flow_dissector_key_basic bmask;
1458		struct flow_dissector_key_ports mask;
1459	} __aligned(BITS_PER_LONG / 8) masks;
1460	unsigned long dummy_cookie = 0xdeadbeef;
1461	struct stmmac_packet_attrs attr = { };
1462	struct flow_dissector *dissector;
1463	struct flow_cls_offload *cls;
1464	int ret, old_enable = 0;
1465	struct flow_rule *rule;
1466
1467	if (!tc_can_offload(priv->dev))
1468		return -EOPNOTSUPP;
1469	if (!priv->dma_cap.l3l4fnum)
1470		return -EOPNOTSUPP;
1471	if (priv->rss.enable) {
1472		old_enable = priv->rss.enable;
1473		priv->rss.enable = false;
1474		stmmac_rss_configure(priv, priv->hw, NULL,
1475				     priv->plat->rx_queues_to_use);
1476	}
1477
1478	dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
1479	if (!dissector) {
1480		ret = -ENOMEM;
1481		goto cleanup_rss;
1482	}
1483
1484	dissector->used_keys |= (1ULL << FLOW_DISSECTOR_KEY_BASIC);
1485	dissector->used_keys |= (1ULL << FLOW_DISSECTOR_KEY_PORTS);
1486	dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0;
1487	dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key);
1488
1489	cls = kzalloc(sizeof(*cls), GFP_KERNEL);
1490	if (!cls) {
1491		ret = -ENOMEM;
1492		goto cleanup_dissector;
1493	}
1494
1495	cls->common.chain_index = 0;
1496	cls->command = FLOW_CLS_REPLACE;
1497	cls->cookie = dummy_cookie;
1498
1499	rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
1500	if (!rule) {
1501		ret = -ENOMEM;
1502		goto cleanup_cls;
1503	}
1504
1505	rule->match.dissector = dissector;
1506	rule->match.key = (void *)&keys;
1507	rule->match.mask = (void *)&masks;
1508
1509	keys.bkey.ip_proto = udp ? IPPROTO_UDP : IPPROTO_TCP;
1510	keys.key.src = htons(src);
1511	keys.key.dst = htons(dst);
1512	masks.mask.src = src_mask;
1513	masks.mask.dst = dst_mask;
1514
1515	cls->rule = rule;
1516
1517	rule->action.entries[0].id = FLOW_ACTION_DROP;
1518	rule->action.entries[0].hw_stats = FLOW_ACTION_HW_STATS_ANY;
1519	rule->action.num_entries = 1;
1520
1521	attr.dst = priv->dev->dev_addr;
1522	attr.tcp = !udp;
1523	attr.sport = src;
1524	attr.dport = dst;
1525	attr.ip_dst = 0;
1526
1527	/* Shall receive packet */
1528	ret = __stmmac_test_loopback(priv, &attr);
1529	if (ret)
1530		goto cleanup_rule;
1531
1532	ret = stmmac_tc_setup_cls(priv, priv, cls);
1533	if (ret)
1534		goto cleanup_rule;
1535
1536	/* Shall NOT receive packet */
1537	ret = __stmmac_test_loopback(priv, &attr);
1538	ret = ret ? 0 : -EINVAL;
1539
1540	cls->command = FLOW_CLS_DESTROY;
1541	stmmac_tc_setup_cls(priv, priv, cls);
1542cleanup_rule:
1543	kfree(rule);
1544cleanup_cls:
1545	kfree(cls);
1546cleanup_dissector:
1547	kfree(dissector);
1548cleanup_rss:
1549	if (old_enable) {
1550		priv->rss.enable = old_enable;
1551		stmmac_rss_configure(priv, priv->hw, &priv->rss,
1552				     priv->plat->rx_queues_to_use);
1553	}
1554
1555	return ret;
1556}
1557#else
1558static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
1559				u32 dst_mask, u32 src_mask, bool udp)
1560{
1561	return -EOPNOTSUPP;
1562}
1563#endif
1564
1565static int stmmac_test_l4filt_da_tcp(struct stmmac_priv *priv)
1566{
1567	u16 dummy_port = 0x123;
1568
1569	return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, false);
1570}
1571
1572static int stmmac_test_l4filt_sa_tcp(struct stmmac_priv *priv)
1573{
1574	u16 dummy_port = 0x123;
1575
1576	return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, false);
1577}
1578
1579static int stmmac_test_l4filt_da_udp(struct stmmac_priv *priv)
1580{
1581	u16 dummy_port = 0x123;
1582
1583	return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, true);
1584}
1585
1586static int stmmac_test_l4filt_sa_udp(struct stmmac_priv *priv)
1587{
1588	u16 dummy_port = 0x123;
1589
1590	return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, true);
1591}
1592
1593static int stmmac_test_arp_validate(struct sk_buff *skb,
1594				    struct net_device *ndev,
1595				    struct packet_type *pt,
1596				    struct net_device *orig_ndev)
1597{
1598	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
1599	struct ethhdr *ehdr;
1600	struct arphdr *ahdr;
1601
1602	ehdr = (struct ethhdr *)skb_mac_header(skb);
1603	if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->src))
1604		goto out;
1605
1606	ahdr = arp_hdr(skb);
1607	if (ahdr->ar_op != htons(ARPOP_REPLY))
1608		goto out;
1609
1610	tpriv->ok = true;
1611	complete(&tpriv->comp);
1612out:
1613	kfree_skb(skb);
1614	return 0;
1615}
1616
1617static int stmmac_test_arpoffload(struct stmmac_priv *priv)
1618{
1619	unsigned char src[ETH_ALEN] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06};
1620	unsigned char dst[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1621	struct stmmac_packet_attrs attr = { };
1622	struct stmmac_test_priv *tpriv;
1623	struct sk_buff *skb = NULL;
1624	u32 ip_addr = 0xdeadcafe;
1625	u32 ip_src = 0xdeadbeef;
1626	int ret;
1627
1628	if (!priv->dma_cap.arpoffsel)
1629		return -EOPNOTSUPP;
1630
1631	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
1632	if (!tpriv)
1633		return -ENOMEM;
1634
1635	tpriv->ok = false;
1636	init_completion(&tpriv->comp);
1637
1638	tpriv->pt.type = htons(ETH_P_ARP);
1639	tpriv->pt.func = stmmac_test_arp_validate;
1640	tpriv->pt.dev = priv->dev;
1641	tpriv->pt.af_packet_priv = tpriv;
1642	tpriv->packet = &attr;
1643	dev_add_pack(&tpriv->pt);
1644
1645	attr.src = src;
1646	attr.ip_src = ip_src;
1647	attr.dst = dst;
1648	attr.ip_dst = ip_addr;
1649
1650	skb = stmmac_test_get_arp_skb(priv, &attr);
1651	if (!skb) {
1652		ret = -ENOMEM;
1653		goto cleanup;
1654	}
1655
1656	ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr);
1657	if (ret) {
1658		kfree_skb(skb);
1659		goto cleanup;
1660	}
1661
1662	ret = dev_set_promiscuity(priv->dev, 1);
1663	if (ret) {
1664		kfree_skb(skb);
1665		goto cleanup;
1666	}
1667
1668	ret = dev_direct_xmit(skb, 0);
1669	if (ret)
1670		goto cleanup_promisc;
1671
1672	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
1673	ret = tpriv->ok ? 0 : -ETIMEDOUT;
1674
1675cleanup_promisc:
1676	dev_set_promiscuity(priv->dev, -1);
1677cleanup:
1678	stmmac_set_arp_offload(priv, priv->hw, false, 0x0);
1679	dev_remove_pack(&tpriv->pt);
1680	kfree(tpriv);
1681	return ret;
1682}
1683
1684static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
1685{
1686	struct stmmac_packet_attrs attr = { };
1687	int size = priv->dma_conf.dma_buf_sz;
1688
1689	attr.dst = priv->dev->dev_addr;
1690	attr.max_size = size - ETH_FCS_LEN;
1691	attr.queue_mapping = queue;
1692
1693	return __stmmac_test_loopback(priv, &attr);
1694}
1695
1696static int stmmac_test_jumbo(struct stmmac_priv *priv)
1697{
1698	return __stmmac_test_jumbo(priv, 0);
1699}
1700
1701static int stmmac_test_mjumbo(struct stmmac_priv *priv)
1702{
1703	u32 chan, tx_cnt = priv->plat->tx_queues_to_use;
1704	int ret;
1705
1706	if (tx_cnt <= 1)
1707		return -EOPNOTSUPP;
1708
1709	for (chan = 0; chan < tx_cnt; chan++) {
1710		ret = __stmmac_test_jumbo(priv, chan);
1711		if (ret)
1712			return ret;
1713	}
1714
1715	return 0;
1716}
1717
1718static int stmmac_test_sph(struct stmmac_priv *priv)
1719{
1720	unsigned long cnt_end, cnt_start = priv->xstats.rx_split_hdr_pkt_n;
1721	struct stmmac_packet_attrs attr = { };
1722	int ret;
1723
1724	if (!priv->sph)
1725		return -EOPNOTSUPP;
1726
1727	/* Check for UDP first */
1728	attr.dst = priv->dev->dev_addr;
1729	attr.tcp = false;
1730
1731	ret = __stmmac_test_loopback(priv, &attr);
1732	if (ret)
1733		return ret;
1734
1735	cnt_end = priv->xstats.rx_split_hdr_pkt_n;
1736	if (cnt_end <= cnt_start)
1737		return -EINVAL;
1738
1739	/* Check for TCP now */
1740	cnt_start = cnt_end;
1741
1742	attr.dst = priv->dev->dev_addr;
1743	attr.tcp = true;
1744
1745	ret = __stmmac_test_loopback(priv, &attr);
1746	if (ret)
1747		return ret;
1748
1749	cnt_end = priv->xstats.rx_split_hdr_pkt_n;
1750	if (cnt_end <= cnt_start)
1751		return -EINVAL;
1752
1753	return 0;
1754}
1755
1756static int stmmac_test_tbs(struct stmmac_priv *priv)
1757{
1758#define STMMAC_TBS_LT_OFFSET		(500 * 1000 * 1000) /* 500 ms */
1759	struct stmmac_packet_attrs attr = { };
1760	struct tc_etf_qopt_offload qopt;
1761	u64 start_time, curr_time = 0;
1762	unsigned long flags;
1763	int ret, i;
1764
1765	if (!priv->hwts_tx_en)
1766		return -EOPNOTSUPP;
1767
1768	/* Find first TBS enabled Queue, if any */
1769	for (i = 0; i < priv->plat->tx_queues_to_use; i++)
1770		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_AVAIL)
1771			break;
1772
1773	if (i >= priv->plat->tx_queues_to_use)
1774		return -EOPNOTSUPP;
1775
1776	qopt.enable = true;
1777	qopt.queue = i;
1778
1779	ret = stmmac_tc_setup_etf(priv, priv, &qopt);
1780	if (ret)
1781		return ret;
1782
1783	read_lock_irqsave(&priv->ptp_lock, flags);
1784	stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
1785	read_unlock_irqrestore(&priv->ptp_lock, flags);
1786
1787	if (!curr_time) {
1788		ret = -EOPNOTSUPP;
1789		goto fail_disable;
1790	}
1791
1792	start_time = curr_time;
1793	curr_time += STMMAC_TBS_LT_OFFSET;
1794
1795	attr.dst = priv->dev->dev_addr;
1796	attr.timestamp = curr_time;
1797	attr.timeout = nsecs_to_jiffies(2 * STMMAC_TBS_LT_OFFSET);
1798	attr.queue_mapping = i;
1799
1800	ret = __stmmac_test_loopback(priv, &attr);
1801	if (ret)
1802		goto fail_disable;
1803
1804	/* Check if expected time has elapsed */
1805	read_lock_irqsave(&priv->ptp_lock, flags);
1806	stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
1807	read_unlock_irqrestore(&priv->ptp_lock, flags);
1808
1809	if ((curr_time - start_time) < STMMAC_TBS_LT_OFFSET)
1810		ret = -EINVAL;
1811
1812fail_disable:
1813	qopt.enable = false;
1814	stmmac_tc_setup_etf(priv, priv, &qopt);
1815	return ret;
1816}
1817
1818#define STMMAC_LOOPBACK_NONE	0
1819#define STMMAC_LOOPBACK_MAC	1
1820#define STMMAC_LOOPBACK_PHY	2
1821
1822static const struct stmmac_test {
1823	char name[ETH_GSTRING_LEN];
1824	int lb;
1825	int (*fn)(struct stmmac_priv *priv);
1826} stmmac_selftests[] = {
1827	{
1828		.name = "MAC Loopback               ",
1829		.lb = STMMAC_LOOPBACK_MAC,
1830		.fn = stmmac_test_mac_loopback,
1831	}, {
1832		.name = "PHY Loopback               ",
1833		.lb = STMMAC_LOOPBACK_NONE, /* Test will handle it */
1834		.fn = stmmac_test_phy_loopback,
1835	}, {
1836		.name = "MMC Counters               ",
1837		.lb = STMMAC_LOOPBACK_PHY,
1838		.fn = stmmac_test_mmc,
1839	}, {
1840		.name = "EEE                        ",
1841		.lb = STMMAC_LOOPBACK_PHY,
1842		.fn = stmmac_test_eee,
1843	}, {
1844		.name = "Hash Filter MC             ",
1845		.lb = STMMAC_LOOPBACK_PHY,
1846		.fn = stmmac_test_hfilt,
1847	}, {
1848		.name = "Perfect Filter UC          ",
1849		.lb = STMMAC_LOOPBACK_PHY,
1850		.fn = stmmac_test_pfilt,
1851	}, {
1852		.name = "MC Filter                  ",
1853		.lb = STMMAC_LOOPBACK_PHY,
1854		.fn = stmmac_test_mcfilt,
1855	}, {
1856		.name = "UC Filter                  ",
1857		.lb = STMMAC_LOOPBACK_PHY,
1858		.fn = stmmac_test_ucfilt,
1859	}, {
1860		.name = "Flow Control               ",
1861		.lb = STMMAC_LOOPBACK_PHY,
1862		.fn = stmmac_test_flowctrl,
1863	}, {
1864		.name = "RSS                        ",
1865		.lb = STMMAC_LOOPBACK_PHY,
1866		.fn = stmmac_test_rss,
1867	}, {
1868		.name = "VLAN Filtering             ",
1869		.lb = STMMAC_LOOPBACK_PHY,
1870		.fn = stmmac_test_vlanfilt,
1871	}, {
1872		.name = "VLAN Filtering (perf)      ",
1873		.lb = STMMAC_LOOPBACK_PHY,
1874		.fn = stmmac_test_vlanfilt_perfect,
1875	}, {
1876		.name = "Double VLAN Filter         ",
1877		.lb = STMMAC_LOOPBACK_PHY,
1878		.fn = stmmac_test_dvlanfilt,
1879	}, {
1880		.name = "Double VLAN Filter (perf)  ",
1881		.lb = STMMAC_LOOPBACK_PHY,
1882		.fn = stmmac_test_dvlanfilt_perfect,
1883	}, {
1884		.name = "Flexible RX Parser         ",
1885		.lb = STMMAC_LOOPBACK_PHY,
1886		.fn = stmmac_test_rxp,
1887	}, {
1888		.name = "SA Insertion (desc)        ",
1889		.lb = STMMAC_LOOPBACK_PHY,
1890		.fn = stmmac_test_desc_sai,
1891	}, {
1892		.name = "SA Replacement (desc)      ",
1893		.lb = STMMAC_LOOPBACK_PHY,
1894		.fn = stmmac_test_desc_sar,
1895	}, {
1896		.name = "SA Insertion (reg)         ",
1897		.lb = STMMAC_LOOPBACK_PHY,
1898		.fn = stmmac_test_reg_sai,
1899	}, {
1900		.name = "SA Replacement (reg)       ",
1901		.lb = STMMAC_LOOPBACK_PHY,
1902		.fn = stmmac_test_reg_sar,
1903	}, {
1904		.name = "VLAN TX Insertion          ",
1905		.lb = STMMAC_LOOPBACK_PHY,
1906		.fn = stmmac_test_vlanoff,
1907	}, {
1908		.name = "SVLAN TX Insertion         ",
1909		.lb = STMMAC_LOOPBACK_PHY,
1910		.fn = stmmac_test_svlanoff,
1911	}, {
1912		.name = "L3 DA Filtering            ",
1913		.lb = STMMAC_LOOPBACK_PHY,
1914		.fn = stmmac_test_l3filt_da,
1915	}, {
1916		.name = "L3 SA Filtering            ",
1917		.lb = STMMAC_LOOPBACK_PHY,
1918		.fn = stmmac_test_l3filt_sa,
1919	}, {
1920		.name = "L4 DA TCP Filtering        ",
1921		.lb = STMMAC_LOOPBACK_PHY,
1922		.fn = stmmac_test_l4filt_da_tcp,
1923	}, {
1924		.name = "L4 SA TCP Filtering        ",
1925		.lb = STMMAC_LOOPBACK_PHY,
1926		.fn = stmmac_test_l4filt_sa_tcp,
1927	}, {
1928		.name = "L4 DA UDP Filtering        ",
1929		.lb = STMMAC_LOOPBACK_PHY,
1930		.fn = stmmac_test_l4filt_da_udp,
1931	}, {
1932		.name = "L4 SA UDP Filtering        ",
1933		.lb = STMMAC_LOOPBACK_PHY,
1934		.fn = stmmac_test_l4filt_sa_udp,
1935	}, {
1936		.name = "ARP Offload                ",
1937		.lb = STMMAC_LOOPBACK_PHY,
1938		.fn = stmmac_test_arpoffload,
1939	}, {
1940		.name = "Jumbo Frame                ",
1941		.lb = STMMAC_LOOPBACK_PHY,
1942		.fn = stmmac_test_jumbo,
1943	}, {
1944		.name = "Multichannel Jumbo         ",
1945		.lb = STMMAC_LOOPBACK_PHY,
1946		.fn = stmmac_test_mjumbo,
1947	}, {
1948		.name = "Split Header               ",
1949		.lb = STMMAC_LOOPBACK_PHY,
1950		.fn = stmmac_test_sph,
1951	}, {
1952		.name = "TBS (ETF Scheduler)        ",
1953		.lb = STMMAC_LOOPBACK_PHY,
1954		.fn = stmmac_test_tbs,
1955	},
1956};
1957
1958void stmmac_selftest_run(struct net_device *dev,
1959			 struct ethtool_test *etest, u64 *buf)
1960{
1961	struct stmmac_priv *priv = netdev_priv(dev);
1962	int count = stmmac_selftest_get_count(priv);
1963	int i, ret;
1964
1965	memset(buf, 0, sizeof(*buf) * count);
1966	stmmac_test_next_id = 0;
1967
1968	if (etest->flags != ETH_TEST_FL_OFFLINE) {
1969		netdev_err(priv->dev, "Only offline tests are supported\n");
1970		etest->flags |= ETH_TEST_FL_FAILED;
1971		return;
1972	} else if (!netif_carrier_ok(dev)) {
1973		netdev_err(priv->dev, "You need a valid link to execute tests\n");
1974		etest->flags |= ETH_TEST_FL_FAILED;
1975		return;
1976	}
1977
1978	/* Wait for queues to drain */
1979	msleep(200);
1980
1981	for (i = 0; i < count; i++) {
1982		ret = 0;
1983
1984		switch (stmmac_selftests[i].lb) {
1985		case STMMAC_LOOPBACK_PHY:
1986			ret = -EOPNOTSUPP;
1987			if (dev->phydev)
1988				ret = phy_loopback(dev->phydev, true);
1989			if (!ret)
1990				break;
1991			fallthrough;
1992		case STMMAC_LOOPBACK_MAC:
1993			ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true);
1994			break;
1995		case STMMAC_LOOPBACK_NONE:
1996			break;
1997		default:
1998			ret = -EOPNOTSUPP;
1999			break;
2000		}
2001
2002		/*
2003		 * First tests will always be MAC / PHY loopback. If any of
2004		 * them is not supported we abort early.
2005		 */
2006		if (ret) {
2007			netdev_err(priv->dev, "Loopback is not supported\n");
2008			etest->flags |= ETH_TEST_FL_FAILED;
2009			break;
2010		}
2011
2012		ret = stmmac_selftests[i].fn(priv);
2013		if (ret && (ret != -EOPNOTSUPP))
2014			etest->flags |= ETH_TEST_FL_FAILED;
2015		buf[i] = ret;
2016
2017		switch (stmmac_selftests[i].lb) {
2018		case STMMAC_LOOPBACK_PHY:
2019			ret = -EOPNOTSUPP;
2020			if (dev->phydev)
2021				ret = phy_loopback(dev->phydev, false);
2022			if (!ret)
2023				break;
2024			fallthrough;
2025		case STMMAC_LOOPBACK_MAC:
2026			stmmac_set_mac_loopback(priv, priv->ioaddr, false);
2027			break;
2028		default:
2029			break;
2030		}
2031	}
2032}
2033
2034void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
2035{
2036	u8 *p = data;
2037	int i;
2038
2039	for (i = 0; i < stmmac_selftest_get_count(priv); i++) {
2040		snprintf(p, ETH_GSTRING_LEN, "%2d. %s", i + 1,
2041			 stmmac_selftests[i].name);
2042		p += ETH_GSTRING_LEN;
2043	}
2044}
2045
2046int stmmac_selftest_get_count(struct stmmac_priv *priv)
2047{
2048	return ARRAY_SIZE(stmmac_selftests);
2049}
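
These selftests are exposed through the standard ethtool self-test interface: stmmac_selftest_run(), stmmac_selftest_get_strings() and stmmac_selftest_get_count() are wired into the driver's ethtool_ops, so the whole battery is normally run with "ethtool -t <iface> offline" on an interface with link up. As a minimal sketch of what that does under the hood, the userspace program below drives the ETHTOOL_TEST ioctl directly; the interface name "eth0" and the bare numeric printout are illustrative assumptions, and a real tool would also fetch the per-test names via ETHTOOL_GSTRINGS (ETH_SS_TEST).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	const char *ifname = "eth0";	/* illustrative; use the stmmac netdev */
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_test *test;
	struct ifreq ifr = { };
	int fd, i;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	/* testinfo_len tells us how many u64 results to make room for
	 * (stmmac_selftest_get_count() on the driver side).
	 */
	ifr.ifr_data = (void *)&drvinfo;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	test = calloc(1, sizeof(*test) + drvinfo.testinfo_len * sizeof(__u64));
	if (!test)
		return 1;

	test->cmd = ETHTOOL_TEST;
	test->flags = ETH_TEST_FL_OFFLINE;	/* the only mode stmmac accepts */

	ifr.ifr_data = (void *)test;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	/* Each slot holds 0 on success or a negative errno (e.g. -EOPNOTSUPP
	 * for tests the hardware cannot run), in the same order as the
	 * stmmac_selftests[] table above.
	 */
	for (i = 0; i < (int)test->len; i++)
		printf("test %2d: %lld\n", i, (long long)test->data[i]);

	printf("overall: %s\n",
	       (test->flags & ETH_TEST_FL_FAILED) ? "FAILED" : "PASSED");

	free(test);
	close(fd);
	return 0;
}

Without carrier the driver refuses to run and sets ETH_TEST_FL_FAILED, mirroring the netif_carrier_ok() check at the top of stmmac_selftest_run().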