drivers/net/macsec.c (Linux v5.14.15)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * drivers/net/macsec.c - MACsec device
   4 *
   5 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
   6 */
   7
   8#include <linux/types.h>
   9#include <linux/skbuff.h>
  10#include <linux/socket.h>
  11#include <linux/module.h>
  12#include <crypto/aead.h>
  13#include <linux/etherdevice.h>
  14#include <linux/netdevice.h>
  15#include <linux/rtnetlink.h>
  16#include <linux/refcount.h>
  17#include <net/genetlink.h>
  18#include <net/sock.h>
  19#include <net/gro_cells.h>
  20#include <net/macsec.h>
  21#include <linux/phy.h>
  22#include <linux/byteorder/generic.h>
  23#include <linux/if_arp.h>
  24
  25#include <uapi/linux/if_macsec.h>
  26
  27#define MACSEC_SCI_LEN 8
  28
  29/* SecTAG length = macsec_eth_header without the optional SCI */
  30#define MACSEC_TAG_LEN 6
  31
  32struct macsec_eth_header {
  33	struct ethhdr eth;
  34	/* SecTAG */
  35	u8  tci_an;
  36#if defined(__LITTLE_ENDIAN_BITFIELD)
  37	u8  short_length:6,
  38		  unused:2;
  39#elif defined(__BIG_ENDIAN_BITFIELD)
  40	u8        unused:2,
  41	    short_length:6;
  42#else
  43#error	"Please fix <asm/byteorder.h>"
  44#endif
  45	__be32 packet_number;
  46	u8 secure_channel_id[8]; /* optional */
  47} __packed;
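
/* On the wire, the SecTAG that follows the MACsec EtherType looks like
 * this (see IEEE 802.1AE-2006 clause 9):
 *
 *	octet 0     TCI/AN (V, ES, SC, SCB, E, C flags + 2-bit AN)
 *	octet 1     SL, the short length (0 when user data >= 48 octets)
 *	octets 2-5  PN, the 32-bit packet number, big endian
 *	octets 6-13 SCI, present only when the TCI SC bit is set
 *
 * MACSEC_TAG_LEN covers octets 0-5, MACSEC_SCI_LEN the optional SCI.
 */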
  48
  49#define MACSEC_TCI_VERSION 0x80
  50#define MACSEC_TCI_ES      0x40 /* end station */
  51#define MACSEC_TCI_SC      0x20 /* SCI present */
  52#define MACSEC_TCI_SCB     0x10 /* epon */
  53#define MACSEC_TCI_E       0x08 /* encryption */
  54#define MACSEC_TCI_C       0x04 /* changed text */
  55#define MACSEC_AN_MASK     0x03 /* association number */
  56#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)
  57
  58/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
  59#define MIN_NON_SHORT_LEN 48
  60
  61#define GCM_AES_IV_LEN 12
  62#define DEFAULT_ICV_LEN 16
  63
  64#define for_each_rxsc(secy, sc)				\
  65	for (sc = rcu_dereference_bh(secy->rx_sc);	\
  66	     sc;					\
  67	     sc = rcu_dereference_bh(sc->next))
  68#define for_each_rxsc_rtnl(secy, sc)			\
  69	for (sc = rtnl_dereference(secy->rx_sc);	\
  70	     sc;					\
  71	     sc = rtnl_dereference(sc->next))
  72
  73#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))
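
/* pn_same_half() is true when two 32-bit PNs lie in the same half of the
 * numbering space, i.e. their top bits match: pn_same_half(0x7fffffff,
 * 0x80000000) is false, pn_same_half(0x80000000, 0xffffffff) is true.
 * The XPN receive path uses this to decide whether the upper 32 bits of
 * a recovered 64-bit PN need to be bumped.
 */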
  74
  75struct gcm_iv_xpn {
  76	union {
  77		u8 short_secure_channel_id[4];
  78		ssci_t ssci;
  79	};
  80	__be64 pn;
  81} __packed;
  82
  83struct gcm_iv {
  84	union {
  85		u8 secure_channel_id[8];
  86		sci_t sci;
  87	};
  88	__be32 pn;
  89};
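
/* Both layouts build the 96-bit GCM IV from the channel identity and the
 * packet number: SCI (8 octets) || PN (4 octets) for 32-bit PNs, and,
 * with XPN (IEEE 802.1AEbw), (SSCI ^ salt.ssci) || (64-bit PN ^ salt.pn),
 * keeping the IV unique for every frame protected under an SA.
 */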
  90
  91#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
  92
  93struct pcpu_secy_stats {
  94	struct macsec_dev_stats stats;
  95	struct u64_stats_sync syncp;
  96};
  97
  98/**
  99 * struct macsec_dev - private data
 100 * @secy: SecY config
 101 * @real_dev: pointer to underlying netdevice
 102 * @stats: MACsec device stats
 103 * @secys: linked list of SecY's on the underlying device
  104 * @gro_cells: Generic Receive Offload cells used on the receive path
 105 * @offload: status of offloading on the MACsec device
 106 */
 107struct macsec_dev {
 108	struct macsec_secy secy;
 109	struct net_device *real_dev;
 110	struct pcpu_secy_stats __percpu *stats;
 111	struct list_head secys;
 112	struct gro_cells gro_cells;
 113	enum macsec_offload offload;
 114};
 115
 116/**
 117 * struct macsec_rxh_data - rx_handler private argument
 118 * @secys: linked list of SecY's on this underlying device
 119 */
 120struct macsec_rxh_data {
 121	struct list_head secys;
 122};
 123
 124static struct macsec_dev *macsec_priv(const struct net_device *dev)
 125{
 126	return (struct macsec_dev *)netdev_priv(dev);
 127}
 128
 129static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
 130{
 131	return rcu_dereference_bh(dev->rx_handler_data);
 132}
 133
 134static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
 135{
 136	return rtnl_dereference(dev->rx_handler_data);
 137}
 138
 139struct macsec_cb {
 140	struct aead_request *req;
 141	union {
 142		struct macsec_tx_sa *tx_sa;
 143		struct macsec_rx_sa *rx_sa;
 144	};
 145	u8 assoc_num;
 146	bool valid;
 147	bool has_sci;
 148};
 149
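/* RX path lookup pattern: take an RCU reference to the SA, skip inactive
 * ones, and return it only after winning refcount_inc_not_zero() against
 * a concurrent delete; each successful get is paired with
 * macsec_rxsa_put().
 */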
 150static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
 151{
 152	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);
 153
 154	if (!sa || !sa->active)
 155		return NULL;
 156
 157	if (!refcount_inc_not_zero(&sa->refcnt))
 158		return NULL;
 159
 160	return sa;
 161}
 162
 163static void free_rx_sc_rcu(struct rcu_head *head)
 164{
 165	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
 166
 167	free_percpu(rx_sc->stats);
 168	kfree(rx_sc);
 169}
 170
 171static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
 172{
 173	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
 174}
 175
 176static void macsec_rxsc_put(struct macsec_rx_sc *sc)
 177{
 178	if (refcount_dec_and_test(&sc->refcnt))
 179		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
 180}
 181
 182static void free_rxsa(struct rcu_head *head)
 183{
 184	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);
 185
 186	crypto_free_aead(sa->key.tfm);
 187	free_percpu(sa->stats);
 188	kfree(sa);
 189}
 190
 191static void macsec_rxsa_put(struct macsec_rx_sa *sa)
 192{
 193	if (refcount_dec_and_test(&sa->refcnt))
 194		call_rcu(&sa->rcu, free_rxsa);
 195}
 196
 197static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
 198{
 199	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);
 200
 201	if (!sa || !sa->active)
 202		return NULL;
 203
 204	if (!refcount_inc_not_zero(&sa->refcnt))
 205		return NULL;
 206
 207	return sa;
 208}
 209
 210static void free_txsa(struct rcu_head *head)
 211{
 212	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);
 213
 214	crypto_free_aead(sa->key.tfm);
 215	free_percpu(sa->stats);
 216	kfree(sa);
 217}
 218
 219static void macsec_txsa_put(struct macsec_tx_sa *sa)
 220{
 221	if (refcount_dec_and_test(&sa->refcnt))
 222		call_rcu(&sa->rcu, free_txsa);
 223}
 224
 225static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
 226{
 227	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
 228	return (struct macsec_cb *)skb->cb;
 229}
 230
 231#define MACSEC_PORT_ES (htons(0x0001))
 232#define MACSEC_PORT_SCB (0x0000)
 233#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
 234#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)
 235
 236#define MACSEC_GCM_AES_128_SAK_LEN 16
 237#define MACSEC_GCM_AES_256_SAK_LEN 32
 238
 239#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
 240#define DEFAULT_XPN false
 241#define DEFAULT_SEND_SCI true
 242#define DEFAULT_ENCRYPT false
 243#define DEFAULT_ENCODING_SA 0
 244
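/* An explicit SCI is encoded when the administrator requested it, or
 * when there are several peers (n_rx_sc > 1) and neither the End Station
 * nor the EPON SCB shortcut lets the receiver infer it implicitly.
 */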
 245static bool send_sci(const struct macsec_secy *secy)
 246{
 247	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
 248
 249	return tx_sc->send_sci ||
 250		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
 251}
 252
 253static sci_t make_sci(u8 *addr, __be16 port)
 254{
 255	sci_t sci;
 256
 257	memcpy(&sci, addr, ETH_ALEN);
 258	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));
 259
 260	return sci;
 261}
 262
 263static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
 264{
 265	sci_t sci;
 266
 267	if (sci_present)
 268		memcpy(&sci, hdr->secure_channel_id,
 269		       sizeof(hdr->secure_channel_id));
 270	else
 271		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);
 272
 273	return sci;
 274}
 275
 276static unsigned int macsec_sectag_len(bool sci_present)
 277{
 278	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
 279}
 280
 281static unsigned int macsec_hdr_len(bool sci_present)
 282{
 283	return macsec_sectag_len(sci_present) + ETH_HLEN;
 284}
 285
 286static unsigned int macsec_extra_len(bool sci_present)
 287{
 288	return macsec_sectag_len(sci_present) + sizeof(__be16);
 289}
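
/* Concretely: the tag adds 8 octets without the SCI (2-octet EtherType
 * plus the 6-octet SecTAG) or 16 with it, so macsec_hdr_len() is 20 or
 * 28 counting the Ethernet addresses, and a protected frame grows by
 * macsec_extra_len() plus the ICV.
 */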
 290
 291/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
 292static void macsec_fill_sectag(struct macsec_eth_header *h,
 293			       const struct macsec_secy *secy, u32 pn,
 294			       bool sci_present)
 295{
 296	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
 297
 298	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
 299	h->eth.h_proto = htons(ETH_P_MACSEC);
 300
 301	if (sci_present) {
 302		h->tci_an |= MACSEC_TCI_SC;
 303		memcpy(&h->secure_channel_id, &secy->sci,
 304		       sizeof(h->secure_channel_id));
 305	} else {
 306		if (tx_sc->end_station)
 307			h->tci_an |= MACSEC_TCI_ES;
 308		if (tx_sc->scb)
 309			h->tci_an |= MACSEC_TCI_SCB;
 310	}
 311
 312	h->packet_number = htonl(pn);
 313
 314	/* with GCM, C/E clear for !encrypt, both set for encrypt */
 315	if (tx_sc->encrypt)
 316		h->tci_an |= MACSEC_TCI_CONFID;
 317	else if (secy->icv_len != DEFAULT_ICV_LEN)
 318		h->tci_an |= MACSEC_TCI_C;
 319
 320	h->tci_an |= tx_sc->encoding_sa;
 321}
 322
 323static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
 324{
 325	if (data_len < MIN_NON_SHORT_LEN)
 326		h->short_length = data_len;
 327}
 328
  329/* Checks if a MACsec interface is being offloaded to a hardware engine */
 330static bool macsec_is_offloaded(struct macsec_dev *macsec)
 331{
 332	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
 333	    macsec->offload == MACSEC_OFFLOAD_PHY)
 334		return true;
 335
 336	return false;
 337}
 338
 339/* Checks if underlying layers implement MACsec offloading functions. */
 340static bool macsec_check_offload(enum macsec_offload offload,
 341				 struct macsec_dev *macsec)
 342{
 343	if (!macsec || !macsec->real_dev)
 344		return false;
 345
 346	if (offload == MACSEC_OFFLOAD_PHY)
 347		return macsec->real_dev->phydev &&
 348		       macsec->real_dev->phydev->macsec_ops;
 349	else if (offload == MACSEC_OFFLOAD_MAC)
 350		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
 351		       macsec->real_dev->macsec_ops;
 352
 353	return false;
 354}
 355
 356static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
 357						 struct macsec_dev *macsec,
 358						 struct macsec_context *ctx)
 359{
 360	if (ctx) {
 361		memset(ctx, 0, sizeof(*ctx));
 362		ctx->offload = offload;
 363
 364		if (offload == MACSEC_OFFLOAD_PHY)
 365			ctx->phydev = macsec->real_dev->phydev;
 366		else if (offload == MACSEC_OFFLOAD_MAC)
 367			ctx->netdev = macsec->real_dev;
 368	}
 369
 370	if (offload == MACSEC_OFFLOAD_PHY)
 371		return macsec->real_dev->phydev->macsec_ops;
 372	else
 373		return macsec->real_dev->macsec_ops;
 374}
 375
 376/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 377 * context device reference if provided.
 378 */
 379static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
 380					       struct macsec_context *ctx)
 381{
 382	if (!macsec_check_offload(macsec->offload, macsec))
 383		return NULL;
 384
 385	return __macsec_get_ops(macsec->offload, macsec, ctx);
 386}
 387
 388/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
 389static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
 390{
 391	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
 392	int len = skb->len - 2 * ETH_ALEN;
 393	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;
 394
 395	/* a) It comprises at least 17 octets */
 396	if (skb->len <= 16)
 397		return false;
 398
 399	/* b) MACsec EtherType: already checked */
 400
 401	/* c) V bit is clear */
 402	if (h->tci_an & MACSEC_TCI_VERSION)
 403		return false;
 404
 405	/* d) ES or SCB => !SC */
 406	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
 407	    (h->tci_an & MACSEC_TCI_SC))
 408		return false;
 409
 410	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
 411	if (h->unused)
 412		return false;
 413
  414	/* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
 415	if (!h->packet_number && !xpn)
 416		return false;
 417
 418	/* length check, f) g) h) i) */
 419	if (h->short_length)
 420		return len == extra_len + h->short_length;
 421	return len >= extra_len + MIN_NON_SHORT_LEN;
 422}
 423
 424#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
 425#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
 426
 427static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
 428			       salt_t salt)
 429{
 430	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;
 431
 432	gcm_iv->ssci = ssci ^ salt.ssci;
 433	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
 434}
 435
 436static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
 437{
 438	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;
 439
 440	gcm_iv->sci = sci;
 441	gcm_iv->pn = htonl(pn);
 442}
 443
 444static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
 445{
 446	return (struct macsec_eth_header *)skb_mac_header(skb);
 447}
 448
 449static sci_t dev_to_sci(struct net_device *dev, __be16 port)
 450{
 451	return make_sci(dev->dev_addr, port);
 452}
 453
 454static void __macsec_pn_wrapped(struct macsec_secy *secy,
 455				struct macsec_tx_sa *tx_sa)
 456{
 457	pr_debug("PN wrapped, transitioning to !oper\n");
 458	tx_sa->active = false;
 459	if (secy->protect_frames)
 460		secy->operational = false;
 461}
 462
 463void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
 464{
 465	spin_lock_bh(&tx_sa->lock);
 466	__macsec_pn_wrapped(secy, tx_sa);
 467	spin_unlock_bh(&tx_sa->lock);
 468}
 469EXPORT_SYMBOL_GPL(macsec_pn_wrapped);
 470
 471static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
 472			    struct macsec_secy *secy)
 473{
 474	pn_t pn;
 475
 476	spin_lock_bh(&tx_sa->lock);
 477
 478	pn = tx_sa->next_pn_halves;
 479	if (secy->xpn)
 480		tx_sa->next_pn++;
 481	else
 482		tx_sa->next_pn_halves.lower++;
 483
 484	if (tx_sa->next_pn == 0)
 485		__macsec_pn_wrapped(secy, tx_sa);
 486	spin_unlock_bh(&tx_sa->lock);
 487
 488	return pn;
 489}
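
/* The PN handed back here is the one the caller encodes in the SecTAG;
 * the increment happens under tx_sa->lock, so concurrent transmitters
 * never reuse a PN. On exhaustion the SA is disabled instead, since
 * repeating a PN would repeat a GCM IV.
 */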
 490
 491static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
 492{
 493	struct macsec_dev *macsec = netdev_priv(dev);
 494
 495	skb->dev = macsec->real_dev;
 496	skb_reset_mac_header(skb);
 497	skb->protocol = eth_hdr(skb)->h_proto;
 498}
 499
 500static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
 501			    struct macsec_tx_sa *tx_sa)
 502{
 503	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
 504
 505	u64_stats_update_begin(&txsc_stats->syncp);
 506	if (tx_sc->encrypt) {
 507		txsc_stats->stats.OutOctetsEncrypted += skb->len;
 508		txsc_stats->stats.OutPktsEncrypted++;
 509		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
 510	} else {
 511		txsc_stats->stats.OutOctetsProtected += skb->len;
 512		txsc_stats->stats.OutPktsProtected++;
 513		this_cpu_inc(tx_sa->stats->OutPktsProtected);
 514	}
 515	u64_stats_update_end(&txsc_stats->syncp);
 516}
 517
 518static void count_tx(struct net_device *dev, int ret, int len)
 519{
 520	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
 521		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
 522
 523		u64_stats_update_begin(&stats->syncp);
 524		stats->tx_packets++;
 525		stats->tx_bytes += len;
 526		u64_stats_update_end(&stats->syncp);
 527	}
 528}
 529
 530static void macsec_encrypt_done(struct crypto_async_request *base, int err)
 531{
 532	struct sk_buff *skb = base->data;
 533	struct net_device *dev = skb->dev;
 534	struct macsec_dev *macsec = macsec_priv(dev);
 535	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
 536	int len, ret;
 537
 538	aead_request_free(macsec_skb_cb(skb)->req);
 539
 540	rcu_read_lock_bh();
 541	macsec_encrypt_finish(skb, dev);
 542	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
 543	len = skb->len;
 544	ret = dev_queue_xmit(skb);
 545	count_tx(dev, ret, len);
 546	rcu_read_unlock_bh();
 547
 548	macsec_txsa_put(sa);
 549	dev_put(dev);
 550}
 551
 552static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
 553					     unsigned char **iv,
 554					     struct scatterlist **sg,
 555					     int num_frags)
 556{
 557	size_t size, iv_offset, sg_offset;
 558	struct aead_request *req;
 559	void *tmp;
 560
 561	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
 562	iv_offset = size;
 563	size += GCM_AES_IV_LEN;
 564
 565	size = ALIGN(size, __alignof__(struct scatterlist));
 566	sg_offset = size;
 567	size += sizeof(struct scatterlist) * num_frags;
 568
 569	tmp = kmalloc(size, GFP_ATOMIC);
 570	if (!tmp)
 571		return NULL;
 572
 573	*iv = (unsigned char *)(tmp + iv_offset);
 574	*sg = (struct scatterlist *)(tmp + sg_offset);
 575	req = tmp;
 576
 577	aead_request_set_tfm(req, tfm);
 578
 579	return req;
 580}
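
/* Request, IV and scatterlist come from a single allocation,
 *
 *	[ aead_request + crypto_aead_reqsize() | IV | pad | sg[num_frags] ]
 *
 * so freeing the request (aead_request_free) releases all of it.
 */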
 581
 582static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 583				      struct net_device *dev)
 584{
 585	int ret;
 586	struct scatterlist *sg;
 587	struct sk_buff *trailer;
 588	unsigned char *iv;
 589	struct ethhdr *eth;
 590	struct macsec_eth_header *hh;
 591	size_t unprotected_len;
 592	struct aead_request *req;
 593	struct macsec_secy *secy;
 594	struct macsec_tx_sc *tx_sc;
 595	struct macsec_tx_sa *tx_sa;
 596	struct macsec_dev *macsec = macsec_priv(dev);
 597	bool sci_present;
 598	pn_t pn;
 599
 600	secy = &macsec->secy;
 601	tx_sc = &secy->tx_sc;
 602
 603	/* 10.5.1 TX SA assignment */
 604	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
 605	if (!tx_sa) {
 606		secy->operational = false;
 607		kfree_skb(skb);
 608		return ERR_PTR(-EINVAL);
 609	}
 610
 611	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
 612		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
 613		struct sk_buff *nskb = skb_copy_expand(skb,
 614						       MACSEC_NEEDED_HEADROOM,
 615						       MACSEC_NEEDED_TAILROOM,
 616						       GFP_ATOMIC);
 617		if (likely(nskb)) {
 618			consume_skb(skb);
 619			skb = nskb;
 620		} else {
 621			macsec_txsa_put(tx_sa);
 622			kfree_skb(skb);
 623			return ERR_PTR(-ENOMEM);
 624		}
 625	} else {
 626		skb = skb_unshare(skb, GFP_ATOMIC);
 627		if (!skb) {
 628			macsec_txsa_put(tx_sa);
 629			return ERR_PTR(-ENOMEM);
 630		}
 631	}
 632
 633	unprotected_len = skb->len;
 634	eth = eth_hdr(skb);
 635	sci_present = send_sci(secy);
 636	hh = skb_push(skb, macsec_extra_len(sci_present));
 637	memmove(hh, eth, 2 * ETH_ALEN);
 638
 639	pn = tx_sa_update_pn(tx_sa, secy);
 640	if (pn.full64 == 0) {
 641		macsec_txsa_put(tx_sa);
 642		kfree_skb(skb);
 643		return ERR_PTR(-ENOLINK);
 644	}
 645	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
 646	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
 647
 648	skb_put(skb, secy->icv_len);
 649
 650	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
 651		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
 652
 653		u64_stats_update_begin(&secy_stats->syncp);
 654		secy_stats->stats.OutPktsTooLong++;
 655		u64_stats_update_end(&secy_stats->syncp);
 656
 657		macsec_txsa_put(tx_sa);
 658		kfree_skb(skb);
 659		return ERR_PTR(-EINVAL);
 660	}
 661
 662	ret = skb_cow_data(skb, 0, &trailer);
 663	if (unlikely(ret < 0)) {
 664		macsec_txsa_put(tx_sa);
 665		kfree_skb(skb);
 666		return ERR_PTR(ret);
 667	}
 668
 669	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
 670	if (!req) {
 671		macsec_txsa_put(tx_sa);
 672		kfree_skb(skb);
 673		return ERR_PTR(-ENOMEM);
 674	}
 675
 676	if (secy->xpn)
 677		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
 678	else
 679		macsec_fill_iv(iv, secy->sci, pn.lower);
 680
 681	sg_init_table(sg, ret);
 682	ret = skb_to_sgvec(skb, sg, 0, skb->len);
 683	if (unlikely(ret < 0)) {
 684		aead_request_free(req);
 685		macsec_txsa_put(tx_sa);
 686		kfree_skb(skb);
 687		return ERR_PTR(ret);
 688	}
 689
 690	if (tx_sc->encrypt) {
 691		int len = skb->len - macsec_hdr_len(sci_present) -
 692			  secy->icv_len;
 693		aead_request_set_crypt(req, sg, sg, len, iv);
 694		aead_request_set_ad(req, macsec_hdr_len(sci_present));
 695	} else {
 696		aead_request_set_crypt(req, sg, sg, 0, iv);
 697		aead_request_set_ad(req, skb->len - secy->icv_len);
 698	}
 699
 700	macsec_skb_cb(skb)->req = req;
 701	macsec_skb_cb(skb)->tx_sa = tx_sa;
 702	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
 703
 704	dev_hold(skb->dev);
 705	ret = crypto_aead_encrypt(req);
 706	if (ret == -EINPROGRESS) {
 707		return ERR_PTR(ret);
 708	} else if (ret != 0) {
 709		dev_put(skb->dev);
 710		kfree_skb(skb);
 711		aead_request_free(req);
 712		macsec_txsa_put(tx_sa);
 713		return ERR_PTR(-EINVAL);
 714	}
 715
 716	dev_put(skb->dev);
 717	aead_request_free(req);
 718	macsec_txsa_put(tx_sa);
 719
 720	return skb;
 721}
 722
 723static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
 724{
 725	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
 726	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
 727	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
 728	u32 lowest_pn = 0;
 729
 730	spin_lock(&rx_sa->lock);
 731	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
 732		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;
 733
 734	/* Now perform replay protection check again
 735	 * (see IEEE 802.1AE-2006 figure 10-5)
 736	 */
 737	if (secy->replay_protect && pn < lowest_pn &&
 738	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
 739		spin_unlock(&rx_sa->lock);
 740		u64_stats_update_begin(&rxsc_stats->syncp);
 741		rxsc_stats->stats.InPktsLate++;
 742		u64_stats_update_end(&rxsc_stats->syncp);
 743		return false;
 744	}
 745
 746	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
 747		u64_stats_update_begin(&rxsc_stats->syncp);
 748		if (hdr->tci_an & MACSEC_TCI_E)
 749			rxsc_stats->stats.InOctetsDecrypted += skb->len;
 750		else
 751			rxsc_stats->stats.InOctetsValidated += skb->len;
 752		u64_stats_update_end(&rxsc_stats->syncp);
 753	}
 754
 755	if (!macsec_skb_cb(skb)->valid) {
 756		spin_unlock(&rx_sa->lock);
 757
 758		/* 10.6.5 */
 759		if (hdr->tci_an & MACSEC_TCI_C ||
 760		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
 761			u64_stats_update_begin(&rxsc_stats->syncp);
 762			rxsc_stats->stats.InPktsNotValid++;
 763			u64_stats_update_end(&rxsc_stats->syncp);
 764			return false;
 765		}
 766
 767		u64_stats_update_begin(&rxsc_stats->syncp);
 768		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
 769			rxsc_stats->stats.InPktsInvalid++;
 770			this_cpu_inc(rx_sa->stats->InPktsInvalid);
 771		} else if (pn < lowest_pn) {
 772			rxsc_stats->stats.InPktsDelayed++;
 773		} else {
 774			rxsc_stats->stats.InPktsUnchecked++;
 775		}
 776		u64_stats_update_end(&rxsc_stats->syncp);
 777	} else {
 778		u64_stats_update_begin(&rxsc_stats->syncp);
 779		if (pn < lowest_pn) {
 780			rxsc_stats->stats.InPktsDelayed++;
 781		} else {
 782			rxsc_stats->stats.InPktsOK++;
 783			this_cpu_inc(rx_sa->stats->InPktsOK);
 784		}
 785		u64_stats_update_end(&rxsc_stats->syncp);
 786
  787		/* Instead of "pn >=" - to support pn overflow in xpn */
 788		if (pn + 1 > rx_sa->next_pn_halves.lower) {
 789			rx_sa->next_pn_halves.lower = pn + 1;
 790		} else if (secy->xpn &&
 791			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
 792			rx_sa->next_pn_halves.upper++;
 793			rx_sa->next_pn_halves.lower = pn + 1;
 794		}
 795
 796		spin_unlock(&rx_sa->lock);
 797	}
 798
 799	return true;
 800}
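
/* Worked example of the window arithmetic above: with replay_window 32
 * and next_pn 100, lowest_pn is 68. A frame with PN 60 is counted as
 * InPktsLate (and dropped when replay protection is on), PN 70 passes,
 * and a valid PN 120 advances next_pn to 121.
 */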
 801
 802static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
 803{
 804	skb->pkt_type = PACKET_HOST;
 805	skb->protocol = eth_type_trans(skb, dev);
 806
 807	skb_reset_network_header(skb);
 808	if (!skb_transport_header_was_set(skb))
 809		skb_reset_transport_header(skb);
 810	skb_reset_mac_len(skb);
 811}
 812
 813static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
 814{
 815	skb->ip_summed = CHECKSUM_NONE;
 816	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
 817	skb_pull(skb, hdr_len);
 818	pskb_trim_unique(skb, skb->len - icv_len);
 819}
 820
 821static void count_rx(struct net_device *dev, int len)
 822{
 823	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
 824
 825	u64_stats_update_begin(&stats->syncp);
 826	stats->rx_packets++;
 827	stats->rx_bytes += len;
 828	u64_stats_update_end(&stats->syncp);
 829}
 830
 831static void macsec_decrypt_done(struct crypto_async_request *base, int err)
 832{
 833	struct sk_buff *skb = base->data;
 834	struct net_device *dev = skb->dev;
 835	struct macsec_dev *macsec = macsec_priv(dev);
 836	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
 837	struct macsec_rx_sc *rx_sc = rx_sa->sc;
 838	int len;
 839	u32 pn;
 840
 841	aead_request_free(macsec_skb_cb(skb)->req);
 842
 843	if (!err)
 844		macsec_skb_cb(skb)->valid = true;
 845
 846	rcu_read_lock_bh();
 847	pn = ntohl(macsec_ethhdr(skb)->packet_number);
 848	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
 849		rcu_read_unlock_bh();
 850		kfree_skb(skb);
 851		goto out;
 852	}
 853
 854	macsec_finalize_skb(skb, macsec->secy.icv_len,
 855			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
 856	macsec_reset_skb(skb, macsec->secy.netdev);
 857
 858	len = skb->len;
 859	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
 860		count_rx(dev, len);
 861
 862	rcu_read_unlock_bh();
 863
 864out:
 865	macsec_rxsa_put(rx_sa);
 866	macsec_rxsc_put(rx_sc);
 867	dev_put(dev);
 868}
 869
 870static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 871				      struct net_device *dev,
 872				      struct macsec_rx_sa *rx_sa,
 873				      sci_t sci,
 874				      struct macsec_secy *secy)
 875{
 876	int ret;
 877	struct scatterlist *sg;
 878	struct sk_buff *trailer;
 879	unsigned char *iv;
 880	struct aead_request *req;
 881	struct macsec_eth_header *hdr;
 882	u32 hdr_pn;
 883	u16 icv_len = secy->icv_len;
 884
 885	macsec_skb_cb(skb)->valid = false;
 886	skb = skb_share_check(skb, GFP_ATOMIC);
 887	if (!skb)
 888		return ERR_PTR(-ENOMEM);
 889
 890	ret = skb_cow_data(skb, 0, &trailer);
 891	if (unlikely(ret < 0)) {
 892		kfree_skb(skb);
 893		return ERR_PTR(ret);
 894	}
 895	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
 896	if (!req) {
 897		kfree_skb(skb);
 898		return ERR_PTR(-ENOMEM);
 899	}
 900
 901	hdr = (struct macsec_eth_header *)skb->data;
 902	hdr_pn = ntohl(hdr->packet_number);
 903
 904	if (secy->xpn) {
 905		pn_t recovered_pn = rx_sa->next_pn_halves;
 906
 907		recovered_pn.lower = hdr_pn;
 908		if (hdr_pn < rx_sa->next_pn_halves.lower &&
 909		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
 910			recovered_pn.upper++;
 911
 912		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
 913				   rx_sa->key.salt);
 914	} else {
 915		macsec_fill_iv(iv, sci, hdr_pn);
 916	}
 917
 918	sg_init_table(sg, ret);
 919	ret = skb_to_sgvec(skb, sg, 0, skb->len);
 920	if (unlikely(ret < 0)) {
 921		aead_request_free(req);
 922		kfree_skb(skb);
 923		return ERR_PTR(ret);
 924	}
 925
 926	if (hdr->tci_an & MACSEC_TCI_E) {
 927		/* confidentiality: ethernet + macsec header
 928		 * authenticated, encrypted payload
 929		 */
 930		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);
 931
 932		aead_request_set_crypt(req, sg, sg, len, iv);
 933		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
 934		skb = skb_unshare(skb, GFP_ATOMIC);
 935		if (!skb) {
 936			aead_request_free(req);
 937			return ERR_PTR(-ENOMEM);
 938		}
 939	} else {
 940		/* integrity only: all headers + data authenticated */
 941		aead_request_set_crypt(req, sg, sg, icv_len, iv);
 942		aead_request_set_ad(req, skb->len - icv_len);
 943	}
 944
 945	macsec_skb_cb(skb)->req = req;
 946	skb->dev = dev;
 947	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);
 948
 949	dev_hold(dev);
 950	ret = crypto_aead_decrypt(req);
 951	if (ret == -EINPROGRESS) {
 952		return ERR_PTR(ret);
 953	} else if (ret != 0) {
 954		/* decryption/authentication failed
 955		 * 10.6 if validateFrames is disabled, deliver anyway
 956		 */
 957		if (ret != -EBADMSG) {
 958			kfree_skb(skb);
 959			skb = ERR_PTR(ret);
 960		}
 961	} else {
 962		macsec_skb_cb(skb)->valid = true;
 963	}
 964	dev_put(dev);
 965
 966	aead_request_free(req);
 967
 968	return skb;
 969}
 970
 971static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
 972{
 973	struct macsec_rx_sc *rx_sc;
 974
 975	for_each_rxsc(secy, rx_sc) {
 976		if (rx_sc->sci == sci)
 977			return rx_sc;
 978	}
 979
 980	return NULL;
 981}
 982
 983static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
 984{
 985	struct macsec_rx_sc *rx_sc;
 986
 987	for_each_rxsc_rtnl(secy, rx_sc) {
 988		if (rx_sc->sci == sci)
 989			return rx_sc;
 990	}
 991
 992	return NULL;
 993}
 994
 995static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
 996{
 997	/* Deliver to the uncontrolled port by default */
 998	enum rx_handler_result ret = RX_HANDLER_PASS;
 999	struct ethhdr *hdr = eth_hdr(skb);
1000	struct macsec_rxh_data *rxd;
1001	struct macsec_dev *macsec;
1002
1003	rcu_read_lock();
1004	rxd = macsec_data_rcu(skb->dev);
1005
1006	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1007		struct sk_buff *nskb;
1008		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
1009		struct net_device *ndev = macsec->secy.netdev;
1010
1011		/* If h/w offloading is enabled, HW decodes frames and strips
1012		 * the SecTAG, so we have to deduce which port to deliver to.
1013		 */
1014		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
1015			if (ether_addr_equal_64bits(hdr->h_dest,
1016						    ndev->dev_addr)) {
1017				/* exact match, divert skb to this port */
1018				skb->dev = ndev;
1019				skb->pkt_type = PACKET_HOST;
1020				ret = RX_HANDLER_ANOTHER;
1021				goto out;
1022			} else if (is_multicast_ether_addr_64bits(
1023					   hdr->h_dest)) {
1024				/* multicast frame, deliver on this port too */
1025				nskb = skb_clone(skb, GFP_ATOMIC);
1026				if (!nskb)
1027					break;
1028
1029				nskb->dev = ndev;
1030				if (ether_addr_equal_64bits(hdr->h_dest,
1031							    ndev->broadcast))
1032					nskb->pkt_type = PACKET_BROADCAST;
1033				else
1034					nskb->pkt_type = PACKET_MULTICAST;
1035
1036				netif_rx(nskb);
1037			}
1038			continue;
1039		}
1040
1041		/* 10.6 If the management control validateFrames is not
1042		 * Strict, frames without a SecTAG are received, counted, and
1043		 * delivered to the Controlled Port
1044		 */
1045		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
1046			u64_stats_update_begin(&secy_stats->syncp);
1047			secy_stats->stats.InPktsNoTag++;
1048			u64_stats_update_end(&secy_stats->syncp);
1049			continue;
1050		}
1051
1052		/* deliver on this port */
1053		nskb = skb_clone(skb, GFP_ATOMIC);
1054		if (!nskb)
1055			break;
1056
1057		nskb->dev = ndev;
1058
1059		if (netif_rx(nskb) == NET_RX_SUCCESS) {
1060			u64_stats_update_begin(&secy_stats->syncp);
1061			secy_stats->stats.InPktsUntagged++;
1062			u64_stats_update_end(&secy_stats->syncp);
1063		}
1064	}
1065
1066out:
1067	rcu_read_unlock();
1068	return ret;
1069}
1070
1071static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
1072{
1073	struct sk_buff *skb = *pskb;
1074	struct net_device *dev = skb->dev;
1075	struct macsec_eth_header *hdr;
1076	struct macsec_secy *secy = NULL;
1077	struct macsec_rx_sc *rx_sc;
1078	struct macsec_rx_sa *rx_sa;
1079	struct macsec_rxh_data *rxd;
1080	struct macsec_dev *macsec;
1081	unsigned int len;
1082	sci_t sci;
1083	u32 hdr_pn;
1084	bool cbit;
1085	struct pcpu_rx_sc_stats *rxsc_stats;
1086	struct pcpu_secy_stats *secy_stats;
1087	bool pulled_sci;
1088	int ret;
1089
1090	if (skb_headroom(skb) < ETH_HLEN)
1091		goto drop_direct;
1092
1093	hdr = macsec_ethhdr(skb);
1094	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
1095		return handle_not_macsec(skb);
1096
1097	skb = skb_unshare(skb, GFP_ATOMIC);
1098	*pskb = skb;
1099	if (!skb)
1100		return RX_HANDLER_CONSUMED;
1101
1102	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
1103	if (!pulled_sci) {
1104		if (!pskb_may_pull(skb, macsec_extra_len(false)))
1105			goto drop_direct;
1106	}
1107
1108	hdr = macsec_ethhdr(skb);
1109
1110	/* Frames with a SecTAG that has the TCI E bit set but the C
1111	 * bit clear are discarded, as this reserved encoding is used
1112	 * to identify frames with a SecTAG that are not to be
1113	 * delivered to the Controlled Port.
1114	 */
1115	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
1116		return RX_HANDLER_PASS;
1117
1118	/* now, pull the extra length */
1119	if (hdr->tci_an & MACSEC_TCI_SC) {
1120		if (!pulled_sci)
1121			goto drop_direct;
1122	}
1123
1124	/* ethernet header is part of crypto processing */
1125	skb_push(skb, ETH_HLEN);
1126
1127	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
1128	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
1129	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);
1130
1131	rcu_read_lock();
1132	rxd = macsec_data_rcu(skb->dev);
1133
1134	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1135		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
1136
1137		sc = sc ? macsec_rxsc_get(sc) : NULL;
1138
1139		if (sc) {
1140			secy = &macsec->secy;
1141			rx_sc = sc;
1142			break;
1143		}
1144	}
1145
1146	if (!secy)
1147		goto nosci;
1148
1149	dev = secy->netdev;
1150	macsec = macsec_priv(dev);
1151	secy_stats = this_cpu_ptr(macsec->stats);
1152	rxsc_stats = this_cpu_ptr(rx_sc->stats);
1153
1154	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
1155		u64_stats_update_begin(&secy_stats->syncp);
1156		secy_stats->stats.InPktsBadTag++;
1157		u64_stats_update_end(&secy_stats->syncp);
1158		goto drop_nosa;
1159	}
1160
1161	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
1162	if (!rx_sa) {
1163		/* 10.6.1 if the SA is not in use */
1164
1165		/* If validateFrames is Strict or the C bit in the
1166		 * SecTAG is set, discard
1167		 */
1168		if (hdr->tci_an & MACSEC_TCI_C ||
1169		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
1170			u64_stats_update_begin(&rxsc_stats->syncp);
1171			rxsc_stats->stats.InPktsNotUsingSA++;
1172			u64_stats_update_end(&rxsc_stats->syncp);
1173			goto drop_nosa;
1174		}
1175
1176		/* not Strict, the frame (with the SecTAG and ICV
1177		 * removed) is delivered to the Controlled Port.
1178		 */
1179		u64_stats_update_begin(&rxsc_stats->syncp);
1180		rxsc_stats->stats.InPktsUnusedSA++;
1181		u64_stats_update_end(&rxsc_stats->syncp);
1182		goto deliver;
1183	}
1184
1185	/* First, PN check to avoid decrypting obviously wrong packets */
1186	hdr_pn = ntohl(hdr->packet_number);
1187	if (secy->replay_protect) {
1188		bool late;
1189
1190		spin_lock(&rx_sa->lock);
1191		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
1192		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);
1193
1194		if (secy->xpn)
1195			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
1196		spin_unlock(&rx_sa->lock);
1197
1198		if (late) {
1199			u64_stats_update_begin(&rxsc_stats->syncp);
1200			rxsc_stats->stats.InPktsLate++;
1201			u64_stats_update_end(&rxsc_stats->syncp);
1202			goto drop;
1203		}
1204	}
1205
1206	macsec_skb_cb(skb)->rx_sa = rx_sa;
1207
1208	/* Disabled && !changed text => skip validation */
1209	if (hdr->tci_an & MACSEC_TCI_C ||
1210	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
1211		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
1212
1213	if (IS_ERR(skb)) {
1214		/* the decrypt callback needs the reference */
1215		if (PTR_ERR(skb) != -EINPROGRESS) {
1216			macsec_rxsa_put(rx_sa);
1217			macsec_rxsc_put(rx_sc);
1218		}
1219		rcu_read_unlock();
1220		*pskb = NULL;
1221		return RX_HANDLER_CONSUMED;
1222	}
1223
1224	if (!macsec_post_decrypt(skb, secy, hdr_pn))
1225		goto drop;
1226
1227deliver:
1228	macsec_finalize_skb(skb, secy->icv_len,
1229			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
1230	macsec_reset_skb(skb, secy->netdev);
1231
1232	if (rx_sa)
1233		macsec_rxsa_put(rx_sa);
1234	macsec_rxsc_put(rx_sc);
1235
1236	skb_orphan(skb);
1237	len = skb->len;
1238	ret = gro_cells_receive(&macsec->gro_cells, skb);
1239	if (ret == NET_RX_SUCCESS)
1240		count_rx(dev, len);
1241	else
1242		macsec->secy.netdev->stats.rx_dropped++;
1243
1244	rcu_read_unlock();
1245
1246	*pskb = NULL;
1247	return RX_HANDLER_CONSUMED;
1248
1249drop:
1250	macsec_rxsa_put(rx_sa);
1251drop_nosa:
1252	macsec_rxsc_put(rx_sc);
1253	rcu_read_unlock();
1254drop_direct:
1255	kfree_skb(skb);
1256	*pskb = NULL;
1257	return RX_HANDLER_CONSUMED;
1258
1259nosci:
1260	/* 10.6.1 if the SC is not found */
1261	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
1262	if (!cbit)
1263		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
1264				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
1265
1266	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1267		struct sk_buff *nskb;
1268
1269		secy_stats = this_cpu_ptr(macsec->stats);
1270
1271		/* If validateFrames is Strict or the C bit in the
1272		 * SecTAG is set, discard
1273		 */
1274		if (cbit ||
1275		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
1276			u64_stats_update_begin(&secy_stats->syncp);
1277			secy_stats->stats.InPktsNoSCI++;
1278			u64_stats_update_end(&secy_stats->syncp);
1279			continue;
1280		}
1281
1282		/* not strict, the frame (with the SecTAG and ICV
1283		 * removed) is delivered to the Controlled Port.
1284		 */
1285		nskb = skb_clone(skb, GFP_ATOMIC);
1286		if (!nskb)
1287			break;
1288
1289		macsec_reset_skb(nskb, macsec->secy.netdev);
1290
1291		ret = netif_rx(nskb);
1292		if (ret == NET_RX_SUCCESS) {
1293			u64_stats_update_begin(&secy_stats->syncp);
1294			secy_stats->stats.InPktsUnknownSCI++;
1295			u64_stats_update_end(&secy_stats->syncp);
1296		} else {
1297			macsec->secy.netdev->stats.rx_dropped++;
1298		}
1299	}
1300
1301	rcu_read_unlock();
1302	*pskb = skb;
1303	return RX_HANDLER_PASS;
1304}
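
/* RX summary: untagged frames take handle_not_macsec(); tagged frames
 * are matched to a SecY/SC by SCI, PN-checked, run through the AEAD in
 * macsec_decrypt(), then macsec_post_decrypt() applies the 802.1AE
 * receive checks before delivery through gro_cells.
 */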
1305
1306static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
1307{
1308	struct crypto_aead *tfm;
1309	int ret;
1310
1311	/* Pick a sync gcm(aes) cipher to ensure order is preserved. */
1312	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
1313
1314	if (IS_ERR(tfm))
1315		return tfm;
1316
1317	ret = crypto_aead_setkey(tfm, key, key_len);
1318	if (ret < 0)
1319		goto fail;
1320
1321	ret = crypto_aead_setauthsize(tfm, icv_len);
1322	if (ret < 0)
1323		goto fail;
1324
1325	return tfm;
1326fail:
1327	crypto_free_aead(tfm);
1328	return ERR_PTR(ret);
1329}
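
/* The key length picks the suite: a 16-byte SAK yields GCM-AES-128, a
 * 32-byte SAK GCM-AES-256 (see MACSEC_GCM_AES_*_SAK_LEN above). icv_len
 * is normally 16, but the rtnetlink API allows creating a SecY with a
 * shorter ICV.
 */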
1330
1331static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
1332		      int icv_len)
1333{
1334	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
1335	if (!rx_sa->stats)
1336		return -ENOMEM;
1337
1338	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
1339	if (IS_ERR(rx_sa->key.tfm)) {
1340		free_percpu(rx_sa->stats);
1341		return PTR_ERR(rx_sa->key.tfm);
1342	}
1343
1344	rx_sa->ssci = MACSEC_UNDEF_SSCI;
1345	rx_sa->active = false;
1346	rx_sa->next_pn = 1;
1347	refcount_set(&rx_sa->refcnt, 1);
1348	spin_lock_init(&rx_sa->lock);
1349
1350	return 0;
1351}
1352
1353static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
1354{
1355	rx_sa->active = false;
1356
1357	macsec_rxsa_put(rx_sa);
1358}
1359
1360static void free_rx_sc(struct macsec_rx_sc *rx_sc)
1361{
1362	int i;
1363
1364	for (i = 0; i < MACSEC_NUM_AN; i++) {
1365		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);
1366
1367		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
1368		if (sa)
1369			clear_rx_sa(sa);
1370	}
1371
1372	macsec_rxsc_put(rx_sc);
1373}
1374
1375static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
1376{
1377	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;
1378
1379	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
1380	     rx_sc;
1381	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
1382		if (rx_sc->sci == sci) {
1383			if (rx_sc->active)
1384				secy->n_rx_sc--;
1385			rcu_assign_pointer(*rx_scp, rx_sc->next);
1386			return rx_sc;
1387		}
1388	}
1389
1390	return NULL;
1391}
1392
1393static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
1394{
1395	struct macsec_rx_sc *rx_sc;
1396	struct macsec_dev *macsec;
1397	struct net_device *real_dev = macsec_priv(dev)->real_dev;
1398	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
1399	struct macsec_secy *secy;
1400
1401	list_for_each_entry(macsec, &rxd->secys, secys) {
1402		if (find_rx_sc_rtnl(&macsec->secy, sci))
1403			return ERR_PTR(-EEXIST);
1404	}
1405
1406	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
1407	if (!rx_sc)
1408		return ERR_PTR(-ENOMEM);
1409
1410	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
1411	if (!rx_sc->stats) {
1412		kfree(rx_sc);
1413		return ERR_PTR(-ENOMEM);
1414	}
1415
1416	rx_sc->sci = sci;
1417	rx_sc->active = true;
1418	refcount_set(&rx_sc->refcnt, 1);
1419
1420	secy = &macsec_priv(dev)->secy;
1421	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
1422	rcu_assign_pointer(secy->rx_sc, rx_sc);
1423
1424	if (rx_sc->active)
1425		secy->n_rx_sc++;
1426
1427	return rx_sc;
1428}
1429
1430static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
1431		      int icv_len)
1432{
1433	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
1434	if (!tx_sa->stats)
1435		return -ENOMEM;
1436
1437	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
1438	if (IS_ERR(tx_sa->key.tfm)) {
1439		free_percpu(tx_sa->stats);
1440		return PTR_ERR(tx_sa->key.tfm);
1441	}
1442
1443	tx_sa->ssci = MACSEC_UNDEF_SSCI;
1444	tx_sa->active = false;
1445	refcount_set(&tx_sa->refcnt, 1);
1446	spin_lock_init(&tx_sa->lock);
1447
1448	return 0;
1449}
1450
1451static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
1452{
1453	tx_sa->active = false;
1454
1455	macsec_txsa_put(tx_sa);
1456}
1457
1458static struct genl_family macsec_fam;
1459
1460static struct net_device *get_dev_from_nl(struct net *net,
1461					  struct nlattr **attrs)
1462{
1463	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
1464	struct net_device *dev;
1465
1466	dev = __dev_get_by_index(net, ifindex);
1467	if (!dev)
1468		return ERR_PTR(-ENODEV);
1469
1470	if (!netif_is_macsec(dev))
1471		return ERR_PTR(-ENODEV);
1472
1473	return dev;
1474}
1475
1476static enum macsec_offload nla_get_offload(const struct nlattr *nla)
1477{
1478	return (__force enum macsec_offload)nla_get_u8(nla);
1479}
1480
1481static sci_t nla_get_sci(const struct nlattr *nla)
1482{
1483	return (__force sci_t)nla_get_u64(nla);
1484}
1485
1486static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
1487		       int padattr)
1488{
1489	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
1490}
1491
1492static ssci_t nla_get_ssci(const struct nlattr *nla)
1493{
1494	return (__force ssci_t)nla_get_u32(nla);
1495}
1496
1497static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
1498{
 1499	return nla_put_u32(skb, attrtype, (__force u32)value);
1500}
1501
1502static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
1503					     struct nlattr **attrs,
1504					     struct nlattr **tb_sa,
1505					     struct net_device **devp,
1506					     struct macsec_secy **secyp,
1507					     struct macsec_tx_sc **scp,
1508					     u8 *assoc_num)
1509{
1510	struct net_device *dev;
1511	struct macsec_secy *secy;
1512	struct macsec_tx_sc *tx_sc;
1513	struct macsec_tx_sa *tx_sa;
1514
1515	if (!tb_sa[MACSEC_SA_ATTR_AN])
1516		return ERR_PTR(-EINVAL);
1517
1518	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1519
1520	dev = get_dev_from_nl(net, attrs);
1521	if (IS_ERR(dev))
1522		return ERR_CAST(dev);
1523
1524	if (*assoc_num >= MACSEC_NUM_AN)
1525		return ERR_PTR(-EINVAL);
1526
1527	secy = &macsec_priv(dev)->secy;
1528	tx_sc = &secy->tx_sc;
1529
1530	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
1531	if (!tx_sa)
1532		return ERR_PTR(-ENODEV);
1533
1534	*devp = dev;
1535	*scp = tx_sc;
1536	*secyp = secy;
1537	return tx_sa;
1538}
1539
1540static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
1541					     struct nlattr **attrs,
1542					     struct nlattr **tb_rxsc,
1543					     struct net_device **devp,
1544					     struct macsec_secy **secyp)
1545{
1546	struct net_device *dev;
1547	struct macsec_secy *secy;
1548	struct macsec_rx_sc *rx_sc;
1549	sci_t sci;
1550
1551	dev = get_dev_from_nl(net, attrs);
1552	if (IS_ERR(dev))
1553		return ERR_CAST(dev);
1554
1555	secy = &macsec_priv(dev)->secy;
1556
1557	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
1558		return ERR_PTR(-EINVAL);
1559
1560	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1561	rx_sc = find_rx_sc_rtnl(secy, sci);
1562	if (!rx_sc)
1563		return ERR_PTR(-ENODEV);
1564
1565	*secyp = secy;
1566	*devp = dev;
1567
1568	return rx_sc;
1569}
1570
1571static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
1572					     struct nlattr **attrs,
1573					     struct nlattr **tb_rxsc,
1574					     struct nlattr **tb_sa,
1575					     struct net_device **devp,
1576					     struct macsec_secy **secyp,
1577					     struct macsec_rx_sc **scp,
1578					     u8 *assoc_num)
1579{
1580	struct macsec_rx_sc *rx_sc;
1581	struct macsec_rx_sa *rx_sa;
1582
1583	if (!tb_sa[MACSEC_SA_ATTR_AN])
1584		return ERR_PTR(-EINVAL);
1585
1586	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1587	if (*assoc_num >= MACSEC_NUM_AN)
1588		return ERR_PTR(-EINVAL);
1589
1590	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
1591	if (IS_ERR(rx_sc))
1592		return ERR_CAST(rx_sc);
1593
1594	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
1595	if (!rx_sa)
1596		return ERR_PTR(-ENODEV);
1597
1598	*scp = rx_sc;
1599	return rx_sa;
1600}
1601
1602static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
1603	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
1604	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
1605	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
1606	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
1607};
1608
1609static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
1610	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
1611	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
1612};
1613
1614static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
1615	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
1616	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
1617	[MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
1618	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
1619				   .len = MACSEC_KEYID_LEN, },
1620	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
1621				 .len = MACSEC_MAX_KEY_LEN, },
1622	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
1623	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
1624				  .len = MACSEC_SALT_LEN, },
1625};
1626
1627static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
1628	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
1629};
1630
1631/* Offloads an operation to a device driver */
1632static int macsec_offload(int (* const func)(struct macsec_context *),
1633			  struct macsec_context *ctx)
1634{
1635	int ret;
1636
1637	if (unlikely(!func))
1638		return 0;
1639
1640	if (ctx->offload == MACSEC_OFFLOAD_PHY)
1641		mutex_lock(&ctx->phydev->lock);
1642
 1643	/* Phase I: prepare. The driver should fail here if there are going to be
1644	 * issues in the commit phase.
1645	 */
1646	ctx->prepare = true;
1647	ret = (*func)(ctx);
1648	if (ret)
1649		goto phy_unlock;
1650
1651	/* Phase II: commit. This step cannot fail. */
1652	ctx->prepare = false;
1653	ret = (*func)(ctx);
1654	/* This should never happen: commit is not allowed to fail */
1655	if (unlikely(ret))
1656		WARN(1, "MACsec offloading commit failed (%d)\n", ret);
1657
1658phy_unlock:
1659	if (ctx->offload == MACSEC_OFFLOAD_PHY)
1660		mutex_unlock(&ctx->phydev->lock);
1661
1662	return ret;
1663}
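
/* Callers pair this with macsec_get_ops(); the usual shape is, roughly:
 *
 *	const struct macsec_ops *ops;
 *	struct macsec_context ctx;
 *
 *	ops = macsec_get_ops(netdev_priv(dev), &ctx);
 *	if (ops) {
 *		ctx.secy = secy;
 *		err = macsec_offload(ops->mdo_add_rxsc, &ctx);
 *	}
 */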
1664
1665static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
1666{
1667	if (!attrs[MACSEC_ATTR_SA_CONFIG])
1668		return -EINVAL;
1669
1670	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
1671		return -EINVAL;
1672
1673	return 0;
1674}
1675
1676static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
1677{
1678	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
1679		return -EINVAL;
1680
1681	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
1682		return -EINVAL;
1683
1684	return 0;
1685}
1686
1687static bool validate_add_rxsa(struct nlattr **attrs)
1688{
1689	if (!attrs[MACSEC_SA_ATTR_AN] ||
1690	    !attrs[MACSEC_SA_ATTR_KEY] ||
1691	    !attrs[MACSEC_SA_ATTR_KEYID])
1692		return false;
1693
1694	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1695		return false;
1696
1697	if (attrs[MACSEC_SA_ATTR_PN] &&
 1698	    nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
1699		return false;
1700
1701	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1702		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1703			return false;
1704	}
1705
1706	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1707		return false;
1708
1709	return true;
1710}
1711
1712static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
1713{
1714	struct net_device *dev;
1715	struct nlattr **attrs = info->attrs;
1716	struct macsec_secy *secy;
1717	struct macsec_rx_sc *rx_sc;
1718	struct macsec_rx_sa *rx_sa;
1719	unsigned char assoc_num;
1720	int pn_len;
1721	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1722	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1723	int err;
1724
1725	if (!attrs[MACSEC_ATTR_IFINDEX])
1726		return -EINVAL;
1727
1728	if (parse_sa_config(attrs, tb_sa))
1729		return -EINVAL;
1730
1731	if (parse_rxsc_config(attrs, tb_rxsc))
1732		return -EINVAL;
1733
1734	if (!validate_add_rxsa(tb_sa))
1735		return -EINVAL;
1736
1737	rtnl_lock();
1738	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
1739	if (IS_ERR(rx_sc)) {
1740		rtnl_unlock();
1741		return PTR_ERR(rx_sc);
1742	}
1743
1744	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1745
1746	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
1747		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
1748			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
1749		rtnl_unlock();
1750		return -EINVAL;
1751	}
1752
1753	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
 1754	if (tb_sa[MACSEC_SA_ATTR_PN] && nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
1755		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
1756			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
1757		rtnl_unlock();
1758		return -EINVAL;
1759	}
1760
1761	if (secy->xpn) {
1762		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
1763			rtnl_unlock();
1764			return -EINVAL;
1765		}
1766
1767		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
1768			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
1769				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
 1770				  MACSEC_SALT_LEN);
1771			rtnl_unlock();
1772			return -EINVAL;
1773		}
1774	}
1775
1776	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
1777	if (rx_sa) {
1778		rtnl_unlock();
1779		return -EBUSY;
1780	}
1781
1782	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
1783	if (!rx_sa) {
1784		rtnl_unlock();
1785		return -ENOMEM;
1786	}
1787
1788	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1789			 secy->key_len, secy->icv_len);
1790	if (err < 0) {
1791		kfree(rx_sa);
1792		rtnl_unlock();
1793		return err;
1794	}
1795
1796	if (tb_sa[MACSEC_SA_ATTR_PN]) {
1797		spin_lock_bh(&rx_sa->lock);
1798		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
1799		spin_unlock_bh(&rx_sa->lock);
1800	}
1801
1802	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
1803		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
1804
1805	rx_sa->sc = rx_sc;
1806
1807	/* If h/w offloading is available, propagate to the device */
1808	if (macsec_is_offloaded(netdev_priv(dev))) {
1809		const struct macsec_ops *ops;
1810		struct macsec_context ctx;
1811
1812		ops = macsec_get_ops(netdev_priv(dev), &ctx);
1813		if (!ops) {
1814			err = -EOPNOTSUPP;
1815			goto cleanup;
1816		}
1817
1818		ctx.sa.assoc_num = assoc_num;
1819		ctx.sa.rx_sa = rx_sa;
1820		ctx.secy = secy;
1821		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1822		       secy->key_len);
1823
1824		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
1825		if (err)
1826			goto cleanup;
1827	}
1828
1829	if (secy->xpn) {
1830		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
1831		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
1832			   MACSEC_SALT_LEN);
1833	}
1834
1835	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
1836	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
1837
1838	rtnl_unlock();
1839
1840	return 0;
1841
1842cleanup:
1843	kfree(rx_sa);
1844	rtnl_unlock();
1845	return err;
1846}
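
/* For illustration, this is the request iproute2 sends for something
 * like (SCI, key id and key are made-up example values):
 *
 *	ip link add link eth0 macsec0 type macsec encrypt on
 *	ip macsec add macsec0 rx sci 5254001200010001
 *	ip macsec add macsec0 rx sci 5254001200010001 sa 0 pn 1 on \
 *		key 01 0123456789abcdef0123456789abcdef
 */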
1847
1848static bool validate_add_rxsc(struct nlattr **attrs)
1849{
1850	if (!attrs[MACSEC_RXSC_ATTR_SCI])
1851		return false;
1852
1853	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
1854		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
1855			return false;
1856	}
1857
1858	return true;
1859}
1860
1861static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
1862{
1863	struct net_device *dev;
1864	sci_t sci = MACSEC_UNDEF_SCI;
1865	struct nlattr **attrs = info->attrs;
1866	struct macsec_rx_sc *rx_sc;
1867	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1868	struct macsec_secy *secy;
1869	bool was_active;
1870	int ret;
1871
1872	if (!attrs[MACSEC_ATTR_IFINDEX])
1873		return -EINVAL;
1874
1875	if (parse_rxsc_config(attrs, tb_rxsc))
1876		return -EINVAL;
1877
1878	if (!validate_add_rxsc(tb_rxsc))
1879		return -EINVAL;
1880
1881	rtnl_lock();
1882	dev = get_dev_from_nl(genl_info_net(info), attrs);
1883	if (IS_ERR(dev)) {
1884		rtnl_unlock();
1885		return PTR_ERR(dev);
1886	}
1887
1888	secy = &macsec_priv(dev)->secy;
1889	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1890
1891	rx_sc = create_rx_sc(dev, sci);
1892	if (IS_ERR(rx_sc)) {
1893		rtnl_unlock();
1894		return PTR_ERR(rx_sc);
1895	}
1896
1897	was_active = rx_sc->active;
1898	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
1899		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
1900
1901	if (macsec_is_offloaded(netdev_priv(dev))) {
1902		const struct macsec_ops *ops;
1903		struct macsec_context ctx;
1904
1905		ops = macsec_get_ops(netdev_priv(dev), &ctx);
1906		if (!ops) {
1907			ret = -EOPNOTSUPP;
1908			goto cleanup;
1909		}
1910
1911		ctx.rx_sc = rx_sc;
1912		ctx.secy = secy;
1913
1914		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
1915		if (ret)
1916			goto cleanup;
1917	}
1918
1919	rtnl_unlock();
1920
1921	return 0;
1922
1923cleanup:
1924	rx_sc->active = was_active;
1925	rtnl_unlock();
1926	return ret;
1927}
1928
1929static bool validate_add_txsa(struct nlattr **attrs)
1930{
1931	if (!attrs[MACSEC_SA_ATTR_AN] ||
1932	    !attrs[MACSEC_SA_ATTR_PN] ||
1933	    !attrs[MACSEC_SA_ATTR_KEY] ||
1934	    !attrs[MACSEC_SA_ATTR_KEYID])
1935		return false;
1936
1937	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1938		return false;
1939
 1940	if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
1941		return false;
1942
1943	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1944		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1945			return false;
1946	}
1947
1948	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1949		return false;
1950
1951	return true;
1952}
1953
1954static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
1955{
1956	struct net_device *dev;
1957	struct nlattr **attrs = info->attrs;
1958	struct macsec_secy *secy;
1959	struct macsec_tx_sc *tx_sc;
1960	struct macsec_tx_sa *tx_sa;
1961	unsigned char assoc_num;
1962	int pn_len;
1963	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1964	bool was_operational;
1965	int err;
1966
1967	if (!attrs[MACSEC_ATTR_IFINDEX])
1968		return -EINVAL;
1969
1970	if (parse_sa_config(attrs, tb_sa))
1971		return -EINVAL;
1972
1973	if (!validate_add_txsa(tb_sa))
1974		return -EINVAL;
1975
1976	rtnl_lock();
1977	dev = get_dev_from_nl(genl_info_net(info), attrs);
1978	if (IS_ERR(dev)) {
1979		rtnl_unlock();
1980		return PTR_ERR(dev);
1981	}
1982
1983	secy = &macsec_priv(dev)->secy;
1984	tx_sc = &secy->tx_sc;
1985
1986	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1987
1988	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
1989		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
1990			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
1991		rtnl_unlock();
1992		return -EINVAL;
1993	}
1994
1995	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
1996	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
1997		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
1998			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
1999		rtnl_unlock();
2000		return -EINVAL;
2001	}
2002
2003	if (secy->xpn) {
2004		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
2005			rtnl_unlock();
2006			return -EINVAL;
2007		}
2008
2009		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
2010			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
2011				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
2012				  MACSEC_SALT_LEN);
2013			rtnl_unlock();
2014			return -EINVAL;
2015		}
2016	}
2017
2018	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
2019	if (tx_sa) {
2020		rtnl_unlock();
2021		return -EBUSY;
2022	}
2023
2024	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
2025	if (!tx_sa) {
2026		rtnl_unlock();
2027		return -ENOMEM;
2028	}
2029
2030	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2031			 secy->key_len, secy->icv_len);
2032	if (err < 0) {
2033		kfree(tx_sa);
2034		rtnl_unlock();
2035		return err;
2036	}
2037
2038	spin_lock_bh(&tx_sa->lock);
2039	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2040	spin_unlock_bh(&tx_sa->lock);
2041
2042	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2043		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2044
2045	was_operational = secy->operational;
2046	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
2047		secy->operational = true;
2048
2049	/* If h/w offloading is available, propagate to the device */
2050	if (macsec_is_offloaded(netdev_priv(dev))) {
2051		const struct macsec_ops *ops;
2052		struct macsec_context ctx;
2053
2054		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2055		if (!ops) {
2056			err = -EOPNOTSUPP;
2057			goto cleanup;
2058		}
2059
2060		ctx.sa.assoc_num = assoc_num;
2061		ctx.sa.tx_sa = tx_sa;
2062		ctx.secy = secy;
2063		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2064		       secy->key_len);
2065
2066		err = macsec_offload(ops->mdo_add_txsa, &ctx);
2067		if (err)
2068			goto cleanup;
2069	}
2070
2071	if (secy->xpn) {
2072		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
2073		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
2074			   MACSEC_SALT_LEN);
2075	}
2076
2077	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
2078	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
2079
2080	rtnl_unlock();
2081
2082	return 0;
2083
2084cleanup:
2085	secy->operational = was_operational;
2086	kfree(tx_sa);
2087	rtnl_unlock();
2088	return err;
2089}
2090
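/* SA deletion refuses to remove an SA that is still marked active (-EBUSY);
 * userspace is expected to deactivate it first via an update.
 */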
2091static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
2092{
2093	struct nlattr **attrs = info->attrs;
2094	struct net_device *dev;
2095	struct macsec_secy *secy;
2096	struct macsec_rx_sc *rx_sc;
2097	struct macsec_rx_sa *rx_sa;
2098	u8 assoc_num;
2099	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2100	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2101	int ret;
2102
2103	if (!attrs[MACSEC_ATTR_IFINDEX])
2104		return -EINVAL;
2105
2106	if (parse_sa_config(attrs, tb_sa))
2107		return -EINVAL;
2108
2109	if (parse_rxsc_config(attrs, tb_rxsc))
2110		return -EINVAL;
2111
2112	rtnl_lock();
2113	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2114				 &dev, &secy, &rx_sc, &assoc_num);
2115	if (IS_ERR(rx_sa)) {
2116		rtnl_unlock();
2117		return PTR_ERR(rx_sa);
2118	}
2119
2120	if (rx_sa->active) {
2121		rtnl_unlock();
2122		return -EBUSY;
2123	}
2124
2125	/* If h/w offloading is available, propagate to the device */
2126	if (macsec_is_offloaded(netdev_priv(dev))) {
2127		const struct macsec_ops *ops;
2128		struct macsec_context ctx;
2129
2130		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2131		if (!ops) {
2132			ret = -EOPNOTSUPP;
2133			goto cleanup;
2134		}
2135
2136		ctx.sa.assoc_num = assoc_num;
2137		ctx.sa.rx_sa = rx_sa;
2138		ctx.secy = secy;
2139
2140		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
2141		if (ret)
2142			goto cleanup;
2143	}
2144
2145	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
2146	clear_rx_sa(rx_sa);
2147
2148	rtnl_unlock();
2149
2150	return 0;
2151
2152cleanup:
2153	rtnl_unlock();
2154	return ret;
2155}
2156
2157static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
2158{
2159	struct nlattr **attrs = info->attrs;
2160	struct net_device *dev;
2161	struct macsec_secy *secy;
2162	struct macsec_rx_sc *rx_sc;
2163	sci_t sci;
2164	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2165	int ret;
2166
2167	if (!attrs[MACSEC_ATTR_IFINDEX])
2168		return -EINVAL;
2169
2170	if (parse_rxsc_config(attrs, tb_rxsc))
2171		return -EINVAL;
2172
2173	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
2174		return -EINVAL;
2175
2176	rtnl_lock();
2177	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
2178	if (IS_ERR(dev)) {
2179		rtnl_unlock();
2180		return PTR_ERR(dev);
2181	}
2182
2183	secy = &macsec_priv(dev)->secy;
2184	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
2185
2186	rx_sc = del_rx_sc(secy, sci);
2187	if (!rx_sc) {
2188		rtnl_unlock();
2189		return -ENODEV;
2190	}
2191
2192	/* If h/w offloading is available, propagate to the device */
2193	if (macsec_is_offloaded(netdev_priv(dev))) {
2194		const struct macsec_ops *ops;
2195		struct macsec_context ctx;
2196
2197		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2198		if (!ops) {
2199			ret = -EOPNOTSUPP;
2200			goto cleanup;
2201		}
2202
2203		ctx.rx_sc = rx_sc;
2204		ctx.secy = secy;
2205		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
2206		if (ret)
2207			goto cleanup;
2208	}
2209
2210	free_rx_sc(rx_sc);
2211	rtnl_unlock();
2212
2213	return 0;
2214
2215cleanup:
2216	rtnl_unlock();
2217	return ret;
2218}
2219
2220static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
2221{
2222	struct nlattr **attrs = info->attrs;
2223	struct net_device *dev;
2224	struct macsec_secy *secy;
2225	struct macsec_tx_sc *tx_sc;
2226	struct macsec_tx_sa *tx_sa;
2227	u8 assoc_num;
2228	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2229	int ret;
2230
2231	if (!attrs[MACSEC_ATTR_IFINDEX])
2232		return -EINVAL;
2233
2234	if (parse_sa_config(attrs, tb_sa))
2235		return -EINVAL;
2236
2237	rtnl_lock();
2238	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2239				 &dev, &secy, &tx_sc, &assoc_num);
2240	if (IS_ERR(tx_sa)) {
2241		rtnl_unlock();
2242		return PTR_ERR(tx_sa);
2243	}
2244
2245	if (tx_sa->active) {
2246		rtnl_unlock();
2247		return -EBUSY;
2248	}
2249
2250	/* If h/w offloading is available, propagate to the device */
2251	if (macsec_is_offloaded(netdev_priv(dev))) {
2252		const struct macsec_ops *ops;
2253		struct macsec_context ctx;
2254
2255		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2256		if (!ops) {
2257			ret = -EOPNOTSUPP;
2258			goto cleanup;
2259		}
2260
2261		ctx.sa.assoc_num = assoc_num;
2262		ctx.sa.tx_sa = tx_sa;
2263		ctx.secy = secy;
2264
2265		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
2266		if (ret)
2267			goto cleanup;
2268	}
2269
2270	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
2271	clear_tx_sa(tx_sa);
2272
2273	rtnl_unlock();
2274
2275	return 0;
2276
2277cleanup:
2278	rtnl_unlock();
2279	return ret;
2280}
2281
2282static bool validate_upd_sa(struct nlattr **attrs)
2283{
2284	if (!attrs[MACSEC_SA_ATTR_AN] ||
2285	    attrs[MACSEC_SA_ATTR_KEY] ||
2286	    attrs[MACSEC_SA_ATTR_KEYID] ||
2287	    attrs[MACSEC_SA_ATTR_SSCI] ||
2288	    attrs[MACSEC_SA_ATTR_SALT])
2289		return false;
2290
2291	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
2292		return false;
2293
2294	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
2295		return false;
2296
2297	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
2298		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
2299			return false;
2300	}
2301
2302	return true;
2303}
2304
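/* Updates may toggle the active flag and move the PN, but never change key
 * material: validate_upd_sa() above rejects KEY/KEYID/SSCI/SALT.  Rekeying
 * is instead done by installing a fresh SA under another AN.
 */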
2305static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
2306{
2307	struct nlattr **attrs = info->attrs;
2308	struct net_device *dev;
2309	struct macsec_secy *secy;
2310	struct macsec_tx_sc *tx_sc;
2311	struct macsec_tx_sa *tx_sa;
2312	u8 assoc_num;
2313	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2314	bool was_operational, was_active;
2315	pn_t prev_pn;
2316	int ret = 0;
2317
2318	prev_pn.full64 = 0;
2319
2320	if (!attrs[MACSEC_ATTR_IFINDEX])
2321		return -EINVAL;
2322
2323	if (parse_sa_config(attrs, tb_sa))
2324		return -EINVAL;
2325
2326	if (!validate_upd_sa(tb_sa))
2327		return -EINVAL;
2328
2329	rtnl_lock();
2330	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2331				 &dev, &secy, &tx_sc, &assoc_num);
2332	if (IS_ERR(tx_sa)) {
2333		rtnl_unlock();
2334		return PTR_ERR(tx_sa);
2335	}
2336
2337	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2338		int pn_len;
2339
2340		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2341		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2342			pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
2343				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2344			rtnl_unlock();
2345			return -EINVAL;
2346		}
2347
2348		spin_lock_bh(&tx_sa->lock);
2349		prev_pn = tx_sa->next_pn_halves;
2350		tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2351		spin_unlock_bh(&tx_sa->lock);
2352	}
2353
2354	was_active = tx_sa->active;
2355	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2356		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2357
2358	was_operational = secy->operational;
2359	if (assoc_num == tx_sc->encoding_sa)
2360		secy->operational = tx_sa->active;
2361
2362	/* If h/w offloading is available, propagate to the device */
2363	if (macsec_is_offloaded(netdev_priv(dev))) {
2364		const struct macsec_ops *ops;
2365		struct macsec_context ctx;
2366
2367		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2368		if (!ops) {
2369			ret = -EOPNOTSUPP;
2370			goto cleanup;
2371		}
2372
2373		ctx.sa.assoc_num = assoc_num;
2374		ctx.sa.tx_sa = tx_sa;
2375		ctx.secy = secy;
2376
2377		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
2378		if (ret)
2379			goto cleanup;
2380	}
2381
2382	rtnl_unlock();
2383
2384	return 0;
2385
2386cleanup:
2387	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2388		spin_lock_bh(&tx_sa->lock);
2389		tx_sa->next_pn_halves = prev_pn;
2390		spin_unlock_bh(&tx_sa->lock);
2391	}
2392	tx_sa->active = was_active;
2393	secy->operational = was_operational;
2394	rtnl_unlock();
2395	return ret;
2396}
2397
2398static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
2399{
2400	struct nlattr **attrs = info->attrs;
2401	struct net_device *dev;
2402	struct macsec_secy *secy;
2403	struct macsec_rx_sc *rx_sc;
2404	struct macsec_rx_sa *rx_sa;
2405	u8 assoc_num;
2406	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2407	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2408	bool was_active;
2409	pn_t prev_pn;
2410	int ret = 0;
2411
2412	prev_pn.full64 = 0;
2413
2414	if (!attrs[MACSEC_ATTR_IFINDEX])
2415		return -EINVAL;
2416
2417	if (parse_rxsc_config(attrs, tb_rxsc))
2418		return -EINVAL;
2419
2420	if (parse_sa_config(attrs, tb_sa))
2421		return -EINVAL;
2422
2423	if (!validate_upd_sa(tb_sa))
2424		return -EINVAL;
2425
2426	rtnl_lock();
2427	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2428				 &dev, &secy, &rx_sc, &assoc_num);
2429	if (IS_ERR(rx_sa)) {
2430		rtnl_unlock();
2431		return PTR_ERR(rx_sa);
2432	}
2433
2434	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2435		int pn_len;
2436
2437		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2438		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2439			pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
2440				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2441			rtnl_unlock();
2442			return -EINVAL;
2443		}
2444
2445		spin_lock_bh(&rx_sa->lock);
2446		prev_pn = rx_sa->next_pn_halves;
2447		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2448		spin_unlock_bh(&rx_sa->lock);
2449	}
2450
2451	was_active = rx_sa->active;
2452	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2453		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2454
2455	/* If h/w offloading is available, propagate to the device */
2456	if (macsec_is_offloaded(netdev_priv(dev))) {
2457		const struct macsec_ops *ops;
2458		struct macsec_context ctx;
2459
2460		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2461		if (!ops) {
2462			ret = -EOPNOTSUPP;
2463			goto cleanup;
2464		}
2465
2466		ctx.sa.assoc_num = assoc_num;
2467		ctx.sa.rx_sa = rx_sa;
2468		ctx.secy = secy;
2469
2470		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
2471		if (ret)
2472			goto cleanup;
2473	}
2474
2475	rtnl_unlock();
2476	return 0;
2477
2478cleanup:
2479	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2480		spin_lock_bh(&rx_sa->lock);
2481		rx_sa->next_pn_halves = prev_pn;
2482		spin_unlock_bh(&rx_sa->lock);
2483	}
2484	rx_sa->active = was_active;
2485	rtnl_unlock();
2486	return ret;
2487}
2488
2489static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
2490{
2491	struct nlattr **attrs = info->attrs;
2492	struct net_device *dev;
2493	struct macsec_secy *secy;
2494	struct macsec_rx_sc *rx_sc;
2495	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2496	unsigned int prev_n_rx_sc;
2497	bool was_active;
2498	int ret;
2499
2500	if (!attrs[MACSEC_ATTR_IFINDEX])
2501		return -EINVAL;
2502
2503	if (parse_rxsc_config(attrs, tb_rxsc))
2504		return -EINVAL;
2505
2506	if (!validate_add_rxsc(tb_rxsc))
2507		return -EINVAL;
2508
2509	rtnl_lock();
2510	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
2511	if (IS_ERR(rx_sc)) {
2512		rtnl_unlock();
2513		return PTR_ERR(rx_sc);
2514	}
2515
2516	was_active = rx_sc->active;
2517	prev_n_rx_sc = secy->n_rx_sc;
2518	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
2519		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
2520
2521		if (rx_sc->active != new)
2522			secy->n_rx_sc += new ? 1 : -1;
2523
2524		rx_sc->active = new;
2525	}
2526
2527	/* If h/w offloading is available, propagate to the device */
2528	if (macsec_is_offloaded(netdev_priv(dev))) {
2529		const struct macsec_ops *ops;
2530		struct macsec_context ctx;
2531
2532		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2533		if (!ops) {
2534			ret = -EOPNOTSUPP;
2535			goto cleanup;
2536		}
2537
2538		ctx.rx_sc = rx_sc;
2539		ctx.secy = secy;
2540
2541		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
2542		if (ret)
2543			goto cleanup;
2544	}
2545
2546	rtnl_unlock();
2547
2548	return 0;
2549
2550cleanup:
2551	secy->n_rx_sc = prev_n_rx_sc;
2552	rx_sc->active = was_active;
2553	rtnl_unlock();
2554	return ret;
2555}
2556
2557static bool macsec_is_configured(struct macsec_dev *macsec)
2558{
2559	struct macsec_secy *secy = &macsec->secy;
2560	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2561	int i;
2562
2563	if (secy->n_rx_sc > 0)
2564		return true;
2565
2566	for (i = 0; i < MACSEC_NUM_AN; i++)
2567		if (tx_sc->sa[i])
2568			return true;
2569
2570	return false;
2571}
2572
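/* MACSEC_CMD_UPD_OFFLOAD moves a SecY between software and offloaded
 * operation.  With a recent iproute2 this corresponds to something like
 * (illustrative):
 *
 *   ip macsec offload macsec0 mac
 *
 * The switch is only allowed while the device is down and before any RX SC
 * or TX SA has been configured, since rules are not migrated.
 */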
2573static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
2574{
2575	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
2576	enum macsec_offload offload, prev_offload;
2577	int (*func)(struct macsec_context *ctx);
2578	struct nlattr **attrs = info->attrs;
2579	struct net_device *dev;
2580	const struct macsec_ops *ops;
2581	struct macsec_context ctx;
2582	struct macsec_dev *macsec;
2583	int ret;
2584
2585	if (!attrs[MACSEC_ATTR_IFINDEX])
2586		return -EINVAL;
2587
2588	if (!attrs[MACSEC_ATTR_OFFLOAD])
2589		return -EINVAL;
2590
2591	if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
2592					attrs[MACSEC_ATTR_OFFLOAD],
2593					macsec_genl_offload_policy, NULL))
2594		return -EINVAL;
2595
2596	dev = get_dev_from_nl(genl_info_net(info), attrs);
2597	if (IS_ERR(dev))
2598		return PTR_ERR(dev);
2599	macsec = macsec_priv(dev);
2600
2601	if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE])
2602		return -EINVAL;
2603
2604	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
2605	if (macsec->offload == offload)
2606		return 0;
2607
2608	/* Check if the offloading mode is supported by the underlying layers */
2609	if (offload != MACSEC_OFFLOAD_OFF &&
2610	    !macsec_check_offload(offload, macsec))
2611		return -EOPNOTSUPP;
2612
2613	/* Check if the net device is busy. */
2614	if (netif_running(dev))
2615		return -EBUSY;
2616
2617	rtnl_lock();
2618
2619	prev_offload = macsec->offload;
2620	macsec->offload = offload;
2621
2622	/* Check if the device already has rules configured: we do not support
2623	 * rules migration.
2624	 */
2625	if (macsec_is_configured(macsec)) {
2626		ret = -EBUSY;
2627		goto rollback;
2628	}
2629
2630	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
2631			       macsec, &ctx);
2632	if (!ops) {
2633		ret = -EOPNOTSUPP;
2634		goto rollback;
2635	}
2636
2637	if (prev_offload == MACSEC_OFFLOAD_OFF)
2638		func = ops->mdo_add_secy;
2639	else
2640		func = ops->mdo_del_secy;
2641
2642	ctx.secy = &macsec->secy;
2643	ret = macsec_offload(func, &ctx);
2644	if (ret)
2645		goto rollback;
2646
2647	/* Force features update, since they are different for SW MACsec and
2648	 * HW offloading cases.
2649	 */
2650	netdev_update_features(dev);
2651
2652	rtnl_unlock();
2653	return 0;
2654
2655rollback:
2656	macsec->offload = prev_offload;
2657
2658	rtnl_unlock();
2659	return ret;
2660}
2661
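/* Statistics are kept per-CPU on the fast path; the getters below either
 * ask the offloading device for its counters or fold the per-CPU copies
 * into a single sum for netlink.
 */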
2662static void get_tx_sa_stats(struct net_device *dev, int an,
2663			    struct macsec_tx_sa *tx_sa,
2664			    struct macsec_tx_sa_stats *sum)
2665{
2666	struct macsec_dev *macsec = macsec_priv(dev);
2667	int cpu;
2668
2669	/* If h/w offloading is available, propagate to the device */
2670	if (macsec_is_offloaded(macsec)) {
2671		const struct macsec_ops *ops;
2672		struct macsec_context ctx;
2673
2674		ops = macsec_get_ops(macsec, &ctx);
2675		if (ops) {
2676			ctx.sa.assoc_num = an;
2677			ctx.sa.tx_sa = tx_sa;
2678			ctx.stats.tx_sa_stats = sum;
2679			ctx.secy = &macsec_priv(dev)->secy;
2680			macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
2681		}
2682		return;
2683	}
2684
2685	for_each_possible_cpu(cpu) {
2686		const struct macsec_tx_sa_stats *stats =
2687			per_cpu_ptr(tx_sa->stats, cpu);
2688
2689		sum->OutPktsProtected += stats->OutPktsProtected;
2690		sum->OutPktsEncrypted += stats->OutPktsEncrypted;
2691	}
2692}
2693
2694static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
2695{
2696	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
2697			sum->OutPktsProtected) ||
2698	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2699			sum->OutPktsEncrypted))
2700		return -EMSGSIZE;
2701
2702	return 0;
2703}
2704
2705static void get_rx_sa_stats(struct net_device *dev,
2706			    struct macsec_rx_sc *rx_sc, int an,
2707			    struct macsec_rx_sa *rx_sa,
2708			    struct macsec_rx_sa_stats *sum)
2709{
2710	struct macsec_dev *macsec = macsec_priv(dev);
2711	int cpu;
2712
2713	/* If h/w offloading is available, propagate to the device */
2714	if (macsec_is_offloaded(macsec)) {
2715		const struct macsec_ops *ops;
2716		struct macsec_context ctx;
2717
2718		ops = macsec_get_ops(macsec, &ctx);
2719		if (ops) {
2720			ctx.sa.assoc_num = an;
2721			ctx.sa.rx_sa = rx_sa;
2722			ctx.stats.rx_sa_stats = sum;
2723			ctx.secy = &macsec_priv(dev)->secy;
2724			ctx.rx_sc = rx_sc;
2725			macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
2726		}
2727		return;
2728	}
2729
2730	for_each_possible_cpu(cpu) {
2731		const struct macsec_rx_sa_stats *stats =
2732			per_cpu_ptr(rx_sa->stats, cpu);
2733
2734		sum->InPktsOK         += stats->InPktsOK;
2735		sum->InPktsInvalid    += stats->InPktsInvalid;
2736		sum->InPktsNotValid   += stats->InPktsNotValid;
2737		sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
2738		sum->InPktsUnusedSA   += stats->InPktsUnusedSA;
2739	}
2740}
2741
2742static int copy_rx_sa_stats(struct sk_buff *skb,
2743			    struct macsec_rx_sa_stats *sum)
2744{
2745	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
2746	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
2747			sum->InPktsInvalid) ||
2748	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
2749			sum->InPktsNotValid) ||
2750	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2751			sum->InPktsNotUsingSA) ||
2752	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
2753			sum->InPktsUnusedSA))
2754		return -EMSGSIZE;
2755
2756	return 0;
2757}
2758
2759static void get_rx_sc_stats(struct net_device *dev,
2760			    struct macsec_rx_sc *rx_sc,
2761			    struct macsec_rx_sc_stats *sum)
2762{
2763	struct macsec_dev *macsec = macsec_priv(dev);
2764	int cpu;
2765
2766	/* If h/w offloading is available, propagate to the device */
2767	if (macsec_is_offloaded(macsec)) {
2768		const struct macsec_ops *ops;
2769		struct macsec_context ctx;
2770
2771		ops = macsec_get_ops(macsec, &ctx);
2772		if (ops) {
2773			ctx.stats.rx_sc_stats = sum;
2774			ctx.secy = &macsec_priv(dev)->secy;
2775			ctx.rx_sc = rx_sc;
2776			macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
2777		}
2778		return;
2779	}
2780
2781	for_each_possible_cpu(cpu) {
2782		const struct pcpu_rx_sc_stats *stats;
2783		struct macsec_rx_sc_stats tmp;
2784		unsigned int start;
2785
2786		stats = per_cpu_ptr(rx_sc->stats, cpu);
2787		do {
2788			start = u64_stats_fetch_begin_irq(&stats->syncp);
2789			memcpy(&tmp, &stats->stats, sizeof(tmp));
2790		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2791
2792		sum->InOctetsValidated += tmp.InOctetsValidated;
2793		sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
2794		sum->InPktsUnchecked   += tmp.InPktsUnchecked;
2795		sum->InPktsDelayed     += tmp.InPktsDelayed;
2796		sum->InPktsOK          += tmp.InPktsOK;
2797		sum->InPktsInvalid     += tmp.InPktsInvalid;
2798		sum->InPktsLate        += tmp.InPktsLate;
2799		sum->InPktsNotValid    += tmp.InPktsNotValid;
2800		sum->InPktsNotUsingSA  += tmp.InPktsNotUsingSA;
2801		sum->InPktsUnusedSA    += tmp.InPktsUnusedSA;
2802	}
2803}
2804
2805static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
2806{
2807	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
2808			      sum->InOctetsValidated,
2809			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2810	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
2811			      sum->InOctetsDecrypted,
2812			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2813	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
2814			      sum->InPktsUnchecked,
2815			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2816	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
2817			      sum->InPktsDelayed,
2818			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2819	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
2820			      sum->InPktsOK,
2821			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2822	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
2823			      sum->InPktsInvalid,
2824			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2825	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
2826			      sum->InPktsLate,
2827			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2828	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
2829			      sum->InPktsNotValid,
2830			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2831	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2832			      sum->InPktsNotUsingSA,
2833			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2834	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
2835			      sum->InPktsUnusedSA,
2836			      MACSEC_RXSC_STATS_ATTR_PAD))
2837		return -EMSGSIZE;
2838
2839	return 0;
2840}
2841
2842static void get_tx_sc_stats(struct net_device *dev,
2843			    struct macsec_tx_sc_stats *sum)
2844{
2845	struct macsec_dev *macsec = macsec_priv(dev);
2846	int cpu;
2847
2848	/* If h/w offloading is available, propagate to the device */
2849	if (macsec_is_offloaded(macsec)) {
2850		const struct macsec_ops *ops;
2851		struct macsec_context ctx;
2852
2853		ops = macsec_get_ops(macsec, &ctx);
2854		if (ops) {
2855			ctx.stats.tx_sc_stats = sum;
2856			ctx.secy = &macsec_priv(dev)->secy;
2857			macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
2858		}
2859		return;
2860	}
2861
2862	for_each_possible_cpu(cpu) {
2863		const struct pcpu_tx_sc_stats *stats;
2864		struct macsec_tx_sc_stats tmp;
2865		unsigned int start;
2866
2867		stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
2868		do {
2869			start = u64_stats_fetch_begin_irq(&stats->syncp);
2870			memcpy(&tmp, &stats->stats, sizeof(tmp));
2871		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2872
2873		sum->OutPktsProtected   += tmp.OutPktsProtected;
2874		sum->OutPktsEncrypted   += tmp.OutPktsEncrypted;
2875		sum->OutOctetsProtected += tmp.OutOctetsProtected;
2876		sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
2877	}
2878}
2879
2880static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
2881{
2882	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
2883			      sum->OutPktsProtected,
2884			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2885	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2886			      sum->OutPktsEncrypted,
2887			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2888	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
2889			      sum->OutOctetsProtected,
2890			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2891	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
2892			      sum->OutOctetsEncrypted,
2893			      MACSEC_TXSC_STATS_ATTR_PAD))
2894		return -EMSGSIZE;
2895
2896	return 0;
2897}
2898
2899static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
2900{
2901	struct macsec_dev *macsec = macsec_priv(dev);
2902	int cpu;
2903
2904	/* If h/w offloading is available, propagate to the device */
2905	if (macsec_is_offloaded(macsec)) {
2906		const struct macsec_ops *ops;
2907		struct macsec_context ctx;
2908
2909		ops = macsec_get_ops(macsec, &ctx);
2910		if (ops) {
2911			ctx.stats.dev_stats = sum;
2912			ctx.secy = &macsec_priv(dev)->secy;
2913			macsec_offload(ops->mdo_get_dev_stats, &ctx);
2914		}
2915		return;
2916	}
2917
2918	for_each_possible_cpu(cpu) {
2919		const struct pcpu_secy_stats *stats;
2920		struct macsec_dev_stats tmp;
2921		unsigned int start;
2922
2923		stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
2924		do {
2925			start = u64_stats_fetch_begin_irq(&stats->syncp);
2926			memcpy(&tmp, &stats->stats, sizeof(tmp));
2927		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2928
2929		sum->OutPktsUntagged  += tmp.OutPktsUntagged;
2930		sum->InPktsUntagged   += tmp.InPktsUntagged;
2931		sum->OutPktsTooLong   += tmp.OutPktsTooLong;
2932		sum->InPktsNoTag      += tmp.InPktsNoTag;
2933		sum->InPktsBadTag     += tmp.InPktsBadTag;
2934		sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
2935		sum->InPktsNoSCI      += tmp.InPktsNoSCI;
2936		sum->InPktsOverrun    += tmp.InPktsOverrun;
2937	}
2938}
2939
2940static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
2941{
2942	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
2943			      sum->OutPktsUntagged,
2944			      MACSEC_SECY_STATS_ATTR_PAD) ||
2945	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
2946			      sum->InPktsUntagged,
2947			      MACSEC_SECY_STATS_ATTR_PAD) ||
2948	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
2949			      sum->OutPktsTooLong,
2950			      MACSEC_SECY_STATS_ATTR_PAD) ||
2951	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
2952			      sum->InPktsNoTag,
2953			      MACSEC_SECY_STATS_ATTR_PAD) ||
2954	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
2955			      sum->InPktsBadTag,
2956			      MACSEC_SECY_STATS_ATTR_PAD) ||
2957	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
2958			      sum->InPktsUnknownSCI,
2959			      MACSEC_SECY_STATS_ATTR_PAD) ||
2960	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
2961			      sum->InPktsNoSCI,
2962			      MACSEC_SECY_STATS_ATTR_PAD) ||
2963	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
2964			      sum->InPktsOverrun,
2965			      MACSEC_SECY_STATS_ATTR_PAD))
2966		return -EMSGSIZE;
2967
2968	return 0;
2969}
2970
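/* The cipher suite ID is not stored directly; it is reconstructed for
 * netlink from the SAK length (128- vs 256-bit key) plus the XPN flag.
 */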
2971static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
2972{
2973	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2974	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
2975							 MACSEC_ATTR_SECY);
2976	u64 csid;
2977
2978	if (!secy_nest)
2979		return 1;
2980
2981	switch (secy->key_len) {
2982	case MACSEC_GCM_AES_128_SAK_LEN:
2983		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
2984		break;
2985	case MACSEC_GCM_AES_256_SAK_LEN:
2986		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
2987		break;
2988	default:
2989		goto cancel;
2990	}
2991
2992	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
2993			MACSEC_SECY_ATTR_PAD) ||
2994	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
2995			      csid, MACSEC_SECY_ATTR_PAD) ||
2996	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
2997	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
2998	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
2999	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
3000	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
3001	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
3002	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
3003	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
3004	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
3005	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
3006		goto cancel;
3007
3008	if (secy->replay_protect) {
3009		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
3010			goto cancel;
3011	}
3012
3013	nla_nest_end(skb, secy_nest);
3014	return 0;
3015
3016cancel:
3017	nla_nest_cancel(skb, secy_nest);
3018	return 1;
3019}
3020
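/* Emit one MACSEC_CMD_GET_TXSC message per SecY: ifindex, offload mode,
 * SecY config, SecY/TXSC stats, the TX SA list, and the RX SC list (each
 * RX SC with its own stats and SA list).  This is what `ip macsec show`
 * reads.
 */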
3021static noinline_for_stack int
3022dump_secy(struct macsec_secy *secy, struct net_device *dev,
3023	  struct sk_buff *skb, struct netlink_callback *cb)
3024{
3025	struct macsec_tx_sc_stats tx_sc_stats = {0, };
3026	struct macsec_tx_sa_stats tx_sa_stats = {0, };
3027	struct macsec_rx_sc_stats rx_sc_stats = {0, };
3028	struct macsec_rx_sa_stats rx_sa_stats = {0, };
3029	struct macsec_dev *macsec = netdev_priv(dev);
3030	struct macsec_dev_stats dev_stats = {0, };
3031	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3032	struct nlattr *txsa_list, *rxsc_list;
3033	struct macsec_rx_sc *rx_sc;
3034	struct nlattr *attr;
3035	void *hdr;
3036	int i, j;
3037
3038	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3039			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
3040	if (!hdr)
3041		return -EMSGSIZE;
3042
3043	genl_dump_check_consistent(cb, hdr);
3044
3045	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
3046		goto nla_put_failure;
3047
3048	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
3049	if (!attr)
3050		goto nla_put_failure;
3051	if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
3052		goto nla_put_failure;
3053	nla_nest_end(skb, attr);
3054
3055	if (nla_put_secy(secy, skb))
3056		goto nla_put_failure;
3057
3058	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
3059	if (!attr)
3060		goto nla_put_failure;
3061
3062	get_tx_sc_stats(dev, &tx_sc_stats);
3063	if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
3064		nla_nest_cancel(skb, attr);
3065		goto nla_put_failure;
3066	}
3067	nla_nest_end(skb, attr);
3068
3069	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
3070	if (!attr)
3071		goto nla_put_failure;
3072	get_secy_stats(dev, &dev_stats);
3073	if (copy_secy_stats(skb, &dev_stats)) {
3074		nla_nest_cancel(skb, attr);
3075		goto nla_put_failure;
3076	}
3077	nla_nest_end(skb, attr);
3078
3079	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
3080	if (!txsa_list)
3081		goto nla_put_failure;
3082	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
3083		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
3084		struct nlattr *txsa_nest;
3085		u64 pn;
3086		int pn_len;
3087
3088		if (!tx_sa)
3089			continue;
3090
3091		txsa_nest = nla_nest_start_noflag(skb, j++);
3092		if (!txsa_nest) {
3093			nla_nest_cancel(skb, txsa_list);
3094			goto nla_put_failure;
3095		}
3096
3097		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
3098		if (!attr) {
3099			nla_nest_cancel(skb, txsa_nest);
3100			nla_nest_cancel(skb, txsa_list);
3101			goto nla_put_failure;
3102		}
3103		memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
3104		get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
3105		if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
3106			nla_nest_cancel(skb, attr);
3107			nla_nest_cancel(skb, txsa_nest);
3108			nla_nest_cancel(skb, txsa_list);
3109			goto nla_put_failure;
3110		}
3111		nla_nest_end(skb, attr);
3112
3113		if (secy->xpn) {
3114			pn = tx_sa->next_pn;
3115			pn_len = MACSEC_XPN_PN_LEN;
3116		} else {
3117			pn = tx_sa->next_pn_halves.lower;
3118			pn_len = MACSEC_DEFAULT_PN_LEN;
3119		}
3120
3121		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3122		    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3123		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
3124		    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
3125		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
3126			nla_nest_cancel(skb, txsa_nest);
3127			nla_nest_cancel(skb, txsa_list);
3128			goto nla_put_failure;
3129		}
3130
3131		nla_nest_end(skb, txsa_nest);
3132	}
3133	nla_nest_end(skb, txsa_list);
3134
3135	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
3136	if (!rxsc_list)
3137		goto nla_put_failure;
3138
3139	j = 1;
3140	for_each_rxsc_rtnl(secy, rx_sc) {
3141		int k;
3142		struct nlattr *rxsa_list;
3143		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);
3144
3145		if (!rxsc_nest) {
3146			nla_nest_cancel(skb, rxsc_list);
3147			goto nla_put_failure;
3148		}
3149
3150		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
3151		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
3152				MACSEC_RXSC_ATTR_PAD)) {
3153			nla_nest_cancel(skb, rxsc_nest);
3154			nla_nest_cancel(skb, rxsc_list);
3155			goto nla_put_failure;
3156		}
3157
3158		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
3159		if (!attr) {
3160			nla_nest_cancel(skb, rxsc_nest);
3161			nla_nest_cancel(skb, rxsc_list);
3162			goto nla_put_failure;
3163		}
3164		memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
3165		get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
3166		if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
3167			nla_nest_cancel(skb, attr);
3168			nla_nest_cancel(skb, rxsc_nest);
3169			nla_nest_cancel(skb, rxsc_list);
3170			goto nla_put_failure;
3171		}
3172		nla_nest_end(skb, attr);
3173
3174		rxsa_list = nla_nest_start_noflag(skb,
3175						  MACSEC_RXSC_ATTR_SA_LIST);
3176		if (!rxsa_list) {
3177			nla_nest_cancel(skb, rxsc_nest);
3178			nla_nest_cancel(skb, rxsc_list);
3179			goto nla_put_failure;
3180		}
3181
3182		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
3183			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
3184			struct nlattr *rxsa_nest;
3185			u64 pn;
3186			int pn_len;
3187
3188			if (!rx_sa)
3189				continue;
3190
3191			rxsa_nest = nla_nest_start_noflag(skb, k++);
3192			if (!rxsa_nest) {
3193				nla_nest_cancel(skb, rxsa_list);
3194				nla_nest_cancel(skb, rxsc_nest);
3195				nla_nest_cancel(skb, rxsc_list);
3196				goto nla_put_failure;
3197			}
3198
3199			attr = nla_nest_start_noflag(skb,
3200						     MACSEC_SA_ATTR_STATS);
3201			if (!attr) {
3202				nla_nest_cancel(skb, rxsa_list);
3203				nla_nest_cancel(skb, rxsc_nest);
3204				nla_nest_cancel(skb, rxsc_list);
3205				goto nla_put_failure;
3206			}
3207			memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
3208			get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
3209			if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
3210				nla_nest_cancel(skb, attr);
3211				nla_nest_cancel(skb, rxsa_list);
3212				nla_nest_cancel(skb, rxsc_nest);
3213				nla_nest_cancel(skb, rxsc_list);
3214				goto nla_put_failure;
3215			}
3216			nla_nest_end(skb, attr);
3217
3218			if (secy->xpn) {
3219				pn = rx_sa->next_pn;
3220				pn_len = MACSEC_XPN_PN_LEN;
3221			} else {
3222				pn = rx_sa->next_pn_halves.lower;
3223				pn_len = MACSEC_DEFAULT_PN_LEN;
3224			}
3225
3226			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3227			    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3228			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
3229			    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
3230			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
3231				nla_nest_cancel(skb, rxsa_nest);
3232				nla_nest_cancel(skb, rxsc_nest);
3233				nla_nest_cancel(skb, rxsc_list);
3234				goto nla_put_failure;
3235			}
3236			nla_nest_end(skb, rxsa_nest);
3237		}
3238
3239		nla_nest_end(skb, rxsa_list);
3240		nla_nest_end(skb, rxsc_nest);
3241	}
3242
3243	nla_nest_end(skb, rxsc_list);
3244
3245	genlmsg_end(skb, hdr);
3246
3247	return 0;
3248
3249nla_put_failure:
3250	genlmsg_cancel(skb, hdr);
3251	return -EMSGSIZE;
3252}
3253
3254static int macsec_generation = 1; /* protected by RTNL */
3255
3256static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
3257{
3258	struct net *net = sock_net(skb->sk);
3259	struct net_device *dev;
3260	int dev_idx, d;
3261
3262	dev_idx = cb->args[0];
3263
3264	d = 0;
3265	rtnl_lock();
3266
3267	cb->seq = macsec_generation;
3268
3269	for_each_netdev(net, dev) {
3270		struct macsec_secy *secy;
3271
3272		if (d < dev_idx)
3273			goto next;
3274
3275		if (!netif_is_macsec(dev))
3276			goto next;
3277
3278		secy = &macsec_priv(dev)->secy;
3279		if (dump_secy(secy, dev, skb, cb) < 0)
3280			goto done;
3281next:
3282		d++;
3283	}
3284
3285done:
3286	rtnl_unlock();
3287	cb->args[0] = d;
3288	return skb->len;
3289}
3290
3291static const struct genl_small_ops macsec_genl_ops[] = {
3292	{
3293		.cmd = MACSEC_CMD_GET_TXSC,
3294		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3295		.dumpit = macsec_dump_txsc,
3296	},
3297	{
3298		.cmd = MACSEC_CMD_ADD_RXSC,
3299		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3300		.doit = macsec_add_rxsc,
3301		.flags = GENL_ADMIN_PERM,
3302	},
3303	{
3304		.cmd = MACSEC_CMD_DEL_RXSC,
3305		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3306		.doit = macsec_del_rxsc,
3307		.flags = GENL_ADMIN_PERM,
3308	},
3309	{
3310		.cmd = MACSEC_CMD_UPD_RXSC,
3311		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3312		.doit = macsec_upd_rxsc,
3313		.flags = GENL_ADMIN_PERM,
3314	},
3315	{
3316		.cmd = MACSEC_CMD_ADD_TXSA,
3317		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3318		.doit = macsec_add_txsa,
3319		.flags = GENL_ADMIN_PERM,
3320	},
3321	{
3322		.cmd = MACSEC_CMD_DEL_TXSA,
3323		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3324		.doit = macsec_del_txsa,
3325		.flags = GENL_ADMIN_PERM,
3326	},
3327	{
3328		.cmd = MACSEC_CMD_UPD_TXSA,
3329		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3330		.doit = macsec_upd_txsa,
3331		.flags = GENL_ADMIN_PERM,
3332	},
3333	{
3334		.cmd = MACSEC_CMD_ADD_RXSA,
3335		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3336		.doit = macsec_add_rxsa,
3337		.flags = GENL_ADMIN_PERM,
3338	},
3339	{
3340		.cmd = MACSEC_CMD_DEL_RXSA,
3341		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3342		.doit = macsec_del_rxsa,
3343		.flags = GENL_ADMIN_PERM,
3344	},
3345	{
3346		.cmd = MACSEC_CMD_UPD_RXSA,
3347		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3348		.doit = macsec_upd_rxsa,
3349		.flags = GENL_ADMIN_PERM,
3350	},
3351	{
3352		.cmd = MACSEC_CMD_UPD_OFFLOAD,
3353		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3354		.doit = macsec_upd_offload,
3355		.flags = GENL_ADMIN_PERM,
3356	},
3357};
3358
3359static struct genl_family macsec_fam __ro_after_init = {
3360	.name		= MACSEC_GENL_NAME,
3361	.hdrsize	= 0,
3362	.version	= MACSEC_GENL_VERSION,
3363	.maxattr	= MACSEC_ATTR_MAX,
3364	.policy = macsec_genl_policy,
3365	.netnsok	= true,
3366	.module		= THIS_MODULE,
3367	.small_ops	= macsec_genl_ops,
3368	.n_small_ops	= ARRAY_SIZE(macsec_genl_ops),
3369};
3370
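/* Transmit path: when offloaded, frames go straight to the underlying
 * device; when protection is disabled, they are counted as untagged and
 * passed through; otherwise they are encrypted in software first.
 */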
3371static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
3372				     struct net_device *dev)
3373{
3374	struct macsec_dev *macsec = netdev_priv(dev);
3375	struct macsec_secy *secy = &macsec->secy;
3376	struct pcpu_secy_stats *secy_stats;
3377	int ret, len;
3378
3379	if (macsec_is_offloaded(netdev_priv(dev))) {
3380		skb->dev = macsec->real_dev;
3381		return dev_queue_xmit(skb);
3382	}
3383
3384	/* IEEE 802.1AE-2006 10.5 */
3385	if (!secy->protect_frames) {
3386		secy_stats = this_cpu_ptr(macsec->stats);
3387		u64_stats_update_begin(&secy_stats->syncp);
3388		secy_stats->stats.OutPktsUntagged++;
3389		u64_stats_update_end(&secy_stats->syncp);
3390		skb->dev = macsec->real_dev;
3391		len = skb->len;
3392		ret = dev_queue_xmit(skb);
3393		count_tx(dev, ret, len);
3394		return ret;
3395	}
3396
3397	if (!secy->operational) {
3398		kfree_skb(skb);
3399		dev->stats.tx_dropped++;
3400		return NETDEV_TX_OK;
3401	}
3402
3403	skb = macsec_encrypt(skb, dev);
3404	if (IS_ERR(skb)) {
3405		if (PTR_ERR(skb) != -EINPROGRESS)
3406			dev->stats.tx_dropped++;
3407		return NETDEV_TX_OK;
3408	}
3409
3410	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
3411
3412	macsec_encrypt_finish(skb, dev);
3413	len = skb->len;
3414	ret = dev_queue_xmit(skb);
3415	count_tx(dev, ret, len);
3416	return ret;
3417}
3418
3419#define SW_MACSEC_FEATURES \
3420	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
3421
3422/* If h/w offloading is enabled, use real device features save for
3423 *   VLAN_FEATURES - they require additional ops
3424 *   HW_MACSEC - no reason to report it
3425 */
3426#define REAL_DEV_FEATURES(dev) \
3427	((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC))
3428
3429static int macsec_dev_init(struct net_device *dev)
3430{
3431	struct macsec_dev *macsec = macsec_priv(dev);
3432	struct net_device *real_dev = macsec->real_dev;
3433	int err;
3434
3435	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
3436	if (!dev->tstats)
3437		return -ENOMEM;
3438
3439	err = gro_cells_init(&macsec->gro_cells, dev);
3440	if (err) {
3441		free_percpu(dev->tstats);
3442		return err;
3443	}
3444
3445	if (macsec_is_offloaded(macsec)) {
3446		dev->features = REAL_DEV_FEATURES(real_dev);
3447	} else {
3448		dev->features = real_dev->features & SW_MACSEC_FEATURES;
3449		dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
3450	}
3451
3452	dev->needed_headroom = real_dev->needed_headroom +
3453			       MACSEC_NEEDED_HEADROOM;
3454	dev->needed_tailroom = real_dev->needed_tailroom +
3455			       MACSEC_NEEDED_TAILROOM;
3456
3457	if (is_zero_ether_addr(dev->dev_addr))
3458		eth_hw_addr_inherit(dev, real_dev);
3459	if (is_zero_ether_addr(dev->broadcast))
3460		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
3461
3462	return 0;
3463}
3464
3465static void macsec_dev_uninit(struct net_device *dev)
3466{
3467	struct macsec_dev *macsec = macsec_priv(dev);
3468
3469	gro_cells_destroy(&macsec->gro_cells);
3470	free_percpu(dev->tstats);
3471}
3472
3473static netdev_features_t macsec_fix_features(struct net_device *dev,
3474					     netdev_features_t features)
3475{
3476	struct macsec_dev *macsec = macsec_priv(dev);
3477	struct net_device *real_dev = macsec->real_dev;
3478
3479	if (macsec_is_offloaded(macsec))
3480		return REAL_DEV_FEATURES(real_dev);
3481
3482	features &= (real_dev->features & SW_MACSEC_FEATURES) |
3483		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
3484	features |= NETIF_F_LLTX;
3485
3486	return features;
3487}
3488
3489static int macsec_dev_open(struct net_device *dev)
3490{
3491	struct macsec_dev *macsec = macsec_priv(dev);
3492	struct net_device *real_dev = macsec->real_dev;
3493	int err;
3494
3495	err = dev_uc_add(real_dev, dev->dev_addr);
3496	if (err < 0)
3497		return err;
3498
3499	if (dev->flags & IFF_ALLMULTI) {
3500		err = dev_set_allmulti(real_dev, 1);
3501		if (err < 0)
3502			goto del_unicast;
3503	}
3504
3505	if (dev->flags & IFF_PROMISC) {
3506		err = dev_set_promiscuity(real_dev, 1);
3507		if (err < 0)
3508			goto clear_allmulti;
3509	}
3510
3511	/* If h/w offloading is available, propagate to the device */
3512	if (macsec_is_offloaded(macsec)) {
3513		const struct macsec_ops *ops;
3514		struct macsec_context ctx;
3515
3516		ops = macsec_get_ops(netdev_priv(dev), &ctx);
3517		if (!ops) {
3518			err = -EOPNOTSUPP;
3519			goto clear_allmulti;
3520		}
3521
3522		ctx.secy = &macsec->secy;
3523		err = macsec_offload(ops->mdo_dev_open, &ctx);
3524		if (err)
3525			goto clear_allmulti;
3526	}
3527
3528	if (netif_carrier_ok(real_dev))
3529		netif_carrier_on(dev);
3530
3531	return 0;
3532clear_allmulti:
3533	if (dev->flags & IFF_ALLMULTI)
3534		dev_set_allmulti(real_dev, -1);
3535del_unicast:
3536	dev_uc_del(real_dev, dev->dev_addr);
3537	netif_carrier_off(dev);
3538	return err;
3539}
3540
3541static int macsec_dev_stop(struct net_device *dev)
3542{
3543	struct macsec_dev *macsec = macsec_priv(dev);
3544	struct net_device *real_dev = macsec->real_dev;
3545
3546	netif_carrier_off(dev);
3547
3548	/* If h/w offloading is available, propagate to the device */
3549	if (macsec_is_offloaded(macsec)) {
3550		const struct macsec_ops *ops;
3551		struct macsec_context ctx;
3552
3553		ops = macsec_get_ops(macsec, &ctx);
3554		if (ops) {
3555			ctx.secy = &macsec->secy;
3556			macsec_offload(ops->mdo_dev_stop, &ctx);
3557		}
3558	}
3559
3560	dev_mc_unsync(real_dev, dev);
3561	dev_uc_unsync(real_dev, dev);
3562
3563	if (dev->flags & IFF_ALLMULTI)
3564		dev_set_allmulti(real_dev, -1);
3565
3566	if (dev->flags & IFF_PROMISC)
3567		dev_set_promiscuity(real_dev, -1);
3568
3569	dev_uc_del(real_dev, dev->dev_addr);
3570
3571	return 0;
3572}
3573
3574static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
3575{
3576	struct net_device *real_dev = macsec_priv(dev)->real_dev;
3577
3578	if (!(dev->flags & IFF_UP))
3579		return;
3580
3581	if (change & IFF_ALLMULTI)
3582		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
3583
3584	if (change & IFF_PROMISC)
3585		dev_set_promiscuity(real_dev,
3586				    dev->flags & IFF_PROMISC ? 1 : -1);
3587}
3588
3589static void macsec_dev_set_rx_mode(struct net_device *dev)
3590{
3591	struct net_device *real_dev = macsec_priv(dev)->real_dev;
3592
3593	dev_mc_sync(real_dev, dev);
3594	dev_uc_sync(real_dev, dev);
3595}
3596
3597static int macsec_set_mac_address(struct net_device *dev, void *p)
3598{
3599	struct macsec_dev *macsec = macsec_priv(dev);
3600	struct net_device *real_dev = macsec->real_dev;
3601	struct sockaddr *addr = p;
3602	int err;
3603
3604	if (!is_valid_ether_addr(addr->sa_data))
3605		return -EADDRNOTAVAIL;
3606
3607	if (!(dev->flags & IFF_UP))
3608		goto out;
3609
3610	err = dev_uc_add(real_dev, addr->sa_data);
3611	if (err < 0)
3612		return err;
3613
3614	dev_uc_del(real_dev, dev->dev_addr);
3615
3616out:
3617	ether_addr_copy(dev->dev_addr, addr->sa_data);
3618	macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
3619
3620	/* If h/w offloading is available, propagate to the device */
3621	if (macsec_is_offloaded(macsec)) {
3622		const struct macsec_ops *ops;
3623		struct macsec_context ctx;
3624
3625		ops = macsec_get_ops(macsec, &ctx);
3626		if (ops) {
3627			ctx.secy = &macsec->secy;
3628			macsec_offload(ops->mdo_upd_secy, &ctx);
3629		}
3630	}
3631
3632	return 0;
3633}
3634
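/* The MACsec MTU is bounded by the lower device's MTU minus the per-frame
 * overhead: the SecTAG (up to 16 bytes including the MACsec EtherType and
 * SCI, the worst case assumed here) plus the ICV (16 bytes by default).
 * A 1500 byte lower MTU thus leaves 1468 bytes, for example.
 */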
3635static int macsec_change_mtu(struct net_device *dev, int new_mtu)
3636{
3637	struct macsec_dev *macsec = macsec_priv(dev);
3638	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);
3639
3640	if (macsec->real_dev->mtu - extra < new_mtu)
3641		return -ERANGE;
3642
3643	dev->mtu = new_mtu;
3644
3645	return 0;
3646}
3647
3648static void macsec_get_stats64(struct net_device *dev,
3649			       struct rtnl_link_stats64 *s)
3650{
3651	if (!dev->tstats)
3652		return;
3653
3654	dev_fetch_sw_netstats(s, dev->tstats);
3655
3656	s->rx_dropped = dev->stats.rx_dropped;
3657	s->tx_dropped = dev->stats.tx_dropped;
3658}
3659
3660static int macsec_get_iflink(const struct net_device *dev)
3661{
3662	return macsec_priv(dev)->real_dev->ifindex;
3663}
3664
3665static const struct net_device_ops macsec_netdev_ops = {
3666	.ndo_init		= macsec_dev_init,
3667	.ndo_uninit		= macsec_dev_uninit,
3668	.ndo_open		= macsec_dev_open,
3669	.ndo_stop		= macsec_dev_stop,
3670	.ndo_fix_features	= macsec_fix_features,
3671	.ndo_change_mtu		= macsec_change_mtu,
3672	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
3673	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
3674	.ndo_set_mac_address	= macsec_set_mac_address,
3675	.ndo_start_xmit		= macsec_start_xmit,
3676	.ndo_get_stats64	= macsec_get_stats64,
3677	.ndo_get_iflink		= macsec_get_iflink,
3678};
3679
3680static const struct device_type macsec_type = {
3681	.name = "macsec",
3682};
3683
3684static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
3685	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
3686	[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
3687	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
3688	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
3689	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
3690	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
3691	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
3692	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
3693	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
3694	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
3695	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
3696	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
3697	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
3698};
3699
3700static void macsec_free_netdev(struct net_device *dev)
3701{
3702	struct macsec_dev *macsec = macsec_priv(dev);
3703
3704	free_percpu(macsec->stats);
3705	free_percpu(macsec->secy.tx_sc.stats);
3706
3707}
3708
3709static void macsec_setup(struct net_device *dev)
3710{
3711	ether_setup(dev);
3712	dev->min_mtu = 0;
3713	dev->max_mtu = ETH_MAX_MTU;
3714	dev->priv_flags |= IFF_NO_QUEUE;
3715	dev->netdev_ops = &macsec_netdev_ops;
3716	dev->needs_free_netdev = true;
3717	dev->priv_destructor = macsec_free_netdev;
3718	SET_NETDEV_DEVTYPE(dev, &macsec_type);
3719
3720	eth_zero_addr(dev->broadcast);
3721}
3722
3723static int macsec_changelink_common(struct net_device *dev,
3724				    struct nlattr *data[])
3725{
3726	struct macsec_secy *secy;
3727	struct macsec_tx_sc *tx_sc;
3728
3729	secy = &macsec_priv(dev)->secy;
3730	tx_sc = &secy->tx_sc;
3731
3732	if (data[IFLA_MACSEC_ENCODING_SA]) {
3733		struct macsec_tx_sa *tx_sa;
3734
3735		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
3736		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);
3737
3738		secy->operational = tx_sa && tx_sa->active;
3739	}
3740
3741	if (data[IFLA_MACSEC_WINDOW])
3742		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
3743
3744	if (data[IFLA_MACSEC_ENCRYPT])
3745		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
3746
3747	if (data[IFLA_MACSEC_PROTECT])
3748		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);
3749
3750	if (data[IFLA_MACSEC_INC_SCI])
3751		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
3752
3753	if (data[IFLA_MACSEC_ES])
3754		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);
3755
3756	if (data[IFLA_MACSEC_SCB])
3757		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);
3758
3759	if (data[IFLA_MACSEC_REPLAY_PROTECT])
3760		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);
3761
3762	if (data[IFLA_MACSEC_VALIDATION])
3763		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
3764
3765	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
3766		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
3767		case MACSEC_CIPHER_ID_GCM_AES_128:
3768		case MACSEC_DEFAULT_CIPHER_ID:
3769			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
3770			secy->xpn = false;
3771			break;
3772		case MACSEC_CIPHER_ID_GCM_AES_256:
3773			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
3774			secy->xpn = false;
3775			break;
3776		case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
3777			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
3778			secy->xpn = true;
3779			break;
3780		case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
3781			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
3782			secy->xpn = true;
3783			break;
3784		default:
3785			return -EINVAL;
3786		}
3787	}
3788
3789	return 0;
3790}
3791
3792static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
3793			     struct nlattr *data[],
3794			     struct netlink_ext_ack *extack)
3795{
3796	struct macsec_dev *macsec = macsec_priv(dev);
3797	struct macsec_tx_sc tx_sc;
3798	struct macsec_secy secy;
3799	int ret;
3800
3801	if (!data)
3802		return 0;
3803
3804	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
3805	    data[IFLA_MACSEC_ICV_LEN] ||
3806	    data[IFLA_MACSEC_SCI] ||
3807	    data[IFLA_MACSEC_PORT])
3808		return -EINVAL;
3809
3810	/* Keep a copy of unmodified secy and tx_sc, in case the offload
3811	 * propagation fails, to revert macsec_changelink_common.
3812	 */
3813	memcpy(&secy, &macsec->secy, sizeof(secy));
3814	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));
3815
3816	ret = macsec_changelink_common(dev, data);
3817	if (ret)
3818		return ret;
3819
3820	/* If h/w offloading is available, propagate to the device */
3821	if (macsec_is_offloaded(macsec)) {
3822		const struct macsec_ops *ops;
3823		struct macsec_context ctx;
3824		int ret;
3825
3826		ops = macsec_get_ops(netdev_priv(dev), &ctx);
3827		if (!ops) {
3828			ret = -EOPNOTSUPP;
3829			goto cleanup;
3830		}
3831
3832		ctx.secy = &macsec->secy;
3833		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
3834		if (ret)
3835			goto cleanup;
3836	}
3837
3838	return 0;
3839
3840cleanup:
3841	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
3842	memcpy(&macsec->secy, &secy, sizeof(secy));
3843
3844	return ret;
3845}
3846
3847static void macsec_del_dev(struct macsec_dev *macsec)
3848{
3849	int i;
3850
3851	while (macsec->secy.rx_sc) {
3852		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);
3853
3854		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
3855		free_rx_sc(rx_sc);
3856	}
3857
3858	for (i = 0; i < MACSEC_NUM_AN; i++) {
3859		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);
3860
3861		if (sa) {
3862			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
3863			clear_tx_sa(sa);
3864		}
3865	}
3866}
3867
3868static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
3869{
3870	struct macsec_dev *macsec = macsec_priv(dev);
3871	struct net_device *real_dev = macsec->real_dev;
3872
3873	unregister_netdevice_queue(dev, head);
3874	list_del_rcu(&macsec->secys);
3875	macsec_del_dev(macsec);
3876	netdev_upper_dev_unlink(real_dev, dev);
3877
3878	macsec_generation++;
3879}
3880
3881static void macsec_dellink(struct net_device *dev, struct list_head *head)
3882{
3883	struct macsec_dev *macsec = macsec_priv(dev);
3884	struct net_device *real_dev = macsec->real_dev;
3885	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
3886
3887	/* If h/w offloading is available, propagate to the device */
3888	if (macsec_is_offloaded(macsec)) {
3889		const struct macsec_ops *ops;
3890		struct macsec_context ctx;
3891
3892		ops = macsec_get_ops(netdev_priv(dev), &ctx);
3893		if (ops) {
3894			ctx.secy = &macsec->secy;
3895			macsec_offload(ops->mdo_del_secy, &ctx);
3896		}
3897	}
3898
3899	macsec_common_dellink(dev, head);
3900
3901	if (list_empty(&rxd->secys)) {
3902		netdev_rx_handler_unregister(real_dev);
3903		kfree(rxd);
3904	}
3905}
3906
3907static int register_macsec_dev(struct net_device *real_dev,
3908			       struct net_device *dev)
3909{
3910	struct macsec_dev *macsec = macsec_priv(dev);
3911	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
3912
3913	if (!rxd) {
3914		int err;
3915
3916		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
3917		if (!rxd)
3918			return -ENOMEM;
3919
3920		INIT_LIST_HEAD(&rxd->secys);
3921
3922		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
3923						 rxd);
3924		if (err < 0) {
3925			kfree(rxd);
3926			return err;
3927		}
3928	}
3929
3930	list_add_tail_rcu(&macsec->secys, &rxd->secys);
3931	return 0;
3932}
3933
3934static bool sci_exists(struct net_device *dev, sci_t sci)
3935{
3936	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
3937	struct macsec_dev *macsec;
3938
3939	list_for_each_entry(macsec, &rxd->secys, secys) {
3940		if (macsec->secy.sci == sci)
3941			return true;
3942	}
3943
3944	return false;
3945}
3946
3947static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
3948{
3949	struct macsec_dev *macsec = macsec_priv(dev);
3950	struct macsec_secy *secy = &macsec->secy;
3951
3952	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
3953	if (!macsec->stats)
3954		return -ENOMEM;
3955
3956	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
3957	if (!secy->tx_sc.stats) {
3958		free_percpu(macsec->stats);
3959		return -ENOMEM;
3960	}
3961
3962	if (sci == MACSEC_UNDEF_SCI)
3963		sci = dev_to_sci(dev, MACSEC_PORT_ES);
3964
3965	secy->netdev = dev;
3966	secy->operational = true;
3967	secy->key_len = DEFAULT_SAK_LEN;
3968	secy->icv_len = icv_len;
3969	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
3970	secy->protect_frames = true;
3971	secy->replay_protect = false;
3972	secy->xpn = DEFAULT_XPN;
3973
3974	secy->sci = sci;
3975	secy->tx_sc.active = true;
3976	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
3977	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
3978	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
3979	secy->tx_sc.end_station = false;
3980	secy->tx_sc.scb = false;
3981
3982	return 0;
3983}
3984
3985static struct lock_class_key macsec_netdev_addr_lock_key;
3986
3987static int macsec_newlink(struct net *net, struct net_device *dev,
3988			  struct nlattr *tb[], struct nlattr *data[],
3989			  struct netlink_ext_ack *extack)
3990{
3991	struct macsec_dev *macsec = macsec_priv(dev);
3992	rx_handler_func_t *rx_handler;
3993	u8 icv_len = DEFAULT_ICV_LEN;
3994	struct net_device *real_dev;
3995	int err, mtu;
3996	sci_t sci;
3997
3998	if (!tb[IFLA_LINK])
3999		return -EINVAL;
4000	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
4001	if (!real_dev)
4002		return -ENODEV;
4003	if (real_dev->type != ARPHRD_ETHER)
4004		return -EINVAL;
4005
4006	dev->priv_flags |= IFF_MACSEC;
4007
4008	macsec->real_dev = real_dev;
4009
4010	if (data && data[IFLA_MACSEC_OFFLOAD])
4011		macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
4012	else
4013		/* MACsec offloading is off by default */
4014		macsec->offload = MACSEC_OFFLOAD_OFF;
4015
4016	/* Check if the offloading mode is supported by the underlying layers */
4017	if (macsec->offload != MACSEC_OFFLOAD_OFF &&
4018	    !macsec_check_offload(macsec->offload, macsec))
4019		return -EOPNOTSUPP;
4020
4021	if (data && data[IFLA_MACSEC_ICV_LEN])
4022		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
4023	mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
4024	if (mtu < 0)
4025		dev->mtu = 0;
4026	else
4027		dev->mtu = mtu;
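	/* Worked example (assumed values): with real_dev->mtu == 1500 and the
	 * default 16-byte ICV, dev->mtu becomes 1500 - 16 -
	 * macsec_extra_len(true) = 1500 - 16 - 16 = 1468, reserving room for
	 * the SecTAG with SCI plus the trailing ICV.
	 */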
4028
4029	rx_handler = rtnl_dereference(real_dev->rx_handler);
4030	if (rx_handler && rx_handler != macsec_handle_frame)
4031		return -EBUSY;
4032
4033	err = register_netdevice(dev);
4034	if (err < 0)
4035		return err;
4036
4037	netdev_lockdep_set_classes(dev);
4038	lockdep_set_class(&dev->addr_list_lock,
4039			  &macsec_netdev_addr_lock_key);
4040
4041	err = netdev_upper_dev_link(real_dev, dev, extack);
4042	if (err < 0)
4043		goto unregister;
4044
4045	/* need to be already registered so that ->init has run and
4046	 * the MAC addr is set
4047	 */
4048	if (data && data[IFLA_MACSEC_SCI])
4049		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
4050	else if (data && data[IFLA_MACSEC_PORT])
4051		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
4052	else
4053		sci = dev_to_sci(dev, MACSEC_PORT_ES);
4054
4055	if (rx_handler && sci_exists(real_dev, sci)) {
4056		err = -EBUSY;
4057		goto unlink;
4058	}
4059
4060	err = macsec_add_dev(dev, sci, icv_len);
4061	if (err)
4062		goto unlink;
4063
4064	if (data) {
4065		err = macsec_changelink_common(dev, data);
4066		if (err)
4067			goto del_dev;
4068	}
4069
4070	/* If h/w offloading is available, propagate to the device */
4071	if (macsec_is_offloaded(macsec)) {
4072		const struct macsec_ops *ops;
4073		struct macsec_context ctx;
4074
4075		ops = macsec_get_ops(macsec, &ctx);
4076		if (ops) {
4077			ctx.secy = &macsec->secy;
4078			err = macsec_offload(ops->mdo_add_secy, &ctx);
4079			if (err)
4080				goto del_dev;
4081		}
4082	}
4083
4084	err = register_macsec_dev(real_dev, dev);
4085	if (err < 0)
4086		goto del_dev;
4087
4088	netif_stacked_transfer_operstate(real_dev, dev);
4089	linkwatch_fire_event(dev);
4090
4091	macsec_generation++;
4092
4093	return 0;
4094
4095del_dev:
4096	macsec_del_dev(macsec);
4097unlink:
4098	netdev_upper_dev_unlink(real_dev, dev);
4099unregister:
4100	unregister_netdevice(dev);
4101	return err;
4102}
4103
4104static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
4105				struct netlink_ext_ack *extack)
4106{
4107	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
4108	u8 icv_len = DEFAULT_ICV_LEN;
4109	int flag;
4110	bool es, scb, sci;
4111
4112	if (!data)
4113		return 0;
4114
4115	if (data[IFLA_MACSEC_CIPHER_SUITE])
4116		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);
4117
4118	if (data[IFLA_MACSEC_ICV_LEN]) {
4119		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
4120		if (icv_len != DEFAULT_ICV_LEN) {
4121			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
4122			struct crypto_aead *dummy_tfm;
4123
4124			dummy_tfm = macsec_alloc_tfm(dummy_key,
4125						     DEFAULT_SAK_LEN,
4126						     icv_len);
4127			if (IS_ERR(dummy_tfm))
4128				return PTR_ERR(dummy_tfm);
4129			crypto_free_aead(dummy_tfm);
4130		}
4131	}
4132
4133	switch (csid) {
4134	case MACSEC_CIPHER_ID_GCM_AES_128:
4135	case MACSEC_CIPHER_ID_GCM_AES_256:
4136	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
4137	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
4138	case MACSEC_DEFAULT_CIPHER_ID:
4139		if (icv_len < MACSEC_MIN_ICV_LEN ||
4140		    icv_len > MACSEC_STD_ICV_LEN)
4141			return -EINVAL;
4142		break;
4143	default:
4144		return -EINVAL;
4145	}
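	/* All four GCM-AES suites share the same ICV bounds: the 16-byte GCM
	 * tag may be truncated down to MACSEC_MIN_ICV_LEN (8 bytes) but no
	 * further, so e.g. icv_len == 12 passes while icv_len == 4 is
	 * rejected with -EINVAL above.
	 */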
4146
4147	if (data[IFLA_MACSEC_ENCODING_SA]) {
4148		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
4149			return -EINVAL;
4150	}
4151
4152	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
4153	     flag < IFLA_MACSEC_VALIDATION;
4154	     flag++) {
4155		if (data[flag]) {
4156			if (nla_get_u8(data[flag]) > 1)
4157				return -EINVAL;
4158		}
4159	}
4160
4161	es  = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
4162	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
4163	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;
4164
4165	if ((sci && (scb || es)) || (scb && es))
4166		return -EINVAL;
4167
4168	if (data[IFLA_MACSEC_VALIDATION] &&
4169	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
4170		return -EINVAL;
4171
4172	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
4173	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
4174	    !data[IFLA_MACSEC_WINDOW])
4175		return -EINVAL;
4176
4177	return 0;
4178}
4179
4180static struct net *macsec_get_link_net(const struct net_device *dev)
4181{
4182	return dev_net(macsec_priv(dev)->real_dev);
4183}
4184
4185static size_t macsec_get_size(const struct net_device *dev)
4186{
4187	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
4188		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
4189		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
4190		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
4191		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
4192		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
4193		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
4194		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
4195		nla_total_size(1) + /* IFLA_MACSEC_ES */
4196		nla_total_size(1) + /* IFLA_MACSEC_SCB */
4197		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
4198		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
4199		0;
4200}
4201
4202static int macsec_fill_info(struct sk_buff *skb,
4203			    const struct net_device *dev)
4204{
4205	struct macsec_secy *secy = &macsec_priv(dev)->secy;
4206	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
4207	u64 csid;
4208
4209	switch (secy->key_len) {
4210	case MACSEC_GCM_AES_128_SAK_LEN:
4211		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
4212		break;
4213	case MACSEC_GCM_AES_256_SAK_LEN:
4214		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
4215		break;
4216	default:
4217		goto nla_put_failure;
4218	}
4219
4220	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
4221			IFLA_MACSEC_PAD) ||
4222	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
4223	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
4224			      csid, IFLA_MACSEC_PAD) ||
4225	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
4226	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
4227	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
4228	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
4229	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
4230	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
4231	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
4232	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
4233	    0)
4234		goto nla_put_failure;
4235
4236	if (secy->replay_protect) {
4237		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
4238			goto nla_put_failure;
4239	}
4240
4241	return 0;
4242
4243nla_put_failure:
4244	return -EMSGSIZE;
4245}
4246
4247static struct rtnl_link_ops macsec_link_ops __read_mostly = {
4248	.kind		= "macsec",
4249	.priv_size	= sizeof(struct macsec_dev),
4250	.maxtype	= IFLA_MACSEC_MAX,
4251	.policy		= macsec_rtnl_policy,
4252	.setup		= macsec_setup,
4253	.validate	= macsec_validate_attr,
4254	.newlink	= macsec_newlink,
4255	.changelink	= macsec_changelink,
4256	.dellink	= macsec_dellink,
4257	.get_size	= macsec_get_size,
4258	.fill_info	= macsec_fill_info,
4259	.get_link_net	= macsec_get_link_net,
4260};
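/* These ops back the "macsec" rtnl link type driven by iproute2; a minimal
 * usage sketch (assuming eth0 as the underlying device):
 *
 *	ip link add link eth0 macsec0 type macsec encrypt on
 *	ip macsec add macsec0 tx sa 0 pn 1 on key 01 <16-byte-key-in-hex>
 *
 * The first command runs through macsec_validate_attr()/macsec_newlink();
 * the second is handled by the macsec genetlink family.
 */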
4261
4262static bool is_macsec_master(struct net_device *dev)
4263{
4264	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
4265}
4266
4267static int macsec_notify(struct notifier_block *this, unsigned long event,
4268			 void *ptr)
4269{
4270	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
4271	LIST_HEAD(head);
4272
4273	if (!is_macsec_master(real_dev))
4274		return NOTIFY_DONE;
4275
4276	switch (event) {
4277	case NETDEV_DOWN:
4278	case NETDEV_UP:
4279	case NETDEV_CHANGE: {
4280		struct macsec_dev *m, *n;
4281		struct macsec_rxh_data *rxd;
4282
4283		rxd = macsec_data_rtnl(real_dev);
4284		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
4285			struct net_device *dev = m->secy.netdev;
4286
4287			netif_stacked_transfer_operstate(real_dev, dev);
4288		}
4289		break;
4290	}
4291	case NETDEV_UNREGISTER: {
4292		struct macsec_dev *m, *n;
4293		struct macsec_rxh_data *rxd;
4294
4295		rxd = macsec_data_rtnl(real_dev);
4296		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
4297			macsec_common_dellink(m->secy.netdev, &head);
4298		}
4299
4300		netdev_rx_handler_unregister(real_dev);
4301		kfree(rxd);
4302
4303		unregister_netdevice_many(&head);
4304		break;
4305	}
4306	case NETDEV_CHANGEMTU: {
4307		struct macsec_dev *m;
4308		struct macsec_rxh_data *rxd;
4309
4310		rxd = macsec_data_rtnl(real_dev);
4311		list_for_each_entry(m, &rxd->secys, secys) {
4312			struct net_device *dev = m->secy.netdev;
4313			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
4314							    macsec_extra_len(true));
4315
4316			if (dev->mtu > mtu)
4317				dev_set_mtu(dev, mtu);
4318		}
4319	}
4320	}
4321
4322	return NOTIFY_OK;
4323}
4324
4325static struct notifier_block macsec_notifier = {
4326	.notifier_call = macsec_notify,
4327};
4328
4329static int __init macsec_init(void)
4330{
4331	int err;
4332
4333	pr_info("MACsec IEEE 802.1AE\n");
4334	err = register_netdevice_notifier(&macsec_notifier);
4335	if (err)
4336		return err;
4337
4338	err = rtnl_link_register(&macsec_link_ops);
4339	if (err)
4340		goto notifier;
4341
4342	err = genl_register_family(&macsec_fam);
4343	if (err)
4344		goto rtnl;
4345
4346	return 0;
4347
4348rtnl:
4349	rtnl_link_unregister(&macsec_link_ops);
4350notifier:
4351	unregister_netdevice_notifier(&macsec_notifier);
4352	return err;
4353}
4354
4355static void __exit macsec_exit(void)
4356{
4357	genl_unregister_family(&macsec_fam);
4358	rtnl_link_unregister(&macsec_link_ops);
4359	unregister_netdevice_notifier(&macsec_notifier);
4360	rcu_barrier();
4361}
4362
4363module_init(macsec_init);
4364module_exit(macsec_exit);
4365
4366MODULE_ALIAS_RTNL_LINK("macsec");
4367MODULE_ALIAS_GENL_FAMILY("macsec");
4368
4369MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
4370MODULE_LICENSE("GPL v2");
v5.9
 115/**
 116 * struct macsec_rxh_data - rx_handler private argument
 117 * @secys: linked list of SecY's on this underlying device
 118 */
 119struct macsec_rxh_data {
 120	struct list_head secys;
 121};
 122
 123static struct macsec_dev *macsec_priv(const struct net_device *dev)
 124{
 125	return (struct macsec_dev *)netdev_priv(dev);
 126}
 127
 128static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
 129{
 130	return rcu_dereference_bh(dev->rx_handler_data);
 131}
 132
 133static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
 134{
 135	return rtnl_dereference(dev->rx_handler_data);
 136}
 137
 138struct macsec_cb {
 139	struct aead_request *req;
 140	union {
 141		struct macsec_tx_sa *tx_sa;
 142		struct macsec_rx_sa *rx_sa;
 143	};
 144	u8 assoc_num;
 145	bool valid;
 146	bool has_sci;
 147};
 148
 149static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
 150{
 151	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);
 152
 153	if (!sa || !sa->active)
 154		return NULL;
 155
 156	if (!refcount_inc_not_zero(&sa->refcnt))
 157		return NULL;
 158
 159	return sa;
 160}
 161
 162static void free_rx_sc_rcu(struct rcu_head *head)
 163{
 164	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
 165
 166	free_percpu(rx_sc->stats);
 167	kfree(rx_sc);
 168}
 169
 170static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
 171{
 172	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
 173}
 174
 175static void macsec_rxsc_put(struct macsec_rx_sc *sc)
 176{
 177	if (refcount_dec_and_test(&sc->refcnt))
 178		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
 179}
 180
 181static void free_rxsa(struct rcu_head *head)
 182{
 183	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);
 184
 185	crypto_free_aead(sa->key.tfm);
 186	free_percpu(sa->stats);
 187	kfree(sa);
 188}
 189
 190static void macsec_rxsa_put(struct macsec_rx_sa *sa)
 191{
 192	if (refcount_dec_and_test(&sa->refcnt))
 193		call_rcu(&sa->rcu, free_rxsa);
 194}
 195
 196static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
 197{
 198	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);
 199
 200	if (!sa || !sa->active)
 201		return NULL;
 202
 203	if (!refcount_inc_not_zero(&sa->refcnt))
 204		return NULL;
 205
 206	return sa;
 207}
 208
 209static void free_txsa(struct rcu_head *head)
 210{
 211	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);
 212
 213	crypto_free_aead(sa->key.tfm);
 214	free_percpu(sa->stats);
 215	kfree(sa);
 216}
 217
 218static void macsec_txsa_put(struct macsec_tx_sa *sa)
 219{
 220	if (refcount_dec_and_test(&sa->refcnt))
 221		call_rcu(&sa->rcu, free_txsa);
 222}
 223
 224static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
 225{
 226	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
 227	return (struct macsec_cb *)skb->cb;
 228}
 229
 230#define MACSEC_PORT_ES (htons(0x0001))
 231#define MACSEC_PORT_SCB (0x0000)
 232#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
 233#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)
 234
 235#define MACSEC_GCM_AES_128_SAK_LEN 16
 236#define MACSEC_GCM_AES_256_SAK_LEN 32
 237
 238#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
 239#define DEFAULT_XPN false
 240#define DEFAULT_SEND_SCI true
 241#define DEFAULT_ENCRYPT false
 242#define DEFAULT_ENCODING_SA 0
 243
 244static bool send_sci(const struct macsec_secy *secy)
 245{
 246	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
 247
 248	return tx_sc->send_sci ||
 249		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
 250}
 251
 252static sci_t make_sci(u8 *addr, __be16 port)
 253{
 254	sci_t sci;
 255
 256	memcpy(&sci, addr, ETH_ALEN);
 257	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));
 258
 259	return sci;
 260}
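/* Example: for an assumed device MAC address of 52:54:00:12:34:56 and the
 * default port (MACSEC_PORT_ES == htons(0x0001)), the resulting 64-bit SCI
 * is laid out as the bytes 52:54:00:12:34:56:00:01.
 */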
 261
 262static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
 263{
 264	sci_t sci;
 265
 266	if (sci_present)
 267		memcpy(&sci, hdr->secure_channel_id,
 268		       sizeof(hdr->secure_channel_id));
 269	else
 270		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);
 271
 272	return sci;
 273}
 274
 275static unsigned int macsec_sectag_len(bool sci_present)
 276{
 277	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
 278}
 279
 280static unsigned int macsec_hdr_len(bool sci_present)
 281{
 282	return macsec_sectag_len(sci_present) + ETH_HLEN;
 283}
 284
 285static unsigned int macsec_extra_len(bool sci_present)
 286{
 287	return macsec_sectag_len(sci_present) + sizeof(__be16);
 288}
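/* Overhead arithmetic: the SecTAG proper is MACSEC_TAG_LEN (6) bytes plus an
 * optional 8-byte SCI, so macsec_sectag_len() is 6 or 14; macsec_extra_len()
 * adds the 2-byte MACsec EtherType, giving 8 or 16 bytes of per-frame
 * expansion before the ICV is appended.
 */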
 289
 290/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
 291static void macsec_fill_sectag(struct macsec_eth_header *h,
 292			       const struct macsec_secy *secy, u32 pn,
 293			       bool sci_present)
 294{
 295	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
 296
 297	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
 298	h->eth.h_proto = htons(ETH_P_MACSEC);
 299
 300	if (sci_present) {
 301		h->tci_an |= MACSEC_TCI_SC;
 302		memcpy(&h->secure_channel_id, &secy->sci,
 303		       sizeof(h->secure_channel_id));
 304	} else {
 305		if (tx_sc->end_station)
 306			h->tci_an |= MACSEC_TCI_ES;
 307		if (tx_sc->scb)
 308			h->tci_an |= MACSEC_TCI_SCB;
 309	}
 310
 311	h->packet_number = htonl(pn);
 312
 313	/* with GCM, C/E clear for !encrypt, both set for encrypt */
 314	if (tx_sc->encrypt)
 315		h->tci_an |= MACSEC_TCI_CONFID;
 316	else if (secy->icv_len != DEFAULT_ICV_LEN)
 317		h->tci_an |= MACSEC_TCI_C;
 318
 319	h->tci_an |= tx_sc->encoding_sa;
 320}
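/* TCI/AN example: an encrypting SecY that includes the SCI and transmits on
 * AN 0 produces tci_an = MACSEC_TCI_SC | MACSEC_TCI_E | MACSEC_TCI_C = 0x2c;
 * integrity-only with the default ICV and no SCI leaves E/C clear and only
 * carries ES/SCB (if set) plus the AN in the low two bits.
 */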
 321
 322static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
 323{
 324	if (data_len < MIN_NON_SHORT_LEN)
 325		h->short_length = data_len;
 326}
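/* E.g. a 20-byte secure data field yields short_length == 20, while payloads
 * of MIN_NON_SHORT_LEN (48) bytes or more leave the field at 0 ("not short").
 */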
 327
  328/* Checks if a MACsec interface is being offloaded to a hardware engine */
 329static bool macsec_is_offloaded(struct macsec_dev *macsec)
 330{
 331	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
 332	    macsec->offload == MACSEC_OFFLOAD_PHY)
 333		return true;
 334
 335	return false;
 336}
 337
 338/* Checks if underlying layers implement MACsec offloading functions. */
 339static bool macsec_check_offload(enum macsec_offload offload,
 340				 struct macsec_dev *macsec)
 341{
 342	if (!macsec || !macsec->real_dev)
 343		return false;
 344
 345	if (offload == MACSEC_OFFLOAD_PHY)
 346		return macsec->real_dev->phydev &&
 347		       macsec->real_dev->phydev->macsec_ops;
 348	else if (offload == MACSEC_OFFLOAD_MAC)
 349		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
 350		       macsec->real_dev->macsec_ops;
 351
 352	return false;
 353}
 354
 355static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
 356						 struct macsec_dev *macsec,
 357						 struct macsec_context *ctx)
 358{
 359	if (ctx) {
 360		memset(ctx, 0, sizeof(*ctx));
 361		ctx->offload = offload;
 362
 363		if (offload == MACSEC_OFFLOAD_PHY)
 364			ctx->phydev = macsec->real_dev->phydev;
 365		else if (offload == MACSEC_OFFLOAD_MAC)
 366			ctx->netdev = macsec->real_dev;
 367	}
 368
 369	if (offload == MACSEC_OFFLOAD_PHY)
 370		return macsec->real_dev->phydev->macsec_ops;
 371	else
 372		return macsec->real_dev->macsec_ops;
 373}
 374
 375/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 376 * context device reference if provided.
 377 */
 378static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
 379					       struct macsec_context *ctx)
 380{
 381	if (!macsec_check_offload(macsec->offload, macsec))
 382		return NULL;
 383
 384	return __macsec_get_ops(macsec->offload, macsec, ctx);
 385}
 386
 387/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
 388static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
 389{
 390	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
 391	int len = skb->len - 2 * ETH_ALEN;
 392	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;
 393
 394	/* a) It comprises at least 17 octets */
 395	if (skb->len <= 16)
 396		return false;
 397
 398	/* b) MACsec EtherType: already checked */
 399
 400	/* c) V bit is clear */
 401	if (h->tci_an & MACSEC_TCI_VERSION)
 402		return false;
 403
 404	/* d) ES or SCB => !SC */
 405	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
 406	    (h->tci_an & MACSEC_TCI_SC))
 407		return false;
 408
 409	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
 410	if (h->unused)
 411		return false;
 412
  413	/* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
 414	if (!h->packet_number && !xpn)
 415		return false;
 416
 417	/* length check, f) g) h) i) */
 418	if (h->short_length)
 419		return len == extra_len + h->short_length;
 420	return len >= extra_len + MIN_NON_SHORT_LEN;
 421}
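/* Length example (assuming a 16-byte ICV and SCI present): extra_len is
 * 16 + 16 = 32, and len excludes the two 6-byte MAC addresses; a frame
 * carrying 10 bytes of secure data must then have len == 32 + 10 exactly,
 * whereas short_length == 0 requires len >= 32 + 48.
 */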
 422
 423#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
 424#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
 425
 426static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
 427			       salt_t salt)
 428{
 429	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;
 430
 431	gcm_iv->ssci = ssci ^ salt.ssci;
 432	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
 433}
 434
 435static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
 436{
 437	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;
 438
 439	gcm_iv->sci = sci;
 440	gcm_iv->pn = htonl(pn);
 441}
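/* IV layouts: without XPN the 12-byte GCM IV is the 8-byte SCI followed by
 * the 32-bit PN; with XPN it is (SSCI ^ salt.ssci) || (PN64 ^ salt.pn) per
 * IEEE 802.1AEbw-2013. E.g. (assumed values) ssci 0x00000001 with salt.ssci
 * 0xdeadbeef makes the first IV word 0xdeadbeee.
 */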
 442
 443static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
 444{
 445	return (struct macsec_eth_header *)skb_mac_header(skb);
 446}
 447
 448static sci_t dev_to_sci(struct net_device *dev, __be16 port)
 449{
 450	return make_sci(dev->dev_addr, port);
 451}
 452
 453static void __macsec_pn_wrapped(struct macsec_secy *secy,
 454				struct macsec_tx_sa *tx_sa)
 455{
 456	pr_debug("PN wrapped, transitioning to !oper\n");
 457	tx_sa->active = false;
 458	if (secy->protect_frames)
 459		secy->operational = false;
 460}
 461
 462void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
 463{
 464	spin_lock_bh(&tx_sa->lock);
 465	__macsec_pn_wrapped(secy, tx_sa);
 466	spin_unlock_bh(&tx_sa->lock);
 467}
 468EXPORT_SYMBOL_GPL(macsec_pn_wrapped);
 469
 470static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
 471			    struct macsec_secy *secy)
 472{
 473	pn_t pn;
 474
 475	spin_lock_bh(&tx_sa->lock);
 476
 477	pn = tx_sa->next_pn_halves;
 478	if (secy->xpn)
 479		tx_sa->next_pn++;
 480	else
 481		tx_sa->next_pn_halves.lower++;
 482
 483	if (tx_sa->next_pn == 0)
 484		__macsec_pn_wrapped(secy, tx_sa);
 485	spin_unlock_bh(&tx_sa->lock);
 486
 487	return pn;
 488}
 489
 490static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
 491{
 492	struct macsec_dev *macsec = netdev_priv(dev);
 493
 494	skb->dev = macsec->real_dev;
 495	skb_reset_mac_header(skb);
 496	skb->protocol = eth_hdr(skb)->h_proto;
 497}
 498
 499static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
 500			    struct macsec_tx_sa *tx_sa)
 501{
 502	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
 503
 504	u64_stats_update_begin(&txsc_stats->syncp);
 505	if (tx_sc->encrypt) {
 506		txsc_stats->stats.OutOctetsEncrypted += skb->len;
 507		txsc_stats->stats.OutPktsEncrypted++;
 508		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
 509	} else {
 510		txsc_stats->stats.OutOctetsProtected += skb->len;
 511		txsc_stats->stats.OutPktsProtected++;
 512		this_cpu_inc(tx_sa->stats->OutPktsProtected);
 513	}
 514	u64_stats_update_end(&txsc_stats->syncp);
 515}
 516
 517static void count_tx(struct net_device *dev, int ret, int len)
 518{
 519	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
 520		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
 521
 522		u64_stats_update_begin(&stats->syncp);
 523		stats->tx_packets++;
 524		stats->tx_bytes += len;
 525		u64_stats_update_end(&stats->syncp);
 526	}
 527}
 528
 529static void macsec_encrypt_done(struct crypto_async_request *base, int err)
 530{
 531	struct sk_buff *skb = base->data;
 532	struct net_device *dev = skb->dev;
 533	struct macsec_dev *macsec = macsec_priv(dev);
 534	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
 535	int len, ret;
 536
 537	aead_request_free(macsec_skb_cb(skb)->req);
 538
 539	rcu_read_lock_bh();
 540	macsec_encrypt_finish(skb, dev);
 541	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
 542	len = skb->len;
 543	ret = dev_queue_xmit(skb);
 544	count_tx(dev, ret, len);
 545	rcu_read_unlock_bh();
 546
 547	macsec_txsa_put(sa);
 548	dev_put(dev);
 549}
 550
 551static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
 552					     unsigned char **iv,
 553					     struct scatterlist **sg,
 554					     int num_frags)
 555{
 556	size_t size, iv_offset, sg_offset;
 557	struct aead_request *req;
 558	void *tmp;
 559
 560	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
 561	iv_offset = size;
 562	size += GCM_AES_IV_LEN;
 563
 564	size = ALIGN(size, __alignof__(struct scatterlist));
 565	sg_offset = size;
 566	size += sizeof(struct scatterlist) * num_frags;
 567
 568	tmp = kmalloc(size, GFP_ATOMIC);
 569	if (!tmp)
 570		return NULL;
 571
 572	*iv = (unsigned char *)(tmp + iv_offset);
 573	*sg = (struct scatterlist *)(tmp + sg_offset);
 574	req = tmp;
 575
 576	aead_request_set_tfm(req, tfm);
 577
 578	return req;
 579}
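/* One GFP_ATOMIC allocation carries the aead_request, the 12-byte IV and the
 * scatterlist: the IV sits right after the request (at iv_offset ==
 * sizeof(struct aead_request) + crypto_aead_reqsize(tfm)) and the scatterlist
 * starts at the next __alignof__(struct scatterlist) boundary, keeping the
 * encrypt/decrypt hot paths to a single kmalloc().
 */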
 580
 581static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 582				      struct net_device *dev)
 583{
 584	int ret;
 585	struct scatterlist *sg;
 586	struct sk_buff *trailer;
 587	unsigned char *iv;
 588	struct ethhdr *eth;
 589	struct macsec_eth_header *hh;
 590	size_t unprotected_len;
 591	struct aead_request *req;
 592	struct macsec_secy *secy;
 593	struct macsec_tx_sc *tx_sc;
 594	struct macsec_tx_sa *tx_sa;
 595	struct macsec_dev *macsec = macsec_priv(dev);
 596	bool sci_present;
 597	pn_t pn;
 598
 599	secy = &macsec->secy;
 600	tx_sc = &secy->tx_sc;
 601
 602	/* 10.5.1 TX SA assignment */
 603	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
 604	if (!tx_sa) {
 605		secy->operational = false;
 606		kfree_skb(skb);
 607		return ERR_PTR(-EINVAL);
 608	}
 609
 610	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
 611		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
 612		struct sk_buff *nskb = skb_copy_expand(skb,
 613						       MACSEC_NEEDED_HEADROOM,
 614						       MACSEC_NEEDED_TAILROOM,
 615						       GFP_ATOMIC);
 616		if (likely(nskb)) {
 617			consume_skb(skb);
 618			skb = nskb;
 619		} else {
 620			macsec_txsa_put(tx_sa);
 621			kfree_skb(skb);
 622			return ERR_PTR(-ENOMEM);
 623		}
 624	} else {
 625		skb = skb_unshare(skb, GFP_ATOMIC);
 626		if (!skb) {
 627			macsec_txsa_put(tx_sa);
 628			return ERR_PTR(-ENOMEM);
 629		}
 630	}
 631
 632	unprotected_len = skb->len;
 633	eth = eth_hdr(skb);
 634	sci_present = send_sci(secy);
 635	hh = skb_push(skb, macsec_extra_len(sci_present));
 636	memmove(hh, eth, 2 * ETH_ALEN);
 637
 638	pn = tx_sa_update_pn(tx_sa, secy);
 639	if (pn.full64 == 0) {
 640		macsec_txsa_put(tx_sa);
 641		kfree_skb(skb);
 642		return ERR_PTR(-ENOLINK);
 643	}
 644	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
 645	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
 646
 647	skb_put(skb, secy->icv_len);
 648
 649	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
 650		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
 651
 652		u64_stats_update_begin(&secy_stats->syncp);
 653		secy_stats->stats.OutPktsTooLong++;
 654		u64_stats_update_end(&secy_stats->syncp);
 655
 656		macsec_txsa_put(tx_sa);
 657		kfree_skb(skb);
 658		return ERR_PTR(-EINVAL);
 659	}
 660
 661	ret = skb_cow_data(skb, 0, &trailer);
 662	if (unlikely(ret < 0)) {
 663		macsec_txsa_put(tx_sa);
 664		kfree_skb(skb);
 665		return ERR_PTR(ret);
 666	}
 667
 668	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
 669	if (!req) {
 670		macsec_txsa_put(tx_sa);
 671		kfree_skb(skb);
 672		return ERR_PTR(-ENOMEM);
 673	}
 674
 675	if (secy->xpn)
 676		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
 677	else
 678		macsec_fill_iv(iv, secy->sci, pn.lower);
 679
 680	sg_init_table(sg, ret);
 681	ret = skb_to_sgvec(skb, sg, 0, skb->len);
 682	if (unlikely(ret < 0)) {
 683		aead_request_free(req);
 684		macsec_txsa_put(tx_sa);
 685		kfree_skb(skb);
 686		return ERR_PTR(ret);
 687	}
 688
 689	if (tx_sc->encrypt) {
 690		int len = skb->len - macsec_hdr_len(sci_present) -
 691			  secy->icv_len;
 692		aead_request_set_crypt(req, sg, sg, len, iv);
 693		aead_request_set_ad(req, macsec_hdr_len(sci_present));
 694	} else {
 695		aead_request_set_crypt(req, sg, sg, 0, iv);
 696		aead_request_set_ad(req, skb->len - secy->icv_len);
 697	}
 698
 699	macsec_skb_cb(skb)->req = req;
 700	macsec_skb_cb(skb)->tx_sa = tx_sa;
 701	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
 702
 703	dev_hold(skb->dev);
 704	ret = crypto_aead_encrypt(req);
 705	if (ret == -EINPROGRESS) {
 706		return ERR_PTR(ret);
 707	} else if (ret != 0) {
 708		dev_put(skb->dev);
 709		kfree_skb(skb);
 710		aead_request_free(req);
 711		macsec_txsa_put(tx_sa);
 712		return ERR_PTR(-EINVAL);
 713	}
 714
 715	dev_put(skb->dev);
 716	aead_request_free(req);
 717	macsec_txsa_put(tx_sa);
 718
 719	return skb;
 720}
 721
 722static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
 723{
 724	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
 725	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
 726	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
 727	u32 lowest_pn = 0;
 728
 729	spin_lock(&rx_sa->lock);
 730	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
 731		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;
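	/* Example: with replay_window == 32 and next_pn_halves.lower == 1000,
	 * lowest_pn becomes 968 and any smaller PN (in the same 2^31 half,
	 * for XPN) is counted as InPktsLate below; while next_pn is still
	 * within the window, lowest_pn stays 0 and nothing is rejected.
	 */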
 732
 733	/* Now perform replay protection check again
 734	 * (see IEEE 802.1AE-2006 figure 10-5)
 735	 */
 736	if (secy->replay_protect && pn < lowest_pn &&
 737	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
 738		spin_unlock(&rx_sa->lock);
 739		u64_stats_update_begin(&rxsc_stats->syncp);
 740		rxsc_stats->stats.InPktsLate++;
 741		u64_stats_update_end(&rxsc_stats->syncp);
 742		return false;
 743	}
 744
 745	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
 746		u64_stats_update_begin(&rxsc_stats->syncp);
 747		if (hdr->tci_an & MACSEC_TCI_E)
 748			rxsc_stats->stats.InOctetsDecrypted += skb->len;
 749		else
 750			rxsc_stats->stats.InOctetsValidated += skb->len;
 751		u64_stats_update_end(&rxsc_stats->syncp);
 752	}
 753
 754	if (!macsec_skb_cb(skb)->valid) {
 755		spin_unlock(&rx_sa->lock);
 756
 757		/* 10.6.5 */
 758		if (hdr->tci_an & MACSEC_TCI_C ||
 759		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
 760			u64_stats_update_begin(&rxsc_stats->syncp);
 761			rxsc_stats->stats.InPktsNotValid++;
 762			u64_stats_update_end(&rxsc_stats->syncp);
 763			return false;
 764		}
 765
 766		u64_stats_update_begin(&rxsc_stats->syncp);
 767		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
 768			rxsc_stats->stats.InPktsInvalid++;
 769			this_cpu_inc(rx_sa->stats->InPktsInvalid);
 770		} else if (pn < lowest_pn) {
 771			rxsc_stats->stats.InPktsDelayed++;
 772		} else {
 773			rxsc_stats->stats.InPktsUnchecked++;
 774		}
 775		u64_stats_update_end(&rxsc_stats->syncp);
 776	} else {
 777		u64_stats_update_begin(&rxsc_stats->syncp);
 778		if (pn < lowest_pn) {
 779			rxsc_stats->stats.InPktsDelayed++;
 780		} else {
 781			rxsc_stats->stats.InPktsOK++;
 782			this_cpu_inc(rx_sa->stats->InPktsOK);
 783		}
 784		u64_stats_update_end(&rxsc_stats->syncp);
 785
  786		// use "pn + 1 >" instead of "pn >=" so that pn overflow works with xpn
 787		if (pn + 1 > rx_sa->next_pn_halves.lower) {
 788			rx_sa->next_pn_halves.lower = pn + 1;
 789		} else if (secy->xpn &&
 790			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
 791			rx_sa->next_pn_halves.upper++;
 792			rx_sa->next_pn_halves.lower = pn + 1;
 793		}
 794
 795		spin_unlock(&rx_sa->lock);
 796	}
 797
 798	return true;
 799}
 800
 801static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
 802{
 803	skb->pkt_type = PACKET_HOST;
 804	skb->protocol = eth_type_trans(skb, dev);
 805
 806	skb_reset_network_header(skb);
 807	if (!skb_transport_header_was_set(skb))
 808		skb_reset_transport_header(skb);
 809	skb_reset_mac_len(skb);
 810}
 811
 812static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
 813{
 814	skb->ip_summed = CHECKSUM_NONE;
 815	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
 816	skb_pull(skb, hdr_len);
 817	pskb_trim_unique(skb, skb->len - icv_len);
 818}
 819
 820static void count_rx(struct net_device *dev, int len)
 821{
 822	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
 823
 824	u64_stats_update_begin(&stats->syncp);
 825	stats->rx_packets++;
 826	stats->rx_bytes += len;
 827	u64_stats_update_end(&stats->syncp);
 828}
 829
 830static void macsec_decrypt_done(struct crypto_async_request *base, int err)
 831{
 832	struct sk_buff *skb = base->data;
 833	struct net_device *dev = skb->dev;
 834	struct macsec_dev *macsec = macsec_priv(dev);
 835	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
 836	struct macsec_rx_sc *rx_sc = rx_sa->sc;
 837	int len;
 838	u32 pn;
 839
 840	aead_request_free(macsec_skb_cb(skb)->req);
 841
 842	if (!err)
 843		macsec_skb_cb(skb)->valid = true;
 844
 845	rcu_read_lock_bh();
 846	pn = ntohl(macsec_ethhdr(skb)->packet_number);
 847	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
 848		rcu_read_unlock_bh();
 849		kfree_skb(skb);
 850		goto out;
 851	}
 852
 853	macsec_finalize_skb(skb, macsec->secy.icv_len,
 854			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
 855	macsec_reset_skb(skb, macsec->secy.netdev);
 856
 857	len = skb->len;
 858	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
 859		count_rx(dev, len);
 860
 861	rcu_read_unlock_bh();
 862
 863out:
 864	macsec_rxsa_put(rx_sa);
 865	macsec_rxsc_put(rx_sc);
 866	dev_put(dev);
 867}
 868
 869static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 870				      struct net_device *dev,
 871				      struct macsec_rx_sa *rx_sa,
 872				      sci_t sci,
 873				      struct macsec_secy *secy)
 874{
 875	int ret;
 876	struct scatterlist *sg;
 877	struct sk_buff *trailer;
 878	unsigned char *iv;
 879	struct aead_request *req;
 880	struct macsec_eth_header *hdr;
 881	u32 hdr_pn;
 882	u16 icv_len = secy->icv_len;
 883
 884	macsec_skb_cb(skb)->valid = false;
 885	skb = skb_share_check(skb, GFP_ATOMIC);
 886	if (!skb)
 887		return ERR_PTR(-ENOMEM);
 888
 889	ret = skb_cow_data(skb, 0, &trailer);
 890	if (unlikely(ret < 0)) {
 891		kfree_skb(skb);
 892		return ERR_PTR(ret);
 893	}
 894	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
 895	if (!req) {
 896		kfree_skb(skb);
 897		return ERR_PTR(-ENOMEM);
 898	}
 899
 900	hdr = (struct macsec_eth_header *)skb->data;
 901	hdr_pn = ntohl(hdr->packet_number);
 902
 903	if (secy->xpn) {
 904		pn_t recovered_pn = rx_sa->next_pn_halves;
 905
 906		recovered_pn.lower = hdr_pn;
 907		if (hdr_pn < rx_sa->next_pn_halves.lower &&
 908		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
 909			recovered_pn.upper++;
 910
 911		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
 912				   rx_sa->key.salt);
 913	} else {
 914		macsec_fill_iv(iv, sci, hdr_pn);
 915	}
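	/* XPN recovery example (assumed values): next_pn_halves = { .upper = 1,
	 * .lower = 0xffffff00 } with hdr_pn == 5 fall in different 2^31
	 * halves, so upper is bumped and the recovered 64-bit PN used in the
	 * IV is 0x200000005.
	 */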
 916
 917	sg_init_table(sg, ret);
 918	ret = skb_to_sgvec(skb, sg, 0, skb->len);
 919	if (unlikely(ret < 0)) {
 920		aead_request_free(req);
 921		kfree_skb(skb);
 922		return ERR_PTR(ret);
 923	}
 924
 925	if (hdr->tci_an & MACSEC_TCI_E) {
 926		/* confidentiality: ethernet + macsec header
 927		 * authenticated, encrypted payload
 928		 */
 929		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);
 930
 931		aead_request_set_crypt(req, sg, sg, len, iv);
 932		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
 933		skb = skb_unshare(skb, GFP_ATOMIC);
 934		if (!skb) {
 935			aead_request_free(req);
 936			return ERR_PTR(-ENOMEM);
 937		}
 938	} else {
 939		/* integrity only: all headers + data authenticated */
 940		aead_request_set_crypt(req, sg, sg, icv_len, iv);
 941		aead_request_set_ad(req, skb->len - icv_len);
 942	}
 943
 944	macsec_skb_cb(skb)->req = req;
 945	skb->dev = dev;
 946	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);
 947
 948	dev_hold(dev);
 949	ret = crypto_aead_decrypt(req);
 950	if (ret == -EINPROGRESS) {
 951		return ERR_PTR(ret);
 952	} else if (ret != 0) {
 953		/* decryption/authentication failed
 954		 * 10.6 if validateFrames is disabled, deliver anyway
 955		 */
 956		if (ret != -EBADMSG) {
 957			kfree_skb(skb);
 958			skb = ERR_PTR(ret);
 959		}
 960	} else {
 961		macsec_skb_cb(skb)->valid = true;
 962	}
 963	dev_put(dev);
 964
 965	aead_request_free(req);
 966
 967	return skb;
 968}
 969
 970static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
 971{
 972	struct macsec_rx_sc *rx_sc;
 973
 974	for_each_rxsc(secy, rx_sc) {
 975		if (rx_sc->sci == sci)
 976			return rx_sc;
 977	}
 978
 979	return NULL;
 980}
 981
 982static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
 983{
 984	struct macsec_rx_sc *rx_sc;
 985
 986	for_each_rxsc_rtnl(secy, rx_sc) {
 987		if (rx_sc->sci == sci)
 988			return rx_sc;
 989	}
 990
 991	return NULL;
 992}
 993
 994static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
 995{
 996	/* Deliver to the uncontrolled port by default */
 997	enum rx_handler_result ret = RX_HANDLER_PASS;
 998	struct ethhdr *hdr = eth_hdr(skb);
 999	struct macsec_rxh_data *rxd;
1000	struct macsec_dev *macsec;
1001
1002	rcu_read_lock();
1003	rxd = macsec_data_rcu(skb->dev);
1004
1005	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1006		struct sk_buff *nskb;
1007		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
1008		struct net_device *ndev = macsec->secy.netdev;
1009
1010		/* If h/w offloading is enabled, HW decodes frames and strips
1011		 * the SecTAG, so we have to deduce which port to deliver to.
1012		 */
1013		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
1014			if (ether_addr_equal_64bits(hdr->h_dest,
1015						    ndev->dev_addr)) {
1016				/* exact match, divert skb to this port */
1017				skb->dev = ndev;
1018				skb->pkt_type = PACKET_HOST;
1019				ret = RX_HANDLER_ANOTHER;
1020				goto out;
1021			} else if (is_multicast_ether_addr_64bits(
1022					   hdr->h_dest)) {
1023				/* multicast frame, deliver on this port too */
1024				nskb = skb_clone(skb, GFP_ATOMIC);
1025				if (!nskb)
1026					break;
1027
1028				nskb->dev = ndev;
1029				if (ether_addr_equal_64bits(hdr->h_dest,
1030							    ndev->broadcast))
1031					nskb->pkt_type = PACKET_BROADCAST;
1032				else
1033					nskb->pkt_type = PACKET_MULTICAST;
1034
1035				netif_rx(nskb);
1036			}
1037			continue;
1038		}
1039
1040		/* 10.6 If the management control validateFrames is not
1041		 * Strict, frames without a SecTAG are received, counted, and
1042		 * delivered to the Controlled Port
1043		 */
1044		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
1045			u64_stats_update_begin(&secy_stats->syncp);
1046			secy_stats->stats.InPktsNoTag++;
1047			u64_stats_update_end(&secy_stats->syncp);
1048			continue;
1049		}
1050
1051		/* deliver on this port */
1052		nskb = skb_clone(skb, GFP_ATOMIC);
1053		if (!nskb)
1054			break;
1055
1056		nskb->dev = ndev;
1057
1058		if (netif_rx(nskb) == NET_RX_SUCCESS) {
1059			u64_stats_update_begin(&secy_stats->syncp);
1060			secy_stats->stats.InPktsUntagged++;
1061			u64_stats_update_end(&secy_stats->syncp);
1062		}
1063	}
1064
1065out:
1066	rcu_read_unlock();
1067	return ret;
1068}
1069
1070static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
1071{
1072	struct sk_buff *skb = *pskb;
1073	struct net_device *dev = skb->dev;
1074	struct macsec_eth_header *hdr;
1075	struct macsec_secy *secy = NULL;
1076	struct macsec_rx_sc *rx_sc;
1077	struct macsec_rx_sa *rx_sa;
1078	struct macsec_rxh_data *rxd;
1079	struct macsec_dev *macsec;
1080	unsigned int len;
1081	sci_t sci;
1082	u32 hdr_pn;
1083	bool cbit;
1084	struct pcpu_rx_sc_stats *rxsc_stats;
1085	struct pcpu_secy_stats *secy_stats;
1086	bool pulled_sci;
1087	int ret;
1088
1089	if (skb_headroom(skb) < ETH_HLEN)
1090		goto drop_direct;
1091
1092	hdr = macsec_ethhdr(skb);
1093	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
1094		return handle_not_macsec(skb);
1095
1096	skb = skb_unshare(skb, GFP_ATOMIC);
1097	*pskb = skb;
1098	if (!skb)
1099		return RX_HANDLER_CONSUMED;
1100
1101	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
1102	if (!pulled_sci) {
1103		if (!pskb_may_pull(skb, macsec_extra_len(false)))
1104			goto drop_direct;
1105	}
1106
1107	hdr = macsec_ethhdr(skb);
1108
1109	/* Frames with a SecTAG that has the TCI E bit set but the C
1110	 * bit clear are discarded, as this reserved encoding is used
1111	 * to identify frames with a SecTAG that are not to be
1112	 * delivered to the Controlled Port.
1113	 */
1114	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
1115		return RX_HANDLER_PASS;
1116
1117	/* now, pull the extra length */
1118	if (hdr->tci_an & MACSEC_TCI_SC) {
1119		if (!pulled_sci)
1120			goto drop_direct;
1121	}
1122
1123	/* ethernet header is part of crypto processing */
1124	skb_push(skb, ETH_HLEN);
1125
1126	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
1127	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
1128	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);
1129
1130	rcu_read_lock();
1131	rxd = macsec_data_rcu(skb->dev);
1132
1133	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1134		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
1135
1136		sc = sc ? macsec_rxsc_get(sc) : NULL;
1137
1138		if (sc) {
1139			secy = &macsec->secy;
1140			rx_sc = sc;
1141			break;
1142		}
1143	}
1144
1145	if (!secy)
1146		goto nosci;
1147
1148	dev = secy->netdev;
1149	macsec = macsec_priv(dev);
1150	secy_stats = this_cpu_ptr(macsec->stats);
1151	rxsc_stats = this_cpu_ptr(rx_sc->stats);
1152
1153	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
1154		u64_stats_update_begin(&secy_stats->syncp);
1155		secy_stats->stats.InPktsBadTag++;
1156		u64_stats_update_end(&secy_stats->syncp);
1157		goto drop_nosa;
1158	}
1159
1160	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
1161	if (!rx_sa) {
1162		/* 10.6.1 if the SA is not in use */
1163
1164		/* If validateFrames is Strict or the C bit in the
1165		 * SecTAG is set, discard
1166		 */
1167		if (hdr->tci_an & MACSEC_TCI_C ||
1168		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
1169			u64_stats_update_begin(&rxsc_stats->syncp);
1170			rxsc_stats->stats.InPktsNotUsingSA++;
1171			u64_stats_update_end(&rxsc_stats->syncp);
1172			goto drop_nosa;
1173		}
1174
1175		/* not Strict, the frame (with the SecTAG and ICV
1176		 * removed) is delivered to the Controlled Port.
1177		 */
1178		u64_stats_update_begin(&rxsc_stats->syncp);
1179		rxsc_stats->stats.InPktsUnusedSA++;
1180		u64_stats_update_end(&rxsc_stats->syncp);
1181		goto deliver;
1182	}
1183
1184	/* First, PN check to avoid decrypting obviously wrong packets */
1185	hdr_pn = ntohl(hdr->packet_number);
1186	if (secy->replay_protect) {
1187		bool late;
1188
1189		spin_lock(&rx_sa->lock);
1190		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
1191		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);
1192
1193		if (secy->xpn)
1194			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
1195		spin_unlock(&rx_sa->lock);
1196
1197		if (late) {
1198			u64_stats_update_begin(&rxsc_stats->syncp);
1199			rxsc_stats->stats.InPktsLate++;
1200			u64_stats_update_end(&rxsc_stats->syncp);
1201			goto drop;
1202		}
1203	}
1204
1205	macsec_skb_cb(skb)->rx_sa = rx_sa;
1206
1207	/* Disabled && !changed text => skip validation */
1208	if (hdr->tci_an & MACSEC_TCI_C ||
1209	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
1210		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
1211
1212	if (IS_ERR(skb)) {
1213		/* the decrypt callback needs the reference */
1214		if (PTR_ERR(skb) != -EINPROGRESS) {
1215			macsec_rxsa_put(rx_sa);
1216			macsec_rxsc_put(rx_sc);
1217		}
1218		rcu_read_unlock();
1219		*pskb = NULL;
1220		return RX_HANDLER_CONSUMED;
1221	}
1222
1223	if (!macsec_post_decrypt(skb, secy, hdr_pn))
1224		goto drop;
1225
1226deliver:
1227	macsec_finalize_skb(skb, secy->icv_len,
1228			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
1229	macsec_reset_skb(skb, secy->netdev);
1230
1231	if (rx_sa)
1232		macsec_rxsa_put(rx_sa);
1233	macsec_rxsc_put(rx_sc);
1234
1235	skb_orphan(skb);
1236	len = skb->len;
1237	ret = gro_cells_receive(&macsec->gro_cells, skb);
1238	if (ret == NET_RX_SUCCESS)
1239		count_rx(dev, len);
1240	else
1241		macsec->secy.netdev->stats.rx_dropped++;
1242
1243	rcu_read_unlock();
1244
1245	*pskb = NULL;
1246	return RX_HANDLER_CONSUMED;
1247
1248drop:
1249	macsec_rxsa_put(rx_sa);
1250drop_nosa:
1251	macsec_rxsc_put(rx_sc);
1252	rcu_read_unlock();
1253drop_direct:
1254	kfree_skb(skb);
1255	*pskb = NULL;
1256	return RX_HANDLER_CONSUMED;
1257
1258nosci:
1259	/* 10.6.1 if the SC is not found */
1260	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
1261	if (!cbit)
1262		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
1263				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
1264
1265	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1266		struct sk_buff *nskb;
1267
1268		secy_stats = this_cpu_ptr(macsec->stats);
1269
1270		/* If validateFrames is Strict or the C bit in the
1271		 * SecTAG is set, discard
1272		 */
1273		if (cbit ||
1274		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
1275			u64_stats_update_begin(&secy_stats->syncp);
1276			secy_stats->stats.InPktsNoSCI++;
1277			u64_stats_update_end(&secy_stats->syncp);
1278			continue;
1279		}
1280
1281		/* not strict, the frame (with the SecTAG and ICV
1282		 * removed) is delivered to the Controlled Port.
1283		 */
1284		nskb = skb_clone(skb, GFP_ATOMIC);
1285		if (!nskb)
1286			break;
1287
1288		macsec_reset_skb(nskb, macsec->secy.netdev);
1289
1290		ret = netif_rx(nskb);
1291		if (ret == NET_RX_SUCCESS) {
1292			u64_stats_update_begin(&secy_stats->syncp);
1293			secy_stats->stats.InPktsUnknownSCI++;
1294			u64_stats_update_end(&secy_stats->syncp);
1295		} else {
1296			macsec->secy.netdev->stats.rx_dropped++;
1297		}
1298	}
1299
1300	rcu_read_unlock();
1301	*pskb = skb;
1302	return RX_HANDLER_PASS;
1303}
1304
1305static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
1306{
1307	struct crypto_aead *tfm;
1308	int ret;
1309
1310	/* Pick a sync gcm(aes) cipher to ensure order is preserved. */
1311	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
1312
1313	if (IS_ERR(tfm))
1314		return tfm;
1315
1316	ret = crypto_aead_setkey(tfm, key, key_len);
1317	if (ret < 0)
1318		goto fail;
1319
1320	ret = crypto_aead_setauthsize(tfm, icv_len);
1321	if (ret < 0)
1322		goto fail;
1323
1324	return tfm;
1325fail:
1326	crypto_free_aead(tfm);
1327	return ERR_PTR(ret);
1328}
1329
1330static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
1331		      int icv_len)
1332{
1333	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
1334	if (!rx_sa->stats)
1335		return -ENOMEM;
1336
1337	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
1338	if (IS_ERR(rx_sa->key.tfm)) {
1339		free_percpu(rx_sa->stats);
1340		return PTR_ERR(rx_sa->key.tfm);
1341	}
1342
1343	rx_sa->ssci = MACSEC_UNDEF_SSCI;
1344	rx_sa->active = false;
1345	rx_sa->next_pn = 1;
1346	refcount_set(&rx_sa->refcnt, 1);
1347	spin_lock_init(&rx_sa->lock);
1348
1349	return 0;
1350}
1351
1352static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
1353{
1354	rx_sa->active = false;
1355
1356	macsec_rxsa_put(rx_sa);
1357}
1358
1359static void free_rx_sc(struct macsec_rx_sc *rx_sc)
1360{
1361	int i;
1362
1363	for (i = 0; i < MACSEC_NUM_AN; i++) {
1364		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);
1365
1366		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
1367		if (sa)
1368			clear_rx_sa(sa);
1369	}
1370
1371	macsec_rxsc_put(rx_sc);
1372}
1373
1374static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
1375{
1376	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;
1377
1378	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
1379	     rx_sc;
1380	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
1381		if (rx_sc->sci == sci) {
1382			if (rx_sc->active)
1383				secy->n_rx_sc--;
1384			rcu_assign_pointer(*rx_scp, rx_sc->next);
1385			return rx_sc;
1386		}
1387	}
1388
1389	return NULL;
1390}
1391
1392static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
1393{
1394	struct macsec_rx_sc *rx_sc;
1395	struct macsec_dev *macsec;
1396	struct net_device *real_dev = macsec_priv(dev)->real_dev;
1397	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
1398	struct macsec_secy *secy;
1399
1400	list_for_each_entry(macsec, &rxd->secys, secys) {
1401		if (find_rx_sc_rtnl(&macsec->secy, sci))
1402			return ERR_PTR(-EEXIST);
1403	}
1404
1405	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
1406	if (!rx_sc)
1407		return ERR_PTR(-ENOMEM);
1408
1409	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
1410	if (!rx_sc->stats) {
1411		kfree(rx_sc);
1412		return ERR_PTR(-ENOMEM);
1413	}
1414
1415	rx_sc->sci = sci;
1416	rx_sc->active = true;
1417	refcount_set(&rx_sc->refcnt, 1);
1418
1419	secy = &macsec_priv(dev)->secy;
1420	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
1421	rcu_assign_pointer(secy->rx_sc, rx_sc);
1422
1423	if (rx_sc->active)
1424		secy->n_rx_sc++;
1425
1426	return rx_sc;
1427}
1428
1429static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
1430		      int icv_len)
1431{
1432	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
1433	if (!tx_sa->stats)
1434		return -ENOMEM;
1435
1436	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
1437	if (IS_ERR(tx_sa->key.tfm)) {
1438		free_percpu(tx_sa->stats);
1439		return PTR_ERR(tx_sa->key.tfm);
1440	}
1441
1442	tx_sa->ssci = MACSEC_UNDEF_SSCI;
1443	tx_sa->active = false;
1444	refcount_set(&tx_sa->refcnt, 1);
1445	spin_lock_init(&tx_sa->lock);
1446
1447	return 0;
1448}
1449
1450static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
1451{
1452	tx_sa->active = false;
1453
1454	macsec_txsa_put(tx_sa);
1455}
1456
1457static struct genl_family macsec_fam;
1458
1459static struct net_device *get_dev_from_nl(struct net *net,
1460					  struct nlattr **attrs)
1461{
1462	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
1463	struct net_device *dev;
1464
1465	dev = __dev_get_by_index(net, ifindex);
1466	if (!dev)
1467		return ERR_PTR(-ENODEV);
1468
1469	if (!netif_is_macsec(dev))
1470		return ERR_PTR(-ENODEV);
1471
1472	return dev;
1473}
1474
1475static enum macsec_offload nla_get_offload(const struct nlattr *nla)
1476{
1477	return (__force enum macsec_offload)nla_get_u8(nla);
1478}
1479
1480static sci_t nla_get_sci(const struct nlattr *nla)
1481{
1482	return (__force sci_t)nla_get_u64(nla);
1483}
1484
1485static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
1486		       int padattr)
1487{
1488	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
1489}
1490
1491static ssci_t nla_get_ssci(const struct nlattr *nla)
1492{
1493	return (__force ssci_t)nla_get_u32(nla);
1494}
1495
1496static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
1497{
1498	return nla_put_u32(skb, attrtype, (__force u64)value);
1499}
1500
1501static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
1502					     struct nlattr **attrs,
1503					     struct nlattr **tb_sa,
1504					     struct net_device **devp,
1505					     struct macsec_secy **secyp,
1506					     struct macsec_tx_sc **scp,
1507					     u8 *assoc_num)
1508{
1509	struct net_device *dev;
1510	struct macsec_secy *secy;
1511	struct macsec_tx_sc *tx_sc;
1512	struct macsec_tx_sa *tx_sa;
1513
1514	if (!tb_sa[MACSEC_SA_ATTR_AN])
1515		return ERR_PTR(-EINVAL);
1516
1517	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1518
1519	dev = get_dev_from_nl(net, attrs);
1520	if (IS_ERR(dev))
1521		return ERR_CAST(dev);
1522
1523	if (*assoc_num >= MACSEC_NUM_AN)
1524		return ERR_PTR(-EINVAL);
1525
1526	secy = &macsec_priv(dev)->secy;
1527	tx_sc = &secy->tx_sc;
1528
1529	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
1530	if (!tx_sa)
1531		return ERR_PTR(-ENODEV);
1532
1533	*devp = dev;
1534	*scp = tx_sc;
1535	*secyp = secy;
1536	return tx_sa;
1537}
1538
1539static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
1540					     struct nlattr **attrs,
1541					     struct nlattr **tb_rxsc,
1542					     struct net_device **devp,
1543					     struct macsec_secy **secyp)
1544{
1545	struct net_device *dev;
1546	struct macsec_secy *secy;
1547	struct macsec_rx_sc *rx_sc;
1548	sci_t sci;
1549
1550	dev = get_dev_from_nl(net, attrs);
1551	if (IS_ERR(dev))
1552		return ERR_CAST(dev);
1553
1554	secy = &macsec_priv(dev)->secy;
1555
1556	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
1557		return ERR_PTR(-EINVAL);
1558
1559	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1560	rx_sc = find_rx_sc_rtnl(secy, sci);
1561	if (!rx_sc)
1562		return ERR_PTR(-ENODEV);
1563
1564	*secyp = secy;
1565	*devp = dev;
1566
1567	return rx_sc;
1568}
1569
1570static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
1571					     struct nlattr **attrs,
1572					     struct nlattr **tb_rxsc,
1573					     struct nlattr **tb_sa,
1574					     struct net_device **devp,
1575					     struct macsec_secy **secyp,
1576					     struct macsec_rx_sc **scp,
1577					     u8 *assoc_num)
1578{
1579	struct macsec_rx_sc *rx_sc;
1580	struct macsec_rx_sa *rx_sa;
1581
1582	if (!tb_sa[MACSEC_SA_ATTR_AN])
1583		return ERR_PTR(-EINVAL);
1584
1585	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1586	if (*assoc_num >= MACSEC_NUM_AN)
1587		return ERR_PTR(-EINVAL);
1588
1589	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
1590	if (IS_ERR(rx_sc))
1591		return ERR_CAST(rx_sc);
1592
1593	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
1594	if (!rx_sa)
1595		return ERR_PTR(-ENODEV);
1596
1597	*scp = rx_sc;
1598	return rx_sa;
1599}
1600
1601static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
1602	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
1603	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
1604	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
1605	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
1606};
1607
1608static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
1609	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
1610	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
1611};
1612
1613static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
1614	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
1615	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
1616	[MACSEC_SA_ATTR_PN] = { .type = NLA_MIN_LEN, .len = 4 },
1617	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
1618				   .len = MACSEC_KEYID_LEN, },
1619	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
1620				 .len = MACSEC_MAX_KEY_LEN, },
1621	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
1622	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
1623				  .len = MACSEC_SALT_LEN, },
1624};
1625
1626static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
1627	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
1628};
1629
1630/* Offloads an operation to a device driver */
1631static int macsec_offload(int (* const func)(struct macsec_context *),
1632			  struct macsec_context *ctx)
1633{
1634	int ret;
1635
1636	if (unlikely(!func))
1637		return 0;
1638
1639	if (ctx->offload == MACSEC_OFFLOAD_PHY)
1640		mutex_lock(&ctx->phydev->lock);
1641
	/* Phase I: prepare. The driver should fail here if there are going to be
1643	 * issues in the commit phase.
1644	 */
1645	ctx->prepare = true;
1646	ret = (*func)(ctx);
1647	if (ret)
1648		goto phy_unlock;
1649
1650	/* Phase II: commit. This step cannot fail. */
1651	ctx->prepare = false;
1652	ret = (*func)(ctx);
1653	/* This should never happen: commit is not allowed to fail */
1654	if (unlikely(ret))
1655		WARN(1, "MACsec offloading commit failed (%d)\n", ret);
1656
1657phy_unlock:
1658	if (ctx->offload == MACSEC_OFFLOAD_PHY)
1659		mutex_unlock(&ctx->phydev->lock);
1660
1661	return ret;
1662}
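
/* A minimal sketch of how a driver-side mdo_ hook observes the two phases:
 * reserve resources while ctx->prepare is set, apply them once it is
 * cleared.  foo_* are illustrative names, not a real driver API:
 *
 *	static int foo_mdo_add_rxsa(struct macsec_context *ctx)
 *	{
 *		if (ctx->prepare)
 *			return foo_reserve_sa_slot(ctx);	// may fail
 *		foo_program_sa(ctx);				// must not fail
 *		return 0;
 *	}
 */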
1663
1664static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
1665{
1666	if (!attrs[MACSEC_ATTR_SA_CONFIG])
1667		return -EINVAL;
1668
1669	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
1670		return -EINVAL;
1671
1672	return 0;
1673}
1674
1675static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
1676{
1677	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
1678		return -EINVAL;
1679
1680	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
1681		return -EINVAL;
1682
1683	return 0;
1684}
1685
1686static bool validate_add_rxsa(struct nlattr **attrs)
1687{
1688	if (!attrs[MACSEC_SA_ATTR_AN] ||
1689	    !attrs[MACSEC_SA_ATTR_KEY] ||
1690	    !attrs[MACSEC_SA_ATTR_KEYID])
1691		return false;
1692
1693	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1694		return false;
1695
1696	if (attrs[MACSEC_SA_ATTR_PN] &&
	    nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
1698		return false;
1699
1700	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1701		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1702			return false;
1703	}
1704
1705	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1706		return false;
1707
1708	return true;
1709}
1710
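/* MACSEC_CMD_ADD_RXSA request layout, as enforced by the policies and the
 * checks above and below:
 *
 *	MACSEC_ATTR_IFINDEX		u32
 *	MACSEC_ATTR_RXSC_CONFIG		nested
 *		MACSEC_RXSC_ATTR_SCI	u64
 *	MACSEC_ATTR_SA_CONFIG		nested
 *		MACSEC_SA_ATTR_AN	u8, < MACSEC_NUM_AN
 *		MACSEC_SA_ATTR_KEY	binary, secy->key_len bytes
 *		MACSEC_SA_ATTR_KEYID	binary, MACSEC_KEYID_LEN bytes
 *		MACSEC_SA_ATTR_PN	optional, 4 or 8 bytes (XPN)
 *		MACSEC_SA_ATTR_SSCI	u32, required with XPN
 *		MACSEC_SA_ATTR_SALT	binary, required with XPN
 */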
1711static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
1712{
1713	struct net_device *dev;
1714	struct nlattr **attrs = info->attrs;
1715	struct macsec_secy *secy;
1716	struct macsec_rx_sc *rx_sc;
1717	struct macsec_rx_sa *rx_sa;
1718	unsigned char assoc_num;
1719	int pn_len;
1720	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1721	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1722	int err;
1723
1724	if (!attrs[MACSEC_ATTR_IFINDEX])
1725		return -EINVAL;
1726
1727	if (parse_sa_config(attrs, tb_sa))
1728		return -EINVAL;
1729
1730	if (parse_rxsc_config(attrs, tb_rxsc))
1731		return -EINVAL;
1732
1733	if (!validate_add_rxsa(tb_sa))
1734		return -EINVAL;
1735
1736	rtnl_lock();
1737	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
1738	if (IS_ERR(rx_sc)) {
1739		rtnl_unlock();
1740		return PTR_ERR(rx_sc);
1741	}
1742
1743	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1744
1745	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
1746		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
1747			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
1748		rtnl_unlock();
1749		return -EINVAL;
1750	}
1751
1752	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (tb_sa[MACSEC_SA_ATTR_PN] &&
	    nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
1754		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
1755			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
1756		rtnl_unlock();
1757		return -EINVAL;
1758	}
1759
1760	if (secy->xpn) {
1761		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
1762			rtnl_unlock();
1763			return -EINVAL;
1764		}
1765
1766		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
1767			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
1768				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
1770			rtnl_unlock();
1771			return -EINVAL;
1772		}
1773	}
1774
1775	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
1776	if (rx_sa) {
1777		rtnl_unlock();
1778		return -EBUSY;
1779	}
1780
1781	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
1782	if (!rx_sa) {
1783		rtnl_unlock();
1784		return -ENOMEM;
1785	}
1786
1787	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1788			 secy->key_len, secy->icv_len);
1789	if (err < 0) {
1790		kfree(rx_sa);
1791		rtnl_unlock();
1792		return err;
1793	}
1794
1795	if (tb_sa[MACSEC_SA_ATTR_PN]) {
1796		spin_lock_bh(&rx_sa->lock);
1797		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
1798		spin_unlock_bh(&rx_sa->lock);
1799	}
1800
1801	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
1802		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
1803
1804	rx_sa->sc = rx_sc;
1805
1806	/* If h/w offloading is available, propagate to the device */
1807	if (macsec_is_offloaded(netdev_priv(dev))) {
1808		const struct macsec_ops *ops;
1809		struct macsec_context ctx;
1810
1811		ops = macsec_get_ops(netdev_priv(dev), &ctx);
1812		if (!ops) {
1813			err = -EOPNOTSUPP;
1814			goto cleanup;
1815		}
1816
1817		ctx.sa.assoc_num = assoc_num;
1818		ctx.sa.rx_sa = rx_sa;
1819		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);
1822
1823		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
1824		if (err)
1825			goto cleanup;
1826	}
1827
1828	if (secy->xpn) {
1829		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
1830		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
1831			   MACSEC_SALT_LEN);
1832	}
1833
1834	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
1835	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
1836
1837	rtnl_unlock();
1838
1839	return 0;
1840
1841cleanup:
1842	kfree(rx_sa);
1843	rtnl_unlock();
1844	return err;
1845}
1846
1847static bool validate_add_rxsc(struct nlattr **attrs)
1848{
1849	if (!attrs[MACSEC_RXSC_ATTR_SCI])
1850		return false;
1851
1852	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
1853		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
1854			return false;
1855	}
1856
1857	return true;
1858}
1859
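/* Typically driven from userspace through iproute2; an illustrative example
 * (port, address and key are made up):
 *
 *	ip macsec add macsec0 rx port 1234 address c6:19:52:8f:e6:a0
 *	ip macsec add macsec0 rx port 1234 address c6:19:52:8f:e6:a0 \
 *		sa 0 pn 1 on key 01 81818181818181818181818181818181
 */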
1860static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
1861{
1862	struct net_device *dev;
1863	sci_t sci = MACSEC_UNDEF_SCI;
1864	struct nlattr **attrs = info->attrs;
1865	struct macsec_rx_sc *rx_sc;
1866	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1867	struct macsec_secy *secy;
1868	bool was_active;
1869	int ret;
1870
1871	if (!attrs[MACSEC_ATTR_IFINDEX])
1872		return -EINVAL;
1873
1874	if (parse_rxsc_config(attrs, tb_rxsc))
1875		return -EINVAL;
1876
1877	if (!validate_add_rxsc(tb_rxsc))
1878		return -EINVAL;
1879
1880	rtnl_lock();
1881	dev = get_dev_from_nl(genl_info_net(info), attrs);
1882	if (IS_ERR(dev)) {
1883		rtnl_unlock();
1884		return PTR_ERR(dev);
1885	}
1886
1887	secy = &macsec_priv(dev)->secy;
1888	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1889
1890	rx_sc = create_rx_sc(dev, sci);
1891	if (IS_ERR(rx_sc)) {
1892		rtnl_unlock();
1893		return PTR_ERR(rx_sc);
1894	}
1895
1896	was_active = rx_sc->active;
1897	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
1898		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
1899
1900	if (macsec_is_offloaded(netdev_priv(dev))) {
1901		const struct macsec_ops *ops;
1902		struct macsec_context ctx;
1903
1904		ops = macsec_get_ops(netdev_priv(dev), &ctx);
1905		if (!ops) {
1906			ret = -EOPNOTSUPP;
1907			goto cleanup;
1908		}
1909
1910		ctx.rx_sc = rx_sc;
1911		ctx.secy = secy;
1912
1913		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
1914		if (ret)
1915			goto cleanup;
1916	}
1917
1918	rtnl_unlock();
1919
1920	return 0;
1921
1922cleanup:
1923	rx_sc->active = was_active;
1924	rtnl_unlock();
1925	return ret;
1926}
1927
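/* Unlike an RX SA, a TX SA must always be created with an explicit initial
 * packet number, so MACSEC_SA_ATTR_PN is mandatory here.
 */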
1928static bool validate_add_txsa(struct nlattr **attrs)
1929{
1930	if (!attrs[MACSEC_SA_ATTR_AN] ||
1931	    !attrs[MACSEC_SA_ATTR_PN] ||
1932	    !attrs[MACSEC_SA_ATTR_KEY] ||
1933	    !attrs[MACSEC_SA_ATTR_KEYID])
1934		return false;
1935
1936	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1937		return false;
1938
	if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
1940		return false;
1941
1942	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1943		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1944			return false;
1945	}
1946
1947	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1948		return false;
1949
1950	return true;
1951}
1952
1953static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
1954{
1955	struct net_device *dev;
1956	struct nlattr **attrs = info->attrs;
1957	struct macsec_secy *secy;
1958	struct macsec_tx_sc *tx_sc;
1959	struct macsec_tx_sa *tx_sa;
1960	unsigned char assoc_num;
1961	int pn_len;
1962	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1963	bool was_operational;
1964	int err;
1965
1966	if (!attrs[MACSEC_ATTR_IFINDEX])
1967		return -EINVAL;
1968
1969	if (parse_sa_config(attrs, tb_sa))
1970		return -EINVAL;
1971
1972	if (!validate_add_txsa(tb_sa))
1973		return -EINVAL;
1974
1975	rtnl_lock();
1976	dev = get_dev_from_nl(genl_info_net(info), attrs);
1977	if (IS_ERR(dev)) {
1978		rtnl_unlock();
1979		return PTR_ERR(dev);
1980	}
1981
1982	secy = &macsec_priv(dev)->secy;
1983	tx_sc = &secy->tx_sc;
1984
1985	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1986
1987	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
1988		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
1989			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
1990		rtnl_unlock();
1991		return -EINVAL;
1992	}
1993
1994	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
1995	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
1996		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
1997			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
1998		rtnl_unlock();
1999		return -EINVAL;
2000	}
2001
2002	if (secy->xpn) {
2003		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
2004			rtnl_unlock();
2005			return -EINVAL;
2006		}
2007
2008		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
2009			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
2010				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
2012			rtnl_unlock();
2013			return -EINVAL;
2014		}
2015	}
2016
2017	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
2018	if (tx_sa) {
2019		rtnl_unlock();
2020		return -EBUSY;
2021	}
2022
2023	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
2024	if (!tx_sa) {
2025		rtnl_unlock();
2026		return -ENOMEM;
2027	}
2028
2029	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2030			 secy->key_len, secy->icv_len);
2031	if (err < 0) {
2032		kfree(tx_sa);
2033		rtnl_unlock();
2034		return err;
2035	}
2036
2037	spin_lock_bh(&tx_sa->lock);
2038	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2039	spin_unlock_bh(&tx_sa->lock);
2040
2041	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2042		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2043
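	/* Installing an active SA on the encoding AN makes the SecY
	 * operational, i.e. able to transmit protected frames.
	 */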
2044	was_operational = secy->operational;
2045	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
2046		secy->operational = true;
2047
2048	/* If h/w offloading is available, propagate to the device */
2049	if (macsec_is_offloaded(netdev_priv(dev))) {
2050		const struct macsec_ops *ops;
2051		struct macsec_context ctx;
2052
2053		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2054		if (!ops) {
2055			err = -EOPNOTSUPP;
2056			goto cleanup;
2057		}
2058
2059		ctx.sa.assoc_num = assoc_num;
2060		ctx.sa.tx_sa = tx_sa;
2061		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);
2064
2065		err = macsec_offload(ops->mdo_add_txsa, &ctx);
2066		if (err)
2067			goto cleanup;
2068	}
2069
2070	if (secy->xpn) {
2071		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
2072		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
2073			   MACSEC_SALT_LEN);
2074	}
2075
2076	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
2077	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
2078
2079	rtnl_unlock();
2080
2081	return 0;
2082
2083cleanup:
2084	secy->operational = was_operational;
2085	kfree(tx_sa);
2086	rtnl_unlock();
2087	return err;
2088}
2089
2090static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
2091{
2092	struct nlattr **attrs = info->attrs;
2093	struct net_device *dev;
2094	struct macsec_secy *secy;
2095	struct macsec_rx_sc *rx_sc;
2096	struct macsec_rx_sa *rx_sa;
2097	u8 assoc_num;
2098	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2099	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2100	int ret;
2101
2102	if (!attrs[MACSEC_ATTR_IFINDEX])
2103		return -EINVAL;
2104
2105	if (parse_sa_config(attrs, tb_sa))
2106		return -EINVAL;
2107
2108	if (parse_rxsc_config(attrs, tb_rxsc))
2109		return -EINVAL;
2110
2111	rtnl_lock();
2112	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2113				 &dev, &secy, &rx_sc, &assoc_num);
2114	if (IS_ERR(rx_sa)) {
2115		rtnl_unlock();
2116		return PTR_ERR(rx_sa);
2117	}
2118
2119	if (rx_sa->active) {
2120		rtnl_unlock();
2121		return -EBUSY;
2122	}
2123
2124	/* If h/w offloading is available, propagate to the device */
2125	if (macsec_is_offloaded(netdev_priv(dev))) {
2126		const struct macsec_ops *ops;
2127		struct macsec_context ctx;
2128
2129		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2130		if (!ops) {
2131			ret = -EOPNOTSUPP;
2132			goto cleanup;
2133		}
2134
2135		ctx.sa.assoc_num = assoc_num;
2136		ctx.sa.rx_sa = rx_sa;
2137		ctx.secy = secy;
2138
2139		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
2140		if (ret)
2141			goto cleanup;
2142	}
2143
2144	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
2145	clear_rx_sa(rx_sa);
2146
2147	rtnl_unlock();
2148
2149	return 0;
2150
2151cleanup:
2152	rtnl_unlock();
2153	return ret;
2154}
2155
2156static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
2157{
2158	struct nlattr **attrs = info->attrs;
2159	struct net_device *dev;
2160	struct macsec_secy *secy;
2161	struct macsec_rx_sc *rx_sc;
2162	sci_t sci;
2163	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2164	int ret;
2165
2166	if (!attrs[MACSEC_ATTR_IFINDEX])
2167		return -EINVAL;
2168
2169	if (parse_rxsc_config(attrs, tb_rxsc))
2170		return -EINVAL;
2171
2172	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
2173		return -EINVAL;
2174
2175	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
2177	if (IS_ERR(dev)) {
2178		rtnl_unlock();
2179		return PTR_ERR(dev);
2180	}
2181
2182	secy = &macsec_priv(dev)->secy;
2183	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
2184
2185	rx_sc = del_rx_sc(secy, sci);
2186	if (!rx_sc) {
2187		rtnl_unlock();
2188		return -ENODEV;
2189	}
2190
2191	/* If h/w offloading is available, propagate to the device */
2192	if (macsec_is_offloaded(netdev_priv(dev))) {
2193		const struct macsec_ops *ops;
2194		struct macsec_context ctx;
2195
2196		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2197		if (!ops) {
2198			ret = -EOPNOTSUPP;
2199			goto cleanup;
2200		}
2201
2202		ctx.rx_sc = rx_sc;
2203		ctx.secy = secy;
2204		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
2205		if (ret)
2206			goto cleanup;
2207	}
2208
2209	free_rx_sc(rx_sc);
2210	rtnl_unlock();
2211
2212	return 0;
2213
2214cleanup:
2215	rtnl_unlock();
2216	return ret;
2217}
2218
2219static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
2220{
2221	struct nlattr **attrs = info->attrs;
2222	struct net_device *dev;
2223	struct macsec_secy *secy;
2224	struct macsec_tx_sc *tx_sc;
2225	struct macsec_tx_sa *tx_sa;
2226	u8 assoc_num;
2227	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2228	int ret;
2229
2230	if (!attrs[MACSEC_ATTR_IFINDEX])
2231		return -EINVAL;
2232
2233	if (parse_sa_config(attrs, tb_sa))
2234		return -EINVAL;
2235
2236	rtnl_lock();
2237	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2238				 &dev, &secy, &tx_sc, &assoc_num);
2239	if (IS_ERR(tx_sa)) {
2240		rtnl_unlock();
2241		return PTR_ERR(tx_sa);
2242	}
2243
2244	if (tx_sa->active) {
2245		rtnl_unlock();
2246		return -EBUSY;
2247	}
2248
2249	/* If h/w offloading is available, propagate to the device */
2250	if (macsec_is_offloaded(netdev_priv(dev))) {
2251		const struct macsec_ops *ops;
2252		struct macsec_context ctx;
2253
2254		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2255		if (!ops) {
2256			ret = -EOPNOTSUPP;
2257			goto cleanup;
2258		}
2259
2260		ctx.sa.assoc_num = assoc_num;
2261		ctx.sa.tx_sa = tx_sa;
2262		ctx.secy = secy;
2263
2264		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
2265		if (ret)
2266			goto cleanup;
2267	}
2268
2269	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
2270	clear_tx_sa(tx_sa);
2271
2272	rtnl_unlock();
2273
2274	return 0;
2275
2276cleanup:
2277	rtnl_unlock();
2278	return ret;
2279}
2280
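/* Updates may change the PN and the active flag, but never the key material
 * or the XPN parameters of an existing SA; for those, userspace has to
 * delete the SA and create a new one.
 */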
2281static bool validate_upd_sa(struct nlattr **attrs)
2282{
2283	if (!attrs[MACSEC_SA_ATTR_AN] ||
2284	    attrs[MACSEC_SA_ATTR_KEY] ||
2285	    attrs[MACSEC_SA_ATTR_KEYID] ||
2286	    attrs[MACSEC_SA_ATTR_SSCI] ||
2287	    attrs[MACSEC_SA_ATTR_SALT])
2288		return false;
2289
2290	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
2291		return false;
2292
	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
2294		return false;
2295
2296	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
2297		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
2298			return false;
2299	}
2300
2301	return true;
2302}
2303
2304static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
2305{
2306	struct nlattr **attrs = info->attrs;
2307	struct net_device *dev;
2308	struct macsec_secy *secy;
2309	struct macsec_tx_sc *tx_sc;
2310	struct macsec_tx_sa *tx_sa;
2311	u8 assoc_num;
2312	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2313	bool was_operational, was_active;
2314	pn_t prev_pn;
2315	int ret = 0;
2316
2317	prev_pn.full64 = 0;
2318
2319	if (!attrs[MACSEC_ATTR_IFINDEX])
2320		return -EINVAL;
2321
2322	if (parse_sa_config(attrs, tb_sa))
2323		return -EINVAL;
2324
2325	if (!validate_upd_sa(tb_sa))
2326		return -EINVAL;
2327
2328	rtnl_lock();
2329	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2330				 &dev, &secy, &tx_sc, &assoc_num);
2331	if (IS_ERR(tx_sa)) {
2332		rtnl_unlock();
2333		return PTR_ERR(tx_sa);
2334	}
2335
2336	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2337		int pn_len;
2338
2339		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2340		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2341			pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
2342				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2343			rtnl_unlock();
2344			return -EINVAL;
2345		}
2346
2347		spin_lock_bh(&tx_sa->lock);
2348		prev_pn = tx_sa->next_pn_halves;
2349		tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2350		spin_unlock_bh(&tx_sa->lock);
2351	}
2352
2353	was_active = tx_sa->active;
2354	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2355		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2356
2357	was_operational = secy->operational;
2358	if (assoc_num == tx_sc->encoding_sa)
2359		secy->operational = tx_sa->active;
2360
2361	/* If h/w offloading is available, propagate to the device */
2362	if (macsec_is_offloaded(netdev_priv(dev))) {
2363		const struct macsec_ops *ops;
2364		struct macsec_context ctx;
2365
2366		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2367		if (!ops) {
2368			ret = -EOPNOTSUPP;
2369			goto cleanup;
2370		}
2371
2372		ctx.sa.assoc_num = assoc_num;
2373		ctx.sa.tx_sa = tx_sa;
2374		ctx.secy = secy;
2375
2376		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
2377		if (ret)
2378			goto cleanup;
2379	}
2380
2381	rtnl_unlock();
2382
2383	return 0;
2384
2385cleanup:
2386	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2387		spin_lock_bh(&tx_sa->lock);
2388		tx_sa->next_pn_halves = prev_pn;
2389		spin_unlock_bh(&tx_sa->lock);
2390	}
2391	tx_sa->active = was_active;
2392	secy->operational = was_operational;
2393	rtnl_unlock();
2394	return ret;
2395}
2396
2397static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
2398{
2399	struct nlattr **attrs = info->attrs;
2400	struct net_device *dev;
2401	struct macsec_secy *secy;
2402	struct macsec_rx_sc *rx_sc;
2403	struct macsec_rx_sa *rx_sa;
2404	u8 assoc_num;
2405	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2406	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2407	bool was_active;
2408	pn_t prev_pn;
2409	int ret = 0;
2410
2411	prev_pn.full64 = 0;
2412
2413	if (!attrs[MACSEC_ATTR_IFINDEX])
2414		return -EINVAL;
2415
2416	if (parse_rxsc_config(attrs, tb_rxsc))
2417		return -EINVAL;
2418
2419	if (parse_sa_config(attrs, tb_sa))
2420		return -EINVAL;
2421
2422	if (!validate_upd_sa(tb_sa))
2423		return -EINVAL;
2424
2425	rtnl_lock();
2426	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2427				 &dev, &secy, &rx_sc, &assoc_num);
2428	if (IS_ERR(rx_sa)) {
2429		rtnl_unlock();
2430		return PTR_ERR(rx_sa);
2431	}
2432
2433	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2434		int pn_len;
2435
2436		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2437		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2438			pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
2439				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2440			rtnl_unlock();
2441			return -EINVAL;
2442		}
2443
2444		spin_lock_bh(&rx_sa->lock);
2445		prev_pn = rx_sa->next_pn_halves;
2446		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2447		spin_unlock_bh(&rx_sa->lock);
2448	}
2449
2450	was_active = rx_sa->active;
2451	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2452		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2453
2454	/* If h/w offloading is available, propagate to the device */
2455	if (macsec_is_offloaded(netdev_priv(dev))) {
2456		const struct macsec_ops *ops;
2457		struct macsec_context ctx;
2458
2459		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2460		if (!ops) {
2461			ret = -EOPNOTSUPP;
2462			goto cleanup;
2463		}
2464
2465		ctx.sa.assoc_num = assoc_num;
2466		ctx.sa.rx_sa = rx_sa;
2467		ctx.secy = secy;
2468
2469		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
2470		if (ret)
2471			goto cleanup;
2472	}
2473
2474	rtnl_unlock();
2475	return 0;
2476
2477cleanup:
2478	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2479		spin_lock_bh(&rx_sa->lock);
2480		rx_sa->next_pn_halves = prev_pn;
2481		spin_unlock_bh(&rx_sa->lock);
2482	}
2483	rx_sa->active = was_active;
2484	rtnl_unlock();
2485	return ret;
2486}
2487
2488static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
2489{
2490	struct nlattr **attrs = info->attrs;
2491	struct net_device *dev;
2492	struct macsec_secy *secy;
2493	struct macsec_rx_sc *rx_sc;
2494	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2495	unsigned int prev_n_rx_sc;
2496	bool was_active;
2497	int ret;
2498
2499	if (!attrs[MACSEC_ATTR_IFINDEX])
2500		return -EINVAL;
2501
2502	if (parse_rxsc_config(attrs, tb_rxsc))
2503		return -EINVAL;
2504
2505	if (!validate_add_rxsc(tb_rxsc))
2506		return -EINVAL;
2507
2508	rtnl_lock();
2509	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
2510	if (IS_ERR(rx_sc)) {
2511		rtnl_unlock();
2512		return PTR_ERR(rx_sc);
2513	}
2514
2515	was_active = rx_sc->active;
2516	prev_n_rx_sc = secy->n_rx_sc;
2517	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
2518		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
2519
2520		if (rx_sc->active != new)
2521			secy->n_rx_sc += new ? 1 : -1;
2522
2523		rx_sc->active = new;
2524	}
2525
2526	/* If h/w offloading is available, propagate to the device */
2527	if (macsec_is_offloaded(netdev_priv(dev))) {
2528		const struct macsec_ops *ops;
2529		struct macsec_context ctx;
2530
2531		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2532		if (!ops) {
2533			ret = -EOPNOTSUPP;
2534			goto cleanup;
2535		}
2536
2537		ctx.rx_sc = rx_sc;
2538		ctx.secy = secy;
2539
2540		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
2541		if (ret)
2542			goto cleanup;
2543	}
2544
2545	rtnl_unlock();
2546
2547	return 0;
2548
2549cleanup:
2550	secy->n_rx_sc = prev_n_rx_sc;
2551	rx_sc->active = was_active;
2552	rtnl_unlock();
2553	return ret;
2554}
2555
2556static bool macsec_is_configured(struct macsec_dev *macsec)
2557{
2558	struct macsec_secy *secy = &macsec->secy;
2559	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2560	int i;
2561
2562	if (secy->n_rx_sc > 0)
2563		return true;
2564
2565	for (i = 0; i < MACSEC_NUM_AN; i++)
2566		if (tx_sc->sa[i])
2567			return true;
2568
2569	return false;
2570}
2571
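/* Toggle offloading for an existing interface.  Switching is only permitted
 * while the device is down and before any RX SC or TX SA has been created,
 * since already-installed rules are not migrated between the software and
 * hardware paths.
 */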
2572static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
2573{
2574	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
2575	enum macsec_offload offload, prev_offload;
2576	int (*func)(struct macsec_context *ctx);
2577	struct nlattr **attrs = info->attrs;
2578	struct net_device *dev;
2579	const struct macsec_ops *ops;
2580	struct macsec_context ctx;
2581	struct macsec_dev *macsec;
	int ret = 0;
2583
2584	if (!attrs[MACSEC_ATTR_IFINDEX])
2585		return -EINVAL;
2586
2587	if (!attrs[MACSEC_ATTR_OFFLOAD])
2588		return -EINVAL;
2589
2590	if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
2591					attrs[MACSEC_ATTR_OFFLOAD],
2592					macsec_genl_offload_policy, NULL))
2593		return -EINVAL;
2594
	/* Hold RTNL across the lookup so that the device cannot go away
	 * while we configure it.
	 */
	rtnl_lock();

	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	macsec = macsec_priv(dev);

	if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) {
		ret = -EINVAL;
		goto out;
	}

	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
	if (macsec->offload == offload)
		goto out;

	/* Check if the offloading mode is supported by the underlying layers */
	if (offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(offload, macsec)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* Check if the net device is busy. */
	if (netif_running(dev)) {
		ret = -EBUSY;
		goto out;
	}
2617
2618	prev_offload = macsec->offload;
2619	macsec->offload = offload;
2620
2621	/* Check if the device already has rules configured: we do not support
2622	 * rules migration.
2623	 */
2624	if (macsec_is_configured(macsec)) {
2625		ret = -EBUSY;
2626		goto rollback;
2627	}
2628
2629	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
2630			       macsec, &ctx);
2631	if (!ops) {
2632		ret = -EOPNOTSUPP;
2633		goto rollback;
2634	}
2635
2636	if (prev_offload == MACSEC_OFFLOAD_OFF)
2637		func = ops->mdo_add_secy;
2638	else
2639		func = ops->mdo_del_secy;
2640
2641	ctx.secy = &macsec->secy;
2642	ret = macsec_offload(func, &ctx);
2643	if (ret)
2644		goto rollback;
2645
2646	/* Force features update, since they are different for SW MACSec and
2647	 * HW offloading cases.
2648	 */
2649	netdev_update_features(dev);
2650
2651	rtnl_unlock();
2652	return 0;
2653
rollback:
	macsec->offload = prev_offload;

out:
	rtnl_unlock();
	return ret;
2659}
2660
2661static void get_tx_sa_stats(struct net_device *dev, int an,
2662			    struct macsec_tx_sa *tx_sa,
2663			    struct macsec_tx_sa_stats *sum)
2664{
2665	struct macsec_dev *macsec = macsec_priv(dev);
2666	int cpu;
2667
2668	/* If h/w offloading is available, propagate to the device */
2669	if (macsec_is_offloaded(macsec)) {
2670		const struct macsec_ops *ops;
2671		struct macsec_context ctx;
2672
2673		ops = macsec_get_ops(macsec, &ctx);
2674		if (ops) {
2675			ctx.sa.assoc_num = an;
2676			ctx.sa.tx_sa = tx_sa;
2677			ctx.stats.tx_sa_stats = sum;
2678			ctx.secy = &macsec_priv(dev)->secy;
2679			macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
2680		}
2681		return;
2682	}
2683
2684	for_each_possible_cpu(cpu) {
2685		const struct macsec_tx_sa_stats *stats =
2686			per_cpu_ptr(tx_sa->stats, cpu);
2687
2688		sum->OutPktsProtected += stats->OutPktsProtected;
2689		sum->OutPktsEncrypted += stats->OutPktsEncrypted;
2690	}
2691}
2692
2693static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
2694{
2695	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
2696			sum->OutPktsProtected) ||
2697	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2698			sum->OutPktsEncrypted))
2699		return -EMSGSIZE;
2700
2701	return 0;
2702}
2703
2704static void get_rx_sa_stats(struct net_device *dev,
2705			    struct macsec_rx_sc *rx_sc, int an,
2706			    struct macsec_rx_sa *rx_sa,
2707			    struct macsec_rx_sa_stats *sum)
2708{
2709	struct macsec_dev *macsec = macsec_priv(dev);
2710	int cpu;
2711
2712	/* If h/w offloading is available, propagate to the device */
2713	if (macsec_is_offloaded(macsec)) {
2714		const struct macsec_ops *ops;
2715		struct macsec_context ctx;
2716
2717		ops = macsec_get_ops(macsec, &ctx);
2718		if (ops) {
2719			ctx.sa.assoc_num = an;
2720			ctx.sa.rx_sa = rx_sa;
2721			ctx.stats.rx_sa_stats = sum;
2722			ctx.secy = &macsec_priv(dev)->secy;
2723			ctx.rx_sc = rx_sc;
2724			macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
2725		}
2726		return;
2727	}
2728
2729	for_each_possible_cpu(cpu) {
2730		const struct macsec_rx_sa_stats *stats =
2731			per_cpu_ptr(rx_sa->stats, cpu);
2732
2733		sum->InPktsOK         += stats->InPktsOK;
2734		sum->InPktsInvalid    += stats->InPktsInvalid;
2735		sum->InPktsNotValid   += stats->InPktsNotValid;
2736		sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
2737		sum->InPktsUnusedSA   += stats->InPktsUnusedSA;
2738	}
2739}
2740
2741static int copy_rx_sa_stats(struct sk_buff *skb,
2742			    struct macsec_rx_sa_stats *sum)
2743{
2744	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
2745	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
2746			sum->InPktsInvalid) ||
2747	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
2748			sum->InPktsNotValid) ||
2749	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2750			sum->InPktsNotUsingSA) ||
2751	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
2752			sum->InPktsUnusedSA))
2753		return -EMSGSIZE;
2754
2755	return 0;
2756}
2757
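/* Software-path SC and SecY counters are per-cpu, with writers serialized
 * through u64_stats_sync: readers snapshot each cpu's block and retry when
 * the begin/retry sequence numbers indicate a concurrent update (on 64-bit
 * this compiles down to a plain copy).
 */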
2758static void get_rx_sc_stats(struct net_device *dev,
2759			    struct macsec_rx_sc *rx_sc,
2760			    struct macsec_rx_sc_stats *sum)
2761{
2762	struct macsec_dev *macsec = macsec_priv(dev);
2763	int cpu;
2764
2765	/* If h/w offloading is available, propagate to the device */
2766	if (macsec_is_offloaded(macsec)) {
2767		const struct macsec_ops *ops;
2768		struct macsec_context ctx;
2769
2770		ops = macsec_get_ops(macsec, &ctx);
2771		if (ops) {
2772			ctx.stats.rx_sc_stats = sum;
2773			ctx.secy = &macsec_priv(dev)->secy;
2774			ctx.rx_sc = rx_sc;
2775			macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
2776		}
2777		return;
2778	}
2779
2780	for_each_possible_cpu(cpu) {
2781		const struct pcpu_rx_sc_stats *stats;
2782		struct macsec_rx_sc_stats tmp;
2783		unsigned int start;
2784
2785		stats = per_cpu_ptr(rx_sc->stats, cpu);
2786		do {
2787			start = u64_stats_fetch_begin_irq(&stats->syncp);
2788			memcpy(&tmp, &stats->stats, sizeof(tmp));
2789		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2790
2791		sum->InOctetsValidated += tmp.InOctetsValidated;
2792		sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
2793		sum->InPktsUnchecked   += tmp.InPktsUnchecked;
2794		sum->InPktsDelayed     += tmp.InPktsDelayed;
2795		sum->InPktsOK          += tmp.InPktsOK;
2796		sum->InPktsInvalid     += tmp.InPktsInvalid;
2797		sum->InPktsLate        += tmp.InPktsLate;
2798		sum->InPktsNotValid    += tmp.InPktsNotValid;
2799		sum->InPktsNotUsingSA  += tmp.InPktsNotUsingSA;
2800		sum->InPktsUnusedSA    += tmp.InPktsUnusedSA;
2801	}
2802}
2803
2804static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
2805{
2806	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
2807			      sum->InOctetsValidated,
2808			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2809	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
2810			      sum->InOctetsDecrypted,
2811			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2812	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
2813			      sum->InPktsUnchecked,
2814			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2815	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
2816			      sum->InPktsDelayed,
2817			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2818	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
2819			      sum->InPktsOK,
2820			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2821	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
2822			      sum->InPktsInvalid,
2823			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2824	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
2825			      sum->InPktsLate,
2826			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2827	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
2828			      sum->InPktsNotValid,
2829			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2830	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2831			      sum->InPktsNotUsingSA,
2832			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2833	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
2834			      sum->InPktsUnusedSA,
2835			      MACSEC_RXSC_STATS_ATTR_PAD))
2836		return -EMSGSIZE;
2837
2838	return 0;
2839}
2840
2841static void get_tx_sc_stats(struct net_device *dev,
2842			    struct macsec_tx_sc_stats *sum)
2843{
2844	struct macsec_dev *macsec = macsec_priv(dev);
2845	int cpu;
2846
2847	/* If h/w offloading is available, propagate to the device */
2848	if (macsec_is_offloaded(macsec)) {
2849		const struct macsec_ops *ops;
2850		struct macsec_context ctx;
2851
2852		ops = macsec_get_ops(macsec, &ctx);
2853		if (ops) {
2854			ctx.stats.tx_sc_stats = sum;
2855			ctx.secy = &macsec_priv(dev)->secy;
2856			macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
2857		}
2858		return;
2859	}
2860
2861	for_each_possible_cpu(cpu) {
2862		const struct pcpu_tx_sc_stats *stats;
2863		struct macsec_tx_sc_stats tmp;
2864		unsigned int start;
2865
2866		stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
2867		do {
2868			start = u64_stats_fetch_begin_irq(&stats->syncp);
2869			memcpy(&tmp, &stats->stats, sizeof(tmp));
2870		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2871
2872		sum->OutPktsProtected   += tmp.OutPktsProtected;
2873		sum->OutPktsEncrypted   += tmp.OutPktsEncrypted;
2874		sum->OutOctetsProtected += tmp.OutOctetsProtected;
2875		sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
2876	}
2877}
2878
2879static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
2880{
2881	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
2882			      sum->OutPktsProtected,
2883			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2884	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2885			      sum->OutPktsEncrypted,
2886			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2887	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
2888			      sum->OutOctetsProtected,
2889			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2890	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
2891			      sum->OutOctetsEncrypted,
2892			      MACSEC_TXSC_STATS_ATTR_PAD))
2893		return -EMSGSIZE;
2894
2895	return 0;
2896}
2897
2898static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
2899{
2900	struct macsec_dev *macsec = macsec_priv(dev);
2901	int cpu;
2902
2903	/* If h/w offloading is available, propagate to the device */
2904	if (macsec_is_offloaded(macsec)) {
2905		const struct macsec_ops *ops;
2906		struct macsec_context ctx;
2907
2908		ops = macsec_get_ops(macsec, &ctx);
2909		if (ops) {
2910			ctx.stats.dev_stats = sum;
2911			ctx.secy = &macsec_priv(dev)->secy;
2912			macsec_offload(ops->mdo_get_dev_stats, &ctx);
2913		}
2914		return;
2915	}
2916
2917	for_each_possible_cpu(cpu) {
2918		const struct pcpu_secy_stats *stats;
2919		struct macsec_dev_stats tmp;
2920		unsigned int start;
2921
2922		stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
2923		do {
2924			start = u64_stats_fetch_begin_irq(&stats->syncp);
2925			memcpy(&tmp, &stats->stats, sizeof(tmp));
2926		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2927
2928		sum->OutPktsUntagged  += tmp.OutPktsUntagged;
2929		sum->InPktsUntagged   += tmp.InPktsUntagged;
2930		sum->OutPktsTooLong   += tmp.OutPktsTooLong;
2931		sum->InPktsNoTag      += tmp.InPktsNoTag;
2932		sum->InPktsBadTag     += tmp.InPktsBadTag;
2933		sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
2934		sum->InPktsNoSCI      += tmp.InPktsNoSCI;
2935		sum->InPktsOverrun    += tmp.InPktsOverrun;
2936	}
2937}
2938
2939static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
2940{
2941	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
2942			      sum->OutPktsUntagged,
2943			      MACSEC_SECY_STATS_ATTR_PAD) ||
2944	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
2945			      sum->InPktsUntagged,
2946			      MACSEC_SECY_STATS_ATTR_PAD) ||
2947	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
2948			      sum->OutPktsTooLong,
2949			      MACSEC_SECY_STATS_ATTR_PAD) ||
2950	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
2951			      sum->InPktsNoTag,
2952			      MACSEC_SECY_STATS_ATTR_PAD) ||
2953	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
2954			      sum->InPktsBadTag,
2955			      MACSEC_SECY_STATS_ATTR_PAD) ||
2956	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
2957			      sum->InPktsUnknownSCI,
2958			      MACSEC_SECY_STATS_ATTR_PAD) ||
2959	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
2960			      sum->InPktsNoSCI,
2961			      MACSEC_SECY_STATS_ATTR_PAD) ||
2962	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
2963			      sum->InPktsOverrun,
2964			      MACSEC_SECY_STATS_ATTR_PAD))
2965		return -EMSGSIZE;
2966
2967	return 0;
2968}
2969
2970static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
2971{
2972	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2973	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
2974							 MACSEC_ATTR_SECY);
2975	u64 csid;
2976
2977	if (!secy_nest)
2978		return 1;
2979
2980	switch (secy->key_len) {
2981	case MACSEC_GCM_AES_128_SAK_LEN:
2982		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
2983		break;
2984	case MACSEC_GCM_AES_256_SAK_LEN:
2985		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
2986		break;
2987	default:
2988		goto cancel;
2989	}
2990
2991	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
2992			MACSEC_SECY_ATTR_PAD) ||
2993	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
2994			      csid, MACSEC_SECY_ATTR_PAD) ||
2995	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
2996	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
2997	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
2998	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
2999	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
3000	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
3001	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
3002	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
3003	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
3004	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
3005		goto cancel;
3006
3007	if (secy->replay_protect) {
3008		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
3009			goto cancel;
3010	}
3011
3012	nla_nest_end(skb, secy_nest);
3013	return 0;
3014
3015cancel:
3016	nla_nest_cancel(skb, secy_nest);
3017	return 1;
3018}
3019
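/* Emit one SecY as a MACSEC_CMD_GET_TXSC multipart message: ifindex,
 * offload mode, SecY config, TX SC and SecY stats, the TX SA list, then
 * every RX SC with its stats and RX SA list.  On overflow, all partially
 * built nests are cancelled and -EMSGSIZE is returned so that
 * macsec_dump_txsc() can resume from this device on the next dump pass.
 */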
3020static noinline_for_stack int
3021dump_secy(struct macsec_secy *secy, struct net_device *dev,
3022	  struct sk_buff *skb, struct netlink_callback *cb)
3023{
3024	struct macsec_tx_sc_stats tx_sc_stats = {0, };
3025	struct macsec_tx_sa_stats tx_sa_stats = {0, };
3026	struct macsec_rx_sc_stats rx_sc_stats = {0, };
3027	struct macsec_rx_sa_stats rx_sa_stats = {0, };
3028	struct macsec_dev *macsec = netdev_priv(dev);
3029	struct macsec_dev_stats dev_stats = {0, };
3030	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3031	struct nlattr *txsa_list, *rxsc_list;
3032	struct macsec_rx_sc *rx_sc;
3033	struct nlattr *attr;
3034	void *hdr;
3035	int i, j;
3036
3037	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3038			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
3039	if (!hdr)
3040		return -EMSGSIZE;
3041
3042	genl_dump_check_consistent(cb, hdr);
3043
3044	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
3045		goto nla_put_failure;
3046
3047	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
3048	if (!attr)
3049		goto nla_put_failure;
3050	if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
3051		goto nla_put_failure;
3052	nla_nest_end(skb, attr);
3053
3054	if (nla_put_secy(secy, skb))
3055		goto nla_put_failure;
3056
3057	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
3058	if (!attr)
3059		goto nla_put_failure;
3060
3061	get_tx_sc_stats(dev, &tx_sc_stats);
3062	if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
3063		nla_nest_cancel(skb, attr);
3064		goto nla_put_failure;
3065	}
3066	nla_nest_end(skb, attr);
3067
3068	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
3069	if (!attr)
3070		goto nla_put_failure;
3071	get_secy_stats(dev, &dev_stats);
3072	if (copy_secy_stats(skb, &dev_stats)) {
3073		nla_nest_cancel(skb, attr);
3074		goto nla_put_failure;
3075	}
3076	nla_nest_end(skb, attr);
3077
3078	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
3079	if (!txsa_list)
3080		goto nla_put_failure;
3081	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
3082		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
3083		struct nlattr *txsa_nest;
3084		u64 pn;
3085		int pn_len;
3086
3087		if (!tx_sa)
3088			continue;
3089
3090		txsa_nest = nla_nest_start_noflag(skb, j++);
3091		if (!txsa_nest) {
3092			nla_nest_cancel(skb, txsa_list);
3093			goto nla_put_failure;
3094		}
3095
3096		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
3097		if (!attr) {
3098			nla_nest_cancel(skb, txsa_nest);
3099			nla_nest_cancel(skb, txsa_list);
3100			goto nla_put_failure;
3101		}
3102		memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
3103		get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
3104		if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
3105			nla_nest_cancel(skb, attr);
3106			nla_nest_cancel(skb, txsa_nest);
3107			nla_nest_cancel(skb, txsa_list);
3108			goto nla_put_failure;
3109		}
3110		nla_nest_end(skb, attr);
3111
3112		if (secy->xpn) {
3113			pn = tx_sa->next_pn;
3114			pn_len = MACSEC_XPN_PN_LEN;
3115		} else {
3116			pn = tx_sa->next_pn_halves.lower;
3117			pn_len = MACSEC_DEFAULT_PN_LEN;
3118		}
3119
3120		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3121		    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3122		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
3123		    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
3124		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
3125			nla_nest_cancel(skb, txsa_nest);
3126			nla_nest_cancel(skb, txsa_list);
3127			goto nla_put_failure;
3128		}
3129
3130		nla_nest_end(skb, txsa_nest);
3131	}
3132	nla_nest_end(skb, txsa_list);
3133
3134	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
3135	if (!rxsc_list)
3136		goto nla_put_failure;
3137
3138	j = 1;
3139	for_each_rxsc_rtnl(secy, rx_sc) {
3140		int k;
3141		struct nlattr *rxsa_list;
3142		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);
3143
3144		if (!rxsc_nest) {
3145			nla_nest_cancel(skb, rxsc_list);
3146			goto nla_put_failure;
3147		}
3148
3149		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
3150		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
3151				MACSEC_RXSC_ATTR_PAD)) {
3152			nla_nest_cancel(skb, rxsc_nest);
3153			nla_nest_cancel(skb, rxsc_list);
3154			goto nla_put_failure;
3155		}
3156
3157		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
3158		if (!attr) {
3159			nla_nest_cancel(skb, rxsc_nest);
3160			nla_nest_cancel(skb, rxsc_list);
3161			goto nla_put_failure;
3162		}
3163		memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
3164		get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
3165		if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
3166			nla_nest_cancel(skb, attr);
3167			nla_nest_cancel(skb, rxsc_nest);
3168			nla_nest_cancel(skb, rxsc_list);
3169			goto nla_put_failure;
3170		}
3171		nla_nest_end(skb, attr);
3172
3173		rxsa_list = nla_nest_start_noflag(skb,
3174						  MACSEC_RXSC_ATTR_SA_LIST);
3175		if (!rxsa_list) {
3176			nla_nest_cancel(skb, rxsc_nest);
3177			nla_nest_cancel(skb, rxsc_list);
3178			goto nla_put_failure;
3179		}
3180
3181		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
3182			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
3183			struct nlattr *rxsa_nest;
3184			u64 pn;
3185			int pn_len;
3186
3187			if (!rx_sa)
3188				continue;
3189
3190			rxsa_nest = nla_nest_start_noflag(skb, k++);
3191			if (!rxsa_nest) {
3192				nla_nest_cancel(skb, rxsa_list);
3193				nla_nest_cancel(skb, rxsc_nest);
3194				nla_nest_cancel(skb, rxsc_list);
3195				goto nla_put_failure;
3196			}
3197
3198			attr = nla_nest_start_noflag(skb,
3199						     MACSEC_SA_ATTR_STATS);
3200			if (!attr) {
3201				nla_nest_cancel(skb, rxsa_list);
3202				nla_nest_cancel(skb, rxsc_nest);
3203				nla_nest_cancel(skb, rxsc_list);
3204				goto nla_put_failure;
3205			}
3206			memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
3207			get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
3208			if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
3209				nla_nest_cancel(skb, attr);
3210				nla_nest_cancel(skb, rxsa_list);
3211				nla_nest_cancel(skb, rxsc_nest);
3212				nla_nest_cancel(skb, rxsc_list);
3213				goto nla_put_failure;
3214			}
3215			nla_nest_end(skb, attr);
3216
3217			if (secy->xpn) {
3218				pn = rx_sa->next_pn;
3219				pn_len = MACSEC_XPN_PN_LEN;
3220			} else {
3221				pn = rx_sa->next_pn_halves.lower;
3222				pn_len = MACSEC_DEFAULT_PN_LEN;
3223			}
3224
3225			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3226			    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3227			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
3228			    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
3229			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
3230				nla_nest_cancel(skb, rxsa_nest);
3231				nla_nest_cancel(skb, rxsc_nest);
3232				nla_nest_cancel(skb, rxsc_list);
3233				goto nla_put_failure;
3234			}
3235			nla_nest_end(skb, rxsa_nest);
3236		}
3237
3238		nla_nest_end(skb, rxsa_list);
3239		nla_nest_end(skb, rxsc_nest);
3240	}
3241
3242	nla_nest_end(skb, rxsc_list);
3243
3244	genlmsg_end(skb, hdr);
3245
3246	return 0;
3247
3248nla_put_failure:
3249	genlmsg_cancel(skb, hdr);
3250	return -EMSGSIZE;
3251}
3252
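/* Bumped (under RTNL) whenever the set of SecYs or RX SCs changes, so that
 * genl_dump_check_consistent() can flag interrupted dumps with
 * NLM_F_DUMP_INTR.
 */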
3253static int macsec_generation = 1; /* protected by RTNL */
3254
3255static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
3256{
3257	struct net *net = sock_net(skb->sk);
3258	struct net_device *dev;
3259	int dev_idx, d;
3260
3261	dev_idx = cb->args[0];
3262
3263	d = 0;
3264	rtnl_lock();
3265
3266	cb->seq = macsec_generation;
3267
3268	for_each_netdev(net, dev) {
3269		struct macsec_secy *secy;
3270
3271		if (d < dev_idx)
3272			goto next;
3273
3274		if (!netif_is_macsec(dev))
3275			goto next;
3276
3277		secy = &macsec_priv(dev)->secy;
3278		if (dump_secy(secy, dev, skb, cb) < 0)
3279			goto done;
3280next:
3281		d++;
3282	}
3283
3284done:
3285	rtnl_unlock();
3286	cb->args[0] = d;
3287	return skb->len;
3288}
3289
3290static const struct genl_ops macsec_genl_ops[] = {
3291	{
3292		.cmd = MACSEC_CMD_GET_TXSC,
3293		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3294		.dumpit = macsec_dump_txsc,
3295	},
3296	{
3297		.cmd = MACSEC_CMD_ADD_RXSC,
3298		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3299		.doit = macsec_add_rxsc,
3300		.flags = GENL_ADMIN_PERM,
3301	},
3302	{
3303		.cmd = MACSEC_CMD_DEL_RXSC,
3304		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3305		.doit = macsec_del_rxsc,
3306		.flags = GENL_ADMIN_PERM,
3307	},
3308	{
3309		.cmd = MACSEC_CMD_UPD_RXSC,
3310		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3311		.doit = macsec_upd_rxsc,
3312		.flags = GENL_ADMIN_PERM,
3313	},
3314	{
3315		.cmd = MACSEC_CMD_ADD_TXSA,
3316		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3317		.doit = macsec_add_txsa,
3318		.flags = GENL_ADMIN_PERM,
3319	},
3320	{
3321		.cmd = MACSEC_CMD_DEL_TXSA,
3322		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3323		.doit = macsec_del_txsa,
3324		.flags = GENL_ADMIN_PERM,
3325	},
3326	{
3327		.cmd = MACSEC_CMD_UPD_TXSA,
3328		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3329		.doit = macsec_upd_txsa,
3330		.flags = GENL_ADMIN_PERM,
3331	},
3332	{
3333		.cmd = MACSEC_CMD_ADD_RXSA,
3334		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3335		.doit = macsec_add_rxsa,
3336		.flags = GENL_ADMIN_PERM,
3337	},
3338	{
3339		.cmd = MACSEC_CMD_DEL_RXSA,
3340		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3341		.doit = macsec_del_rxsa,
3342		.flags = GENL_ADMIN_PERM,
3343	},
3344	{
3345		.cmd = MACSEC_CMD_UPD_RXSA,
3346		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3347		.doit = macsec_upd_rxsa,
3348		.flags = GENL_ADMIN_PERM,
3349	},
3350	{
3351		.cmd = MACSEC_CMD_UPD_OFFLOAD,
3352		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3353		.doit = macsec_upd_offload,
3354		.flags = GENL_ADMIN_PERM,
3355	},
3356};
3357
3358static struct genl_family macsec_fam __ro_after_init = {
3359	.name		= MACSEC_GENL_NAME,
3360	.hdrsize	= 0,
3361	.version	= MACSEC_GENL_VERSION,
3362	.maxattr	= MACSEC_ATTR_MAX,
3363	.policy = macsec_genl_policy,
3364	.netnsok	= true,
3365	.module		= THIS_MODULE,
3366	.ops		= macsec_genl_ops,
3367	.n_ops		= ARRAY_SIZE(macsec_genl_ops),
3368};
3369
3370static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
3371				     struct net_device *dev)
3372{
3373	struct macsec_dev *macsec = netdev_priv(dev);
3374	struct macsec_secy *secy = &macsec->secy;
3375	struct pcpu_secy_stats *secy_stats;
3376	int ret, len;
3377
3378	if (macsec_is_offloaded(netdev_priv(dev))) {
3379		skb->dev = macsec->real_dev;
3380		return dev_queue_xmit(skb);
3381	}
3382
	/* IEEE 802.1AE-2006 10.5 */
3384	if (!secy->protect_frames) {
3385		secy_stats = this_cpu_ptr(macsec->stats);
3386		u64_stats_update_begin(&secy_stats->syncp);
3387		secy_stats->stats.OutPktsUntagged++;
3388		u64_stats_update_end(&secy_stats->syncp);
3389		skb->dev = macsec->real_dev;
3390		len = skb->len;
3391		ret = dev_queue_xmit(skb);
3392		count_tx(dev, ret, len);
3393		return ret;
3394	}
3395
3396	if (!secy->operational) {
3397		kfree_skb(skb);
3398		dev->stats.tx_dropped++;
3399		return NETDEV_TX_OK;
3400	}
3401
3402	skb = macsec_encrypt(skb, dev);
3403	if (IS_ERR(skb)) {
3404		if (PTR_ERR(skb) != -EINPROGRESS)
3405			dev->stats.tx_dropped++;
3406		return NETDEV_TX_OK;
3407	}
3408
3409	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
3410
3411	macsec_encrypt_finish(skb, dev);
3412	len = skb->len;
3413	ret = dev_queue_xmit(skb);
3414	count_tx(dev, ret, len);
3415	return ret;
3416}
3417
3418#define SW_MACSEC_FEATURES \
3419	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
3420
/* If h/w offloading is enabled, use the real device's features, except for:
 *   VLAN_FEATURES - they require additional ops
 *   HW_MACSEC - no reason to report it on the virtual device
 */
3425#define REAL_DEV_FEATURES(dev) \
3426	((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC))
3427
3428static int macsec_dev_init(struct net_device *dev)
3429{
3430	struct macsec_dev *macsec = macsec_priv(dev);
3431	struct net_device *real_dev = macsec->real_dev;
3432	int err;
3433
3434	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
3435	if (!dev->tstats)
3436		return -ENOMEM;
3437
3438	err = gro_cells_init(&macsec->gro_cells, dev);
3439	if (err) {
3440		free_percpu(dev->tstats);
3441		return err;
3442	}
3443
3444	if (macsec_is_offloaded(macsec)) {
3445		dev->features = REAL_DEV_FEATURES(real_dev);
3446	} else {
3447		dev->features = real_dev->features & SW_MACSEC_FEATURES;
3448		dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
3449	}
3450
3451	dev->needed_headroom = real_dev->needed_headroom +
3452			       MACSEC_NEEDED_HEADROOM;
3453	dev->needed_tailroom = real_dev->needed_tailroom +
3454			       MACSEC_NEEDED_TAILROOM;
3455
3456	if (is_zero_ether_addr(dev->dev_addr))
3457		eth_hw_addr_inherit(dev, real_dev);
3458	if (is_zero_ether_addr(dev->broadcast))
3459		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
3460
3461	return 0;
3462}
3463
3464static void macsec_dev_uninit(struct net_device *dev)
3465{
3466	struct macsec_dev *macsec = macsec_priv(dev);
3467
3468	gro_cells_destroy(&macsec->gro_cells);
3469	free_percpu(dev->tstats);
3470}
3471
3472static netdev_features_t macsec_fix_features(struct net_device *dev,
3473					     netdev_features_t features)
3474{
3475	struct macsec_dev *macsec = macsec_priv(dev);
3476	struct net_device *real_dev = macsec->real_dev;
3477
3478	if (macsec_is_offloaded(macsec))
3479		return REAL_DEV_FEATURES(real_dev);
3480
3481	features &= (real_dev->features & SW_MACSEC_FEATURES) |
3482		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
3483	features |= NETIF_F_LLTX;
3484
3485	return features;
3486}

static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto clear_allmulti;
		}

		ctx.secy = &macsec->secy;
		err = macsec_offload(ops->mdo_dev_open, &ctx);
		if (err)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}

static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_dev_stop, &ctx);
		}
	}

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	dev_uc_del(real_dev, dev->dev_addr);

	return 0;
}

static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	if (!(dev->flags & IFF_UP))
		return;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);

	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev,
				    dev->flags & IFF_PROMISC ? 1 : -1);
}

static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}

static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	err = dev_uc_add(real_dev, addr->sa_data);
	if (err < 0)
		return err;

	dev_uc_del(real_dev, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);
	macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_upd_secy, &ctx);
		}
	}

	return 0;
}

static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}
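
/* MTU arithmetic, for illustration: with the SCI present,
 * macsec_extra_len(true) covers the 16-byte SecTAG (2-byte EtherType,
 * 1-byte TCI/AN, 1-byte SL, 4-byte PN, 8-byte SCI). Together with the
 * default 16-byte ICV, "extra" is 32 bytes, so a real_dev with a
 * 1500-byte MTU caps the MACsec device at 1500 - 32 = 1468.
 */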

static void macsec_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *s)
{
	int cpu;

	if (!dev->tstats)
		return;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes   = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes   = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes   += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes   += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;
}

static int macsec_get_iflink(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev->ifindex;
}

static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init		= macsec_dev_init,
	.ndo_uninit		= macsec_dev_uninit,
	.ndo_open		= macsec_dev_open,
	.ndo_stop		= macsec_dev_stop,
	.ndo_fix_features	= macsec_fix_features,
	.ndo_change_mtu		= macsec_change_mtu,
	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
	.ndo_set_mac_address	= macsec_set_mac_address,
	.ndo_start_xmit		= macsec_start_xmit,
	.ndo_get_stats64	= macsec_get_stats64,
	.ndo_get_iflink		= macsec_get_iflink,
};

static const struct device_type macsec_type = {
	.name = "macsec",
};

static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
	[IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
};
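
/* Example configuration from userspace via iproute2 (illustrative):
 *
 *   ip link add link eth0 macsec0 type macsec port 11 encrypt on
 *   ip macsec add macsec0 tx sa 0 pn 1 on key 01 <128-bit hex key>
 *   ip macsec add macsec0 rx port 11 address <peer MAC>
 *
 * The "type macsec" keywords map onto the IFLA_MACSEC_* attributes
 * validated by the policy above.
 */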

static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);
}

static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = macsec_free_netdev;
	SET_NETDEV_DEVTYPE(dev, &macsec_type);

	eth_zero_addr(dev->broadcast);
}

static int macsec_changelink_common(struct net_device *dev,
				    struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_WINDOW])
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);

	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
		case MACSEC_CIPHER_ID_GCM_AES_128:
		case MACSEC_DEFAULT_CIPHER_ID:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			secy->xpn = false;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			secy->xpn = false;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			secy->xpn = true;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			secy->xpn = true;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sc tx_sc;
	struct macsec_secy secy;
	int ret;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	/* Keep a copy of unmodified secy and tx_sc, in case the offload
	 * propagation fails, to revert macsec_changelink_common.
	 */
	memcpy(&secy, &macsec->secy, sizeof(secy));
	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));

	ret = macsec_changelink_common(dev, data);
	if (ret)
		return ret;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.secy = &macsec->secy;
		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
		if (ret)
			goto cleanup;
	}

	return 0;

cleanup:
	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
	memcpy(&macsec->secy, &secy, sizeof(secy));

	return ret;
}
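
/* Runtime reconfiguration example (illustrative): parameters handled by
 * macsec_changelink_common can be toggled on an existing device, e.g.
 *
 *   ip link set macsec0 type macsec encrypt off
 *   ip link set macsec0 type macsec replay on window 128
 *
 * while the SCI, port, ICV length and cipher suite are fixed at creation
 * time and rejected above with -EINVAL.
 */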

static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}

static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);
	netdev_upper_dev_unlink(real_dev, dev);

	macsec_generation++;
}

static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_del_secy, &ctx);
		}
	}

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}

static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}

static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}
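
/* SCI layout, for illustration: an SCI is the 6-byte MAC address of the
 * transmitting port followed by a 2-byte port identifier, both in network
 * order. dev_to_sci(dev, MACSEC_PORT_ES) for a device with address
 * 52:54:00:12:34:56 therefore yields the SCI 52:54:00:12:34:56 port 1,
 * which sci_exists() compares as a single 64-bit value.
 */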

static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;
	secy->xpn = DEFAULT_XPN;

	secy->sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}

static struct lock_class_key macsec_netdev_addr_lock_key;

static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	rx_handler_func_t *rx_handler;
	u8 icv_len = DEFAULT_ICV_LEN;
	struct net_device *real_dev;
	int err, mtu;
	sci_t sci;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;
	if (real_dev->type != ARPHRD_ETHER)
		return -EINVAL;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	if (data && data[IFLA_MACSEC_OFFLOAD])
		macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
	else
		/* MACsec offloading is off by default */
		macsec->offload = MACSEC_OFFLOAD_OFF;

	/* Check if the offloading mode is supported by the underlying layers */
	if (macsec->offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(macsec->offload, macsec))
		return -EOPNOTSUPP;

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
	if (mtu < 0)
		dev->mtu = 0;
	else
		dev->mtu = mtu;

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	netdev_lockdep_set_classes(dev);
	lockdep_set_class(&dev->addr_list_lock,
			  &macsec_netdev_addr_lock_key);

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err < 0)
		goto unregister;

	/* The device needs to be registered already, so that ->init has run
	 * and the MAC address is set.
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data) {
		err = macsec_changelink_common(dev, data);
		if (err)
			goto del_dev;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			err = macsec_offload(ops->mdo_add_secy, &ctx);
			if (err)
				goto del_dev;
		}
	}

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev);

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}

static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es  = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}
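
/* TCI flag exclusivity, for illustration: in the SecTAG's TCI octet the SC
 * bit (SCI present), ES (end station) and SCB (EPON single copy broadcast)
 * are alternative ways of identifying the transmitting SC, which is why
 * macsec_validate_attr() rejects any request combining inc_sci with es or
 * scb, or es with scb.
 */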

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		0;
}

static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	u64 csid;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};

static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}

static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}
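
/* For illustration: if eth0 carries macsec0 and its MTU drops from 1500 to
 * 1400, the NETDEV_CHANGEMTU case above clamps macsec0 to 1400 minus the
 * MACsec overhead (1368 with the default 16-byte ICV and SCI present), so
 * protected frames still fit on the lower device.
 */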

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};

static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");