   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * drivers/net/macsec.c - MACsec device
   4 *
   5 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
   6 */
   7
   8#include <linux/types.h>
   9#include <linux/skbuff.h>
  10#include <linux/socket.h>
  11#include <linux/module.h>
  12#include <crypto/aead.h>
  13#include <linux/etherdevice.h>
  14#include <linux/netdevice.h>
  15#include <linux/rtnetlink.h>
  16#include <linux/refcount.h>
  17#include <net/genetlink.h>
  18#include <net/sock.h>
  19#include <net/gro_cells.h>
  20#include <net/macsec.h>
  21#include <net/dst_metadata.h>
  22#include <linux/phy.h>
  23#include <linux/byteorder/generic.h>
  24#include <linux/if_arp.h>
  25
  26#include <uapi/linux/if_macsec.h>
  27
  28/* SecTAG length = macsec_eth_header without the optional SCI */
  29#define MACSEC_TAG_LEN 6
  30
  31struct macsec_eth_header {
  32	struct ethhdr eth;
  33	/* SecTAG */
  34	u8  tci_an;
  35#if defined(__LITTLE_ENDIAN_BITFIELD)
  36	u8  short_length:6,
  37		  unused:2;
  38#elif defined(__BIG_ENDIAN_BITFIELD)
  39	u8        unused:2,
  40	    short_length:6;
  41#else
  42#error	"Please fix <asm/byteorder.h>"
  43#endif
  44	__be32 packet_number;
  45	u8 secure_channel_id[8]; /* optional */
  46} __packed;
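
/* Editorial sketch (not part of the driver): the on-wire layout implied by
 * struct macsec_eth_header, offsets from the start of the frame:
 *
 *    0..5   h_dest             destination MAC
 *    6..11  h_source           source MAC
 *   12..13  h_proto            0x88e5 (ETH_P_MACSEC)
 *   14      tci_an             TCI flags (V/ES/SC/SCB/E/C) + 2-bit AN
 *   15      short_length       6-bit SL, 2 reserved bits
 *   16..19  packet_number      32-bit PN (lower half of the PN under XPN)
 *   20..27  secure_channel_id  8-byte SCI, present only when TCI.SC is set
 *
 * MACSEC_TAG_LEN = 6 thus covers octets 14..19: the SecTAG minus the
 * optional SCI, as the comment above states.
 */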
  47
  48/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
  49#define MIN_NON_SHORT_LEN 48
  50
  51#define GCM_AES_IV_LEN 12
  52
  53#define for_each_rxsc(secy, sc)				\
  54	for (sc = rcu_dereference_bh(secy->rx_sc);	\
  55	     sc;					\
  56	     sc = rcu_dereference_bh(sc->next))
  57#define for_each_rxsc_rtnl(secy, sc)			\
  58	for (sc = rtnl_dereference(secy->rx_sc);	\
  59	     sc;					\
  60	     sc = rtnl_dereference(sc->next))
  61
  62#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))
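
/* Editorial note: pn_same_half() compares only bit 31, so two PNs are in
 * the "same half" when they lie in the same 2^31-sized range. For example,
 * pn_same_half(0x7fffffff, 0x80000000) is false (the halves differ), while
 * pn_same_half(0x80000000, 0xffffffff) is true. The XPN receive path uses
 * this to decide when the upper 32 bits of a recovered PN must be bumped.
 */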
  63
  64struct gcm_iv_xpn {
  65	union {
  66		u8 short_secure_channel_id[4];
  67		ssci_t ssci;
  68	};
  69	__be64 pn;
  70} __packed;
  71
  72struct gcm_iv {
  73	union {
  74		u8 secure_channel_id[8];
  75		sci_t sci;
  76	};
  77	__be32 pn;
  78};
  79
  80#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
  81
  82struct pcpu_secy_stats {
  83	struct macsec_dev_stats stats;
  84	struct u64_stats_sync syncp;
  85};
  86
  87/**
  88 * struct macsec_dev - private data
  89 * @secy: SecY config
  90 * @real_dev: pointer to underlying netdevice
  91 * @dev_tracker: refcount tracker for @real_dev reference
  92 * @stats: MACsec device stats
  93 * @secys: linked list of SecY's on the underlying device
   94 * @gro_cells: the Generic Receive Offload cells for this device
   95 * @offload: status of offloading on the MACsec device
   96 * @insert_tx_tag: when offloading, the device requires an additional
   97 *	tag to be inserted
  98 */
  99struct macsec_dev {
 100	struct macsec_secy secy;
 101	struct net_device *real_dev;
 102	netdevice_tracker dev_tracker;
 103	struct pcpu_secy_stats __percpu *stats;
 104	struct list_head secys;
 105	struct gro_cells gro_cells;
 106	enum macsec_offload offload;
 107	bool insert_tx_tag;
 108};
 109
 110/**
 111 * struct macsec_rxh_data - rx_handler private argument
 112 * @secys: linked list of SecY's on this underlying device
 113 */
 114struct macsec_rxh_data {
 115	struct list_head secys;
 116};
 117
 118static struct macsec_dev *macsec_priv(const struct net_device *dev)
 119{
 120	return (struct macsec_dev *)netdev_priv(dev);
 121}
 122
 123static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
 124{
 125	return rcu_dereference_bh(dev->rx_handler_data);
 126}
 127
 128static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
 129{
 130	return rtnl_dereference(dev->rx_handler_data);
 131}
 132
 133struct macsec_cb {
 134	struct aead_request *req;
 135	union {
 136		struct macsec_tx_sa *tx_sa;
 137		struct macsec_rx_sa *rx_sa;
 138	};
 139	u8 assoc_num;
 140	bool valid;
 141	bool has_sci;
 142};
 143
 144static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
 145{
 146	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);
 147
 148	if (!sa || !sa->active)
 149		return NULL;
 150
 151	if (!refcount_inc_not_zero(&sa->refcnt))
 152		return NULL;
 153
 154	return sa;
 155}
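
/* Editorial note: the *_get()/*_put() helpers below follow the common
 * RCU + refcount pattern: an object found under rcu_read_lock_bh() is only
 * handed out if refcount_inc_not_zero() wins against a concurrent final
 * put; the final put frees the object via call_rcu() after a grace period.
 */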
 156
 157static void free_rx_sc_rcu(struct rcu_head *head)
 158{
 159	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
 160
 161	free_percpu(rx_sc->stats);
 162	kfree(rx_sc);
 163}
 164
 165static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
 166{
 167	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
 168}
 169
 170static void macsec_rxsc_put(struct macsec_rx_sc *sc)
 171{
 172	if (refcount_dec_and_test(&sc->refcnt))
 173		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
 174}
 175
 176static void free_rxsa(struct rcu_head *head)
 177{
 178	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);
 179
 180	crypto_free_aead(sa->key.tfm);
 181	free_percpu(sa->stats);
 182	kfree(sa);
 183}
 184
 185static void macsec_rxsa_put(struct macsec_rx_sa *sa)
 186{
 187	if (refcount_dec_and_test(&sa->refcnt))
 188		call_rcu(&sa->rcu, free_rxsa);
 189}
 190
 191static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
 192{
 193	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);
 194
 195	if (!sa || !sa->active)
 196		return NULL;
 197
 198	if (!refcount_inc_not_zero(&sa->refcnt))
 199		return NULL;
 200
 201	return sa;
 202}
 203
 204static void free_txsa(struct rcu_head *head)
 205{
 206	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);
 207
 208	crypto_free_aead(sa->key.tfm);
 209	free_percpu(sa->stats);
 210	kfree(sa);
 211}
 212
 213static void macsec_txsa_put(struct macsec_tx_sa *sa)
 214{
 215	if (refcount_dec_and_test(&sa->refcnt))
 216		call_rcu(&sa->rcu, free_txsa);
 217}
 218
 219static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
 220{
 221	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
 222	return (struct macsec_cb *)skb->cb;
 223}
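
/* Editorial note: skb->cb is a 48-byte scratch area, so the BUILD_BUG_ON()
 * above proves at compile time that struct macsec_cb (a request pointer,
 * an SA pointer, the AN and two flags) fits in the control block it
 * aliases.
 */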
 224
 225#define MACSEC_PORT_SCB (0x0000)
 226#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
 227#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)
 228
 229#define MACSEC_GCM_AES_128_SAK_LEN 16
 230#define MACSEC_GCM_AES_256_SAK_LEN 32
 231
 232#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
 233#define DEFAULT_XPN false
 234#define DEFAULT_SEND_SCI true
 235#define DEFAULT_ENCRYPT false
 236#define DEFAULT_ENCODING_SA 0
 237#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))
 238
 239static sci_t make_sci(const u8 *addr, __be16 port)
 240{
 241	sci_t sci;
 242
 243	memcpy(&sci, addr, ETH_ALEN);
 244	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));
 245
 246	return sci;
 247}
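
/* Editorial sketch, guarded out of the build: an SCI is simply the 6-byte
 * station address followed by the 2-byte port number, both kept in network
 * byte order. The MAC address below is an arbitrary illustration and
 * MACSEC_DOC_EXAMPLES is a hypothetical guard.
 */
#ifdef MACSEC_DOC_EXAMPLES
static sci_t example_make_sci(void)
{
	/* locally administered example address, port 1 (cf. MACSEC_PORT_ES) */
	static const u8 addr[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	return make_sci(addr, htons(1));
}
#endif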
 248
 249static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
 250{
 251	sci_t sci;
 252
 253	if (sci_present)
 254		memcpy(&sci, hdr->secure_channel_id,
 255		       sizeof(hdr->secure_channel_id));
 256	else
 257		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);
 258
 259	return sci;
 260}
 261
 262static unsigned int macsec_sectag_len(bool sci_present)
 263{
 264	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
 265}
 266
 267static unsigned int macsec_hdr_len(bool sci_present)
 268{
 269	return macsec_sectag_len(sci_present) + ETH_HLEN;
 270}
 271
 272static unsigned int macsec_extra_len(bool sci_present)
 273{
 274	return macsec_sectag_len(sci_present) + sizeof(__be16);
 275}
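
/* Editorial note, worked through for a frame carrying the SCI:
 * macsec_sectag_len(true) = 6 + 8 = 14, macsec_hdr_len(true) = 14 +
 * ETH_HLEN = 28, and macsec_extra_len(true) = 14 + 2 = 16 -- the extra
 * 2 bytes being the MACsec EtherType that displaces the original one.
 * The 16-byte figure is what MACSEC_NEEDED_HEADROOM reserves below; the
 * ICV is accounted for separately at the tail.
 */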
 276
 277/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
 278static void macsec_fill_sectag(struct macsec_eth_header *h,
 279			       const struct macsec_secy *secy, u32 pn,
 280			       bool sci_present)
 281{
 282	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
 283
 284	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
 285	h->eth.h_proto = htons(ETH_P_MACSEC);
 286
 287	if (sci_present) {
 288		h->tci_an |= MACSEC_TCI_SC;
 289		memcpy(&h->secure_channel_id, &secy->sci,
 290		       sizeof(h->secure_channel_id));
 291	} else {
 292		if (tx_sc->end_station)
 293			h->tci_an |= MACSEC_TCI_ES;
 294		if (tx_sc->scb)
 295			h->tci_an |= MACSEC_TCI_SCB;
 296	}
 297
 298	h->packet_number = htonl(pn);
 299
 300	/* with GCM, C/E clear for !encrypt, both set for encrypt */
 301	if (tx_sc->encrypt)
 302		h->tci_an |= MACSEC_TCI_CONFID;
 303	else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
 304		h->tci_an |= MACSEC_TCI_C;
 305
 306	h->tci_an |= tx_sc->encoding_sa;
 307}
 308
 309static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
 310{
 311	if (data_len < MIN_NON_SHORT_LEN)
 312		h->short_length = data_len;
 313}
 314
  315/* Checks if a MACsec interface is being offloaded to a hardware engine */
 316static bool macsec_is_offloaded(struct macsec_dev *macsec)
 317{
 318	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
 319	    macsec->offload == MACSEC_OFFLOAD_PHY)
 320		return true;
 321
 322	return false;
 323}
 324
 325/* Checks if underlying layers implement MACsec offloading functions. */
 326static bool macsec_check_offload(enum macsec_offload offload,
 327				 struct macsec_dev *macsec)
 328{
 329	if (!macsec || !macsec->real_dev)
 330		return false;
 331
 332	if (offload == MACSEC_OFFLOAD_PHY)
 333		return macsec->real_dev->phydev &&
 334		       macsec->real_dev->phydev->macsec_ops;
 335	else if (offload == MACSEC_OFFLOAD_MAC)
 336		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
 337		       macsec->real_dev->macsec_ops;
 338
 339	return false;
 340}
 341
 342static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
 343						 struct macsec_dev *macsec,
 344						 struct macsec_context *ctx)
 345{
 346	if (ctx) {
 347		memset(ctx, 0, sizeof(*ctx));
 348		ctx->offload = offload;
 349
 350		if (offload == MACSEC_OFFLOAD_PHY)
 351			ctx->phydev = macsec->real_dev->phydev;
 352		else if (offload == MACSEC_OFFLOAD_MAC)
 353			ctx->netdev = macsec->real_dev;
 354	}
 355
 356	if (offload == MACSEC_OFFLOAD_PHY)
 357		return macsec->real_dev->phydev->macsec_ops;
 358	else
 359		return macsec->real_dev->macsec_ops;
 360}
 361
  362/* Returns a pointer to the MACsec ops struct, if any, and updates the
  363 * MACsec context's device reference when a context is provided.
  364 */
 365static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
 366					       struct macsec_context *ctx)
 367{
 368	if (!macsec_check_offload(macsec->offload, macsec))
 369		return NULL;
 370
 371	return __macsec_get_ops(macsec->offload, macsec, ctx);
 372}
 373
 374/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
 375static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
 376{
 377	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
 378	int len = skb->len - 2 * ETH_ALEN;
 379	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;
 380
 381	/* a) It comprises at least 17 octets */
 382	if (skb->len <= 16)
 383		return false;
 384
 385	/* b) MACsec EtherType: already checked */
 386
 387	/* c) V bit is clear */
 388	if (h->tci_an & MACSEC_TCI_VERSION)
 389		return false;
 390
 391	/* d) ES or SCB => !SC */
 392	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
 393	    (h->tci_an & MACSEC_TCI_SC))
 394		return false;
 395
 396	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
 397	if (h->unused)
 398		return false;
 399
  400	/* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
 401	if (!h->packet_number && !xpn)
 402		return false;
 403
 404	/* length check, f) g) h) i) */
 405	if (h->short_length)
 406		return len == extra_len + h->short_length;
 407	return len >= extra_len + MIN_NON_SHORT_LEN;
 408}
 409
 410#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
 411#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
 412
 413static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
 414			       salt_t salt)
 415{
 416	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;
 417
 418	gcm_iv->ssci = ssci ^ salt.ssci;
 419	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
 420}
 421
 422static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
 423{
 424	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;
 425
 426	gcm_iv->sci = sci;
 427	gcm_iv->pn = htonl(pn);
 428}
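
/* Editorial note: both helpers fill the 12-byte GCM nonce
 * (GCM_AES_IV_LEN). Standard MACsec uses SCI(8 bytes) || PN(4 bytes);
 * XPN instead uses (SSCI ^ salt.ssci)(4 bytes) || (64-bit PN ^ salt.pn)
 * (8 bytes), per the IEEE 802.1AEbw-2013 amendment.
 */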
 429
 430static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
 431{
 432	return (struct macsec_eth_header *)skb_mac_header(skb);
 433}
 434
 435static void __macsec_pn_wrapped(struct macsec_secy *secy,
 436				struct macsec_tx_sa *tx_sa)
 437{
 438	pr_debug("PN wrapped, transitioning to !oper\n");
 439	tx_sa->active = false;
 440	if (secy->protect_frames)
 441		secy->operational = false;
 442}
 443
 444void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
 445{
 446	spin_lock_bh(&tx_sa->lock);
 447	__macsec_pn_wrapped(secy, tx_sa);
 448	spin_unlock_bh(&tx_sa->lock);
 449}
 450EXPORT_SYMBOL_GPL(macsec_pn_wrapped);
 451
 452static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
 453			    struct macsec_secy *secy)
 454{
 455	pn_t pn;
 456
 457	spin_lock_bh(&tx_sa->lock);
 458
 459	pn = tx_sa->next_pn_halves;
 460	if (secy->xpn)
 461		tx_sa->next_pn++;
 462	else
 463		tx_sa->next_pn_halves.lower++;
 464
 465	if (tx_sa->next_pn == 0)
 466		__macsec_pn_wrapped(secy, tx_sa);
 467	spin_unlock_bh(&tx_sa->lock);
 468
 469	return pn;
 470}
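
/* Editorial note: tx_sa_update_pn() returns the PN to use for the current
 * frame and post-increments the stored next_pn (the full 64 bits under
 * XPN, only the lower 32 bits otherwise). A wrap to zero exhausts the SA:
 * it is deactivated and, if protect_frames is set, the SecY becomes
 * !operational until a fresh SA is installed.
 */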
 471
 472static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
 473{
 474	struct macsec_dev *macsec = netdev_priv(dev);
 475
 476	skb->dev = macsec->real_dev;
 477	skb_reset_mac_header(skb);
 478	skb->protocol = eth_hdr(skb)->h_proto;
 479}
 480
 481static unsigned int macsec_msdu_len(struct sk_buff *skb)
 482{
 483	struct macsec_dev *macsec = macsec_priv(skb->dev);
 484	struct macsec_secy *secy = &macsec->secy;
 485	bool sci_present = macsec_skb_cb(skb)->has_sci;
 486
 487	return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
 488}
 489
 490static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
 491			    struct macsec_tx_sa *tx_sa)
 492{
 493	unsigned int msdu_len = macsec_msdu_len(skb);
 494	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
 495
 496	u64_stats_update_begin(&txsc_stats->syncp);
 497	if (tx_sc->encrypt) {
 498		txsc_stats->stats.OutOctetsEncrypted += msdu_len;
 499		txsc_stats->stats.OutPktsEncrypted++;
 500		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
 501	} else {
 502		txsc_stats->stats.OutOctetsProtected += msdu_len;
 503		txsc_stats->stats.OutPktsProtected++;
 504		this_cpu_inc(tx_sa->stats->OutPktsProtected);
 505	}
 506	u64_stats_update_end(&txsc_stats->syncp);
 507}
 508
 509static void count_tx(struct net_device *dev, int ret, int len)
 510{
 511	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN))
 512		dev_sw_netstats_tx_add(dev, 1, len);
 513}
 514
 515static void macsec_encrypt_done(void *data, int err)
 516{
 517	struct sk_buff *skb = data;
 518	struct net_device *dev = skb->dev;
 519	struct macsec_dev *macsec = macsec_priv(dev);
 520	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
 521	int len, ret;
 522
 523	aead_request_free(macsec_skb_cb(skb)->req);
 524
 525	rcu_read_lock_bh();
 526	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
 527	/* packet is encrypted/protected so tx_bytes must be calculated */
 528	len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
 529	macsec_encrypt_finish(skb, dev);
 530	ret = dev_queue_xmit(skb);
 531	count_tx(dev, ret, len);
 532	rcu_read_unlock_bh();
 533
 534	macsec_txsa_put(sa);
 535	dev_put(dev);
 536}
 537
 538static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
 539					     unsigned char **iv,
 540					     struct scatterlist **sg,
 541					     int num_frags)
 542{
 543	size_t size, iv_offset, sg_offset;
 544	struct aead_request *req;
 545	void *tmp;
 546
 547	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
 548	iv_offset = size;
 549	size += GCM_AES_IV_LEN;
 550
 551	size = ALIGN(size, __alignof__(struct scatterlist));
 552	sg_offset = size;
 553	size += sizeof(struct scatterlist) * num_frags;
 554
 555	tmp = kmalloc(size, GFP_ATOMIC);
 556	if (!tmp)
 557		return NULL;
 558
 559	*iv = (unsigned char *)(tmp + iv_offset);
 560	*sg = (struct scatterlist *)(tmp + sg_offset);
 561	req = tmp;
 562
 563	aead_request_set_tfm(req, tfm);
 564
 565	return req;
 566}
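
/* Editorial note: macsec_alloc_req() packs everything one crypto request
 * needs into a single GFP_ATOMIC allocation, roughly:
 *
 *   [ struct aead_request | tfm reqsize | IV (12) | pad | sg[num_frags] ]
 *
 * so the datapath pays for one kmalloc()/kfree() pair instead of three.
 */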
 567
 568static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 569				      struct net_device *dev)
 570{
 571	int ret;
 572	struct scatterlist *sg;
 573	struct sk_buff *trailer;
 574	unsigned char *iv;
 575	struct ethhdr *eth;
 576	struct macsec_eth_header *hh;
 577	size_t unprotected_len;
 578	struct aead_request *req;
 579	struct macsec_secy *secy;
 580	struct macsec_tx_sc *tx_sc;
 581	struct macsec_tx_sa *tx_sa;
 582	struct macsec_dev *macsec = macsec_priv(dev);
 583	bool sci_present;
 584	pn_t pn;
 585
 586	secy = &macsec->secy;
 587	tx_sc = &secy->tx_sc;
 588
 589	/* 10.5.1 TX SA assignment */
 590	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
 591	if (!tx_sa) {
 592		secy->operational = false;
 593		kfree_skb(skb);
 594		return ERR_PTR(-EINVAL);
 595	}
 596
 597	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
 598		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
 599		struct sk_buff *nskb = skb_copy_expand(skb,
 600						       MACSEC_NEEDED_HEADROOM,
 601						       MACSEC_NEEDED_TAILROOM,
 602						       GFP_ATOMIC);
 603		if (likely(nskb)) {
 604			consume_skb(skb);
 605			skb = nskb;
 606		} else {
 607			macsec_txsa_put(tx_sa);
 608			kfree_skb(skb);
 609			return ERR_PTR(-ENOMEM);
 610		}
 611	} else {
 612		skb = skb_unshare(skb, GFP_ATOMIC);
 613		if (!skb) {
 614			macsec_txsa_put(tx_sa);
 615			return ERR_PTR(-ENOMEM);
 616		}
 617	}
 618
 619	unprotected_len = skb->len;
 620	eth = eth_hdr(skb);
 621	sci_present = macsec_send_sci(secy);
 622	hh = skb_push(skb, macsec_extra_len(sci_present));
 623	memmove(hh, eth, 2 * ETH_ALEN);
 624
 625	pn = tx_sa_update_pn(tx_sa, secy);
 626	if (pn.full64 == 0) {
 627		macsec_txsa_put(tx_sa);
 628		kfree_skb(skb);
 629		return ERR_PTR(-ENOLINK);
 630	}
 631	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
 632	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
 633
 634	skb_put(skb, secy->icv_len);
 635
 636	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
 637		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
 638
 639		u64_stats_update_begin(&secy_stats->syncp);
 640		secy_stats->stats.OutPktsTooLong++;
 641		u64_stats_update_end(&secy_stats->syncp);
 642
 643		macsec_txsa_put(tx_sa);
 644		kfree_skb(skb);
 645		return ERR_PTR(-EINVAL);
 646	}
 647
 648	ret = skb_cow_data(skb, 0, &trailer);
 649	if (unlikely(ret < 0)) {
 650		macsec_txsa_put(tx_sa);
 651		kfree_skb(skb);
 652		return ERR_PTR(ret);
 653	}
 654
 655	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
 656	if (!req) {
 657		macsec_txsa_put(tx_sa);
 658		kfree_skb(skb);
 659		return ERR_PTR(-ENOMEM);
 660	}
 661
 662	if (secy->xpn)
 663		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
 664	else
 665		macsec_fill_iv(iv, secy->sci, pn.lower);
 666
 667	sg_init_table(sg, ret);
 668	ret = skb_to_sgvec(skb, sg, 0, skb->len);
 669	if (unlikely(ret < 0)) {
 670		aead_request_free(req);
 671		macsec_txsa_put(tx_sa);
 672		kfree_skb(skb);
 673		return ERR_PTR(ret);
 674	}
 675
 676	if (tx_sc->encrypt) {
 677		int len = skb->len - macsec_hdr_len(sci_present) -
 678			  secy->icv_len;
 679		aead_request_set_crypt(req, sg, sg, len, iv);
 680		aead_request_set_ad(req, macsec_hdr_len(sci_present));
 681	} else {
 682		aead_request_set_crypt(req, sg, sg, 0, iv);
 683		aead_request_set_ad(req, skb->len - secy->icv_len);
 684	}
 685
 686	macsec_skb_cb(skb)->req = req;
 687	macsec_skb_cb(skb)->tx_sa = tx_sa;
 688	macsec_skb_cb(skb)->has_sci = sci_present;
 689	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
 690
 691	dev_hold(skb->dev);
 692	ret = crypto_aead_encrypt(req);
 693	if (ret == -EINPROGRESS) {
 694		return ERR_PTR(ret);
 695	} else if (ret != 0) {
 696		dev_put(skb->dev);
 697		kfree_skb(skb);
 698		aead_request_free(req);
 699		macsec_txsa_put(tx_sa);
 700		return ERR_PTR(-EINVAL);
 701	}
 702
 703	dev_put(skb->dev);
 704	aead_request_free(req);
 705	macsec_txsa_put(tx_sa);
 706
 707	return skb;
 708}
 709
 710static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
 711{
 712	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
 713	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
 714	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
 715	u32 lowest_pn = 0;
 716
 717	spin_lock(&rx_sa->lock);
 718	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
 719		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;
 720
 721	/* Now perform replay protection check again
 722	 * (see IEEE 802.1AE-2006 figure 10-5)
 723	 */
 724	if (secy->replay_protect && pn < lowest_pn &&
 725	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
 726		spin_unlock(&rx_sa->lock);
 727		u64_stats_update_begin(&rxsc_stats->syncp);
 728		rxsc_stats->stats.InPktsLate++;
 729		u64_stats_update_end(&rxsc_stats->syncp);
 730		DEV_STATS_INC(secy->netdev, rx_dropped);
 731		return false;
 732	}
 733
 734	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
 735		unsigned int msdu_len = macsec_msdu_len(skb);
 736		u64_stats_update_begin(&rxsc_stats->syncp);
 737		if (hdr->tci_an & MACSEC_TCI_E)
 738			rxsc_stats->stats.InOctetsDecrypted += msdu_len;
 739		else
 740			rxsc_stats->stats.InOctetsValidated += msdu_len;
 741		u64_stats_update_end(&rxsc_stats->syncp);
 742	}
 743
 744	if (!macsec_skb_cb(skb)->valid) {
 745		spin_unlock(&rx_sa->lock);
 746
 747		/* 10.6.5 */
 748		if (hdr->tci_an & MACSEC_TCI_C ||
 749		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
 750			u64_stats_update_begin(&rxsc_stats->syncp);
 751			rxsc_stats->stats.InPktsNotValid++;
 752			u64_stats_update_end(&rxsc_stats->syncp);
 753			this_cpu_inc(rx_sa->stats->InPktsNotValid);
 754			DEV_STATS_INC(secy->netdev, rx_errors);
 755			return false;
 756		}
 757
 758		u64_stats_update_begin(&rxsc_stats->syncp);
 759		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
 760			rxsc_stats->stats.InPktsInvalid++;
 761			this_cpu_inc(rx_sa->stats->InPktsInvalid);
 762		} else if (pn < lowest_pn) {
 763			rxsc_stats->stats.InPktsDelayed++;
 764		} else {
 765			rxsc_stats->stats.InPktsUnchecked++;
 766		}
 767		u64_stats_update_end(&rxsc_stats->syncp);
 768	} else {
 769		u64_stats_update_begin(&rxsc_stats->syncp);
 770		if (pn < lowest_pn) {
 771			rxsc_stats->stats.InPktsDelayed++;
 772		} else {
 773			rxsc_stats->stats.InPktsOK++;
 774			this_cpu_inc(rx_sa->stats->InPktsOK);
 775		}
 776		u64_stats_update_end(&rxsc_stats->syncp);
 777
  778		/* "pn + 1 >" rather than "pn >=", to support PN wraparound in XPN */
 779		if (pn + 1 > rx_sa->next_pn_halves.lower) {
 780			rx_sa->next_pn_halves.lower = pn + 1;
 781		} else if (secy->xpn &&
 782			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
 783			rx_sa->next_pn_halves.upper++;
 784			rx_sa->next_pn_halves.lower = pn + 1;
 785		}
 786
 787		spin_unlock(&rx_sa->lock);
 788	}
 789
 790	return true;
 791}
 792
 793static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
 794{
 795	skb->pkt_type = PACKET_HOST;
 796	skb->protocol = eth_type_trans(skb, dev);
 797
 798	skb_reset_network_header(skb);
 799	if (!skb_transport_header_was_set(skb))
 800		skb_reset_transport_header(skb);
 801	skb_reset_mac_len(skb);
 802}
 803
 804static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
 805{
 806	skb->ip_summed = CHECKSUM_NONE;
 807	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
 808	skb_pull(skb, hdr_len);
 809	pskb_trim_unique(skb, skb->len - icv_len);
 810}
 811
 812static void count_rx(struct net_device *dev, int len)
 813{
 814	dev_sw_netstats_rx_add(dev, len);
 815}
 816
 817static void macsec_decrypt_done(void *data, int err)
 818{
 819	struct sk_buff *skb = data;
 820	struct net_device *dev = skb->dev;
 821	struct macsec_dev *macsec = macsec_priv(dev);
 822	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
 823	struct macsec_rx_sc *rx_sc = rx_sa->sc;
 824	int len;
 825	u32 pn;
 826
 827	aead_request_free(macsec_skb_cb(skb)->req);
 828
 829	if (!err)
 830		macsec_skb_cb(skb)->valid = true;
 831
 832	rcu_read_lock_bh();
 833	pn = ntohl(macsec_ethhdr(skb)->packet_number);
 834	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
 835		rcu_read_unlock_bh();
 836		kfree_skb(skb);
 837		goto out;
 838	}
 839
 840	macsec_finalize_skb(skb, macsec->secy.icv_len,
 841			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
 842	len = skb->len;
 843	macsec_reset_skb(skb, macsec->secy.netdev);
 844
 845	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
 846		count_rx(dev, len);
 847
 848	rcu_read_unlock_bh();
 849
 850out:
 851	macsec_rxsa_put(rx_sa);
 852	macsec_rxsc_put(rx_sc);
 853	dev_put(dev);
 854}
 855
 856static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 857				      struct net_device *dev,
 858				      struct macsec_rx_sa *rx_sa,
 859				      sci_t sci,
 860				      struct macsec_secy *secy)
 861{
 862	int ret;
 863	struct scatterlist *sg;
 864	struct sk_buff *trailer;
 865	unsigned char *iv;
 866	struct aead_request *req;
 867	struct macsec_eth_header *hdr;
 868	u32 hdr_pn;
 869	u16 icv_len = secy->icv_len;
 870
 871	macsec_skb_cb(skb)->valid = false;
 872	skb = skb_share_check(skb, GFP_ATOMIC);
 873	if (!skb)
 874		return ERR_PTR(-ENOMEM);
 875
 876	ret = skb_cow_data(skb, 0, &trailer);
 877	if (unlikely(ret < 0)) {
 878		kfree_skb(skb);
 879		return ERR_PTR(ret);
 880	}
 881	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
 882	if (!req) {
 883		kfree_skb(skb);
 884		return ERR_PTR(-ENOMEM);
 885	}
 886
 887	hdr = (struct macsec_eth_header *)skb->data;
 888	hdr_pn = ntohl(hdr->packet_number);
 889
 890	if (secy->xpn) {
 891		pn_t recovered_pn = rx_sa->next_pn_halves;
 892
 893		recovered_pn.lower = hdr_pn;
 894		if (hdr_pn < rx_sa->next_pn_halves.lower &&
 895		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
 896			recovered_pn.upper++;
 897
 898		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
 899				   rx_sa->key.salt);
 900	} else {
 901		macsec_fill_iv(iv, sci, hdr_pn);
 902	}
 903
 904	sg_init_table(sg, ret);
 905	ret = skb_to_sgvec(skb, sg, 0, skb->len);
 906	if (unlikely(ret < 0)) {
 907		aead_request_free(req);
 908		kfree_skb(skb);
 909		return ERR_PTR(ret);
 910	}
 911
 912	if (hdr->tci_an & MACSEC_TCI_E) {
 913		/* confidentiality: ethernet + macsec header
 914		 * authenticated, encrypted payload
 915		 */
 916		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);
 917
 918		aead_request_set_crypt(req, sg, sg, len, iv);
 919		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
 920		skb = skb_unshare(skb, GFP_ATOMIC);
 921		if (!skb) {
 922			aead_request_free(req);
 923			return ERR_PTR(-ENOMEM);
 924		}
 925	} else {
 926		/* integrity only: all headers + data authenticated */
 927		aead_request_set_crypt(req, sg, sg, icv_len, iv);
 928		aead_request_set_ad(req, skb->len - icv_len);
 929	}
 930
 931	macsec_skb_cb(skb)->req = req;
 932	skb->dev = dev;
 933	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);
 934
 935	dev_hold(dev);
 936	ret = crypto_aead_decrypt(req);
 937	if (ret == -EINPROGRESS) {
 938		return ERR_PTR(ret);
 939	} else if (ret != 0) {
 940		/* decryption/authentication failed
 941		 * 10.6 if validateFrames is disabled, deliver anyway
 942		 */
 943		if (ret != -EBADMSG) {
 944			kfree_skb(skb);
 945			skb = ERR_PTR(ret);
 946		}
 947	} else {
 948		macsec_skb_cb(skb)->valid = true;
 949	}
 950	dev_put(dev);
 951
 952	aead_request_free(req);
 953
 954	return skb;
 955}
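
/* Editorial note on the AAD split above: with TCI.E set (confidentiality),
 * the Ethernet + SecTAG header is the associated data and the payload plus
 * ICV is passed as ciphertext; with integrity only, everything up to the
 * ICV is associated data and only the ICV itself is processed (verified).
 */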
 956
 957static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
 958{
 959	struct macsec_rx_sc *rx_sc;
 960
 961	for_each_rxsc(secy, rx_sc) {
 962		if (rx_sc->sci == sci)
 963			return rx_sc;
 964	}
 965
 966	return NULL;
 967}
 968
 969static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
 970{
 971	struct macsec_rx_sc *rx_sc;
 972
 973	for_each_rxsc_rtnl(secy, rx_sc) {
 974		if (rx_sc->sci == sci)
 975			return rx_sc;
 976	}
 977
 978	return NULL;
 979}
 980
 981static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
 982{
 983	/* Deliver to the uncontrolled port by default */
 984	enum rx_handler_result ret = RX_HANDLER_PASS;
 985	struct ethhdr *hdr = eth_hdr(skb);
 986	struct metadata_dst *md_dst;
 987	struct macsec_rxh_data *rxd;
 988	struct macsec_dev *macsec;
 989	bool is_macsec_md_dst;
 990
 991	rcu_read_lock();
 992	rxd = macsec_data_rcu(skb->dev);
 993	md_dst = skb_metadata_dst(skb);
 994	is_macsec_md_dst = md_dst && md_dst->type == METADATA_MACSEC;
 995
 996	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
 997		struct sk_buff *nskb;
 998		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
 999		struct net_device *ndev = macsec->secy.netdev;
1000
1001		/* If h/w offloading is enabled, HW decodes frames and strips
1002		 * the SecTAG, so we have to deduce which port to deliver to.
1003		 */
1004		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
1005			const struct macsec_ops *ops;
1006
1007			ops = macsec_get_ops(macsec, NULL);
1008
1009			if (ops->rx_uses_md_dst && !is_macsec_md_dst)
1010				continue;
1011
1012			if (is_macsec_md_dst) {
1013				struct macsec_rx_sc *rx_sc;
1014
1015				/* All drivers that implement MACsec offload
1016				 * support using skb metadata destinations must
1017				 * indicate that they do so.
1018				 */
1019				DEBUG_NET_WARN_ON_ONCE(!ops->rx_uses_md_dst);
1020				rx_sc = find_rx_sc(&macsec->secy,
1021						   md_dst->u.macsec_info.sci);
1022				if (!rx_sc)
1023					continue;
1024				/* device indicated macsec offload occurred */
1025				skb->dev = ndev;
1026				skb->pkt_type = PACKET_HOST;
1027				eth_skb_pkt_type(skb, ndev);
1028				ret = RX_HANDLER_ANOTHER;
1029				goto out;
1030			}
1031
1032			/* This datapath is insecure because it is unable to
1033			 * enforce isolation of broadcast/multicast traffic and
1034			 * unicast traffic with promiscuous mode on the macsec
1035			 * netdev. Since the core stack has no mechanism to
1036			 * check that the hardware did indeed receive MACsec
1037			 * traffic, it is possible that the response handling
1038			 * done by the MACsec port was to a plaintext packet.
1039			 * This violates the MACsec protocol standard.
1040			 */
1041			if (ether_addr_equal_64bits(hdr->h_dest,
1042						    ndev->dev_addr)) {
1043				/* exact match, divert skb to this port */
1044				skb->dev = ndev;
1045				skb->pkt_type = PACKET_HOST;
1046				ret = RX_HANDLER_ANOTHER;
1047				goto out;
1048			} else if (is_multicast_ether_addr_64bits(
1049					   hdr->h_dest)) {
1050				/* multicast frame, deliver on this port too */
1051				nskb = skb_clone(skb, GFP_ATOMIC);
1052				if (!nskb)
1053					break;
1054
1055				nskb->dev = ndev;
1056				eth_skb_pkt_type(nskb, ndev);
1057
1058				__netif_rx(nskb);
1059			} else if (ndev->flags & IFF_PROMISC) {
1060				skb->dev = ndev;
1061				skb->pkt_type = PACKET_HOST;
1062				ret = RX_HANDLER_ANOTHER;
1063				goto out;
1064			}
1065
1066			continue;
1067		}
1068
1069		/* 10.6 If the management control validateFrames is not
1070		 * Strict, frames without a SecTAG are received, counted, and
1071		 * delivered to the Controlled Port
1072		 */
1073		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
1074			u64_stats_update_begin(&secy_stats->syncp);
1075			secy_stats->stats.InPktsNoTag++;
1076			u64_stats_update_end(&secy_stats->syncp);
1077			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
1078			continue;
1079		}
1080
1081		/* deliver on this port */
1082		nskb = skb_clone(skb, GFP_ATOMIC);
1083		if (!nskb)
1084			break;
1085
1086		nskb->dev = ndev;
1087
1088		if (__netif_rx(nskb) == NET_RX_SUCCESS) {
1089			u64_stats_update_begin(&secy_stats->syncp);
1090			secy_stats->stats.InPktsUntagged++;
1091			u64_stats_update_end(&secy_stats->syncp);
1092		}
1093	}
1094
1095out:
1096	rcu_read_unlock();
1097	return ret;
1098}
1099
1100static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
1101{
1102	struct sk_buff *skb = *pskb;
1103	struct net_device *dev = skb->dev;
1104	struct macsec_eth_header *hdr;
1105	struct macsec_secy *secy = NULL;
1106	struct macsec_rx_sc *rx_sc;
1107	struct macsec_rx_sa *rx_sa;
1108	struct macsec_rxh_data *rxd;
1109	struct macsec_dev *macsec;
1110	unsigned int len;
1111	sci_t sci;
1112	u32 hdr_pn;
1113	bool cbit;
1114	struct pcpu_rx_sc_stats *rxsc_stats;
1115	struct pcpu_secy_stats *secy_stats;
1116	bool pulled_sci;
1117	int ret;
1118
1119	if (skb_headroom(skb) < ETH_HLEN)
1120		goto drop_direct;
1121
1122	hdr = macsec_ethhdr(skb);
1123	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
1124		return handle_not_macsec(skb);
1125
1126	skb = skb_unshare(skb, GFP_ATOMIC);
1127	*pskb = skb;
1128	if (!skb)
1129		return RX_HANDLER_CONSUMED;
1130
1131	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
1132	if (!pulled_sci) {
1133		if (!pskb_may_pull(skb, macsec_extra_len(false)))
1134			goto drop_direct;
1135	}
1136
1137	hdr = macsec_ethhdr(skb);
1138
1139	/* Frames with a SecTAG that has the TCI E bit set but the C
1140	 * bit clear are discarded, as this reserved encoding is used
1141	 * to identify frames with a SecTAG that are not to be
1142	 * delivered to the Controlled Port.
1143	 */
1144	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
1145		return RX_HANDLER_PASS;
1146
1147	/* now, pull the extra length */
1148	if (hdr->tci_an & MACSEC_TCI_SC) {
1149		if (!pulled_sci)
1150			goto drop_direct;
1151	}
1152
1153	/* ethernet header is part of crypto processing */
1154	skb_push(skb, ETH_HLEN);
1155
1156	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
1157	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
1158	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);
1159
1160	rcu_read_lock();
1161	rxd = macsec_data_rcu(skb->dev);
1162
1163	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1164		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
1165
1166		sc = sc ? macsec_rxsc_get(sc) : NULL;
1167
1168		if (sc) {
1169			secy = &macsec->secy;
1170			rx_sc = sc;
1171			break;
1172		}
1173	}
1174
1175	if (!secy)
1176		goto nosci;
1177
1178	dev = secy->netdev;
1179	macsec = macsec_priv(dev);
1180	secy_stats = this_cpu_ptr(macsec->stats);
1181	rxsc_stats = this_cpu_ptr(rx_sc->stats);
1182
1183	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
1184		u64_stats_update_begin(&secy_stats->syncp);
1185		secy_stats->stats.InPktsBadTag++;
1186		u64_stats_update_end(&secy_stats->syncp);
1187		DEV_STATS_INC(secy->netdev, rx_errors);
1188		goto drop_nosa;
1189	}
1190
1191	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
1192	if (!rx_sa) {
1193		/* 10.6.1 if the SA is not in use */
1194
1195		/* If validateFrames is Strict or the C bit in the
1196		 * SecTAG is set, discard
1197		 */
1198		if (hdr->tci_an & MACSEC_TCI_C ||
1199		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
1200			u64_stats_update_begin(&rxsc_stats->syncp);
1201			rxsc_stats->stats.InPktsNotUsingSA++;
1202			u64_stats_update_end(&rxsc_stats->syncp);
1203			DEV_STATS_INC(secy->netdev, rx_errors);
1204			goto drop_nosa;
1205		}
1206
1207		/* not Strict, the frame (with the SecTAG and ICV
1208		 * removed) is delivered to the Controlled Port.
1209		 */
1210		u64_stats_update_begin(&rxsc_stats->syncp);
1211		rxsc_stats->stats.InPktsUnusedSA++;
1212		u64_stats_update_end(&rxsc_stats->syncp);
1213		goto deliver;
1214	}
1215
1216	/* First, PN check to avoid decrypting obviously wrong packets */
1217	hdr_pn = ntohl(hdr->packet_number);
1218	if (secy->replay_protect) {
1219		bool late;
1220
1221		spin_lock(&rx_sa->lock);
1222		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
1223		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);
1224
1225		if (secy->xpn)
1226			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
1227		spin_unlock(&rx_sa->lock);
1228
1229		if (late) {
1230			u64_stats_update_begin(&rxsc_stats->syncp);
1231			rxsc_stats->stats.InPktsLate++;
1232			u64_stats_update_end(&rxsc_stats->syncp);
1233			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
1234			goto drop;
1235		}
1236	}
1237
1238	macsec_skb_cb(skb)->rx_sa = rx_sa;
1239
 1240	/* validateFrames Disabled && text unchanged (C bit clear) => skip validation */
1241	if (hdr->tci_an & MACSEC_TCI_C ||
1242	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
1243		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
1244
1245	if (IS_ERR(skb)) {
1246		/* the decrypt callback needs the reference */
1247		if (PTR_ERR(skb) != -EINPROGRESS) {
1248			macsec_rxsa_put(rx_sa);
1249			macsec_rxsc_put(rx_sc);
1250		}
1251		rcu_read_unlock();
1252		*pskb = NULL;
1253		return RX_HANDLER_CONSUMED;
1254	}
1255
1256	if (!macsec_post_decrypt(skb, secy, hdr_pn))
1257		goto drop;
1258
1259deliver:
1260	macsec_finalize_skb(skb, secy->icv_len,
1261			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
1262	len = skb->len;
1263	macsec_reset_skb(skb, secy->netdev);
1264
1265	if (rx_sa)
1266		macsec_rxsa_put(rx_sa);
1267	macsec_rxsc_put(rx_sc);
1268
1269	skb_orphan(skb);
1270	ret = gro_cells_receive(&macsec->gro_cells, skb);
1271	if (ret == NET_RX_SUCCESS)
1272		count_rx(dev, len);
1273	else
1274		DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
1275
1276	rcu_read_unlock();
1277
1278	*pskb = NULL;
1279	return RX_HANDLER_CONSUMED;
1280
1281drop:
1282	macsec_rxsa_put(rx_sa);
1283drop_nosa:
1284	macsec_rxsc_put(rx_sc);
1285	rcu_read_unlock();
1286drop_direct:
1287	kfree_skb(skb);
1288	*pskb = NULL;
1289	return RX_HANDLER_CONSUMED;
1290
1291nosci:
1292	/* 10.6.1 if the SC is not found */
1293	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
1294	if (!cbit)
1295		macsec_finalize_skb(skb, MACSEC_DEFAULT_ICV_LEN,
1296				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
1297
1298	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1299		struct sk_buff *nskb;
1300
1301		secy_stats = this_cpu_ptr(macsec->stats);
1302
1303		/* If validateFrames is Strict or the C bit in the
1304		 * SecTAG is set, discard
1305		 */
1306		if (cbit ||
1307		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
1308			u64_stats_update_begin(&secy_stats->syncp);
1309			secy_stats->stats.InPktsNoSCI++;
1310			u64_stats_update_end(&secy_stats->syncp);
1311			DEV_STATS_INC(macsec->secy.netdev, rx_errors);
1312			continue;
1313		}
1314
1315		/* not strict, the frame (with the SecTAG and ICV
1316		 * removed) is delivered to the Controlled Port.
1317		 */
1318		nskb = skb_clone(skb, GFP_ATOMIC);
1319		if (!nskb)
1320			break;
1321
1322		macsec_reset_skb(nskb, macsec->secy.netdev);
1323
1324		ret = __netif_rx(nskb);
1325		if (ret == NET_RX_SUCCESS) {
1326			u64_stats_update_begin(&secy_stats->syncp);
1327			secy_stats->stats.InPktsUnknownSCI++;
1328			u64_stats_update_end(&secy_stats->syncp);
1329		} else {
1330			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
1331		}
1332	}
1333
1334	rcu_read_unlock();
1335	*pskb = skb;
1336	return RX_HANDLER_PASS;
1337}
1338
1339static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
1340{
1341	struct crypto_aead *tfm;
1342	int ret;
1343
1344	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
1345
1346	if (IS_ERR(tfm))
1347		return tfm;
1348
1349	ret = crypto_aead_setkey(tfm, key, key_len);
1350	if (ret < 0)
1351		goto fail;
1352
1353	ret = crypto_aead_setauthsize(tfm, icv_len);
1354	if (ret < 0)
1355		goto fail;
1356
1357	return tfm;
1358fail:
1359	crypto_free_aead(tfm);
1360	return ERR_PTR(ret);
1361}
1362
1363static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
1364		      int icv_len)
1365{
1366	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
1367	if (!rx_sa->stats)
1368		return -ENOMEM;
1369
1370	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
1371	if (IS_ERR(rx_sa->key.tfm)) {
1372		free_percpu(rx_sa->stats);
1373		return PTR_ERR(rx_sa->key.tfm);
1374	}
1375
1376	rx_sa->ssci = MACSEC_UNDEF_SSCI;
1377	rx_sa->active = false;
1378	rx_sa->next_pn = 1;
1379	refcount_set(&rx_sa->refcnt, 1);
1380	spin_lock_init(&rx_sa->lock);
1381
1382	return 0;
1383}
1384
1385static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
1386{
1387	rx_sa->active = false;
1388
1389	macsec_rxsa_put(rx_sa);
1390}
1391
1392static void free_rx_sc(struct macsec_rx_sc *rx_sc)
1393{
1394	int i;
1395
1396	for (i = 0; i < MACSEC_NUM_AN; i++) {
1397		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);
1398
1399		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
1400		if (sa)
1401			clear_rx_sa(sa);
1402	}
1403
1404	macsec_rxsc_put(rx_sc);
1405}
1406
1407static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
1408{
1409	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;
1410
1411	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
1412	     rx_sc;
1413	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
1414		if (rx_sc->sci == sci) {
1415			if (rx_sc->active)
1416				secy->n_rx_sc--;
1417			rcu_assign_pointer(*rx_scp, rx_sc->next);
1418			return rx_sc;
1419		}
1420	}
1421
1422	return NULL;
1423}
1424
1425static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci,
1426					 bool active)
1427{
1428	struct macsec_rx_sc *rx_sc;
1429	struct macsec_dev *macsec;
1430	struct net_device *real_dev = macsec_priv(dev)->real_dev;
1431	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
1432	struct macsec_secy *secy;
1433
1434	list_for_each_entry(macsec, &rxd->secys, secys) {
1435		if (find_rx_sc_rtnl(&macsec->secy, sci))
1436			return ERR_PTR(-EEXIST);
1437	}
1438
1439	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
1440	if (!rx_sc)
1441		return ERR_PTR(-ENOMEM);
1442
1443	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
1444	if (!rx_sc->stats) {
1445		kfree(rx_sc);
1446		return ERR_PTR(-ENOMEM);
1447	}
1448
1449	rx_sc->sci = sci;
1450	rx_sc->active = active;
1451	refcount_set(&rx_sc->refcnt, 1);
1452
1453	secy = &macsec_priv(dev)->secy;
1454	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
1455	rcu_assign_pointer(secy->rx_sc, rx_sc);
1456
1457	if (rx_sc->active)
1458		secy->n_rx_sc++;
1459
1460	return rx_sc;
1461}
1462
1463static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
1464		      int icv_len)
1465{
1466	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
1467	if (!tx_sa->stats)
1468		return -ENOMEM;
1469
1470	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
1471	if (IS_ERR(tx_sa->key.tfm)) {
1472		free_percpu(tx_sa->stats);
1473		return PTR_ERR(tx_sa->key.tfm);
1474	}
1475
1476	tx_sa->ssci = MACSEC_UNDEF_SSCI;
1477	tx_sa->active = false;
1478	refcount_set(&tx_sa->refcnt, 1);
1479	spin_lock_init(&tx_sa->lock);
1480
1481	return 0;
1482}
1483
1484static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
1485{
1486	tx_sa->active = false;
1487
1488	macsec_txsa_put(tx_sa);
1489}
1490
1491static struct genl_family macsec_fam;
1492
1493static struct net_device *get_dev_from_nl(struct net *net,
1494					  struct nlattr **attrs)
1495{
1496	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
1497	struct net_device *dev;
1498
1499	dev = __dev_get_by_index(net, ifindex);
1500	if (!dev)
1501		return ERR_PTR(-ENODEV);
1502
1503	if (!netif_is_macsec(dev))
1504		return ERR_PTR(-ENODEV);
1505
1506	return dev;
1507}
1508
1509static enum macsec_offload nla_get_offload(const struct nlattr *nla)
1510{
1511	return (__force enum macsec_offload)nla_get_u8(nla);
1512}
1513
1514static sci_t nla_get_sci(const struct nlattr *nla)
1515{
1516	return (__force sci_t)nla_get_u64(nla);
1517}
1518
1519static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
1520		       int padattr)
1521{
1522	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
1523}
1524
1525static ssci_t nla_get_ssci(const struct nlattr *nla)
1526{
1527	return (__force ssci_t)nla_get_u32(nla);
1528}
1529
1530static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
1531{
 1532	return nla_put_u32(skb, attrtype, (__force u32)value);
1533}
1534
1535static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
1536					     struct nlattr **attrs,
1537					     struct nlattr **tb_sa,
1538					     struct net_device **devp,
1539					     struct macsec_secy **secyp,
1540					     struct macsec_tx_sc **scp,
1541					     u8 *assoc_num)
1542{
1543	struct net_device *dev;
1544	struct macsec_secy *secy;
1545	struct macsec_tx_sc *tx_sc;
1546	struct macsec_tx_sa *tx_sa;
1547
1548	if (!tb_sa[MACSEC_SA_ATTR_AN])
1549		return ERR_PTR(-EINVAL);
1550
1551	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1552
1553	dev = get_dev_from_nl(net, attrs);
1554	if (IS_ERR(dev))
1555		return ERR_CAST(dev);
1556
1557	if (*assoc_num >= MACSEC_NUM_AN)
1558		return ERR_PTR(-EINVAL);
1559
1560	secy = &macsec_priv(dev)->secy;
1561	tx_sc = &secy->tx_sc;
1562
1563	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
1564	if (!tx_sa)
1565		return ERR_PTR(-ENODEV);
1566
1567	*devp = dev;
1568	*scp = tx_sc;
1569	*secyp = secy;
1570	return tx_sa;
1571}
1572
1573static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
1574					     struct nlattr **attrs,
1575					     struct nlattr **tb_rxsc,
1576					     struct net_device **devp,
1577					     struct macsec_secy **secyp)
1578{
1579	struct net_device *dev;
1580	struct macsec_secy *secy;
1581	struct macsec_rx_sc *rx_sc;
1582	sci_t sci;
1583
1584	dev = get_dev_from_nl(net, attrs);
1585	if (IS_ERR(dev))
1586		return ERR_CAST(dev);
1587
1588	secy = &macsec_priv(dev)->secy;
1589
1590	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
1591		return ERR_PTR(-EINVAL);
1592
1593	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1594	rx_sc = find_rx_sc_rtnl(secy, sci);
1595	if (!rx_sc)
1596		return ERR_PTR(-ENODEV);
1597
1598	*secyp = secy;
1599	*devp = dev;
1600
1601	return rx_sc;
1602}
1603
1604static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
1605					     struct nlattr **attrs,
1606					     struct nlattr **tb_rxsc,
1607					     struct nlattr **tb_sa,
1608					     struct net_device **devp,
1609					     struct macsec_secy **secyp,
1610					     struct macsec_rx_sc **scp,
1611					     u8 *assoc_num)
1612{
1613	struct macsec_rx_sc *rx_sc;
1614	struct macsec_rx_sa *rx_sa;
1615
1616	if (!tb_sa[MACSEC_SA_ATTR_AN])
1617		return ERR_PTR(-EINVAL);
1618
1619	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1620	if (*assoc_num >= MACSEC_NUM_AN)
1621		return ERR_PTR(-EINVAL);
1622
1623	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
1624	if (IS_ERR(rx_sc))
1625		return ERR_CAST(rx_sc);
1626
1627	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
1628	if (!rx_sa)
1629		return ERR_PTR(-ENODEV);
1630
1631	*scp = rx_sc;
1632	return rx_sa;
1633}
1634
1635static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
1636	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
1637	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
1638	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
1639	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
1640};
1641
1642static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
1643	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
1644	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
1645};
1646
1647static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
1648	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
1649	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
1650	[MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
1651	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
1652				   .len = MACSEC_KEYID_LEN, },
1653	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
1654				 .len = MACSEC_MAX_KEY_LEN, },
1655	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
1656	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
1657				  .len = MACSEC_SALT_LEN, },
1658};
1659
1660static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
1661	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
1662};
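
/* Editorial sketch: this genetlink family is normally driven via
 * iproute2; an illustrative setup (addresses and keys are placeholders):
 *
 *   ip link add link eth0 macsec0 type macsec encrypt on
 *   ip macsec add macsec0 tx sa 0 pn 1 on key 01 <32 hex digits>
 *   ip macsec add macsec0 rx port 1 address 02:00:00:00:00:01
 *   ip macsec add macsec0 rx port 1 address 02:00:00:00:00:01 \
 *           sa 0 pn 1 on key 02 <32 hex digits>
 *
 * These map onto the MACSEC_ATTR_SA_CONFIG / MACSEC_ATTR_RXSC_CONFIG
 * attributes parsed by the add_txsa/add_rxsc/add_rxsa handlers below.
 */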
1663
1664/* Offloads an operation to a device driver */
1665static int macsec_offload(int (* const func)(struct macsec_context *),
1666			  struct macsec_context *ctx)
1667{
1668	int ret;
1669
1670	if (unlikely(!func))
1671		return 0;
1672
1673	if (ctx->offload == MACSEC_OFFLOAD_PHY)
1674		mutex_lock(&ctx->phydev->lock);
1675
1676	ret = (*func)(ctx);
1677
1678	if (ctx->offload == MACSEC_OFFLOAD_PHY)
1679		mutex_unlock(&ctx->phydev->lock);
1680
1681	return ret;
1682}
1683
1684static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
1685{
1686	if (!attrs[MACSEC_ATTR_SA_CONFIG])
1687		return -EINVAL;
1688
1689	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
1690		return -EINVAL;
1691
1692	return 0;
1693}
1694
1695static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
1696{
1697	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
1698		return -EINVAL;
1699
1700	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
1701		return -EINVAL;
1702
1703	return 0;
1704}
1705
1706static bool validate_add_rxsa(struct nlattr **attrs)
1707{
1708	if (!attrs[MACSEC_SA_ATTR_AN] ||
1709	    !attrs[MACSEC_SA_ATTR_KEY] ||
1710	    !attrs[MACSEC_SA_ATTR_KEYID])
1711		return false;
1712
1713	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1714		return false;
1715
1716	if (attrs[MACSEC_SA_ATTR_PN] &&
1717	    nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
1718		return false;
1719
1720	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1721		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1722			return false;
1723	}
1724
1725	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1726		return false;
1727
1728	return true;
1729}
1730
1731static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
1732{
1733	struct net_device *dev;
1734	struct nlattr **attrs = info->attrs;
1735	struct macsec_secy *secy;
1736	struct macsec_rx_sc *rx_sc;
1737	struct macsec_rx_sa *rx_sa;
1738	unsigned char assoc_num;
1739	int pn_len;
1740	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1741	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1742	int err;
1743
1744	if (!attrs[MACSEC_ATTR_IFINDEX])
1745		return -EINVAL;
1746
1747	if (parse_sa_config(attrs, tb_sa))
1748		return -EINVAL;
1749
1750	if (parse_rxsc_config(attrs, tb_rxsc))
1751		return -EINVAL;
1752
1753	if (!validate_add_rxsa(tb_sa))
1754		return -EINVAL;
1755
1756	rtnl_lock();
1757	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
1758	if (IS_ERR(rx_sc)) {
1759		rtnl_unlock();
1760		return PTR_ERR(rx_sc);
1761	}
1762
1763	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1764
1765	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
1766		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
1767			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
1768		rtnl_unlock();
1769		return -EINVAL;
1770	}
1771
1772	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
1773	if (tb_sa[MACSEC_SA_ATTR_PN] &&
1774	    nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
1775		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
1776			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
1777		rtnl_unlock();
1778		return -EINVAL;
1779	}
1780
1781	if (secy->xpn) {
1782		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
1783			rtnl_unlock();
1784			return -EINVAL;
1785		}
1786
1787		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
1788			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
1789				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
1790				  MACSEC_SALT_LEN);
1791			rtnl_unlock();
1792			return -EINVAL;
1793		}
1794	}
1795
1796	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
1797	if (rx_sa) {
1798		rtnl_unlock();
1799		return -EBUSY;
1800	}
1801
1802	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
1803	if (!rx_sa) {
1804		rtnl_unlock();
1805		return -ENOMEM;
1806	}
1807
1808	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1809			 secy->key_len, secy->icv_len);
1810	if (err < 0) {
1811		kfree(rx_sa);
1812		rtnl_unlock();
1813		return err;
1814	}
1815
1816	if (tb_sa[MACSEC_SA_ATTR_PN]) {
1817		spin_lock_bh(&rx_sa->lock);
1818		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
1819		spin_unlock_bh(&rx_sa->lock);
1820	}
1821
1822	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
1823		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
1824
1825	rx_sa->sc = rx_sc;
1826
1827	if (secy->xpn) {
1828		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
1829		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
1830			   MACSEC_SALT_LEN);
1831	}
1832
1833	/* If h/w offloading is available, propagate to the device */
1834	if (macsec_is_offloaded(netdev_priv(dev))) {
1835		const struct macsec_ops *ops;
1836		struct macsec_context ctx;
1837
1838		ops = macsec_get_ops(netdev_priv(dev), &ctx);
1839		if (!ops) {
1840			err = -EOPNOTSUPP;
1841			goto cleanup;
1842		}
1843
1844		ctx.sa.assoc_num = assoc_num;
1845		ctx.sa.rx_sa = rx_sa;
1846		ctx.secy = secy;
1847		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1848		       secy->key_len);
1849
1850		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
1851		memzero_explicit(ctx.sa.key, secy->key_len);
1852		if (err)
1853			goto cleanup;
1854	}
1855
1856	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
1857	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
1858
1859	rtnl_unlock();
1860
1861	return 0;
1862
1863cleanup:
1864	macsec_rxsa_put(rx_sa);
1865	rtnl_unlock();
1866	return err;
1867}
1868
1869static bool validate_add_rxsc(struct nlattr **attrs)
1870{
1871	if (!attrs[MACSEC_RXSC_ATTR_SCI])
1872		return false;
1873
1874	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
1875		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
1876			return false;
1877	}
1878
1879	return true;
1880}
1881
1882static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
1883{
1884	struct net_device *dev;
1885	sci_t sci = MACSEC_UNDEF_SCI;
1886	struct nlattr **attrs = info->attrs;
1887	struct macsec_rx_sc *rx_sc;
1888	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1889	struct macsec_secy *secy;
1890	bool active = true;
1891	int ret;
1892
1893	if (!attrs[MACSEC_ATTR_IFINDEX])
1894		return -EINVAL;
1895
1896	if (parse_rxsc_config(attrs, tb_rxsc))
1897		return -EINVAL;
1898
1899	if (!validate_add_rxsc(tb_rxsc))
1900		return -EINVAL;
1901
1902	rtnl_lock();
1903	dev = get_dev_from_nl(genl_info_net(info), attrs);
1904	if (IS_ERR(dev)) {
1905		rtnl_unlock();
1906		return PTR_ERR(dev);
1907	}
1908
1909	secy = &macsec_priv(dev)->secy;
1910	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1911
1912	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
1913		active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
1914
1915	rx_sc = create_rx_sc(dev, sci, active);
1916	if (IS_ERR(rx_sc)) {
1917		rtnl_unlock();
1918		return PTR_ERR(rx_sc);
1919	}
1920
1921	if (macsec_is_offloaded(netdev_priv(dev))) {
1922		const struct macsec_ops *ops;
1923		struct macsec_context ctx;
1924
1925		ops = macsec_get_ops(netdev_priv(dev), &ctx);
1926		if (!ops) {
1927			ret = -EOPNOTSUPP;
1928			goto cleanup;
1929		}
1930
1931		ctx.rx_sc = rx_sc;
1932		ctx.secy = secy;
1933
1934		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
1935		if (ret)
1936			goto cleanup;
1937	}
1938
1939	rtnl_unlock();
1940
1941	return 0;
1942
1943cleanup:
1944	del_rx_sc(secy, sci);
1945	free_rx_sc(rx_sc);
1946	rtnl_unlock();
1947	return ret;
1948}
1949
1950static bool validate_add_txsa(struct nlattr **attrs)
1951{
1952	if (!attrs[MACSEC_SA_ATTR_AN] ||
1953	    !attrs[MACSEC_SA_ATTR_PN] ||
1954	    !attrs[MACSEC_SA_ATTR_KEY] ||
1955	    !attrs[MACSEC_SA_ATTR_KEYID])
1956		return false;
1957
1958	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1959		return false;
1960
1961	if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
1962		return false;
1963
1964	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1965		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1966			return false;
1967	}
1968
1969	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1970		return false;
1971
1972	return true;
1973}
1974
1975static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
1976{
1977	struct net_device *dev;
1978	struct nlattr **attrs = info->attrs;
1979	struct macsec_secy *secy;
1980	struct macsec_tx_sc *tx_sc;
1981	struct macsec_tx_sa *tx_sa;
1982	unsigned char assoc_num;
1983	int pn_len;
1984	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1985	bool was_operational;
1986	int err;
1987
1988	if (!attrs[MACSEC_ATTR_IFINDEX])
1989		return -EINVAL;
1990
1991	if (parse_sa_config(attrs, tb_sa))
1992		return -EINVAL;
1993
1994	if (!validate_add_txsa(tb_sa))
1995		return -EINVAL;
1996
1997	rtnl_lock();
1998	dev = get_dev_from_nl(genl_info_net(info), attrs);
1999	if (IS_ERR(dev)) {
2000		rtnl_unlock();
2001		return PTR_ERR(dev);
2002	}
2003
2004	secy = &macsec_priv(dev)->secy;
2005	tx_sc = &secy->tx_sc;
2006
2007	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
2008
2009	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
2010		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
2011			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
2012		rtnl_unlock();
2013		return -EINVAL;
2014	}
2015
2016	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2017	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2018		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
2019			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2020		rtnl_unlock();
2021		return -EINVAL;
2022	}
2023
2024	if (secy->xpn) {
2025		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
2026			rtnl_unlock();
2027			return -EINVAL;
2028		}
2029
2030		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
2031			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
2032				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
2033				  MACSEC_SALT_LEN);
2034			rtnl_unlock();
2035			return -EINVAL;
2036		}
2037	}
2038
2039	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
2040	if (tx_sa) {
2041		rtnl_unlock();
2042		return -EBUSY;
2043	}
2044
2045	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
2046	if (!tx_sa) {
2047		rtnl_unlock();
2048		return -ENOMEM;
2049	}
2050
2051	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2052			 secy->key_len, secy->icv_len);
2053	if (err < 0) {
2054		kfree(tx_sa);
2055		rtnl_unlock();
2056		return err;
2057	}
2058
2059	spin_lock_bh(&tx_sa->lock);
2060	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2061	spin_unlock_bh(&tx_sa->lock);
2062
2063	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2064		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2065
2066	was_operational = secy->operational;
2067	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
2068		secy->operational = true;
2069
2070	if (secy->xpn) {
2071		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
2072		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
2073			   MACSEC_SALT_LEN);
2074	}
2075
2076	/* If h/w offloading is available, propagate to the device */
2077	if (macsec_is_offloaded(netdev_priv(dev))) {
2078		const struct macsec_ops *ops;
2079		struct macsec_context ctx;
2080
2081		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2082		if (!ops) {
2083			err = -EOPNOTSUPP;
2084			goto cleanup;
2085		}
2086
2087		ctx.sa.assoc_num = assoc_num;
2088		ctx.sa.tx_sa = tx_sa;
2089		ctx.secy = secy;
2090		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2091		       secy->key_len);
2092
2093		err = macsec_offload(ops->mdo_add_txsa, &ctx);
2094		memzero_explicit(ctx.sa.key, secy->key_len);
2095		if (err)
2096			goto cleanup;
2097	}
2098
2099	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
2100	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
2101
2102	rtnl_unlock();
2103
2104	return 0;
2105
2106cleanup:
2107	secy->operational = was_operational;
2108	macsec_txsa_put(tx_sa);
2109	rtnl_unlock();
2110	return err;
2111}
2112
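/* SA deletion is only allowed while the SA is inactive, otherwise -EBUSY
 * is returned; offload providers are notified before the RCU pointer is
 * cleared.
 */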
2113static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
2114{
2115	struct nlattr **attrs = info->attrs;
2116	struct net_device *dev;
2117	struct macsec_secy *secy;
2118	struct macsec_rx_sc *rx_sc;
2119	struct macsec_rx_sa *rx_sa;
2120	u8 assoc_num;
2121	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2122	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2123	int ret;
2124
2125	if (!attrs[MACSEC_ATTR_IFINDEX])
2126		return -EINVAL;
2127
2128	if (parse_sa_config(attrs, tb_sa))
2129		return -EINVAL;
2130
2131	if (parse_rxsc_config(attrs, tb_rxsc))
2132		return -EINVAL;
2133
2134	rtnl_lock();
2135	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2136				 &dev, &secy, &rx_sc, &assoc_num);
2137	if (IS_ERR(rx_sa)) {
2138		rtnl_unlock();
2139		return PTR_ERR(rx_sa);
2140	}
2141
2142	if (rx_sa->active) {
2143		rtnl_unlock();
2144		return -EBUSY;
2145	}
2146
2147	/* If h/w offloading is available, propagate to the device */
2148	if (macsec_is_offloaded(netdev_priv(dev))) {
2149		const struct macsec_ops *ops;
2150		struct macsec_context ctx;
2151
2152		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2153		if (!ops) {
2154			ret = -EOPNOTSUPP;
2155			goto cleanup;
2156		}
2157
2158		ctx.sa.assoc_num = assoc_num;
2159		ctx.sa.rx_sa = rx_sa;
2160		ctx.secy = secy;
2161
2162		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
2163		if (ret)
2164			goto cleanup;
2165	}
2166
2167	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
2168	clear_rx_sa(rx_sa);
2169
2170	rtnl_unlock();
2171
2172	return 0;
2173
2174cleanup:
2175	rtnl_unlock();
2176	return ret;
2177}
2178
2179static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
2180{
2181	struct nlattr **attrs = info->attrs;
2182	struct net_device *dev;
2183	struct macsec_secy *secy;
2184	struct macsec_rx_sc *rx_sc;
2185	sci_t sci;
2186	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2187	int ret;
2188
2189	if (!attrs[MACSEC_ATTR_IFINDEX])
2190		return -EINVAL;
2191
2192	if (parse_rxsc_config(attrs, tb_rxsc))
2193		return -EINVAL;
2194
2195	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
2196		return -EINVAL;
2197
2198	rtnl_lock();
2199	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
2200	if (IS_ERR(dev)) {
2201		rtnl_unlock();
2202		return PTR_ERR(dev);
2203	}
2204
2205	secy = &macsec_priv(dev)->secy;
2206	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
2207
2208	rx_sc = del_rx_sc(secy, sci);
2209	if (!rx_sc) {
2210		rtnl_unlock();
2211		return -ENODEV;
2212	}
2213
2214	/* If h/w offloading is available, propagate to the device */
2215	if (macsec_is_offloaded(netdev_priv(dev))) {
2216		const struct macsec_ops *ops;
2217		struct macsec_context ctx;
2218
2219		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2220		if (!ops) {
2221			ret = -EOPNOTSUPP;
2222			goto cleanup;
2223		}
2224
2225		ctx.rx_sc = rx_sc;
2226		ctx.secy = secy;
2227		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
2228		if (ret)
2229			goto cleanup;
2230	}
2231
2232	free_rx_sc(rx_sc);
2233	rtnl_unlock();
2234
2235	return 0;
2236
2237cleanup:
2238	rtnl_unlock();
2239	return ret;
2240}
2241
2242static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
2243{
2244	struct nlattr **attrs = info->attrs;
2245	struct net_device *dev;
2246	struct macsec_secy *secy;
2247	struct macsec_tx_sc *tx_sc;
2248	struct macsec_tx_sa *tx_sa;
2249	u8 assoc_num;
2250	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2251	int ret;
2252
2253	if (!attrs[MACSEC_ATTR_IFINDEX])
2254		return -EINVAL;
2255
2256	if (parse_sa_config(attrs, tb_sa))
2257		return -EINVAL;
2258
2259	rtnl_lock();
2260	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2261				 &dev, &secy, &tx_sc, &assoc_num);
2262	if (IS_ERR(tx_sa)) {
2263		rtnl_unlock();
2264		return PTR_ERR(tx_sa);
2265	}
2266
2267	if (tx_sa->active) {
2268		rtnl_unlock();
2269		return -EBUSY;
2270	}
2271
2272	/* If h/w offloading is available, propagate to the device */
2273	if (macsec_is_offloaded(netdev_priv(dev))) {
2274		const struct macsec_ops *ops;
2275		struct macsec_context ctx;
2276
2277		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2278		if (!ops) {
2279			ret = -EOPNOTSUPP;
2280			goto cleanup;
2281		}
2282
2283		ctx.sa.assoc_num = assoc_num;
2284		ctx.sa.tx_sa = tx_sa;
2285		ctx.secy = secy;
2286
2287		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
2288		if (ret)
2289			goto cleanup;
2290	}
2291
2292	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
2293	clear_tx_sa(tx_sa);
2294
2295	rtnl_unlock();
2296
2297	return 0;
2298
2299cleanup:
2300	rtnl_unlock();
2301	return ret;
2302}
2303
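/* SA updates may change the packet number and the active flag, but never
 * the key, key id, SSCI or salt: rekeying requires deleting the SA and
 * installing a new one.
 */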
2304static bool validate_upd_sa(struct nlattr **attrs)
2305{
2306	if (!attrs[MACSEC_SA_ATTR_AN] ||
2307	    attrs[MACSEC_SA_ATTR_KEY] ||
2308	    attrs[MACSEC_SA_ATTR_KEYID] ||
2309	    attrs[MACSEC_SA_ATTR_SSCI] ||
2310	    attrs[MACSEC_SA_ATTR_SALT])
2311		return false;
2312
2313	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
2314		return false;
2315
2316	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
2317		return false;
2318
2319	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
2320		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
2321			return false;
2322	}
2323
2324	return true;
2325}
2326
2327static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
2328{
2329	struct nlattr **attrs = info->attrs;
2330	struct net_device *dev;
2331	struct macsec_secy *secy;
2332	struct macsec_tx_sc *tx_sc;
2333	struct macsec_tx_sa *tx_sa;
2334	u8 assoc_num;
2335	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2336	bool was_operational, was_active;
2337	pn_t prev_pn;
2338	int ret = 0;
2339
2340	prev_pn.full64 = 0;
2341
2342	if (!attrs[MACSEC_ATTR_IFINDEX])
2343		return -EINVAL;
2344
2345	if (parse_sa_config(attrs, tb_sa))
2346		return -EINVAL;
2347
2348	if (!validate_upd_sa(tb_sa))
2349		return -EINVAL;
2350
2351	rtnl_lock();
2352	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2353				 &dev, &secy, &tx_sc, &assoc_num);
2354	if (IS_ERR(tx_sa)) {
2355		rtnl_unlock();
2356		return PTR_ERR(tx_sa);
2357	}
2358
2359	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2360		int pn_len;
2361
2362		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2363		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2364			pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
2365				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2366			rtnl_unlock();
2367			return -EINVAL;
2368		}
2369
2370		spin_lock_bh(&tx_sa->lock);
2371		prev_pn = tx_sa->next_pn_halves;
2372		tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2373		spin_unlock_bh(&tx_sa->lock);
2374	}
2375
2376	was_active = tx_sa->active;
2377	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2378		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2379
2380	was_operational = secy->operational;
2381	if (assoc_num == tx_sc->encoding_sa)
2382		secy->operational = tx_sa->active;
2383
2384	/* If h/w offloading is available, propagate to the device */
2385	if (macsec_is_offloaded(netdev_priv(dev))) {
2386		const struct macsec_ops *ops;
2387		struct macsec_context ctx;
2388
2389		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2390		if (!ops) {
2391			ret = -EOPNOTSUPP;
2392			goto cleanup;
2393		}
2394
2395		ctx.sa.assoc_num = assoc_num;
2396		ctx.sa.tx_sa = tx_sa;
2397		ctx.sa.update_pn = !!prev_pn.full64;
2398		ctx.secy = secy;
2399
2400		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
2401		if (ret)
2402			goto cleanup;
2403	}
2404
2405	rtnl_unlock();
2406
2407	return 0;
2408
2409cleanup:
2410	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2411		spin_lock_bh(&tx_sa->lock);
2412		tx_sa->next_pn_halves = prev_pn;
2413		spin_unlock_bh(&tx_sa->lock);
2414	}
2415	tx_sa->active = was_active;
2416	secy->operational = was_operational;
2417	rtnl_unlock();
2418	return ret;
2419}
2420
2421static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
2422{
2423	struct nlattr **attrs = info->attrs;
2424	struct net_device *dev;
2425	struct macsec_secy *secy;
2426	struct macsec_rx_sc *rx_sc;
2427	struct macsec_rx_sa *rx_sa;
2428	u8 assoc_num;
2429	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2430	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2431	bool was_active;
2432	pn_t prev_pn;
2433	int ret = 0;
2434
2435	prev_pn.full64 = 0;
2436
2437	if (!attrs[MACSEC_ATTR_IFINDEX])
2438		return -EINVAL;
2439
2440	if (parse_rxsc_config(attrs, tb_rxsc))
2441		return -EINVAL;
2442
2443	if (parse_sa_config(attrs, tb_sa))
2444		return -EINVAL;
2445
2446	if (!validate_upd_sa(tb_sa))
2447		return -EINVAL;
2448
2449	rtnl_lock();
2450	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2451				 &dev, &secy, &rx_sc, &assoc_num);
2452	if (IS_ERR(rx_sa)) {
2453		rtnl_unlock();
2454		return PTR_ERR(rx_sa);
2455	}
2456
2457	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2458		int pn_len;
2459
2460		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2461		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2462			pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
2463				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2464			rtnl_unlock();
2465			return -EINVAL;
2466		}
2467
2468		spin_lock_bh(&rx_sa->lock);
2469		prev_pn = rx_sa->next_pn_halves;
2470		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2471		spin_unlock_bh(&rx_sa->lock);
2472	}
2473
2474	was_active = rx_sa->active;
2475	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2476		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2477
2478	/* If h/w offloading is available, propagate to the device */
2479	if (macsec_is_offloaded(netdev_priv(dev))) {
2480		const struct macsec_ops *ops;
2481		struct macsec_context ctx;
2482
2483		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2484		if (!ops) {
2485			ret = -EOPNOTSUPP;
2486			goto cleanup;
2487		}
2488
2489		ctx.sa.assoc_num = assoc_num;
2490		ctx.sa.rx_sa = rx_sa;
2491		ctx.sa.update_pn = !!prev_pn.full64;
2492		ctx.secy = secy;
2493
2494		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
2495		if (ret)
2496			goto cleanup;
2497	}
2498
2499	rtnl_unlock();
2500	return 0;
2501
2502cleanup:
2503	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2504		spin_lock_bh(&rx_sa->lock);
2505		rx_sa->next_pn_halves = prev_pn;
2506		spin_unlock_bh(&rx_sa->lock);
2507	}
2508	rx_sa->active = was_active;
2509	rtnl_unlock();
2510	return ret;
2511}
2512
2513static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
2514{
2515	struct nlattr **attrs = info->attrs;
2516	struct net_device *dev;
2517	struct macsec_secy *secy;
2518	struct macsec_rx_sc *rx_sc;
2519	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2520	unsigned int prev_n_rx_sc;
2521	bool was_active;
2522	int ret;
2523
2524	if (!attrs[MACSEC_ATTR_IFINDEX])
2525		return -EINVAL;
2526
2527	if (parse_rxsc_config(attrs, tb_rxsc))
2528		return -EINVAL;
2529
2530	if (!validate_add_rxsc(tb_rxsc))
2531		return -EINVAL;
2532
2533	rtnl_lock();
2534	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
2535	if (IS_ERR(rx_sc)) {
2536		rtnl_unlock();
2537		return PTR_ERR(rx_sc);
2538	}
2539
2540	was_active = rx_sc->active;
2541	prev_n_rx_sc = secy->n_rx_sc;
2542	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
2543		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
2544
2545		if (rx_sc->active != new)
2546			secy->n_rx_sc += new ? 1 : -1;
2547
2548		rx_sc->active = new;
2549	}
2550
2551	/* If h/w offloading is available, propagate to the device */
2552	if (macsec_is_offloaded(netdev_priv(dev))) {
2553		const struct macsec_ops *ops;
2554		struct macsec_context ctx;
2555
2556		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2557		if (!ops) {
2558			ret = -EOPNOTSUPP;
2559			goto cleanup;
2560		}
2561
2562		ctx.rx_sc = rx_sc;
2563		ctx.secy = secy;
2564
2565		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
2566		if (ret)
2567			goto cleanup;
2568	}
2569
2570	rtnl_unlock();
2571
2572	return 0;
2573
2574cleanup:
2575	secy->n_rx_sc = prev_n_rx_sc;
2576	rx_sc->active = was_active;
2577	rtnl_unlock();
2578	return ret;
2579}
2580
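/* A SecY counts as configured once it has at least one receive SC or one
 * transmit SA installed; macsec_update_offload() uses this to refuse
 * switching the offload mode, as rule migration is not implemented.
 */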
2581static bool macsec_is_configured(struct macsec_dev *macsec)
2582{
2583	struct macsec_secy *secy = &macsec->secy;
2584	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2585	int i;
2586
2587	if (secy->rx_sc)
2588		return true;
2589
2590	for (i = 0; i < MACSEC_NUM_AN; i++)
2591		if (tx_sc->sa[i])
2592			return true;
2593
2594	return false;
2595}
2596
2597static bool macsec_needs_tx_tag(struct macsec_dev *macsec,
2598				const struct macsec_ops *ops)
2599{
2600	return macsec->offload == MACSEC_OFFLOAD_PHY &&
2601		ops->mdo_insert_tx_tag;
2602}
2603
2604static void macsec_set_head_tail_room(struct net_device *dev)
2605{
2606	struct macsec_dev *macsec = macsec_priv(dev);
2607	struct net_device *real_dev = macsec->real_dev;
2608	int needed_headroom, needed_tailroom;
2609	const struct macsec_ops *ops;
2610
2611	ops = macsec_get_ops(macsec, NULL);
2612	if (ops) {
2613		needed_headroom = ops->needed_headroom;
2614		needed_tailroom = ops->needed_tailroom;
2615	} else {
2616		needed_headroom = MACSEC_NEEDED_HEADROOM;
2617		needed_tailroom = MACSEC_NEEDED_TAILROOM;
2618	}
2619
2620	dev->needed_headroom = real_dev->needed_headroom + needed_headroom;
2621	dev->needed_tailroom = real_dev->needed_tailroom + needed_tailroom;
2622}
2623
2624static void macsec_inherit_tso_max(struct net_device *dev)
2625{
2626	struct macsec_dev *macsec = macsec_priv(dev);
2627
2628	/* if macsec is offloaded, we need to follow the lower
2629	 * device's capabilities. otherwise, we can ignore them.
2630	 */
2631	if (macsec_is_offloaded(macsec))
2632		netif_inherit_tso_max(dev, macsec->real_dev);
2633}
2634
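/* Switch the offload mode of an idle, unconfigured device: validate the
 * requested mode, flip macsec->offload, add the SecY on the new provider
 * (or delete it when turning offload off), then recompute headroom, TSO
 * limits and features for the new data path.
 */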
2635static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload)
2636{
2637	enum macsec_offload prev_offload;
2638	const struct macsec_ops *ops;
2639	struct macsec_context ctx;
2640	struct macsec_dev *macsec;
2641	int ret = 0;
2642
2643	macsec = macsec_priv(dev);
2644
2645	/* Check if the offloading mode is supported by the underlying layers */
2646	if (offload != MACSEC_OFFLOAD_OFF &&
2647	    !macsec_check_offload(offload, macsec))
2648		return -EOPNOTSUPP;
2649
2650	/* The offload mode cannot be changed on a running device. */
2651	if (netif_running(dev))
2652		return -EBUSY;
2653
2654	/* Check if the device already has rules configured: we do not support
2655	 * rules migration.
2656	 */
2657	if (macsec_is_configured(macsec))
2658		return -EBUSY;
2659
2660	prev_offload = macsec->offload;
2661
2662	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
2663			       macsec, &ctx);
2664	if (!ops)
2665		return -EOPNOTSUPP;
2666
2667	macsec->offload = offload;
2668
2669	ctx.secy = &macsec->secy;
2670	ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload(ops->mdo_del_secy, &ctx)
2671					    : macsec_offload(ops->mdo_add_secy, &ctx);
2672	if (ret) {
2673		macsec->offload = prev_offload;
2674		return ret;
2675	}
2676
2677	macsec_set_head_tail_room(dev);
2678	macsec->insert_tx_tag = macsec_needs_tx_tag(macsec, ops);
2679
2680	macsec_inherit_tso_max(dev);
2681
2682	netdev_update_features(dev);
2683
2684	return ret;
2685}
2686
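/* Userspace counterpart (iproute2), shown for illustration only:
 *
 *   ip macsec offload macsec0 mac
 */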
2687static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
2688{
2689	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
2690	struct nlattr **attrs = info->attrs;
2691	enum macsec_offload offload;
2692	struct macsec_dev *macsec;
2693	struct net_device *dev;
2694	int ret = 0;
2695
2696	if (!attrs[MACSEC_ATTR_IFINDEX])
2697		return -EINVAL;
2698
2699	if (!attrs[MACSEC_ATTR_OFFLOAD])
2700		return -EINVAL;
2701
2702	if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
2703					attrs[MACSEC_ATTR_OFFLOAD],
2704					macsec_genl_offload_policy, NULL))
2705		return -EINVAL;
2706
2707	rtnl_lock();
2708
2709	dev = get_dev_from_nl(genl_info_net(info), attrs);
2710	if (IS_ERR(dev)) {
2711		ret = PTR_ERR(dev);
2712		goto out;
2713	}
2714	macsec = macsec_priv(dev);
2715
2716	if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) {
2717		ret = -EINVAL;
2718		goto out;
2719	}
2720
2721	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
2722
2723	if (macsec->offload != offload)
2724		ret = macsec_update_offload(dev, offload);
2725out:
2726	rtnl_unlock();
2727	return ret;
2728}
2729
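/* Statistics readers: for an offloaded SecY the counters are fetched from
 * the provider via the mdo_get_*_stats callbacks, otherwise they are
 * summed from the per-CPU software counters.
 */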
2730static void get_tx_sa_stats(struct net_device *dev, int an,
2731			    struct macsec_tx_sa *tx_sa,
2732			    struct macsec_tx_sa_stats *sum)
2733{
2734	struct macsec_dev *macsec = macsec_priv(dev);
2735	int cpu;
2736
2737	/* If h/w offloading is available, propagate to the device */
2738	if (macsec_is_offloaded(macsec)) {
2739		const struct macsec_ops *ops;
2740		struct macsec_context ctx;
2741
2742		ops = macsec_get_ops(macsec, &ctx);
2743		if (ops) {
2744			ctx.sa.assoc_num = an;
2745			ctx.sa.tx_sa = tx_sa;
2746			ctx.stats.tx_sa_stats = sum;
2747			ctx.secy = &macsec_priv(dev)->secy;
2748			macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
2749		}
2750		return;
2751	}
2752
2753	for_each_possible_cpu(cpu) {
2754		const struct macsec_tx_sa_stats *stats =
2755			per_cpu_ptr(tx_sa->stats, cpu);
2756
2757		sum->OutPktsProtected += stats->OutPktsProtected;
2758		sum->OutPktsEncrypted += stats->OutPktsEncrypted;
2759	}
2760}
2761
2762static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
2763{
2764	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
2765			sum->OutPktsProtected) ||
2766	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2767			sum->OutPktsEncrypted))
2768		return -EMSGSIZE;
2769
2770	return 0;
2771}
2772
2773static void get_rx_sa_stats(struct net_device *dev,
2774			    struct macsec_rx_sc *rx_sc, int an,
2775			    struct macsec_rx_sa *rx_sa,
2776			    struct macsec_rx_sa_stats *sum)
2777{
2778	struct macsec_dev *macsec = macsec_priv(dev);
2779	int cpu;
2780
2781	/* If h/w offloading is available, propagate to the device */
2782	if (macsec_is_offloaded(macsec)) {
2783		const struct macsec_ops *ops;
2784		struct macsec_context ctx;
2785
2786		ops = macsec_get_ops(macsec, &ctx);
2787		if (ops) {
2788			ctx.sa.assoc_num = an;
2789			ctx.sa.rx_sa = rx_sa;
2790			ctx.stats.rx_sa_stats = sum;
2791			ctx.secy = &macsec_priv(dev)->secy;
2792			ctx.rx_sc = rx_sc;
2793			macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
2794		}
2795		return;
2796	}
2797
2798	for_each_possible_cpu(cpu) {
2799		const struct macsec_rx_sa_stats *stats =
2800			per_cpu_ptr(rx_sa->stats, cpu);
2801
2802		sum->InPktsOK         += stats->InPktsOK;
2803		sum->InPktsInvalid    += stats->InPktsInvalid;
2804		sum->InPktsNotValid   += stats->InPktsNotValid;
2805		sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
2806		sum->InPktsUnusedSA   += stats->InPktsUnusedSA;
2807	}
2808}
2809
2810static int copy_rx_sa_stats(struct sk_buff *skb,
2811			    struct macsec_rx_sa_stats *sum)
2812{
2813	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
2814	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
2815			sum->InPktsInvalid) ||
2816	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
2817			sum->InPktsNotValid) ||
2818	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2819			sum->InPktsNotUsingSA) ||
2820	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
2821			sum->InPktsUnusedSA))
2822		return -EMSGSIZE;
2823
2824	return 0;
2825}
2826
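/* SC counters are 64-bit and updated in the datapath, so each per-CPU
 * copy is snapshotted under u64_stats_fetch_begin()/retry() to obtain a
 * consistent view, notably on 32-bit systems.
 */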
2827static void get_rx_sc_stats(struct net_device *dev,
2828			    struct macsec_rx_sc *rx_sc,
2829			    struct macsec_rx_sc_stats *sum)
2830{
2831	struct macsec_dev *macsec = macsec_priv(dev);
2832	int cpu;
2833
2834	/* If h/w offloading is available, propagate to the device */
2835	if (macsec_is_offloaded(macsec)) {
2836		const struct macsec_ops *ops;
2837		struct macsec_context ctx;
2838
2839		ops = macsec_get_ops(macsec, &ctx);
2840		if (ops) {
2841			ctx.stats.rx_sc_stats = sum;
2842			ctx.secy = &macsec_priv(dev)->secy;
2843			ctx.rx_sc = rx_sc;
2844			macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
2845		}
2846		return;
2847	}
2848
2849	for_each_possible_cpu(cpu) {
2850		const struct pcpu_rx_sc_stats *stats;
2851		struct macsec_rx_sc_stats tmp;
2852		unsigned int start;
2853
2854		stats = per_cpu_ptr(rx_sc->stats, cpu);
2855		do {
2856			start = u64_stats_fetch_begin(&stats->syncp);
2857			memcpy(&tmp, &stats->stats, sizeof(tmp));
2858		} while (u64_stats_fetch_retry(&stats->syncp, start));
2859
2860		sum->InOctetsValidated += tmp.InOctetsValidated;
2861		sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
2862		sum->InPktsUnchecked   += tmp.InPktsUnchecked;
2863		sum->InPktsDelayed     += tmp.InPktsDelayed;
2864		sum->InPktsOK          += tmp.InPktsOK;
2865		sum->InPktsInvalid     += tmp.InPktsInvalid;
2866		sum->InPktsLate        += tmp.InPktsLate;
2867		sum->InPktsNotValid    += tmp.InPktsNotValid;
2868		sum->InPktsNotUsingSA  += tmp.InPktsNotUsingSA;
2869		sum->InPktsUnusedSA    += tmp.InPktsUnusedSA;
2870	}
2871}
2872
2873static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
2874{
2875	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
2876			      sum->InOctetsValidated,
2877			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2878	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
2879			      sum->InOctetsDecrypted,
2880			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2881	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
2882			      sum->InPktsUnchecked,
2883			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2884	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
2885			      sum->InPktsDelayed,
2886			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2887	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
2888			      sum->InPktsOK,
2889			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2890	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
2891			      sum->InPktsInvalid,
2892			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2893	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
2894			      sum->InPktsLate,
2895			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2896	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
2897			      sum->InPktsNotValid,
2898			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2899	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2900			      sum->InPktsNotUsingSA,
2901			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2902	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
2903			      sum->InPktsUnusedSA,
2904			      MACSEC_RXSC_STATS_ATTR_PAD))
2905		return -EMSGSIZE;
2906
2907	return 0;
2908}
2909
2910static void get_tx_sc_stats(struct net_device *dev,
2911			    struct macsec_tx_sc_stats *sum)
2912{
2913	struct macsec_dev *macsec = macsec_priv(dev);
2914	int cpu;
2915
2916	/* If h/w offloading is available, propagate to the device */
2917	if (macsec_is_offloaded(macsec)) {
2918		const struct macsec_ops *ops;
2919		struct macsec_context ctx;
2920
2921		ops = macsec_get_ops(macsec, &ctx);
2922		if (ops) {
2923			ctx.stats.tx_sc_stats = sum;
2924			ctx.secy = &macsec_priv(dev)->secy;
2925			macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
2926		}
2927		return;
2928	}
2929
2930	for_each_possible_cpu(cpu) {
2931		const struct pcpu_tx_sc_stats *stats;
2932		struct macsec_tx_sc_stats tmp;
2933		unsigned int start;
2934
2935		stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
2936		do {
2937			start = u64_stats_fetch_begin(&stats->syncp);
2938			memcpy(&tmp, &stats->stats, sizeof(tmp));
2939		} while (u64_stats_fetch_retry(&stats->syncp, start));
2940
2941		sum->OutPktsProtected   += tmp.OutPktsProtected;
2942		sum->OutPktsEncrypted   += tmp.OutPktsEncrypted;
2943		sum->OutOctetsProtected += tmp.OutOctetsProtected;
2944		sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
2945	}
2946}
2947
2948static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
2949{
2950	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
2951			      sum->OutPktsProtected,
2952			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2953	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2954			      sum->OutPktsEncrypted,
2955			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2956	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
2957			      sum->OutOctetsProtected,
2958			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2959	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
2960			      sum->OutOctetsEncrypted,
2961			      MACSEC_TXSC_STATS_ATTR_PAD))
2962		return -EMSGSIZE;
2963
2964	return 0;
2965}
2966
2967static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
2968{
2969	struct macsec_dev *macsec = macsec_priv(dev);
2970	int cpu;
2971
2972	/* If h/w offloading is available, propagate to the device */
2973	if (macsec_is_offloaded(macsec)) {
2974		const struct macsec_ops *ops;
2975		struct macsec_context ctx;
2976
2977		ops = macsec_get_ops(macsec, &ctx);
2978		if (ops) {
2979			ctx.stats.dev_stats = sum;
2980			ctx.secy = &macsec_priv(dev)->secy;
2981			macsec_offload(ops->mdo_get_dev_stats, &ctx);
2982		}
2983		return;
2984	}
2985
2986	for_each_possible_cpu(cpu) {
2987		const struct pcpu_secy_stats *stats;
2988		struct macsec_dev_stats tmp;
2989		unsigned int start;
2990
2991		stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
2992		do {
2993			start = u64_stats_fetch_begin(&stats->syncp);
2994			memcpy(&tmp, &stats->stats, sizeof(tmp));
2995		} while (u64_stats_fetch_retry(&stats->syncp, start));
2996
2997		sum->OutPktsUntagged  += tmp.OutPktsUntagged;
2998		sum->InPktsUntagged   += tmp.InPktsUntagged;
2999		sum->OutPktsTooLong   += tmp.OutPktsTooLong;
3000		sum->InPktsNoTag      += tmp.InPktsNoTag;
3001		sum->InPktsBadTag     += tmp.InPktsBadTag;
3002		sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
3003		sum->InPktsNoSCI      += tmp.InPktsNoSCI;
3004		sum->InPktsOverrun    += tmp.InPktsOverrun;
3005	}
3006}
3007
3008static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
3009{
3010	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
3011			      sum->OutPktsUntagged,
3012			      MACSEC_SECY_STATS_ATTR_PAD) ||
3013	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
3014			      sum->InPktsUntagged,
3015			      MACSEC_SECY_STATS_ATTR_PAD) ||
3016	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
3017			      sum->OutPktsTooLong,
3018			      MACSEC_SECY_STATS_ATTR_PAD) ||
3019	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
3020			      sum->InPktsNoTag,
3021			      MACSEC_SECY_STATS_ATTR_PAD) ||
3022	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
3023			      sum->InPktsBadTag,
3024			      MACSEC_SECY_STATS_ATTR_PAD) ||
3025	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
3026			      sum->InPktsUnknownSCI,
3027			      MACSEC_SECY_STATS_ATTR_PAD) ||
3028	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
3029			      sum->InPktsNoSCI,
3030			      MACSEC_SECY_STATS_ATTR_PAD) ||
3031	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
3032			      sum->InPktsOverrun,
3033			      MACSEC_SECY_STATS_ATTR_PAD))
3034		return -EMSGSIZE;
3035
3036	return 0;
3037}
3038
3039static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
3040{
3041	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3042	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
3043							 MACSEC_ATTR_SECY);
3044	u64 csid;
3045
3046	if (!secy_nest)
3047		return 1;
3048
3049	switch (secy->key_len) {
3050	case MACSEC_GCM_AES_128_SAK_LEN:
3051		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
3052		break;
3053	case MACSEC_GCM_AES_256_SAK_LEN:
3054		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
3055		break;
3056	default:
3057		goto cancel;
3058	}
3059
3060	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
3061			MACSEC_SECY_ATTR_PAD) ||
3062	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
3063			      csid, MACSEC_SECY_ATTR_PAD) ||
3064	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
3065	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
3066	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
3067	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
3068	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
3069	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
3070	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
3071	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
3072	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
3073	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
3074		goto cancel;
3075
3076	if (secy->replay_protect) {
3077		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
3078			goto cancel;
3079	}
3080
3081	nla_nest_end(skb, secy_nest);
3082	return 0;
3083
3084cancel:
3085	nla_nest_cancel(skb, secy_nest);
3086	return 1;
3087}
3088
3089static noinline_for_stack int
3090dump_secy(struct macsec_secy *secy, struct net_device *dev,
3091	  struct sk_buff *skb, struct netlink_callback *cb)
3092{
3093	struct macsec_tx_sc_stats tx_sc_stats = {0, };
3094	struct macsec_tx_sa_stats tx_sa_stats = {0, };
3095	struct macsec_rx_sc_stats rx_sc_stats = {0, };
3096	struct macsec_rx_sa_stats rx_sa_stats = {0, };
3097	struct macsec_dev *macsec = netdev_priv(dev);
3098	struct macsec_dev_stats dev_stats = {0, };
3099	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3100	struct nlattr *txsa_list, *rxsc_list;
3101	struct macsec_rx_sc *rx_sc;
3102	struct nlattr *attr;
3103	void *hdr;
3104	int i, j;
3105
3106	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3107			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
3108	if (!hdr)
3109		return -EMSGSIZE;
3110
3111	genl_dump_check_consistent(cb, hdr);
3112
3113	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
3114		goto nla_put_failure;
3115
3116	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
3117	if (!attr)
3118		goto nla_put_failure;
3119	if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
3120		goto nla_put_failure;
3121	nla_nest_end(skb, attr);
3122
3123	if (nla_put_secy(secy, skb))
3124		goto nla_put_failure;
3125
3126	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
3127	if (!attr)
3128		goto nla_put_failure;
3129
3130	get_tx_sc_stats(dev, &tx_sc_stats);
3131	if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
3132		nla_nest_cancel(skb, attr);
3133		goto nla_put_failure;
3134	}
3135	nla_nest_end(skb, attr);
3136
3137	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
3138	if (!attr)
3139		goto nla_put_failure;
3140	get_secy_stats(dev, &dev_stats);
3141	if (copy_secy_stats(skb, &dev_stats)) {
3142		nla_nest_cancel(skb, attr);
3143		goto nla_put_failure;
3144	}
3145	nla_nest_end(skb, attr);
3146
3147	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
3148	if (!txsa_list)
3149		goto nla_put_failure;
3150	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
3151		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
3152		struct nlattr *txsa_nest;
3153		u64 pn;
3154		int pn_len;
3155
3156		if (!tx_sa)
3157			continue;
3158
3159		txsa_nest = nla_nest_start_noflag(skb, j++);
3160		if (!txsa_nest) {
3161			nla_nest_cancel(skb, txsa_list);
3162			goto nla_put_failure;
3163		}
3164
3165		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
3166		if (!attr) {
3167			nla_nest_cancel(skb, txsa_nest);
3168			nla_nest_cancel(skb, txsa_list);
3169			goto nla_put_failure;
3170		}
3171		memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
3172		get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
3173		if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
3174			nla_nest_cancel(skb, attr);
3175			nla_nest_cancel(skb, txsa_nest);
3176			nla_nest_cancel(skb, txsa_list);
3177			goto nla_put_failure;
3178		}
3179		nla_nest_end(skb, attr);
3180
3181		if (secy->xpn) {
3182			pn = tx_sa->next_pn;
3183			pn_len = MACSEC_XPN_PN_LEN;
3184		} else {
3185			pn = tx_sa->next_pn_halves.lower;
3186			pn_len = MACSEC_DEFAULT_PN_LEN;
3187		}
3188
3189		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3190		    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3191		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
3192		    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
3193		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
3194			nla_nest_cancel(skb, txsa_nest);
3195			nla_nest_cancel(skb, txsa_list);
3196			goto nla_put_failure;
3197		}
3198
3199		nla_nest_end(skb, txsa_nest);
3200	}
3201	nla_nest_end(skb, txsa_list);
3202
3203	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
3204	if (!rxsc_list)
3205		goto nla_put_failure;
3206
3207	j = 1;
3208	for_each_rxsc_rtnl(secy, rx_sc) {
3209		int k;
3210		struct nlattr *rxsa_list;
3211		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);
3212
3213		if (!rxsc_nest) {
3214			nla_nest_cancel(skb, rxsc_list);
3215			goto nla_put_failure;
3216		}
3217
3218		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
3219		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
3220				MACSEC_RXSC_ATTR_PAD)) {
3221			nla_nest_cancel(skb, rxsc_nest);
3222			nla_nest_cancel(skb, rxsc_list);
3223			goto nla_put_failure;
3224		}
3225
3226		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
3227		if (!attr) {
3228			nla_nest_cancel(skb, rxsc_nest);
3229			nla_nest_cancel(skb, rxsc_list);
3230			goto nla_put_failure;
3231		}
3232		memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
3233		get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
3234		if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
3235			nla_nest_cancel(skb, attr);
3236			nla_nest_cancel(skb, rxsc_nest);
3237			nla_nest_cancel(skb, rxsc_list);
3238			goto nla_put_failure;
3239		}
3240		nla_nest_end(skb, attr);
3241
3242		rxsa_list = nla_nest_start_noflag(skb,
3243						  MACSEC_RXSC_ATTR_SA_LIST);
3244		if (!rxsa_list) {
3245			nla_nest_cancel(skb, rxsc_nest);
3246			nla_nest_cancel(skb, rxsc_list);
3247			goto nla_put_failure;
3248		}
3249
3250		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
3251			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
3252			struct nlattr *rxsa_nest;
3253			u64 pn;
3254			int pn_len;
3255
3256			if (!rx_sa)
3257				continue;
3258
3259			rxsa_nest = nla_nest_start_noflag(skb, k++);
3260			if (!rxsa_nest) {
3261				nla_nest_cancel(skb, rxsa_list);
3262				nla_nest_cancel(skb, rxsc_nest);
3263				nla_nest_cancel(skb, rxsc_list);
3264				goto nla_put_failure;
3265			}
3266
3267			attr = nla_nest_start_noflag(skb,
3268						     MACSEC_SA_ATTR_STATS);
3269			if (!attr) {
3270				nla_nest_cancel(skb, rxsa_list);
3271				nla_nest_cancel(skb, rxsc_nest);
3272				nla_nest_cancel(skb, rxsc_list);
3273				goto nla_put_failure;
3274			}
3275			memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
3276			get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
3277			if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
3278				nla_nest_cancel(skb, attr);
3279				nla_nest_cancel(skb, rxsa_list);
3280				nla_nest_cancel(skb, rxsc_nest);
3281				nla_nest_cancel(skb, rxsc_list);
3282				goto nla_put_failure;
3283			}
3284			nla_nest_end(skb, attr);
3285
3286			if (secy->xpn) {
3287				pn = rx_sa->next_pn;
3288				pn_len = MACSEC_XPN_PN_LEN;
3289			} else {
3290				pn = rx_sa->next_pn_halves.lower;
3291				pn_len = MACSEC_DEFAULT_PN_LEN;
3292			}
3293
3294			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3295			    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3296			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
3297			    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
3298			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
3299				nla_nest_cancel(skb, rxsa_nest);
3300				nla_nest_cancel(skb, rxsc_nest);
3301				nla_nest_cancel(skb, rxsc_list);
3302				goto nla_put_failure;
3303			}
3304			nla_nest_end(skb, rxsa_nest);
3305		}
3306
3307		nla_nest_end(skb, rxsa_list);
3308		nla_nest_end(skb, rxsc_nest);
3309	}
3310
3311	nla_nest_end(skb, rxsc_list);
3312
3313	genlmsg_end(skb, hdr);
3314
3315	return 0;
3316
3317nla_put_failure:
3318	genlmsg_cancel(skb, hdr);
3319	return -EMSGSIZE;
3320}
3321
3322static int macsec_generation = 1; /* protected by RTNL */
3323
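/* Dump every SecY in the netns. cb->args[0] holds the index of the next
 * device so the walk can resume across netlink messages; cb->seq is set
 * to macsec_generation so userspace can detect a dump that raced with
 * link creation or deletion.
 */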
3324static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
3325{
3326	struct net *net = sock_net(skb->sk);
3327	struct net_device *dev;
3328	int dev_idx, d;
3329
3330	dev_idx = cb->args[0];
3331
3332	d = 0;
3333	rtnl_lock();
3334
3335	cb->seq = macsec_generation;
3336
3337	for_each_netdev(net, dev) {
3338		struct macsec_secy *secy;
3339
3340		if (d < dev_idx)
3341			goto next;
3342
3343		if (!netif_is_macsec(dev))
3344			goto next;
3345
3346		secy = &macsec_priv(dev)->secy;
3347		if (dump_secy(secy, dev, skb, cb) < 0)
3348			goto done;
3349next:
3350		d++;
3351	}
3352
3353done:
3354	rtnl_unlock();
3355	cb->args[0] = d;
3356	return skb->len;
3357}
3358
3359static const struct genl_small_ops macsec_genl_ops[] = {
3360	{
3361		.cmd = MACSEC_CMD_GET_TXSC,
3362		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3363		.dumpit = macsec_dump_txsc,
3364	},
3365	{
3366		.cmd = MACSEC_CMD_ADD_RXSC,
3367		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3368		.doit = macsec_add_rxsc,
3369		.flags = GENL_ADMIN_PERM,
3370	},
3371	{
3372		.cmd = MACSEC_CMD_DEL_RXSC,
3373		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3374		.doit = macsec_del_rxsc,
3375		.flags = GENL_ADMIN_PERM,
3376	},
3377	{
3378		.cmd = MACSEC_CMD_UPD_RXSC,
3379		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3380		.doit = macsec_upd_rxsc,
3381		.flags = GENL_ADMIN_PERM,
3382	},
3383	{
3384		.cmd = MACSEC_CMD_ADD_TXSA,
3385		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3386		.doit = macsec_add_txsa,
3387		.flags = GENL_ADMIN_PERM,
3388	},
3389	{
3390		.cmd = MACSEC_CMD_DEL_TXSA,
3391		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3392		.doit = macsec_del_txsa,
3393		.flags = GENL_ADMIN_PERM,
3394	},
3395	{
3396		.cmd = MACSEC_CMD_UPD_TXSA,
3397		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3398		.doit = macsec_upd_txsa,
3399		.flags = GENL_ADMIN_PERM,
3400	},
3401	{
3402		.cmd = MACSEC_CMD_ADD_RXSA,
3403		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3404		.doit = macsec_add_rxsa,
3405		.flags = GENL_ADMIN_PERM,
3406	},
3407	{
3408		.cmd = MACSEC_CMD_DEL_RXSA,
3409		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3410		.doit = macsec_del_rxsa,
3411		.flags = GENL_ADMIN_PERM,
3412	},
3413	{
3414		.cmd = MACSEC_CMD_UPD_RXSA,
3415		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3416		.doit = macsec_upd_rxsa,
3417		.flags = GENL_ADMIN_PERM,
3418	},
3419	{
3420		.cmd = MACSEC_CMD_UPD_OFFLOAD,
3421		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3422		.doit = macsec_upd_offload,
3423		.flags = GENL_ADMIN_PERM,
3424	},
3425};
3426
3427static struct genl_family macsec_fam __ro_after_init = {
3428	.name		= MACSEC_GENL_NAME,
3429	.hdrsize	= 0,
3430	.version	= MACSEC_GENL_VERSION,
3431	.maxattr	= MACSEC_ATTR_MAX,
3432	.policy		= macsec_genl_policy,
3433	.netnsok	= true,
3434	.module		= THIS_MODULE,
3435	.small_ops	= macsec_genl_ops,
3436	.n_small_ops	= ARRAY_SIZE(macsec_genl_ops),
3437	.resv_start_op	= MACSEC_CMD_UPD_OFFLOAD + 1,
3438};
3439
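/* Some PHY offload engines require a vendor tag in front of the frame:
 * check that the frame, once tagged, still fits the lower device's MTU,
 * make the skb head and tail writable, then let the PHY driver insert its
 * tag. On failure the skb is consumed and an ERR_PTR is returned.
 */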
3440static struct sk_buff *macsec_insert_tx_tag(struct sk_buff *skb,
3441					    struct net_device *dev)
3442{
3443	struct macsec_dev *macsec = macsec_priv(dev);
3444	const struct macsec_ops *ops;
3445	struct phy_device *phydev;
3446	struct macsec_context ctx;
3447	int skb_final_len;
3448	int err;
3449
3450	ops = macsec_get_ops(macsec, &ctx);
3451	skb_final_len = skb->len - ETH_HLEN + ops->needed_headroom +
3452		ops->needed_tailroom;
3453	if (unlikely(skb_final_len > macsec->real_dev->mtu)) {
3454		err = -EINVAL;
3455		goto cleanup;
3456	}
3457
3458	phydev = macsec->real_dev->phydev;
3459
3460	err = skb_ensure_writable_head_tail(skb, dev);
3461	if (unlikely(err < 0))
3462		goto cleanup;
3463
3464	err = ops->mdo_insert_tx_tag(phydev, skb);
3465	if (unlikely(err))
3466		goto cleanup;
3467
3468	return skb;
3469cleanup:
3470	kfree_skb(skb);
3471	return ERR_PTR(err);
3472}
3473
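/* Transmit path: offloaded SecYs attach the MACsec metadata dst and hand
 * the frame to the lower device (inserting an offload TX tag when the
 * provider needs one); in software mode the frame is sent untagged when
 * protection is off, dropped when the SecY is not operational, or
 * encrypted.
 */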
3474static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
3475				     struct net_device *dev)
3476{
3477	struct macsec_dev *macsec = netdev_priv(dev);
3478	struct macsec_secy *secy = &macsec->secy;
3479	struct pcpu_secy_stats *secy_stats;
3480	int ret, len;
3481
3482	if (macsec_is_offloaded(netdev_priv(dev))) {
3483		struct metadata_dst *md_dst = secy->tx_sc.md_dst;
3484
3485		skb_dst_drop(skb);
3486		dst_hold(&md_dst->dst);
3487		skb_dst_set(skb, &md_dst->dst);
3488
3489		if (macsec->insert_tx_tag) {
3490			skb = macsec_insert_tx_tag(skb, dev);
3491			if (IS_ERR(skb)) {
3492				DEV_STATS_INC(dev, tx_dropped);
3493				return NETDEV_TX_OK;
3494			}
3495		}
3496
3497		skb->dev = macsec->real_dev;
3498		return dev_queue_xmit(skb);
3499	}
3500
3501	/* IEEE 802.1AE-2006 10.5 */
3502	if (!secy->protect_frames) {
3503		secy_stats = this_cpu_ptr(macsec->stats);
3504		u64_stats_update_begin(&secy_stats->syncp);
3505		secy_stats->stats.OutPktsUntagged++;
3506		u64_stats_update_end(&secy_stats->syncp);
3507		skb->dev = macsec->real_dev;
3508		len = skb->len;
3509		ret = dev_queue_xmit(skb);
3510		count_tx(dev, ret, len);
3511		return ret;
3512	}
3513
3514	if (!secy->operational) {
3515		kfree_skb(skb);
3516		DEV_STATS_INC(dev, tx_dropped);
3517		return NETDEV_TX_OK;
3518	}
3519
3520	len = skb->len;
3521	skb = macsec_encrypt(skb, dev);
3522	if (IS_ERR(skb)) {
3523		if (PTR_ERR(skb) != -EINPROGRESS)
3524			DEV_STATS_INC(dev, tx_dropped);
3525		return NETDEV_TX_OK;
3526	}
3527
3528	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
3529
3530	macsec_encrypt_finish(skb, dev);
3531	ret = dev_queue_xmit(skb);
3532	count_tx(dev, ret, len);
3533	return ret;
3534}
3535
3536#define MACSEC_FEATURES \
3537	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
3538
3539#define MACSEC_OFFLOAD_FEATURES \
3540	(MACSEC_FEATURES | NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES | \
3541	 NETIF_F_LRO | NETIF_F_RXHASH | NETIF_F_CSUM_MASK | NETIF_F_RXCSUM)
3542
3543static int macsec_dev_init(struct net_device *dev)
3544{
3545	struct macsec_dev *macsec = macsec_priv(dev);
3546	struct net_device *real_dev = macsec->real_dev;
3547	int err;
3548
3549	err = gro_cells_init(&macsec->gro_cells, dev);
3550	if (err)
3551		return err;
3552
3553	macsec_inherit_tso_max(dev);
3554
3555	dev->hw_features = real_dev->hw_features & MACSEC_OFFLOAD_FEATURES;
3556	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
3557
3558	dev->features = real_dev->features & MACSEC_OFFLOAD_FEATURES;
3559	dev->features |= NETIF_F_GSO_SOFTWARE;
3560	dev->lltx = true;
3561	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
3562
3563	macsec_set_head_tail_room(dev);
3564
3565	if (is_zero_ether_addr(dev->dev_addr))
3566		eth_hw_addr_inherit(dev, real_dev);
3567	if (is_zero_ether_addr(dev->broadcast))
3568		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
3569
3570	/* Get macsec's reference to real_dev */
3571	netdev_hold(real_dev, &macsec->dev_tracker, GFP_KERNEL);
3572
3573	return 0;
3574}
3575
3576static void macsec_dev_uninit(struct net_device *dev)
3577{
3578	struct macsec_dev *macsec = macsec_priv(dev);
3579
3580	gro_cells_destroy(&macsec->gro_cells);
3581}
3582
3583static netdev_features_t macsec_fix_features(struct net_device *dev,
3584					     netdev_features_t features)
3585{
3586	struct macsec_dev *macsec = macsec_priv(dev);
3587	struct net_device *real_dev = macsec->real_dev;
3588	netdev_features_t mask;
3589
3590	mask = macsec_is_offloaded(macsec) ? MACSEC_OFFLOAD_FEATURES
3591					   : MACSEC_FEATURES;
3592
3593	features &= (real_dev->features & mask) |
3594		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
3595
3596	return features;
3597}
3598
3599static int macsec_dev_open(struct net_device *dev)
3600{
3601	struct macsec_dev *macsec = macsec_priv(dev);
3602	struct net_device *real_dev = macsec->real_dev;
3603	int err;
3604
3605	err = dev_uc_add(real_dev, dev->dev_addr);
3606	if (err < 0)
3607		return err;
3608
3609	if (dev->flags & IFF_ALLMULTI) {
3610		err = dev_set_allmulti(real_dev, 1);
3611		if (err < 0)
3612			goto del_unicast;
3613	}
3614
3615	if (dev->flags & IFF_PROMISC) {
3616		err = dev_set_promiscuity(real_dev, 1);
3617		if (err < 0)
3618			goto clear_allmulti;
3619	}
3620
3621	/* If h/w offloading is available, propagate to the device */
3622	if (macsec_is_offloaded(macsec)) {
3623		const struct macsec_ops *ops;
3624		struct macsec_context ctx;
3625
3626		ops = macsec_get_ops(netdev_priv(dev), &ctx);
3627		if (!ops) {
3628			err = -EOPNOTSUPP;
3629			goto clear_allmulti;
3630		}
3631
3632		ctx.secy = &macsec->secy;
3633		err = macsec_offload(ops->mdo_dev_open, &ctx);
3634		if (err)
3635			goto clear_allmulti;
3636	}
3637
3638	if (netif_carrier_ok(real_dev))
3639		netif_carrier_on(dev);
3640
3641	return 0;
3642clear_allmulti:
3643	if (dev->flags & IFF_ALLMULTI)
3644		dev_set_allmulti(real_dev, -1);
3645del_unicast:
3646	dev_uc_del(real_dev, dev->dev_addr);
3647	netif_carrier_off(dev);
3648	return err;
3649}
3650
3651static int macsec_dev_stop(struct net_device *dev)
3652{
3653	struct macsec_dev *macsec = macsec_priv(dev);
3654	struct net_device *real_dev = macsec->real_dev;
3655
3656	netif_carrier_off(dev);
3657
3658	/* If h/w offloading is available, propagate to the device */
3659	if (macsec_is_offloaded(macsec)) {
3660		const struct macsec_ops *ops;
3661		struct macsec_context ctx;
3662
3663		ops = macsec_get_ops(macsec, &ctx);
3664		if (ops) {
3665			ctx.secy = &macsec->secy;
3666			macsec_offload(ops->mdo_dev_stop, &ctx);
3667		}
3668	}
3669
3670	dev_mc_unsync(real_dev, dev);
3671	dev_uc_unsync(real_dev, dev);
3672
3673	if (dev->flags & IFF_ALLMULTI)
3674		dev_set_allmulti(real_dev, -1);
3675
3676	if (dev->flags & IFF_PROMISC)
3677		dev_set_promiscuity(real_dev, -1);
3678
3679	dev_uc_del(real_dev, dev->dev_addr);
3680
3681	return 0;
3682}
3683
3684static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
3685{
3686	struct net_device *real_dev = macsec_priv(dev)->real_dev;
3687
3688	if (!(dev->flags & IFF_UP))
3689		return;
3690
3691	if (change & IFF_ALLMULTI)
3692		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
3693
3694	if (change & IFF_PROMISC)
3695		dev_set_promiscuity(real_dev,
3696				    dev->flags & IFF_PROMISC ? 1 : -1);
3697}
3698
3699static void macsec_dev_set_rx_mode(struct net_device *dev)
3700{
3701	struct net_device *real_dev = macsec_priv(dev)->real_dev;
3702
3703	dev_mc_sync(real_dev, dev);
3704	dev_uc_sync(real_dev, dev);
3705}
3706
3707static int macsec_set_mac_address(struct net_device *dev, void *p)
3708{
3709	struct macsec_dev *macsec = macsec_priv(dev);
3710	struct net_device *real_dev = macsec->real_dev;
3711	struct sockaddr *addr = p;
3712	u8  old_addr[ETH_ALEN];
3713	int err;
3714
3715	if (!is_valid_ether_addr(addr->sa_data))
3716		return -EADDRNOTAVAIL;
3717
3718	if (dev->flags & IFF_UP) {
3719		err = dev_uc_add(real_dev, addr->sa_data);
3720		if (err < 0)
3721			return err;
3722	}
3723
3724	ether_addr_copy(old_addr, dev->dev_addr);
3725	eth_hw_addr_set(dev, addr->sa_data);
3726
3727	/* If h/w offloading is available, propagate to the device */
3728	if (macsec_is_offloaded(macsec)) {
3729		const struct macsec_ops *ops;
3730		struct macsec_context ctx;
3731
3732		ops = macsec_get_ops(macsec, &ctx);
3733		if (!ops) {
3734			err = -EOPNOTSUPP;
3735			goto restore_old_addr;
3736		}
3737
3738		ctx.secy = &macsec->secy;
3739		err = macsec_offload(ops->mdo_upd_secy, &ctx);
3740		if (err)
3741			goto restore_old_addr;
3742	}
3743
3744	if (dev->flags & IFF_UP)
3745		dev_uc_del(real_dev, old_addr);
3746
3747	return 0;
3748
3749restore_old_addr:
3750	if (dev->flags & IFF_UP)
3751		dev_uc_del(real_dev, addr->sa_data);
3752
3753	eth_hw_addr_set(dev, old_addr);
3754
3755	return err;
3756}
3757
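/* The MTU is bounded by the lower device's MTU minus the worst-case
 * MACsec overhead: a SecTAG including the SCI, plus the ICV.
 */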
3758static int macsec_change_mtu(struct net_device *dev, int new_mtu)
3759{
3760	struct macsec_dev *macsec = macsec_priv(dev);
3761	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);
3762
3763	if (macsec->real_dev->mtu - extra < new_mtu)
3764		return -ERANGE;
3765
3766	WRITE_ONCE(dev->mtu, new_mtu);
3767
3768	return 0;
3769}
3770
3771static void macsec_get_stats64(struct net_device *dev,
3772			       struct rtnl_link_stats64 *s)
3773{
3774	if (!dev->tstats)
3775		return;
3776
3777	dev_fetch_sw_netstats(s, dev->tstats);
3778
3779	s->rx_dropped = DEV_STATS_READ(dev, rx_dropped);
3780	s->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
3781	s->rx_errors = DEV_STATS_READ(dev, rx_errors);
3782}
3783
3784static int macsec_get_iflink(const struct net_device *dev)
3785{
3786	return READ_ONCE(macsec_priv(dev)->real_dev->ifindex);
3787}
3788
3789static const struct net_device_ops macsec_netdev_ops = {
3790	.ndo_init		= macsec_dev_init,
3791	.ndo_uninit		= macsec_dev_uninit,
3792	.ndo_open		= macsec_dev_open,
3793	.ndo_stop		= macsec_dev_stop,
3794	.ndo_fix_features	= macsec_fix_features,
3795	.ndo_change_mtu		= macsec_change_mtu,
3796	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
3797	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
3798	.ndo_set_mac_address	= macsec_set_mac_address,
3799	.ndo_start_xmit		= macsec_start_xmit,
3800	.ndo_get_stats64	= macsec_get_stats64,
3801	.ndo_get_iflink		= macsec_get_iflink,
3802};
3803
3804static const struct device_type macsec_type = {
3805	.name = "macsec",
3806};
3807
3808static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
3809	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
3810	[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
3811	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
3812	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
3813	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
3814	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
3815	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
3816	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
3817	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
3818	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
3819	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
3820	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
3821	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
3822	[IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
3823};
3824
3825static void macsec_free_netdev(struct net_device *dev)
3826{
3827	struct macsec_dev *macsec = macsec_priv(dev);
3828
3829	dst_release(&macsec->secy.tx_sc.md_dst->dst);
3830	free_percpu(macsec->stats);
3831	free_percpu(macsec->secy.tx_sc.stats);
3832
3833	/* Drop macsec's reference to real_dev */
3834	netdev_put(macsec->real_dev, &macsec->dev_tracker);
3835}
3836
3837static void macsec_setup(struct net_device *dev)
3838{
3839	ether_setup(dev);
3840	dev->min_mtu = 0;
3841	dev->max_mtu = ETH_MAX_MTU;
3842	dev->priv_flags |= IFF_NO_QUEUE;
3843	dev->netdev_ops = &macsec_netdev_ops;
3844	dev->needs_free_netdev = true;
3845	dev->priv_destructor = macsec_free_netdev;
3846	SET_NETDEV_DEVTYPE(dev, &macsec_type);
3847
3848	eth_zero_addr(dev->broadcast);
3849}
3850
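/* Apply rtnetlink attributes to the SecY and its transmit SC. The cipher
 * suite handling below is only reachable on link creation:
 * macsec_changelink() rejects cipher suite, ICV length, SCI and port
 * changes up front.
 */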
3851static int macsec_changelink_common(struct net_device *dev,
3852				    struct nlattr *data[])
3853{
3854	struct macsec_secy *secy;
3855	struct macsec_tx_sc *tx_sc;
3856
3857	secy = &macsec_priv(dev)->secy;
3858	tx_sc = &secy->tx_sc;
3859
3860	if (data[IFLA_MACSEC_ENCODING_SA]) {
3861		struct macsec_tx_sa *tx_sa;
3862
3863		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
3864		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);
3865
3866		secy->operational = tx_sa && tx_sa->active;
3867	}
3868
3869	if (data[IFLA_MACSEC_ENCRYPT])
3870		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
3871
3872	if (data[IFLA_MACSEC_PROTECT])
3873		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);
3874
3875	if (data[IFLA_MACSEC_INC_SCI])
3876		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
3877
3878	if (data[IFLA_MACSEC_ES])
3879		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);
3880
3881	if (data[IFLA_MACSEC_SCB])
3882		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);
3883
3884	if (data[IFLA_MACSEC_REPLAY_PROTECT])
3885		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);
3886
3887	if (data[IFLA_MACSEC_VALIDATION])
3888		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
3889
3890	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
3891		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
3892		case MACSEC_CIPHER_ID_GCM_AES_128:
3893		case MACSEC_DEFAULT_CIPHER_ID:
3894			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
3895			secy->xpn = false;
3896			break;
3897		case MACSEC_CIPHER_ID_GCM_AES_256:
3898			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
3899			secy->xpn = false;
3900			break;
3901		case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
3902			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
3903			secy->xpn = true;
3904			break;
3905		case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
3906			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
3907			secy->xpn = true;
3908			break;
3909		default:
3910			return -EINVAL;
3911		}
3912	}
3913
3914	if (data[IFLA_MACSEC_WINDOW]) {
3915		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
3916
3917		/* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
3918		 * for XPN cipher suites */
3919		if (secy->xpn &&
3920		    secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
3921			return -EINVAL;
3922	}
3923
3924	return 0;
3925}
3926
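/* changelink with rollback: the SecY and TX SC are snapshotted first so
 * that a failure in the common path or in offload propagation can restore
 * the previous configuration via the cleanup label.
 */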
3927static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
3928			     struct nlattr *data[],
3929			     struct netlink_ext_ack *extack)
3930{
3931	struct macsec_dev *macsec = macsec_priv(dev);
3932	bool macsec_offload_state_change = false;
3933	enum macsec_offload offload;
3934	struct macsec_tx_sc tx_sc;
3935	struct macsec_secy secy;
3936	int ret;
3937
3938	if (!data)
3939		return 0;
3940
3941	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
3942	    data[IFLA_MACSEC_ICV_LEN] ||
3943	    data[IFLA_MACSEC_SCI] ||
3944	    data[IFLA_MACSEC_PORT])
3945		return -EINVAL;
3946
3947	/* Keep a copy of unmodified secy and tx_sc, in case the offload
3948	 * propagation fails, to revert macsec_changelink_common.
3949	 */
3950	memcpy(&secy, &macsec->secy, sizeof(secy));
3951	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));
3952
3953	ret = macsec_changelink_common(dev, data);
3954	if (ret)
3955		goto cleanup;
3956
3957	if (data[IFLA_MACSEC_OFFLOAD]) {
3958		offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]);
3959		if (macsec->offload != offload) {
3960			macsec_offload_state_change = true;
3961			ret = macsec_update_offload(dev, offload);
3962			if (ret)
3963				goto cleanup;
3964		}
3965	}
3966
3967	/* If h/w offloading is available, propagate to the device */
3968	if (!macsec_offload_state_change && macsec_is_offloaded(macsec)) {
3969		const struct macsec_ops *ops;
3970		struct macsec_context ctx;
3971
3972		ops = macsec_get_ops(netdev_priv(dev), &ctx);
3973		if (!ops) {
3974			ret = -EOPNOTSUPP;
3975			goto cleanup;
3976		}
3977
3978		ctx.secy = &macsec->secy;
3979		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
3980		if (ret)
3981			goto cleanup;
3982	}
3983
3984	return 0;
3985
3986cleanup:
3987	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
3988	memcpy(&macsec->secy, &secy, sizeof(secy));
3989
3990	return ret;
3991}
3992
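/* Tears down the receive channels and transmit SAs of a SecY under rtnl;
 * the actual freeing is deferred through RCU by free_rx_sc()/clear_tx_sa().
 */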
3993static void macsec_del_dev(struct macsec_dev *macsec)
3994{
3995	int i;
3996
3997	while (macsec->secy.rx_sc) {
3998		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);
3999
4000		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
4001		free_rx_sc(rx_sc);
4002	}
4003
4004	for (i = 0; i < MACSEC_NUM_AN; i++) {
4005		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);
4006
4007		if (sa) {
4008			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
4009			clear_tx_sa(sa);
4010		}
4011	}
4012}
4013
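/* Teardown shared by explicit dellink and lower-device unregister: notify
 * the offload engine if one is in use, unlink from the lower device, and
 * bump macsec_generation, which the dump path uses to detect concurrent
 * changes.
 */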
4014static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
4015{
4016	struct macsec_dev *macsec = macsec_priv(dev);
4017	struct net_device *real_dev = macsec->real_dev;
4018
4019	/* If h/w offloading is available, propagate to the device */
4020	if (macsec_is_offloaded(macsec)) {
4021		const struct macsec_ops *ops;
4022		struct macsec_context ctx;
4023
4024		ops = macsec_get_ops(netdev_priv(dev), &ctx);
4025		if (ops) {
4026			ctx.secy = &macsec->secy;
4027			macsec_offload(ops->mdo_del_secy, &ctx);
4028		}
4029	}
4030
4031	unregister_netdevice_queue(dev, head);
4032	list_del_rcu(&macsec->secys);
4033	macsec_del_dev(macsec);
4034	netdev_upper_dev_unlink(real_dev, dev);
4035
4036	macsec_generation++;
4037}
4038
4039static void macsec_dellink(struct net_device *dev, struct list_head *head)
4040{
4041	struct macsec_dev *macsec = macsec_priv(dev);
4042	struct net_device *real_dev = macsec->real_dev;
4043	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
4044
4045	macsec_common_dellink(dev, head);
4046
4047	if (list_empty(&rxd->secys)) {
4048		netdev_rx_handler_unregister(real_dev);
4049		kfree(rxd);
4050	}
4051}
4052
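/* The first macsec device on a lower device registers the rx_handler and
 * allocates the shared rxh_data; later devices just join its secys list.
 */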
4053static int register_macsec_dev(struct net_device *real_dev,
4054			       struct net_device *dev)
4055{
4056	struct macsec_dev *macsec = macsec_priv(dev);
4057	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
4058
4059	if (!rxd) {
4060		int err;
4061
4062		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
4063		if (!rxd)
4064			return -ENOMEM;
4065
4066		INIT_LIST_HEAD(&rxd->secys);
4067
4068		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
4069						 rxd);
4070		if (err < 0) {
4071			kfree(rxd);
4072			return err;
4073		}
4074	}
4075
4076	list_add_tail_rcu(&macsec->secys, &rxd->secys);
4077	return 0;
4078}
4079
4080static bool sci_exists(struct net_device *dev, sci_t sci)
4081{
4082	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
4083	struct macsec_dev *macsec;
4084
4085	list_for_each_entry(macsec, &rxd->secys, secys) {
4086		if (macsec->secy.sci == sci)
4087			return true;
4088	}
4089
4090	return false;
4091}
4092
4093static sci_t dev_to_sci(struct net_device *dev, __be16 port)
4094{
4095	return make_sci(dev->dev_addr, port);
4096}
4097
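/* Allocates the per-CPU stats and the metadata dst, then fills in the
 * SecY defaults: GCM-AES-128 key length, strict validation, frame
 * protection on, replay protection off.
 */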
4098static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
4099{
4100	struct macsec_dev *macsec = macsec_priv(dev);
4101	struct macsec_secy *secy = &macsec->secy;
4102
4103	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
4104	if (!macsec->stats)
4105		return -ENOMEM;
4106
4107	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
4108	if (!secy->tx_sc.stats)
4109		return -ENOMEM;
4110
4111	secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
4112	if (!secy->tx_sc.md_dst)
4113		/* macsec and secy percpu stats will be freed when unregistering
4114		 * net_device in macsec_free_netdev()
4115		 */
4116		return -ENOMEM;
4117
4118	if (sci == MACSEC_UNDEF_SCI)
4119		sci = dev_to_sci(dev, MACSEC_PORT_ES);
4120
4121	secy->netdev = dev;
4122	secy->operational = true;
4123	secy->key_len = DEFAULT_SAK_LEN;
4124	secy->icv_len = icv_len;
4125	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
4126	secy->protect_frames = true;
4127	secy->replay_protect = false;
4128	secy->xpn = DEFAULT_XPN;
4129
4130	secy->sci = sci;
4131	secy->tx_sc.md_dst->u.macsec_info.sci = sci;
4132	secy->tx_sc.active = true;
4133	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
4134	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
4135	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
4136	secy->tx_sc.end_station = false;
4137	secy->tx_sc.scb = false;
4138
4139	return 0;
4140}
4141
4142static struct lock_class_key macsec_netdev_addr_lock_key;
4143
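/* newlink: resolve the lower device from IFLA_LINK, size the MTU for
 * SecTAG+ICV overhead, register and stack the device, derive the SCI
 * (explicit, from IFLA_MACSEC_PORT, or from the device MAC and the ES
 * port), then apply the remaining attributes and, if requested, hand the
 * SecY to the offload engine. Errors unwind through the del_dev/unlink/
 * unregister labels in reverse order.
 */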
4144static int macsec_newlink(struct net *net, struct net_device *dev,
4145			  struct nlattr *tb[], struct nlattr *data[],
4146			  struct netlink_ext_ack *extack)
4147{
4148	struct macsec_dev *macsec = macsec_priv(dev);
4149	rx_handler_func_t *rx_handler;
4150	u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
4151	struct net_device *real_dev;
4152	int err, mtu;
4153	sci_t sci;
4154
4155	if (!tb[IFLA_LINK])
4156		return -EINVAL;
4157	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
4158	if (!real_dev)
4159		return -ENODEV;
4160	if (real_dev->type != ARPHRD_ETHER)
4161		return -EINVAL;
4162
4163	dev->priv_flags |= IFF_MACSEC;
4164
4165	macsec->real_dev = real_dev;
4166
4167	if (data && data[IFLA_MACSEC_OFFLOAD])
4168		macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
4169	else
4170		/* MACsec offloading is off by default */
4171		macsec->offload = MACSEC_OFFLOAD_OFF;
4172
4173	/* Check if the offloading mode is supported by the underlying layers */
4174	if (macsec->offload != MACSEC_OFFLOAD_OFF &&
4175	    !macsec_check_offload(macsec->offload, macsec))
4176		return -EOPNOTSUPP;
4177
4178	/* when a SCI is configured explicitly, it must also be transmitted: INC_SCI may not be cleared */
4179	if (data && data[IFLA_MACSEC_SCI] &&
4180	    data[IFLA_MACSEC_INC_SCI]) {
4181		u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
4182
4183		if (!send_sci)
4184			return -EINVAL;
4185	}
4186
4187	if (data && data[IFLA_MACSEC_ICV_LEN])
4188		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
4189	mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
4190	if (mtu < 0)
4191		dev->mtu = 0;
4192	else
4193		dev->mtu = mtu;
4194
4195	rx_handler = rtnl_dereference(real_dev->rx_handler);
4196	if (rx_handler && rx_handler != macsec_handle_frame)
4197		return -EBUSY;
4198
4199	err = register_netdevice(dev);
4200	if (err < 0)
4201		return err;
4202
4203	netdev_lockdep_set_classes(dev);
4204	lockdep_set_class(&dev->addr_list_lock,
4205			  &macsec_netdev_addr_lock_key);
4206
4207	err = netdev_upper_dev_link(real_dev, dev, extack);
4208	if (err < 0)
4209		goto unregister;
4210
4211	/* the device must already be registered so that ->init has run
4212	 * and the MAC address is set
4213	 */
4214	if (data && data[IFLA_MACSEC_SCI])
4215		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
4216	else if (data && data[IFLA_MACSEC_PORT])
4217		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
4218	else
4219		sci = dev_to_sci(dev, MACSEC_PORT_ES);
4220
4221	if (rx_handler && sci_exists(real_dev, sci)) {
4222		err = -EBUSY;
4223		goto unlink;
4224	}
4225
4226	err = macsec_add_dev(dev, sci, icv_len);
4227	if (err)
4228		goto unlink;
4229
4230	if (data) {
4231		err = macsec_changelink_common(dev, data);
4232		if (err)
4233			goto del_dev;
4234	}
4235
4236	/* If h/w offloading is available, propagate to the device */
4237	if (macsec_is_offloaded(macsec)) {
4238		const struct macsec_ops *ops;
4239		struct macsec_context ctx;
4240
4241		ops = macsec_get_ops(macsec, &ctx);
4242		if (ops) {
4243			ctx.secy = &macsec->secy;
4244			err = macsec_offload(ops->mdo_add_secy, &ctx);
4245			if (err)
4246				goto del_dev;
4247
4248			macsec->insert_tx_tag =
4249				macsec_needs_tx_tag(macsec, ops);
4250		}
4251	}
4252
4253	err = register_macsec_dev(real_dev, dev);
4254	if (err < 0)
4255		goto del_dev;
4256
4257	netif_stacked_transfer_operstate(real_dev, dev);
4258	linkwatch_fire_event(dev);
4259
4260	macsec_generation++;
4261
4262	return 0;
4263
4264del_dev:
4265	macsec_del_dev(macsec);
4266unlink:
4267	netdev_upper_dev_unlink(real_dev, dev);
4268unregister:
4269	unregister_netdevice(dev);
4270	return err;
4271}
4272
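/* Validation before newlink. A non-default ICV length is probed by
 * allocating a throwaway gcm(aes) transform, so only lengths the crypto
 * layer accepts get through; ES, SCB and INC_SCI are mutually exclusive,
 * and enabling replay protection requires an explicit window.
 */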
4273static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
4274				struct netlink_ext_ack *extack)
4275{
4276	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
4277	u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
4278	int flag;
4279	bool es, scb, sci;
4280
4281	if (!data)
4282		return 0;
4283
4284	if (data[IFLA_MACSEC_CIPHER_SUITE])
4285		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);
4286
4287	if (data[IFLA_MACSEC_ICV_LEN]) {
4288		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
4289		if (icv_len != MACSEC_DEFAULT_ICV_LEN) {
4290			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
4291			struct crypto_aead *dummy_tfm;
4292
4293			dummy_tfm = macsec_alloc_tfm(dummy_key,
4294						     DEFAULT_SAK_LEN,
4295						     icv_len);
4296			if (IS_ERR(dummy_tfm))
4297				return PTR_ERR(dummy_tfm);
4298			crypto_free_aead(dummy_tfm);
4299		}
4300	}
4301
4302	switch (csid) {
4303	case MACSEC_CIPHER_ID_GCM_AES_128:
4304	case MACSEC_CIPHER_ID_GCM_AES_256:
4305	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
4306	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
4307	case MACSEC_DEFAULT_CIPHER_ID:
4308		if (icv_len < MACSEC_MIN_ICV_LEN ||
4309		    icv_len > MACSEC_STD_ICV_LEN)
4310			return -EINVAL;
4311		break;
4312	default:
4313		return -EINVAL;
4314	}
4315
4316	if (data[IFLA_MACSEC_ENCODING_SA]) {
4317		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
4318			return -EINVAL;
4319	}
4320
4321	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
4322	     flag < IFLA_MACSEC_VALIDATION;
4323	     flag++) {
4324		if (data[flag]) {
4325			if (nla_get_u8(data[flag]) > 1)
4326				return -EINVAL;
4327		}
4328	}
4329
4330	es  = nla_get_u8_default(data[IFLA_MACSEC_ES], false);
4331	sci = nla_get_u8_default(data[IFLA_MACSEC_INC_SCI], false);
4332	scb = nla_get_u8_default(data[IFLA_MACSEC_SCB], false);
4333
4334	if ((sci && (scb || es)) || (scb && es))
4335		return -EINVAL;
4336
4337	if (data[IFLA_MACSEC_VALIDATION] &&
4338	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
4339		return -EINVAL;
4340
4341	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
4342	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
4343	    !data[IFLA_MACSEC_WINDOW])
4344		return -EINVAL;
4345
4346	return 0;
4347}
4348
4349static struct net *macsec_get_link_net(const struct net_device *dev)
4350{
4351	return dev_net(macsec_priv(dev)->real_dev);
4352}
4353
4354struct net_device *macsec_get_real_dev(const struct net_device *dev)
4355{
4356	return macsec_priv(dev)->real_dev;
4357}
4358EXPORT_SYMBOL_GPL(macsec_get_real_dev);
4359
4360bool macsec_netdev_is_offloaded(struct net_device *dev)
4361{
4362	return macsec_is_offloaded(macsec_priv(dev));
4363}
4364EXPORT_SYMBOL_GPL(macsec_netdev_is_offloaded);
4365
4366static size_t macsec_get_size(const struct net_device *dev)
4367{
4368	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
4369		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
4370		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
4371		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
4372		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
4373		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
4374		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
4375		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
4376		nla_total_size(1) + /* IFLA_MACSEC_ES */
4377		nla_total_size(1) + /* IFLA_MACSEC_SCB */
4378		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
4379		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
4380		nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */
4381		0;
4382}
4383
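/* Reports the link configuration. 128-bit GCM-AES without XPN is
 * deliberately reported as MACSEC_DEFAULT_CIPHER_ID rather than
 * MACSEC_CIPHER_ID_GCM_AES_128, apparently to keep older userspace that
 * only knows the default ID working.
 */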
4384static int macsec_fill_info(struct sk_buff *skb,
4385			    const struct net_device *dev)
4386{
4387	struct macsec_tx_sc *tx_sc;
4388	struct macsec_dev *macsec;
4389	struct macsec_secy *secy;
4390	u64 csid;
4391
4392	macsec = macsec_priv(dev);
4393	secy = &macsec->secy;
4394	tx_sc = &secy->tx_sc;
4395
4396	switch (secy->key_len) {
4397	case MACSEC_GCM_AES_128_SAK_LEN:
4398		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
4399		break;
4400	case MACSEC_GCM_AES_256_SAK_LEN:
4401		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
4402		break;
4403	default:
4404		goto nla_put_failure;
4405	}
4406
4407	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
4408			IFLA_MACSEC_PAD) ||
4409	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
4410	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
4411			      csid, IFLA_MACSEC_PAD) ||
4412	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
4413	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
4414	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
4415	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
4416	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
4417	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
4418	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
4419	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
4420	    nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) ||
4421	    0)
4422		goto nla_put_failure;
4423
4424	if (secy->replay_protect) {
4425		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
4426			goto nla_put_failure;
4427	}
4428
4429	return 0;
4430
4431nla_put_failure:
4432	return -EMSGSIZE;
4433}
4434
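/* rtnl_link glue, driven from userspace by e.g. iproute2; an illustrative
 * invocation (device names are placeholders):
 *
 *   ip link add link eth0 macsec0 type macsec encrypt on
 *
 * which exercises macsec_validate_attr(), macsec_newlink() and the
 * macsec_rtnl_policy above.
 */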
4435static struct rtnl_link_ops macsec_link_ops __read_mostly = {
4436	.kind		= "macsec",
4437	.priv_size	= sizeof(struct macsec_dev),
4438	.maxtype	= IFLA_MACSEC_MAX,
4439	.policy		= macsec_rtnl_policy,
4440	.setup		= macsec_setup,
4441	.validate	= macsec_validate_attr,
4442	.newlink	= macsec_newlink,
4443	.changelink	= macsec_changelink,
4444	.dellink	= macsec_dellink,
4445	.get_size	= macsec_get_size,
4446	.fill_info	= macsec_fill_info,
4447	.get_link_net	= macsec_get_link_net,
4448};
4449
4450static bool is_macsec_master(struct net_device *dev)
4451{
4452	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
4453}
4454
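/* netdevice notifier: mirrors lower-device events onto the stacked macsec
 * devices - operstate on UP/DOWN/CHANGE, full teardown on UNREGISTER, MTU
 * clamping on CHANGEMTU, and TSO/feature re-evaluation on FEAT_CHANGE.
 */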
4455static int macsec_notify(struct notifier_block *this, unsigned long event,
4456			 void *ptr)
4457{
4458	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
4459	struct macsec_rxh_data *rxd;
4460	struct macsec_dev *m, *n;
4461	LIST_HEAD(head);
4462
4463	if (!is_macsec_master(real_dev))
4464		return NOTIFY_DONE;
4465
4466	rxd = macsec_data_rtnl(real_dev);
4467
4468	switch (event) {
4469	case NETDEV_DOWN:
4470	case NETDEV_UP:
4471	case NETDEV_CHANGE:
4472		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
4473			struct net_device *dev = m->secy.netdev;
4474
4475			netif_stacked_transfer_operstate(real_dev, dev);
4476		}
4477		break;
4478	case NETDEV_UNREGISTER:
4479		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
4480			macsec_common_dellink(m->secy.netdev, &head);
4481		}
4482
4483		netdev_rx_handler_unregister(real_dev);
4484		kfree(rxd);
4485
4486		unregister_netdevice_many(&head);
4487		break;
4488	case NETDEV_CHANGEMTU:
4489		list_for_each_entry(m, &rxd->secys, secys) {
4490			struct net_device *dev = m->secy.netdev;
4491			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
4492							    macsec_extra_len(true));
4493
4494			if (dev->mtu > mtu)
4495				dev_set_mtu(dev, mtu);
4496		}
4497		break;
4498	case NETDEV_FEAT_CHANGE:
4499		list_for_each_entry(m, &rxd->secys, secys) {
4500			macsec_inherit_tso_max(m->secy.netdev);
4501			netdev_update_features(m->secy.netdev);
4502		}
4503		break;
4504	}
4505
4506	return NOTIFY_OK;
4507}
4508
4509static struct notifier_block macsec_notifier = {
4510	.notifier_call = macsec_notify,
4511};
4512
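/* Module init registers the netdevice notifier, the rtnl link ops and the
 * generic netlink family in that order, unwinding in reverse on failure;
 * macsec_exit() mirrors the reverse order and uses rcu_barrier() to let
 * in-flight RCU callbacks finish before the module text goes away.
 */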
4513static int __init macsec_init(void)
4514{
4515	int err;
4516
4517	pr_info("MACsec IEEE 802.1AE\n");
4518	err = register_netdevice_notifier(&macsec_notifier);
4519	if (err)
4520		return err;
4521
4522	err = rtnl_link_register(&macsec_link_ops);
4523	if (err)
4524		goto notifier;
4525
4526	err = genl_register_family(&macsec_fam);
4527	if (err)
4528		goto rtnl;
4529
4530	return 0;
4531
4532rtnl:
4533	rtnl_link_unregister(&macsec_link_ops);
4534notifier:
4535	unregister_netdevice_notifier(&macsec_notifier);
4536	return err;
4537}
4538
4539static void __exit macsec_exit(void)
4540{
4541	genl_unregister_family(&macsec_fam);
4542	rtnl_link_unregister(&macsec_link_ops);
4543	unregister_netdevice_notifier(&macsec_notifier);
4544	rcu_barrier();
4545}
4546
4547module_init(macsec_init);
4548module_exit(macsec_exit);
4549
4550MODULE_ALIAS_RTNL_LINK("macsec");
4551MODULE_ALIAS_GENL_FAMILY("macsec");
4552
4553MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
4554MODULE_LICENSE("GPL v2");
v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * drivers/net/macsec.c - MACsec device
   4 *
   5 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
   6 */
   7
   8#include <linux/types.h>
   9#include <linux/skbuff.h>
  10#include <linux/socket.h>
  11#include <linux/module.h>
  12#include <crypto/aead.h>
  13#include <linux/etherdevice.h>
  14#include <linux/netdevice.h>
  15#include <linux/rtnetlink.h>
  16#include <linux/refcount.h>
  17#include <net/genetlink.h>
  18#include <net/sock.h>
  19#include <net/gro_cells.h>
  20#include <net/macsec.h>
 
  21#include <linux/phy.h>
  22#include <linux/byteorder/generic.h>
  23#include <linux/if_arp.h>
  24
  25#include <uapi/linux/if_macsec.h>
  26
  27#define MACSEC_SCI_LEN 8
  28
  29/* SecTAG length = macsec_eth_header without the optional SCI */
  30#define MACSEC_TAG_LEN 6
  31
  32struct macsec_eth_header {
  33	struct ethhdr eth;
  34	/* SecTAG */
  35	u8  tci_an;
  36#if defined(__LITTLE_ENDIAN_BITFIELD)
  37	u8  short_length:6,
  38		  unused:2;
  39#elif defined(__BIG_ENDIAN_BITFIELD)
  40	u8        unused:2,
  41	    short_length:6;
  42#else
  43#error	"Please fix <asm/byteorder.h>"
  44#endif
  45	__be32 packet_number;
  46	u8 secure_channel_id[8]; /* optional */
  47} __packed;
  48
  49#define MACSEC_TCI_VERSION 0x80
  50#define MACSEC_TCI_ES      0x40 /* end station */
  51#define MACSEC_TCI_SC      0x20 /* SCI present */
  52#define MACSEC_TCI_SCB     0x10 /* epon */
  53#define MACSEC_TCI_E       0x08 /* encryption */
  54#define MACSEC_TCI_C       0x04 /* changed text */
  55#define MACSEC_AN_MASK     0x03 /* association number */
  56#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)
  57
  58/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
  59#define MIN_NON_SHORT_LEN 48
  60
  61#define GCM_AES_IV_LEN 12
  62#define DEFAULT_ICV_LEN 16
  63
  64#define for_each_rxsc(secy, sc)				\
  65	for (sc = rcu_dereference_bh(secy->rx_sc);	\
  66	     sc;					\
  67	     sc = rcu_dereference_bh(sc->next))
  68#define for_each_rxsc_rtnl(secy, sc)			\
  69	for (sc = rtnl_dereference(secy->rx_sc);	\
  70	     sc;					\
  71	     sc = rtnl_dereference(sc->next))
  72
  73#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))
  74
  75struct gcm_iv_xpn {
  76	union {
  77		u8 short_secure_channel_id[4];
  78		ssci_t ssci;
  79	};
  80	__be64 pn;
  81} __packed;
  82
  83struct gcm_iv {
  84	union {
  85		u8 secure_channel_id[8];
  86		sci_t sci;
  87	};
  88	__be32 pn;
  89};
  90
  91#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
  92
  93struct pcpu_secy_stats {
  94	struct macsec_dev_stats stats;
  95	struct u64_stats_sync syncp;
  96};
  97
  98/**
  99 * struct macsec_dev - private data
 100 * @secy: SecY config
 101 * @real_dev: pointer to underlying netdevice
 
 102 * @stats: MACsec device stats
 103 * @secys: linked list of SecY's on the underlying device
 
 104 * @offload: status of offloading on the MACsec device
 
 
 105 */
 106struct macsec_dev {
 107	struct macsec_secy secy;
 108	struct net_device *real_dev;
 
 109	struct pcpu_secy_stats __percpu *stats;
 110	struct list_head secys;
 111	struct gro_cells gro_cells;
 112	enum macsec_offload offload;
 
 113};
 114
 115/**
 116 * struct macsec_rxh_data - rx_handler private argument
 117 * @secys: linked list of SecY's on this underlying device
 118 */
 119struct macsec_rxh_data {
 120	struct list_head secys;
 121};
 122
 123static struct macsec_dev *macsec_priv(const struct net_device *dev)
 124{
 125	return (struct macsec_dev *)netdev_priv(dev);
 126}
 127
 128static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
 129{
 130	return rcu_dereference_bh(dev->rx_handler_data);
 131}
 132
 133static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
 134{
 135	return rtnl_dereference(dev->rx_handler_data);
 136}
 137
 138struct macsec_cb {
 139	struct aead_request *req;
 140	union {
 141		struct macsec_tx_sa *tx_sa;
 142		struct macsec_rx_sa *rx_sa;
 143	};
 144	u8 assoc_num;
 145	bool valid;
 146	bool has_sci;
 147};
 148
 149static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
 150{
 151	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);
 152
 153	if (!sa || !sa->active)
 154		return NULL;
 155
 156	if (!refcount_inc_not_zero(&sa->refcnt))
 157		return NULL;
 158
 159	return sa;
 160}
 161
 162static void free_rx_sc_rcu(struct rcu_head *head)
 163{
 164	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
 165
 166	free_percpu(rx_sc->stats);
 167	kfree(rx_sc);
 168}
 169
 170static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
 171{
 172	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
 173}
 174
 175static void macsec_rxsc_put(struct macsec_rx_sc *sc)
 176{
 177	if (refcount_dec_and_test(&sc->refcnt))
 178		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
 179}
 180
 181static void free_rxsa(struct rcu_head *head)
 182{
 183	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);
 184
 185	crypto_free_aead(sa->key.tfm);
 186	free_percpu(sa->stats);
 187	kfree(sa);
 188}
 189
 190static void macsec_rxsa_put(struct macsec_rx_sa *sa)
 191{
 192	if (refcount_dec_and_test(&sa->refcnt))
 193		call_rcu(&sa->rcu, free_rxsa);
 194}
 195
 196static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
 197{
 198	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);
 199
 200	if (!sa || !sa->active)
 201		return NULL;
 202
 203	if (!refcount_inc_not_zero(&sa->refcnt))
 204		return NULL;
 205
 206	return sa;
 207}
 208
 209static void free_txsa(struct rcu_head *head)
 210{
 211	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);
 212
 213	crypto_free_aead(sa->key.tfm);
 214	free_percpu(sa->stats);
 215	kfree(sa);
 216}
 217
 218static void macsec_txsa_put(struct macsec_tx_sa *sa)
 219{
 220	if (refcount_dec_and_test(&sa->refcnt))
 221		call_rcu(&sa->rcu, free_txsa);
 222}
 223
 224static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
 225{
 226	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
 227	return (struct macsec_cb *)skb->cb;
 228}
 229
 230#define MACSEC_PORT_ES (htons(0x0001))
 231#define MACSEC_PORT_SCB (0x0000)
 232#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
 233#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)
 234
 235#define MACSEC_GCM_AES_128_SAK_LEN 16
 236#define MACSEC_GCM_AES_256_SAK_LEN 32
 237
 238#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
 239#define DEFAULT_XPN false
 240#define DEFAULT_SEND_SCI true
 241#define DEFAULT_ENCRYPT false
 242#define DEFAULT_ENCODING_SA 0
 
 243
 244static bool send_sci(const struct macsec_secy *secy)
 245{
 246	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
 247
 248	return tx_sc->send_sci ||
 249		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
 250}
 251
 252static sci_t make_sci(u8 *addr, __be16 port)
 253{
 254	sci_t sci;
 255
 256	memcpy(&sci, addr, ETH_ALEN);
 257	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));
 258
 259	return sci;
 260}
 261
 262static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
 263{
 264	sci_t sci;
 265
 266	if (sci_present)
 267		memcpy(&sci, hdr->secure_channel_id,
 268		       sizeof(hdr->secure_channel_id));
 269	else
 270		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);
 271
 272	return sci;
 273}
 274
 275static unsigned int macsec_sectag_len(bool sci_present)
 276{
 277	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
 278}
 279
 280static unsigned int macsec_hdr_len(bool sci_present)
 281{
 282	return macsec_sectag_len(sci_present) + ETH_HLEN;
 283}
 284
 285static unsigned int macsec_extra_len(bool sci_present)
 286{
 287	return macsec_sectag_len(sci_present) + sizeof(__be16);
 288}
 289
 290/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
 291static void macsec_fill_sectag(struct macsec_eth_header *h,
 292			       const struct macsec_secy *secy, u32 pn,
 293			       bool sci_present)
 294{
 295	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
 296
 297	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
 298	h->eth.h_proto = htons(ETH_P_MACSEC);
 299
 300	if (sci_present) {
 301		h->tci_an |= MACSEC_TCI_SC;
 302		memcpy(&h->secure_channel_id, &secy->sci,
 303		       sizeof(h->secure_channel_id));
 304	} else {
 305		if (tx_sc->end_station)
 306			h->tci_an |= MACSEC_TCI_ES;
 307		if (tx_sc->scb)
 308			h->tci_an |= MACSEC_TCI_SCB;
 309	}
 310
 311	h->packet_number = htonl(pn);
 312
 313	/* with GCM, C/E clear for !encrypt, both set for encrypt */
 314	if (tx_sc->encrypt)
 315		h->tci_an |= MACSEC_TCI_CONFID;
 316	else if (secy->icv_len != DEFAULT_ICV_LEN)
 317		h->tci_an |= MACSEC_TCI_C;
 318
 319	h->tci_an |= tx_sc->encoding_sa;
 320}
 321
 322static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
 323{
 324	if (data_len < MIN_NON_SHORT_LEN)
 325		h->short_length = data_len;
 326}
 327
 328/* Checks if a MACsec interface is being offloaded to an hardware engine */
 329static bool macsec_is_offloaded(struct macsec_dev *macsec)
 330{
 331	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
 332	    macsec->offload == MACSEC_OFFLOAD_PHY)
 333		return true;
 334
 335	return false;
 336}
 337
 338/* Checks if underlying layers implement MACsec offloading functions. */
 339static bool macsec_check_offload(enum macsec_offload offload,
 340				 struct macsec_dev *macsec)
 341{
 342	if (!macsec || !macsec->real_dev)
 343		return false;
 344
 345	if (offload == MACSEC_OFFLOAD_PHY)
 346		return macsec->real_dev->phydev &&
 347		       macsec->real_dev->phydev->macsec_ops;
 348	else if (offload == MACSEC_OFFLOAD_MAC)
 349		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
 350		       macsec->real_dev->macsec_ops;
 351
 352	return false;
 353}
 354
 355static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
 356						 struct macsec_dev *macsec,
 357						 struct macsec_context *ctx)
 358{
 359	if (ctx) {
 360		memset(ctx, 0, sizeof(*ctx));
 361		ctx->offload = offload;
 362
 363		if (offload == MACSEC_OFFLOAD_PHY)
 364			ctx->phydev = macsec->real_dev->phydev;
 365		else if (offload == MACSEC_OFFLOAD_MAC)
 366			ctx->netdev = macsec->real_dev;
 367	}
 368
 369	if (offload == MACSEC_OFFLOAD_PHY)
 370		return macsec->real_dev->phydev->macsec_ops;
 371	else
 372		return macsec->real_dev->macsec_ops;
 373}
 374
 375/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 376 * context device reference if provided.
 377 */
 378static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
 379					       struct macsec_context *ctx)
 380{
 381	if (!macsec_check_offload(macsec->offload, macsec))
 382		return NULL;
 383
 384	return __macsec_get_ops(macsec->offload, macsec, ctx);
 385}
 386
 387/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
 388static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
 389{
 390	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
 391	int len = skb->len - 2 * ETH_ALEN;
 392	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;
 393
 394	/* a) It comprises at least 17 octets */
 395	if (skb->len <= 16)
 396		return false;
 397
 398	/* b) MACsec EtherType: already checked */
 399
 400	/* c) V bit is clear */
 401	if (h->tci_an & MACSEC_TCI_VERSION)
 402		return false;
 403
 404	/* d) ES or SCB => !SC */
 405	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
 406	    (h->tci_an & MACSEC_TCI_SC))
 407		return false;
 408
 409	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
 410	if (h->unused)
 411		return false;
 412
 413	/* rx.pn != 0 if not XPN (figure 10-5 with 802.11AEbw-2013 amendment) */
 414	if (!h->packet_number && !xpn)
 415		return false;
 416
 417	/* length check, f) g) h) i) */
 418	if (h->short_length)
 419		return len == extra_len + h->short_length;
 420	return len >= extra_len + MIN_NON_SHORT_LEN;
 421}
 422
 423#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
 424#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
 425
 426static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
 427			       salt_t salt)
 428{
 429	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;
 430
 431	gcm_iv->ssci = ssci ^ salt.ssci;
 432	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
 433}
 434
 435static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
 436{
 437	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;
 438
 439	gcm_iv->sci = sci;
 440	gcm_iv->pn = htonl(pn);
 441}
 442
 443static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
 444{
 445	return (struct macsec_eth_header *)skb_mac_header(skb);
 446}
 447
 448static sci_t dev_to_sci(struct net_device *dev, __be16 port)
 449{
 450	return make_sci(dev->dev_addr, port);
 451}
 452
 453static void __macsec_pn_wrapped(struct macsec_secy *secy,
 454				struct macsec_tx_sa *tx_sa)
 455{
 456	pr_debug("PN wrapped, transitioning to !oper\n");
 457	tx_sa->active = false;
 458	if (secy->protect_frames)
 459		secy->operational = false;
 460}
 461
 462void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
 463{
 464	spin_lock_bh(&tx_sa->lock);
 465	__macsec_pn_wrapped(secy, tx_sa);
 466	spin_unlock_bh(&tx_sa->lock);
 467}
 468EXPORT_SYMBOL_GPL(macsec_pn_wrapped);
 469
 470static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
 471			    struct macsec_secy *secy)
 472{
 473	pn_t pn;
 474
 475	spin_lock_bh(&tx_sa->lock);
 476
 477	pn = tx_sa->next_pn_halves;
 478	if (secy->xpn)
 479		tx_sa->next_pn++;
 480	else
 481		tx_sa->next_pn_halves.lower++;
 482
 483	if (tx_sa->next_pn == 0)
 484		__macsec_pn_wrapped(secy, tx_sa);
 485	spin_unlock_bh(&tx_sa->lock);
 486
 487	return pn;
 488}
 489
 490static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
 491{
 492	struct macsec_dev *macsec = netdev_priv(dev);
 493
 494	skb->dev = macsec->real_dev;
 495	skb_reset_mac_header(skb);
 496	skb->protocol = eth_hdr(skb)->h_proto;
 497}
 498
 
 
 
 
 
 
 
 
 
 499static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
 500			    struct macsec_tx_sa *tx_sa)
 501{
 
 502	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
 503
 504	u64_stats_update_begin(&txsc_stats->syncp);
 505	if (tx_sc->encrypt) {
 506		txsc_stats->stats.OutOctetsEncrypted += skb->len;
 507		txsc_stats->stats.OutPktsEncrypted++;
 508		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
 509	} else {
 510		txsc_stats->stats.OutOctetsProtected += skb->len;
 511		txsc_stats->stats.OutPktsProtected++;
 512		this_cpu_inc(tx_sa->stats->OutPktsProtected);
 513	}
 514	u64_stats_update_end(&txsc_stats->syncp);
 515}
 516
 517static void count_tx(struct net_device *dev, int ret, int len)
 518{
 519	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
 520		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
 521
 522		u64_stats_update_begin(&stats->syncp);
 523		stats->tx_packets++;
 524		stats->tx_bytes += len;
 525		u64_stats_update_end(&stats->syncp);
 526	}
 527}
 528
 529static void macsec_encrypt_done(struct crypto_async_request *base, int err)
 530{
 531	struct sk_buff *skb = base->data;
 532	struct net_device *dev = skb->dev;
 533	struct macsec_dev *macsec = macsec_priv(dev);
 534	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
 535	int len, ret;
 536
 537	aead_request_free(macsec_skb_cb(skb)->req);
 538
 539	rcu_read_lock_bh();
 
 
 
 540	macsec_encrypt_finish(skb, dev);
 541	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
 542	len = skb->len;
 543	ret = dev_queue_xmit(skb);
 544	count_tx(dev, ret, len);
 545	rcu_read_unlock_bh();
 546
 547	macsec_txsa_put(sa);
 548	dev_put(dev);
 549}
 550
 551static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
 552					     unsigned char **iv,
 553					     struct scatterlist **sg,
 554					     int num_frags)
 555{
 556	size_t size, iv_offset, sg_offset;
 557	struct aead_request *req;
 558	void *tmp;
 559
 560	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
 561	iv_offset = size;
 562	size += GCM_AES_IV_LEN;
 563
 564	size = ALIGN(size, __alignof__(struct scatterlist));
 565	sg_offset = size;
 566	size += sizeof(struct scatterlist) * num_frags;
 567
 568	tmp = kmalloc(size, GFP_ATOMIC);
 569	if (!tmp)
 570		return NULL;
 571
 572	*iv = (unsigned char *)(tmp + iv_offset);
 573	*sg = (struct scatterlist *)(tmp + sg_offset);
 574	req = tmp;
 575
 576	aead_request_set_tfm(req, tfm);
 577
 578	return req;
 579}
 580
 581static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 582				      struct net_device *dev)
 583{
 584	int ret;
 585	struct scatterlist *sg;
 586	struct sk_buff *trailer;
 587	unsigned char *iv;
 588	struct ethhdr *eth;
 589	struct macsec_eth_header *hh;
 590	size_t unprotected_len;
 591	struct aead_request *req;
 592	struct macsec_secy *secy;
 593	struct macsec_tx_sc *tx_sc;
 594	struct macsec_tx_sa *tx_sa;
 595	struct macsec_dev *macsec = macsec_priv(dev);
 596	bool sci_present;
 597	pn_t pn;
 598
 599	secy = &macsec->secy;
 600	tx_sc = &secy->tx_sc;
 601
 602	/* 10.5.1 TX SA assignment */
 603	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
 604	if (!tx_sa) {
 605		secy->operational = false;
 606		kfree_skb(skb);
 607		return ERR_PTR(-EINVAL);
 608	}
 609
 610	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
 611		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
 612		struct sk_buff *nskb = skb_copy_expand(skb,
 613						       MACSEC_NEEDED_HEADROOM,
 614						       MACSEC_NEEDED_TAILROOM,
 615						       GFP_ATOMIC);
 616		if (likely(nskb)) {
 617			consume_skb(skb);
 618			skb = nskb;
 619		} else {
 620			macsec_txsa_put(tx_sa);
 621			kfree_skb(skb);
 622			return ERR_PTR(-ENOMEM);
 623		}
 624	} else {
 625		skb = skb_unshare(skb, GFP_ATOMIC);
 626		if (!skb) {
 627			macsec_txsa_put(tx_sa);
 628			return ERR_PTR(-ENOMEM);
 629		}
 630	}
 631
 632	unprotected_len = skb->len;
 633	eth = eth_hdr(skb);
 634	sci_present = send_sci(secy);
 635	hh = skb_push(skb, macsec_extra_len(sci_present));
 636	memmove(hh, eth, 2 * ETH_ALEN);
 637
 638	pn = tx_sa_update_pn(tx_sa, secy);
 639	if (pn.full64 == 0) {
 640		macsec_txsa_put(tx_sa);
 641		kfree_skb(skb);
 642		return ERR_PTR(-ENOLINK);
 643	}
 644	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
 645	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
 646
 647	skb_put(skb, secy->icv_len);
 648
 649	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
 650		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
 651
 652		u64_stats_update_begin(&secy_stats->syncp);
 653		secy_stats->stats.OutPktsTooLong++;
 654		u64_stats_update_end(&secy_stats->syncp);
 655
 656		macsec_txsa_put(tx_sa);
 657		kfree_skb(skb);
 658		return ERR_PTR(-EINVAL);
 659	}
 660
 661	ret = skb_cow_data(skb, 0, &trailer);
 662	if (unlikely(ret < 0)) {
 663		macsec_txsa_put(tx_sa);
 664		kfree_skb(skb);
 665		return ERR_PTR(ret);
 666	}
 667
 668	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
 669	if (!req) {
 670		macsec_txsa_put(tx_sa);
 671		kfree_skb(skb);
 672		return ERR_PTR(-ENOMEM);
 673	}
 674
 675	if (secy->xpn)
 676		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
 677	else
 678		macsec_fill_iv(iv, secy->sci, pn.lower);
 679
 680	sg_init_table(sg, ret);
 681	ret = skb_to_sgvec(skb, sg, 0, skb->len);
 682	if (unlikely(ret < 0)) {
 683		aead_request_free(req);
 684		macsec_txsa_put(tx_sa);
 685		kfree_skb(skb);
 686		return ERR_PTR(ret);
 687	}
 688
 689	if (tx_sc->encrypt) {
 690		int len = skb->len - macsec_hdr_len(sci_present) -
 691			  secy->icv_len;
 692		aead_request_set_crypt(req, sg, sg, len, iv);
 693		aead_request_set_ad(req, macsec_hdr_len(sci_present));
 694	} else {
 695		aead_request_set_crypt(req, sg, sg, 0, iv);
 696		aead_request_set_ad(req, skb->len - secy->icv_len);
 697	}
 698
 699	macsec_skb_cb(skb)->req = req;
 700	macsec_skb_cb(skb)->tx_sa = tx_sa;
 
 701	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
 702
 703	dev_hold(skb->dev);
 704	ret = crypto_aead_encrypt(req);
 705	if (ret == -EINPROGRESS) {
 706		return ERR_PTR(ret);
 707	} else if (ret != 0) {
 708		dev_put(skb->dev);
 709		kfree_skb(skb);
 710		aead_request_free(req);
 711		macsec_txsa_put(tx_sa);
 712		return ERR_PTR(-EINVAL);
 713	}
 714
 715	dev_put(skb->dev);
 716	aead_request_free(req);
 717	macsec_txsa_put(tx_sa);
 718
 719	return skb;
 720}
 721
 722static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
 723{
 724	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
 725	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
 726	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
 727	u32 lowest_pn = 0;
 728
 729	spin_lock(&rx_sa->lock);
 730	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
 731		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;
 732
 733	/* Now perform replay protection check again
 734	 * (see IEEE 802.1AE-2006 figure 10-5)
 735	 */
 736	if (secy->replay_protect && pn < lowest_pn &&
 737	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
 738		spin_unlock(&rx_sa->lock);
 739		u64_stats_update_begin(&rxsc_stats->syncp);
 740		rxsc_stats->stats.InPktsLate++;
 741		u64_stats_update_end(&rxsc_stats->syncp);
 
 742		return false;
 743	}
 744
 745	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
 
 746		u64_stats_update_begin(&rxsc_stats->syncp);
 747		if (hdr->tci_an & MACSEC_TCI_E)
 748			rxsc_stats->stats.InOctetsDecrypted += skb->len;
 749		else
 750			rxsc_stats->stats.InOctetsValidated += skb->len;
 751		u64_stats_update_end(&rxsc_stats->syncp);
 752	}
 753
 754	if (!macsec_skb_cb(skb)->valid) {
 755		spin_unlock(&rx_sa->lock);
 756
 757		/* 10.6.5 */
 758		if (hdr->tci_an & MACSEC_TCI_C ||
 759		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
 760			u64_stats_update_begin(&rxsc_stats->syncp);
 761			rxsc_stats->stats.InPktsNotValid++;
 762			u64_stats_update_end(&rxsc_stats->syncp);
 
 
 763			return false;
 764		}
 765
 766		u64_stats_update_begin(&rxsc_stats->syncp);
 767		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
 768			rxsc_stats->stats.InPktsInvalid++;
 769			this_cpu_inc(rx_sa->stats->InPktsInvalid);
 770		} else if (pn < lowest_pn) {
 771			rxsc_stats->stats.InPktsDelayed++;
 772		} else {
 773			rxsc_stats->stats.InPktsUnchecked++;
 774		}
 775		u64_stats_update_end(&rxsc_stats->syncp);
 776	} else {
 777		u64_stats_update_begin(&rxsc_stats->syncp);
 778		if (pn < lowest_pn) {
 779			rxsc_stats->stats.InPktsDelayed++;
 780		} else {
 781			rxsc_stats->stats.InPktsOK++;
 782			this_cpu_inc(rx_sa->stats->InPktsOK);
 783		}
 784		u64_stats_update_end(&rxsc_stats->syncp);
 785
 786		// Instead of "pn >=" - to support pn overflow in xpn
 787		if (pn + 1 > rx_sa->next_pn_halves.lower) {
 788			rx_sa->next_pn_halves.lower = pn + 1;
 789		} else if (secy->xpn &&
 790			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
 791			rx_sa->next_pn_halves.upper++;
 792			rx_sa->next_pn_halves.lower = pn + 1;
 793		}
 794
 795		spin_unlock(&rx_sa->lock);
 796	}
 797
 798	return true;
 799}
 800
 801static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
 802{
 803	skb->pkt_type = PACKET_HOST;
 804	skb->protocol = eth_type_trans(skb, dev);
 805
 806	skb_reset_network_header(skb);
 807	if (!skb_transport_header_was_set(skb))
 808		skb_reset_transport_header(skb);
 809	skb_reset_mac_len(skb);
 810}
 811
 812static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
 813{
 814	skb->ip_summed = CHECKSUM_NONE;
 815	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
 816	skb_pull(skb, hdr_len);
 817	pskb_trim_unique(skb, skb->len - icv_len);
 818}
 819
 820static void count_rx(struct net_device *dev, int len)
 821{
 822	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
 823
 824	u64_stats_update_begin(&stats->syncp);
 825	stats->rx_packets++;
 826	stats->rx_bytes += len;
 827	u64_stats_update_end(&stats->syncp);
 828}
 829
 830static void macsec_decrypt_done(struct crypto_async_request *base, int err)
 831{
 832	struct sk_buff *skb = base->data;
 833	struct net_device *dev = skb->dev;
 834	struct macsec_dev *macsec = macsec_priv(dev);
 835	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
 836	struct macsec_rx_sc *rx_sc = rx_sa->sc;
 837	int len;
 838	u32 pn;
 839
 840	aead_request_free(macsec_skb_cb(skb)->req);
 841
 842	if (!err)
 843		macsec_skb_cb(skb)->valid = true;
 844
 845	rcu_read_lock_bh();
 846	pn = ntohl(macsec_ethhdr(skb)->packet_number);
 847	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
 848		rcu_read_unlock_bh();
 849		kfree_skb(skb);
 850		goto out;
 851	}
 852
 853	macsec_finalize_skb(skb, macsec->secy.icv_len,
 854			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
 
 855	macsec_reset_skb(skb, macsec->secy.netdev);
 856
 857	len = skb->len;
 858	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
 859		count_rx(dev, len);
 860
 861	rcu_read_unlock_bh();
 862
 863out:
 864	macsec_rxsa_put(rx_sa);
 865	macsec_rxsc_put(rx_sc);
 866	dev_put(dev);
 867}
 868
 869static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 870				      struct net_device *dev,
 871				      struct macsec_rx_sa *rx_sa,
 872				      sci_t sci,
 873				      struct macsec_secy *secy)
 874{
 875	int ret;
 876	struct scatterlist *sg;
 877	struct sk_buff *trailer;
 878	unsigned char *iv;
 879	struct aead_request *req;
 880	struct macsec_eth_header *hdr;
 881	u32 hdr_pn;
 882	u16 icv_len = secy->icv_len;
 883
 884	macsec_skb_cb(skb)->valid = false;
 885	skb = skb_share_check(skb, GFP_ATOMIC);
 886	if (!skb)
 887		return ERR_PTR(-ENOMEM);
 888
 889	ret = skb_cow_data(skb, 0, &trailer);
 890	if (unlikely(ret < 0)) {
 891		kfree_skb(skb);
 892		return ERR_PTR(ret);
 893	}
 894	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
 895	if (!req) {
 896		kfree_skb(skb);
 897		return ERR_PTR(-ENOMEM);
 898	}
 899
 900	hdr = (struct macsec_eth_header *)skb->data;
 901	hdr_pn = ntohl(hdr->packet_number);
 902
 903	if (secy->xpn) {
 904		pn_t recovered_pn = rx_sa->next_pn_halves;
 905
 906		recovered_pn.lower = hdr_pn;
 907		if (hdr_pn < rx_sa->next_pn_halves.lower &&
 908		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
 909			recovered_pn.upper++;
 910
 911		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
 912				   rx_sa->key.salt);
 913	} else {
 914		macsec_fill_iv(iv, sci, hdr_pn);
 915	}
 916
 917	sg_init_table(sg, ret);
 918	ret = skb_to_sgvec(skb, sg, 0, skb->len);
 919	if (unlikely(ret < 0)) {
 920		aead_request_free(req);
 921		kfree_skb(skb);
 922		return ERR_PTR(ret);
 923	}
 924
 925	if (hdr->tci_an & MACSEC_TCI_E) {
 926		/* confidentiality: ethernet + macsec header
 927		 * authenticated, encrypted payload
 928		 */
 929		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);
 930
 931		aead_request_set_crypt(req, sg, sg, len, iv);
 932		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
 933		skb = skb_unshare(skb, GFP_ATOMIC);
 934		if (!skb) {
 935			aead_request_free(req);
 936			return ERR_PTR(-ENOMEM);
 937		}
 938	} else {
 939		/* integrity only: all headers + data authenticated */
 940		aead_request_set_crypt(req, sg, sg, icv_len, iv);
 941		aead_request_set_ad(req, skb->len - icv_len);
 942	}
 943
 944	macsec_skb_cb(skb)->req = req;
 945	skb->dev = dev;
 946	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);
 947
 948	dev_hold(dev);
 949	ret = crypto_aead_decrypt(req);
 950	if (ret == -EINPROGRESS) {
 951		return ERR_PTR(ret);
 952	} else if (ret != 0) {
 953		/* decryption/authentication failed
 954		 * 10.6 if validateFrames is disabled, deliver anyway
 955		 */
 956		if (ret != -EBADMSG) {
 957			kfree_skb(skb);
 958			skb = ERR_PTR(ret);
 959		}
 960	} else {
 961		macsec_skb_cb(skb)->valid = true;
 962	}
 963	dev_put(dev);
 964
 965	aead_request_free(req);
 966
 967	return skb;
 968}
 969
 970static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
 971{
 972	struct macsec_rx_sc *rx_sc;
 973
 974	for_each_rxsc(secy, rx_sc) {
 975		if (rx_sc->sci == sci)
 976			return rx_sc;
 977	}
 978
 979	return NULL;
 980}
 981
 982static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
 983{
 984	struct macsec_rx_sc *rx_sc;
 985
 986	for_each_rxsc_rtnl(secy, rx_sc) {
 987		if (rx_sc->sci == sci)
 988			return rx_sc;
 989	}
 990
 991	return NULL;
 992}
 993
 994static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
 995{
 996	/* Deliver to the uncontrolled port by default */
 997	enum rx_handler_result ret = RX_HANDLER_PASS;
 998	struct ethhdr *hdr = eth_hdr(skb);
 
 999	struct macsec_rxh_data *rxd;
1000	struct macsec_dev *macsec;
 
1001
1002	rcu_read_lock();
1003	rxd = macsec_data_rcu(skb->dev);
 
 
1004
1005	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1006		struct sk_buff *nskb;
1007		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
1008		struct net_device *ndev = macsec->secy.netdev;
1009
1010		/* If h/w offloading is enabled, HW decodes frames and strips
1011		 * the SecTAG, so we have to deduce which port to deliver to.
1012		 */
1013		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1014			if (ether_addr_equal_64bits(hdr->h_dest,
1015						    ndev->dev_addr)) {
1016				/* exact match, divert skb to this port */
1017				skb->dev = ndev;
1018				skb->pkt_type = PACKET_HOST;
1019				ret = RX_HANDLER_ANOTHER;
1020				goto out;
1021			} else if (is_multicast_ether_addr_64bits(
1022					   hdr->h_dest)) {
1023				/* multicast frame, deliver on this port too */
1024				nskb = skb_clone(skb, GFP_ATOMIC);
1025				if (!nskb)
1026					break;
1027
1028				nskb->dev = ndev;
1029				if (ether_addr_equal_64bits(hdr->h_dest,
1030							    ndev->broadcast))
1031					nskb->pkt_type = PACKET_BROADCAST;
1032				else
1033					nskb->pkt_type = PACKET_MULTICAST;
1034
1035				netif_rx(nskb);
 
 
 
 
 
1036			}
 
1037			continue;
1038		}
1039
1040		/* 10.6 If the management control validateFrames is not
1041		 * Strict, frames without a SecTAG are received, counted, and
1042		 * delivered to the Controlled Port
1043		 */
1044		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
1045			u64_stats_update_begin(&secy_stats->syncp);
1046			secy_stats->stats.InPktsNoTag++;
1047			u64_stats_update_end(&secy_stats->syncp);
 
1048			continue;
1049		}
1050
1051		/* deliver on this port */
1052		nskb = skb_clone(skb, GFP_ATOMIC);
1053		if (!nskb)
1054			break;
1055
1056		nskb->dev = ndev;
1057
1058		if (netif_rx(nskb) == NET_RX_SUCCESS) {
1059			u64_stats_update_begin(&secy_stats->syncp);
1060			secy_stats->stats.InPktsUntagged++;
1061			u64_stats_update_end(&secy_stats->syncp);
1062		}
1063	}
1064
1065out:
1066	rcu_read_unlock();
1067	return ret;
1068}
1069
1070static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
1071{
1072	struct sk_buff *skb = *pskb;
1073	struct net_device *dev = skb->dev;
1074	struct macsec_eth_header *hdr;
1075	struct macsec_secy *secy = NULL;
1076	struct macsec_rx_sc *rx_sc;
1077	struct macsec_rx_sa *rx_sa;
1078	struct macsec_rxh_data *rxd;
1079	struct macsec_dev *macsec;
1080	unsigned int len;
1081	sci_t sci;
1082	u32 hdr_pn;
1083	bool cbit;
1084	struct pcpu_rx_sc_stats *rxsc_stats;
1085	struct pcpu_secy_stats *secy_stats;
1086	bool pulled_sci;
1087	int ret;
1088
1089	if (skb_headroom(skb) < ETH_HLEN)
1090		goto drop_direct;
1091
1092	hdr = macsec_ethhdr(skb);
1093	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
1094		return handle_not_macsec(skb);
1095
1096	skb = skb_unshare(skb, GFP_ATOMIC);
1097	*pskb = skb;
1098	if (!skb)
1099		return RX_HANDLER_CONSUMED;
1100
1101	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
1102	if (!pulled_sci) {
1103		if (!pskb_may_pull(skb, macsec_extra_len(false)))
1104			goto drop_direct;
1105	}
1106
1107	hdr = macsec_ethhdr(skb);
1108
1109	/* Frames with a SecTAG that has the TCI E bit set but the C
1110	 * bit clear are discarded, as this reserved encoding is used
1111	 * to identify frames with a SecTAG that are not to be
1112	 * delivered to the Controlled Port.
1113	 */
1114	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
1115		return RX_HANDLER_PASS;
1116
1117	/* now, pull the extra length */
1118	if (hdr->tci_an & MACSEC_TCI_SC) {
1119		if (!pulled_sci)
1120			goto drop_direct;
1121	}
1122
1123	/* ethernet header is part of crypto processing */
1124	skb_push(skb, ETH_HLEN);
1125
1126	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
1127	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
1128	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);
1129
1130	rcu_read_lock();
1131	rxd = macsec_data_rcu(skb->dev);
1132
1133	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1134		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
1135
1136		sc = sc ? macsec_rxsc_get(sc) : NULL;
1137
1138		if (sc) {
1139			secy = &macsec->secy;
1140			rx_sc = sc;
1141			break;
1142		}
1143	}
1144
1145	if (!secy)
1146		goto nosci;
1147
1148	dev = secy->netdev;
1149	macsec = macsec_priv(dev);
1150	secy_stats = this_cpu_ptr(macsec->stats);
1151	rxsc_stats = this_cpu_ptr(rx_sc->stats);
1152
1153	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
1154		u64_stats_update_begin(&secy_stats->syncp);
1155		secy_stats->stats.InPktsBadTag++;
1156		u64_stats_update_end(&secy_stats->syncp);
 
1157		goto drop_nosa;
1158	}
1159
1160	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
1161	if (!rx_sa) {
1162		/* 10.6.1 if the SA is not in use */
1163
1164		/* If validateFrames is Strict or the C bit in the
1165		 * SecTAG is set, discard
1166		 */
1167		if (hdr->tci_an & MACSEC_TCI_C ||
1168		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
1169			u64_stats_update_begin(&rxsc_stats->syncp);
1170			rxsc_stats->stats.InPktsNotUsingSA++;
1171			u64_stats_update_end(&rxsc_stats->syncp);
 
1172			goto drop_nosa;
1173		}
1174
1175		/* not Strict, the frame (with the SecTAG and ICV
1176		 * removed) is delivered to the Controlled Port.
1177		 */
1178		u64_stats_update_begin(&rxsc_stats->syncp);
1179		rxsc_stats->stats.InPktsUnusedSA++;
1180		u64_stats_update_end(&rxsc_stats->syncp);
1181		goto deliver;
1182	}
1183
1184	/* First, PN check to avoid decrypting obviously wrong packets */
1185	hdr_pn = ntohl(hdr->packet_number);
1186	if (secy->replay_protect) {
1187		bool late;
1188
1189		spin_lock(&rx_sa->lock);
1190		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
1191		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);
1192
1193		if (secy->xpn)
1194			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
1195		spin_unlock(&rx_sa->lock);
1196
1197		if (late) {
1198			u64_stats_update_begin(&rxsc_stats->syncp);
1199			rxsc_stats->stats.InPktsLate++;
1200			u64_stats_update_end(&rxsc_stats->syncp);
 
1201			goto drop;
1202		}
1203	}
1204
1205	macsec_skb_cb(skb)->rx_sa = rx_sa;
1206
1207	/* Disabled && !changed text => skip validation */
1208	if (hdr->tci_an & MACSEC_TCI_C ||
1209	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
1210		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
1211
1212	if (IS_ERR(skb)) {
1213		/* the decrypt callback needs the reference */
1214		if (PTR_ERR(skb) != -EINPROGRESS) {
1215			macsec_rxsa_put(rx_sa);
1216			macsec_rxsc_put(rx_sc);
1217		}
1218		rcu_read_unlock();
1219		*pskb = NULL;
1220		return RX_HANDLER_CONSUMED;
1221	}
1222
1223	if (!macsec_post_decrypt(skb, secy, hdr_pn))
1224		goto drop;
1225
1226deliver:
1227	macsec_finalize_skb(skb, secy->icv_len,
1228			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
 
1229	macsec_reset_skb(skb, secy->netdev);
1230
1231	if (rx_sa)
1232		macsec_rxsa_put(rx_sa);
1233	macsec_rxsc_put(rx_sc);
1234
1235	skb_orphan(skb);
1236	len = skb->len;
1237	ret = gro_cells_receive(&macsec->gro_cells, skb);
1238	if (ret == NET_RX_SUCCESS)
1239		count_rx(dev, len);
1240	else
1241		macsec->secy.netdev->stats.rx_dropped++;
1242
1243	rcu_read_unlock();
1244
1245	*pskb = NULL;
1246	return RX_HANDLER_CONSUMED;
1247
1248drop:
1249	macsec_rxsa_put(rx_sa);
1250drop_nosa:
1251	macsec_rxsc_put(rx_sc);
1252	rcu_read_unlock();
1253drop_direct:
1254	kfree_skb(skb);
1255	*pskb = NULL;
1256	return RX_HANDLER_CONSUMED;
1257
1258nosci:
1259	/* 10.6.1 if the SC is not found */
1260	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
1261	if (!cbit)
1262		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
1263				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
1264
1265	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1266		struct sk_buff *nskb;
1267
1268		secy_stats = this_cpu_ptr(macsec->stats);
1269
1270		/* If validateFrames is Strict or the C bit in the
1271		 * SecTAG is set, discard
1272		 */
1273		if (cbit ||
1274		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
1275			u64_stats_update_begin(&secy_stats->syncp);
1276			secy_stats->stats.InPktsNoSCI++;
1277			u64_stats_update_end(&secy_stats->syncp);
 
1278			continue;
1279		}
1280
1281		/* not strict, the frame (with the SecTAG and ICV
1282		 * removed) is delivered to the Controlled Port.
1283		 */
1284		nskb = skb_clone(skb, GFP_ATOMIC);
1285		if (!nskb)
1286			break;
1287
1288		macsec_reset_skb(nskb, macsec->secy.netdev);
1289
1290		ret = netif_rx(nskb);
1291		if (ret == NET_RX_SUCCESS) {
1292			u64_stats_update_begin(&secy_stats->syncp);
1293			secy_stats->stats.InPktsUnknownSCI++;
1294			u64_stats_update_end(&secy_stats->syncp);
1295		} else {
1296			macsec->secy.netdev->stats.rx_dropped++;
1297		}
1298	}
1299
1300	rcu_read_unlock();
1301	*pskb = skb;
1302	return RX_HANDLER_PASS;
1303}
1304
1305static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
1306{
1307	struct crypto_aead *tfm;
1308	int ret;
1309
1310	/* Pick a sync gcm(aes) cipher to ensure order is preserved. */
1311	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
1312
1313	if (IS_ERR(tfm))
1314		return tfm;
1315
1316	ret = crypto_aead_setkey(tfm, key, key_len);
1317	if (ret < 0)
1318		goto fail;
1319
1320	ret = crypto_aead_setauthsize(tfm, icv_len);
1321	if (ret < 0)
1322		goto fail;
1323
1324	return tfm;
1325fail:
1326	crypto_free_aead(tfm);
1327	return ERR_PTR(ret);
1328}
1329
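/* Initialize a receive SA: per-CPU stats plus a crypto transform for the
 * given SAK. The SA starts inactive with next_pn = 1; the caller
 * activates it and publishes it with rcu_assign_pointer().
 */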
1330static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
1331		      int icv_len)
1332{
1333	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
1334	if (!rx_sa->stats)
1335		return -ENOMEM;
1336
1337	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
1338	if (IS_ERR(rx_sa->key.tfm)) {
1339		free_percpu(rx_sa->stats);
1340		return PTR_ERR(rx_sa->key.tfm);
1341	}
1342
1343	rx_sa->ssci = MACSEC_UNDEF_SSCI;
1344	rx_sa->active = false;
1345	rx_sa->next_pn = 1;
1346	refcount_set(&rx_sa->refcnt, 1);
1347	spin_lock_init(&rx_sa->lock);
1348
1349	return 0;
1350}
1351
1352static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
1353{
1354	rx_sa->active = false;
1355
1356	macsec_rxsa_put(rx_sa);
1357}
1358
1359static void free_rx_sc(struct macsec_rx_sc *rx_sc)
1360{
1361	int i;
1362
1363	for (i = 0; i < MACSEC_NUM_AN; i++) {
1364		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);
1365
1366		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
1367		if (sa)
1368			clear_rx_sa(sa);
1369	}
1370
1371	macsec_rxsc_put(rx_sc);
1372}
1373
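/* Unlink the receive SC matching @sci from the SecY's list and return it;
 * the caller is responsible for freeing it. Runs under RTNL.
 */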
1374static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
1375{
1376	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;
1377
1378	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
1379	     rx_sc;
1380	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
1381		if (rx_sc->sci == sci) {
1382			if (rx_sc->active)
1383				secy->n_rx_sc--;
1384			rcu_assign_pointer(*rx_scp, rx_sc->next);
1385			return rx_sc;
1386		}
1387	}
1388
1389	return NULL;
1390}
1391
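/* Create a receive SC for @sci. An SCI must be unique across all SecYs on
 * the same underlying device, so this fails with -EEXIST if any of them
 * already has an SC with that SCI.
 */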
1392static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
1393{
1394	struct macsec_rx_sc *rx_sc;
1395	struct macsec_dev *macsec;
1396	struct net_device *real_dev = macsec_priv(dev)->real_dev;
1397	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
1398	struct macsec_secy *secy;
1399
1400	list_for_each_entry(macsec, &rxd->secys, secys) {
1401		if (find_rx_sc_rtnl(&macsec->secy, sci))
1402			return ERR_PTR(-EEXIST);
1403	}
1404
1405	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
1406	if (!rx_sc)
1407		return ERR_PTR(-ENOMEM);
1408
1409	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
1410	if (!rx_sc->stats) {
1411		kfree(rx_sc);
1412		return ERR_PTR(-ENOMEM);
1413	}
1414
1415	rx_sc->sci = sci;
1416	rx_sc->active = true;
1417	refcount_set(&rx_sc->refcnt, 1);
1418
1419	secy = &macsec_priv(dev)->secy;
1420	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
1421	rcu_assign_pointer(secy->rx_sc, rx_sc);
1422
1423	if (rx_sc->active)
1424		secy->n_rx_sc++;
1425
1426	return rx_sc;
1427}
1428
1429static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
1430		      int icv_len)
1431{
1432	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
1433	if (!tx_sa->stats)
1434		return -ENOMEM;
1435
1436	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
1437	if (IS_ERR(tx_sa->key.tfm)) {
1438		free_percpu(tx_sa->stats);
1439		return PTR_ERR(tx_sa->key.tfm);
1440	}
1441
1442	tx_sa->ssci = MACSEC_UNDEF_SSCI;
1443	tx_sa->active = false;
1444	refcount_set(&tx_sa->refcnt, 1);
1445	spin_lock_init(&tx_sa->lock);
1446
1447	return 0;
1448}
1449
1450static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
1451{
1452	tx_sa->active = false;
1453
1454	macsec_txsa_put(tx_sa);
1455}
1456
1457static struct genl_family macsec_fam;
1458
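/* Resolve MACSEC_ATTR_IFINDEX to a MACsec netdevice. The caller is
 * expected to hold RTNL, as __dev_get_by_index() takes no reference.
 */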
1459static struct net_device *get_dev_from_nl(struct net *net,
1460					  struct nlattr **attrs)
1461{
1462	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
1463	struct net_device *dev;
1464
1465	dev = __dev_get_by_index(net, ifindex);
1466	if (!dev)
1467		return ERR_PTR(-ENODEV);
1468
1469	if (!netif_is_macsec(dev))
1470		return ERR_PTR(-ENODEV);
1471
1472	return dev;
1473}
1474
1475static enum macsec_offload nla_get_offload(const struct nlattr *nla)
1476{
1477	return (__force enum macsec_offload)nla_get_u8(nla);
1478}
1479
1480static sci_t nla_get_sci(const struct nlattr *nla)
1481{
1482	return (__force sci_t)nla_get_u64(nla);
1483}
1484
1485static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
1486		       int padattr)
1487{
1488	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
1489}
1490
1491static ssci_t nla_get_ssci(const struct nlattr *nla)
1492{
1493	return (__force ssci_t)nla_get_u32(nla);
1494}
1495
1496static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
1497{
1498	return nla_put_u32(skb, attrtype, (__force u32)value);
1499}
1500
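/* Look up a transmit SA from the genl attributes, returning the device,
 * SecY and TX SC through the out parameters. Runs under RTNL.
 */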
1501static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
1502					     struct nlattr **attrs,
1503					     struct nlattr **tb_sa,
1504					     struct net_device **devp,
1505					     struct macsec_secy **secyp,
1506					     struct macsec_tx_sc **scp,
1507					     u8 *assoc_num)
1508{
1509	struct net_device *dev;
1510	struct macsec_secy *secy;
1511	struct macsec_tx_sc *tx_sc;
1512	struct macsec_tx_sa *tx_sa;
1513
1514	if (!tb_sa[MACSEC_SA_ATTR_AN])
1515		return ERR_PTR(-EINVAL);
1516
1517	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1518
1519	dev = get_dev_from_nl(net, attrs);
1520	if (IS_ERR(dev))
1521		return ERR_CAST(dev);
1522
1523	if (*assoc_num >= MACSEC_NUM_AN)
1524		return ERR_PTR(-EINVAL);
1525
1526	secy = &macsec_priv(dev)->secy;
1527	tx_sc = &secy->tx_sc;
1528
1529	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
1530	if (!tx_sa)
1531		return ERR_PTR(-ENODEV);
1532
1533	*devp = dev;
1534	*scp = tx_sc;
1535	*secyp = secy;
1536	return tx_sa;
1537}
1538
1539static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
1540					     struct nlattr **attrs,
1541					     struct nlattr **tb_rxsc,
1542					     struct net_device **devp,
1543					     struct macsec_secy **secyp)
1544{
1545	struct net_device *dev;
1546	struct macsec_secy *secy;
1547	struct macsec_rx_sc *rx_sc;
1548	sci_t sci;
1549
1550	dev = get_dev_from_nl(net, attrs);
1551	if (IS_ERR(dev))
1552		return ERR_CAST(dev);
1553
1554	secy = &macsec_priv(dev)->secy;
1555
1556	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
1557		return ERR_PTR(-EINVAL);
1558
1559	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1560	rx_sc = find_rx_sc_rtnl(secy, sci);
1561	if (!rx_sc)
1562		return ERR_PTR(-ENODEV);
1563
1564	*secyp = secy;
1565	*devp = dev;
1566
1567	return rx_sc;
1568}
1569
1570static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
1571					     struct nlattr **attrs,
1572					     struct nlattr **tb_rxsc,
1573					     struct nlattr **tb_sa,
1574					     struct net_device **devp,
1575					     struct macsec_secy **secyp,
1576					     struct macsec_rx_sc **scp,
1577					     u8 *assoc_num)
1578{
1579	struct macsec_rx_sc *rx_sc;
1580	struct macsec_rx_sa *rx_sa;
1581
1582	if (!tb_sa[MACSEC_SA_ATTR_AN])
1583		return ERR_PTR(-EINVAL);
1584
1585	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1586	if (*assoc_num >= MACSEC_NUM_AN)
1587		return ERR_PTR(-EINVAL);
1588
1589	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
1590	if (IS_ERR(rx_sc))
1591		return ERR_CAST(rx_sc);
1592
1593	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
1594	if (!rx_sa)
1595		return ERR_PTR(-ENODEV);
1596
1597	*scp = rx_sc;
1598	return rx_sa;
1599}
1600
1601static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
1602	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
1603	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
1604	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
1605	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
1606};
1607
1608static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
1609	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
1610	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
1611};
1612
1613static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
1614	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
1615	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
1616	[MACSEC_SA_ATTR_PN] = { .type = NLA_MIN_LEN, .len = 4 },
1617	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
1618				   .len = MACSEC_KEYID_LEN, },
1619	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
1620				 .len = MACSEC_MAX_KEY_LEN, },
1621	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
1622	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
1623				  .len = MACSEC_SALT_LEN, },
1624};
1625
1626static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
1627	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
1628};
1629
1630/* Offloads an operation to a device driver */
1631static int macsec_offload(int (* const func)(struct macsec_context *),
1632			  struct macsec_context *ctx)
1633{
1634	int ret;
1635
1636	if (unlikely(!func))
1637		return 0;
1638
1639	if (ctx->offload == MACSEC_OFFLOAD_PHY)
1640		mutex_lock(&ctx->phydev->lock);
1641
1642	/* Phase I: prepare. The driver should fail here if there are going to be
1643	 * issues in the commit phase.
1644	 */
1645	ctx->prepare = true;
1646	ret = (*func)(ctx);
1647	if (ret)
1648		goto phy_unlock;
1649
1650	/* Phase II: commit. This step cannot fail. */
1651	ctx->prepare = false;
1652	ret = (*func)(ctx);
1653	/* This should never happen: commit is not allowed to fail */
1654	if (unlikely(ret))
1655		WARN(1, "MACsec offloading commit failed (%d)\n", ret);
1656
1657phy_unlock:
1658	if (ctx->offload == MACSEC_OFFLOAD_PHY)
1659		mutex_unlock(&ctx->phydev->lock);
1660
1661	return ret;
1662}
1663
1664static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
1665{
1666	if (!attrs[MACSEC_ATTR_SA_CONFIG])
1667		return -EINVAL;
1668
1669	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
1670		return -EINVAL;
1671
1672	return 0;
1673}
1674
1675static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
1676{
1677	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
1678		return -EINVAL;
1679
1680	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
1681		return -EINVAL;
1682
1683	return 0;
1684}
1685
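/* Sanity-check the netlink attributes for a new receive SA: AN, key and
 * key id are mandatory, the AN must be in range, and a zero PN is
 * rejected.
 */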
1686static bool validate_add_rxsa(struct nlattr **attrs)
1687{
1688	if (!attrs[MACSEC_SA_ATTR_AN] ||
1689	    !attrs[MACSEC_SA_ATTR_KEY] ||
1690	    !attrs[MACSEC_SA_ATTR_KEYID])
1691		return false;
1692
1693	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1694		return false;
1695
1696	if (attrs[MACSEC_SA_ATTR_PN] &&
1697	    *(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0)
1698		return false;
1699
1700	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1701		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1702			return false;
1703	}
1704
1705	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1706		return false;
1707
1708	return true;
1709}
1710
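/* MACSEC_CMD_ADD_RXSA: validate the attributes, allocate and key the SA,
 * mirror it to the offloading device if any, then publish it in the SC's
 * SA array, all under RTNL.
 */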
1711static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
1712{
1713	struct net_device *dev;
1714	struct nlattr **attrs = info->attrs;
1715	struct macsec_secy *secy;
1716	struct macsec_rx_sc *rx_sc;
1717	struct macsec_rx_sa *rx_sa;
1718	unsigned char assoc_num;
1719	int pn_len;
1720	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1721	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1722	int err;
1723
1724	if (!attrs[MACSEC_ATTR_IFINDEX])
1725		return -EINVAL;
1726
1727	if (parse_sa_config(attrs, tb_sa))
1728		return -EINVAL;
1729
1730	if (parse_rxsc_config(attrs, tb_rxsc))
1731		return -EINVAL;
1732
1733	if (!validate_add_rxsa(tb_sa))
1734		return -EINVAL;
1735
1736	rtnl_lock();
1737	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
1738	if (IS_ERR(rx_sc)) {
1739		rtnl_unlock();
1740		return PTR_ERR(rx_sc);
1741	}
1742
1743	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1744
1745	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
1746		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
1747			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
1748		rtnl_unlock();
1749		return -EINVAL;
1750	}
1751
1752	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
1753	if (tb_sa[MACSEC_SA_ATTR_PN] &&
	    nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
1754		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
1755			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
1756		rtnl_unlock();
1757		return -EINVAL;
1758	}
1759
1760	if (secy->xpn) {
1761		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
1762			rtnl_unlock();
1763			return -EINVAL;
1764		}
1765
1766		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
1767			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
1768				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
1769			  MACSEC_SALT_LEN);
1770			rtnl_unlock();
1771			return -EINVAL;
1772		}
1773	}
1774
1775	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
1776	if (rx_sa) {
1777		rtnl_unlock();
1778		return -EBUSY;
1779	}
1780
1781	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
1782	if (!rx_sa) {
1783		rtnl_unlock();
1784		return -ENOMEM;
1785	}
1786
1787	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1788			 secy->key_len, secy->icv_len);
1789	if (err < 0) {
1790		kfree(rx_sa);
1791		rtnl_unlock();
1792		return err;
1793	}
1794
1795	if (tb_sa[MACSEC_SA_ATTR_PN]) {
1796		spin_lock_bh(&rx_sa->lock);
1797		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
1798		spin_unlock_bh(&rx_sa->lock);
1799	}
1800
1801	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
1802		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
1803
1804	rx_sa->sc = rx_sc;
1805
1806	/* If h/w offloading is available, propagate to the device */
1807	if (macsec_is_offloaded(netdev_priv(dev))) {
1808		const struct macsec_ops *ops;
1809		struct macsec_context ctx;
1810
1811		ops = macsec_get_ops(netdev_priv(dev), &ctx);
1812		if (!ops) {
1813			err = -EOPNOTSUPP;
1814			goto cleanup;
1815		}
1816
1817		ctx.sa.assoc_num = assoc_num;
1818		ctx.sa.rx_sa = rx_sa;
1819		ctx.secy = secy;
1820		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1821		       MACSEC_KEYID_LEN);
1822
1823		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
1824		if (err)
1825			goto cleanup;
1826	}
1827
1828	if (secy->xpn) {
1829		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
1830		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
1831			   MACSEC_SALT_LEN);
1832	}
1833
1834	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
1835	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
1836
1837	rtnl_unlock();
1838
1839	return 0;
1840
1841cleanup:
1842	kfree(rx_sa);
1843	rtnl_unlock();
1844	return err;
1845}
1846
1847static bool validate_add_rxsc(struct nlattr **attrs)
1848{
1849	if (!attrs[MACSEC_RXSC_ATTR_SCI])
1850		return false;
1851
1852	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
1853		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
1854			return false;
1855	}
1856
1857	return true;
1858}
1859
1860static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
1861{
1862	struct net_device *dev;
1863	sci_t sci = MACSEC_UNDEF_SCI;
1864	struct nlattr **attrs = info->attrs;
1865	struct macsec_rx_sc *rx_sc;
1866	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1867	struct macsec_secy *secy;
1868	bool was_active;
1869	int ret;
1870
1871	if (!attrs[MACSEC_ATTR_IFINDEX])
1872		return -EINVAL;
1873
1874	if (parse_rxsc_config(attrs, tb_rxsc))
1875		return -EINVAL;
1876
1877	if (!validate_add_rxsc(tb_rxsc))
1878		return -EINVAL;
1879
1880	rtnl_lock();
1881	dev = get_dev_from_nl(genl_info_net(info), attrs);
1882	if (IS_ERR(dev)) {
1883		rtnl_unlock();
1884		return PTR_ERR(dev);
1885	}
1886
1887	secy = &macsec_priv(dev)->secy;
1888	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1889
1890	rx_sc = create_rx_sc(dev, sci);
1891	if (IS_ERR(rx_sc)) {
1892		rtnl_unlock();
1893		return PTR_ERR(rx_sc);
1894	}
1895
1896	was_active = rx_sc->active;
1897	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
1898		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
1899
1900	if (macsec_is_offloaded(netdev_priv(dev))) {
1901		const struct macsec_ops *ops;
1902		struct macsec_context ctx;
1903
1904		ops = macsec_get_ops(netdev_priv(dev), &ctx);
1905		if (!ops) {
1906			ret = -EOPNOTSUPP;
1907			goto cleanup;
1908		}
1909
1910		ctx.rx_sc = rx_sc;
1911		ctx.secy = secy;
1912
1913		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
1914		if (ret)
1915			goto cleanup;
1916	}
1917
1918	rtnl_unlock();
1919
1920	return 0;
1921
1922cleanup:
1923	rx_sc->active = was_active;
1924	rtnl_unlock();
1925	return ret;
1926}
1927
1928static bool validate_add_txsa(struct nlattr **attrs)
1929{
1930	if (!attrs[MACSEC_SA_ATTR_AN] ||
1931	    !attrs[MACSEC_SA_ATTR_PN] ||
1932	    !attrs[MACSEC_SA_ATTR_KEY] ||
1933	    !attrs[MACSEC_SA_ATTR_KEYID])
1934		return false;
1935
1936	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1937		return false;
1938
1939	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
1940		return false;
1941
1942	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1943		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1944			return false;
1945	}
1946
1947	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1948		return false;
1949
1950	return true;
1951}
1952
1953static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
1954{
1955	struct net_device *dev;
1956	struct nlattr **attrs = info->attrs;
1957	struct macsec_secy *secy;
1958	struct macsec_tx_sc *tx_sc;
1959	struct macsec_tx_sa *tx_sa;
1960	unsigned char assoc_num;
1961	int pn_len;
1962	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1963	bool was_operational;
1964	int err;
1965
1966	if (!attrs[MACSEC_ATTR_IFINDEX])
1967		return -EINVAL;
1968
1969	if (parse_sa_config(attrs, tb_sa))
1970		return -EINVAL;
1971
1972	if (!validate_add_txsa(tb_sa))
1973		return -EINVAL;
1974
1975	rtnl_lock();
1976	dev = get_dev_from_nl(genl_info_net(info), attrs);
1977	if (IS_ERR(dev)) {
1978		rtnl_unlock();
1979		return PTR_ERR(dev);
1980	}
1981
1982	secy = &macsec_priv(dev)->secy;
1983	tx_sc = &secy->tx_sc;
1984
1985	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1986
1987	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
1988		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
1989			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
1990		rtnl_unlock();
1991		return -EINVAL;
1992	}
1993
1994	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
1995	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
1996		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
1997			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
1998		rtnl_unlock();
1999		return -EINVAL;
2000	}
2001
2002	if (secy->xpn) {
2003		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
2004			rtnl_unlock();
2005			return -EINVAL;
2006		}
2007
2008		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
2009			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
2010				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
2011				  MACSEC_SALT_LEN);
2012			rtnl_unlock();
2013			return -EINVAL;
2014		}
2015	}
2016
2017	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
2018	if (tx_sa) {
2019		rtnl_unlock();
2020		return -EBUSY;
2021	}
2022
2023	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
2024	if (!tx_sa) {
2025		rtnl_unlock();
2026		return -ENOMEM;
2027	}
2028
2029	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2030			 secy->key_len, secy->icv_len);
2031	if (err < 0) {
2032		kfree(tx_sa);
2033		rtnl_unlock();
2034		return err;
2035	}
2036
2037	spin_lock_bh(&tx_sa->lock);
2038	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2039	spin_unlock_bh(&tx_sa->lock);
2040
2041	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2042		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2043
2044	was_operational = secy->operational;
2045	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
2046		secy->operational = true;
2047
2048	/* If h/w offloading is available, propagate to the device */
2049	if (macsec_is_offloaded(netdev_priv(dev))) {
2050		const struct macsec_ops *ops;
2051		struct macsec_context ctx;
2052
2053		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2054		if (!ops) {
2055			err = -EOPNOTSUPP;
2056			goto cleanup;
2057		}
2058
2059		ctx.sa.assoc_num = assoc_num;
2060		ctx.sa.tx_sa = tx_sa;
2061		ctx.secy = secy;
2062		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2063		       MACSEC_KEYID_LEN);
2064
2065		err = macsec_offload(ops->mdo_add_txsa, &ctx);
2066		if (err)
2067			goto cleanup;
2068	}
2069
2070	if (secy->xpn) {
2071		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
2072		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
2073			   MACSEC_SALT_LEN);
2074	}
2075
2076	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
2077	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
2078
2079	rtnl_unlock();
2080
2081	return 0;
2082
2083cleanup:
2084	secy->operational = was_operational;
2085	kfree(tx_sa);
2086	rtnl_unlock();
2087	return err;
2088}
2089
2090static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
2091{
2092	struct nlattr **attrs = info->attrs;
2093	struct net_device *dev;
2094	struct macsec_secy *secy;
2095	struct macsec_rx_sc *rx_sc;
2096	struct macsec_rx_sa *rx_sa;
2097	u8 assoc_num;
2098	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2099	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2100	int ret;
2101
2102	if (!attrs[MACSEC_ATTR_IFINDEX])
2103		return -EINVAL;
2104
2105	if (parse_sa_config(attrs, tb_sa))
2106		return -EINVAL;
2107
2108	if (parse_rxsc_config(attrs, tb_rxsc))
2109		return -EINVAL;
2110
2111	rtnl_lock();
2112	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2113				 &dev, &secy, &rx_sc, &assoc_num);
2114	if (IS_ERR(rx_sa)) {
2115		rtnl_unlock();
2116		return PTR_ERR(rx_sa);
2117	}
2118
2119	if (rx_sa->active) {
2120		rtnl_unlock();
2121		return -EBUSY;
2122	}
2123
2124	/* If h/w offloading is available, propagate to the device */
2125	if (macsec_is_offloaded(netdev_priv(dev))) {
2126		const struct macsec_ops *ops;
2127		struct macsec_context ctx;
2128
2129		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2130		if (!ops) {
2131			ret = -EOPNOTSUPP;
2132			goto cleanup;
2133		}
2134
2135		ctx.sa.assoc_num = assoc_num;
2136		ctx.sa.rx_sa = rx_sa;
2137		ctx.secy = secy;
2138
2139		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
2140		if (ret)
2141			goto cleanup;
2142	}
2143
2144	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
2145	clear_rx_sa(rx_sa);
2146
2147	rtnl_unlock();
2148
2149	return 0;
2150
2151cleanup:
2152	rtnl_unlock();
2153	return ret;
2154}
2155
2156static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
2157{
2158	struct nlattr **attrs = info->attrs;
2159	struct net_device *dev;
2160	struct macsec_secy *secy;
2161	struct macsec_rx_sc *rx_sc;
2162	sci_t sci;
2163	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2164	int ret;
2165
2166	if (!attrs[MACSEC_ATTR_IFINDEX])
2167		return -EINVAL;
2168
2169	if (parse_rxsc_config(attrs, tb_rxsc))
2170		return -EINVAL;
2171
2172	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
2173		return -EINVAL;
2174
2175	rtnl_lock();
2176	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
2177	if (IS_ERR(dev)) {
2178		rtnl_unlock();
2179		return PTR_ERR(dev);
2180	}
2181
2182	secy = &macsec_priv(dev)->secy;
2183	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
2184
2185	rx_sc = del_rx_sc(secy, sci);
2186	if (!rx_sc) {
2187		rtnl_unlock();
2188		return -ENODEV;
2189	}
2190
2191	/* If h/w offloading is available, propagate to the device */
2192	if (macsec_is_offloaded(netdev_priv(dev))) {
2193		const struct macsec_ops *ops;
2194		struct macsec_context ctx;
2195
2196		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2197		if (!ops) {
2198			ret = -EOPNOTSUPP;
2199			goto cleanup;
2200		}
2201
2202		ctx.rx_sc = rx_sc;
2203		ctx.secy = secy;
2204		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
2205		if (ret)
2206			goto cleanup;
2207	}
2208
2209	free_rx_sc(rx_sc);
2210	rtnl_unlock();
2211
2212	return 0;
2213
2214cleanup:
2215	rtnl_unlock();
2216	return ret;
2217}
2218
2219static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
2220{
2221	struct nlattr **attrs = info->attrs;
2222	struct net_device *dev;
2223	struct macsec_secy *secy;
2224	struct macsec_tx_sc *tx_sc;
2225	struct macsec_tx_sa *tx_sa;
2226	u8 assoc_num;
2227	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2228	int ret;
2229
2230	if (!attrs[MACSEC_ATTR_IFINDEX])
2231		return -EINVAL;
2232
2233	if (parse_sa_config(attrs, tb_sa))
2234		return -EINVAL;
2235
2236	rtnl_lock();
2237	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2238				 &dev, &secy, &tx_sc, &assoc_num);
2239	if (IS_ERR(tx_sa)) {
2240		rtnl_unlock();
2241		return PTR_ERR(tx_sa);
2242	}
2243
2244	if (tx_sa->active) {
2245		rtnl_unlock();
2246		return -EBUSY;
2247	}
2248
2249	/* If h/w offloading is available, propagate to the device */
2250	if (macsec_is_offloaded(netdev_priv(dev))) {
2251		const struct macsec_ops *ops;
2252		struct macsec_context ctx;
2253
2254		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2255		if (!ops) {
2256			ret = -EOPNOTSUPP;
2257			goto cleanup;
2258		}
2259
2260		ctx.sa.assoc_num = assoc_num;
2261		ctx.sa.tx_sa = tx_sa;
2262		ctx.secy = secy;
2263
2264		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
2265		if (ret)
2266			goto cleanup;
2267	}
2268
2269	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
2270	clear_tx_sa(tx_sa);
2271
2272	rtnl_unlock();
2273
2274	return 0;
2275
2276cleanup:
2277	rtnl_unlock();
2278	return ret;
2279}
2280
2281static bool validate_upd_sa(struct nlattr **attrs)
2282{
2283	if (!attrs[MACSEC_SA_ATTR_AN] ||
2284	    attrs[MACSEC_SA_ATTR_KEY] ||
2285	    attrs[MACSEC_SA_ATTR_KEYID] ||
2286	    attrs[MACSEC_SA_ATTR_SSCI] ||
2287	    attrs[MACSEC_SA_ATTR_SALT])
2288		return false;
2289
2290	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
2291		return false;
2292
2293	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
2294		return false;
2295
2296	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
2297		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
2298			return false;
2299	}
2300
2301	return true;
2302}
2303
2304static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
2305{
2306	struct nlattr **attrs = info->attrs;
2307	struct net_device *dev;
2308	struct macsec_secy *secy;
2309	struct macsec_tx_sc *tx_sc;
2310	struct macsec_tx_sa *tx_sa;
2311	u8 assoc_num;
2312	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2313	bool was_operational, was_active;
2314	pn_t prev_pn;
2315	int ret = 0;
2316
2317	prev_pn.full64 = 0;
2318
2319	if (!attrs[MACSEC_ATTR_IFINDEX])
2320		return -EINVAL;
2321
2322	if (parse_sa_config(attrs, tb_sa))
2323		return -EINVAL;
2324
2325	if (!validate_upd_sa(tb_sa))
2326		return -EINVAL;
2327
2328	rtnl_lock();
2329	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2330				 &dev, &secy, &tx_sc, &assoc_num);
2331	if (IS_ERR(tx_sa)) {
2332		rtnl_unlock();
2333		return PTR_ERR(tx_sa);
2334	}
2335
2336	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2337		int pn_len;
2338
2339		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2340		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2341			pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
2342				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2343			rtnl_unlock();
2344			return -EINVAL;
2345		}
2346
2347		spin_lock_bh(&tx_sa->lock);
2348		prev_pn = tx_sa->next_pn_halves;
2349		tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2350		spin_unlock_bh(&tx_sa->lock);
2351	}
2352
2353	was_active = tx_sa->active;
2354	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2355		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2356
2357	was_operational = secy->operational;
2358	if (assoc_num == tx_sc->encoding_sa)
2359		secy->operational = tx_sa->active;
2360
2361	/* If h/w offloading is available, propagate to the device */
2362	if (macsec_is_offloaded(netdev_priv(dev))) {
2363		const struct macsec_ops *ops;
2364		struct macsec_context ctx;
2365
2366		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2367		if (!ops) {
2368			ret = -EOPNOTSUPP;
2369			goto cleanup;
2370		}
2371
2372		ctx.sa.assoc_num = assoc_num;
2373		ctx.sa.tx_sa = tx_sa;
2374		ctx.secy = secy;
2375
2376		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
2377		if (ret)
2378			goto cleanup;
2379	}
2380
2381	rtnl_unlock();
2382
2383	return 0;
2384
2385cleanup:
2386	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2387		spin_lock_bh(&tx_sa->lock);
2388		tx_sa->next_pn_halves = prev_pn;
2389		spin_unlock_bh(&tx_sa->lock);
2390	}
2391	tx_sa->active = was_active;
2392	secy->operational = was_operational;
2393	rtnl_unlock();
2394	return ret;
2395}
2396
2397static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
2398{
2399	struct nlattr **attrs = info->attrs;
2400	struct net_device *dev;
2401	struct macsec_secy *secy;
2402	struct macsec_rx_sc *rx_sc;
2403	struct macsec_rx_sa *rx_sa;
2404	u8 assoc_num;
2405	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2406	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2407	bool was_active;
2408	pn_t prev_pn;
2409	int ret = 0;
2410
2411	prev_pn.full64 = 0;
2412
2413	if (!attrs[MACSEC_ATTR_IFINDEX])
2414		return -EINVAL;
2415
2416	if (parse_rxsc_config(attrs, tb_rxsc))
2417		return -EINVAL;
2418
2419	if (parse_sa_config(attrs, tb_sa))
2420		return -EINVAL;
2421
2422	if (!validate_upd_sa(tb_sa))
2423		return -EINVAL;
2424
2425	rtnl_lock();
2426	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2427				 &dev, &secy, &rx_sc, &assoc_num);
2428	if (IS_ERR(rx_sa)) {
2429		rtnl_unlock();
2430		return PTR_ERR(rx_sa);
2431	}
2432
2433	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2434		int pn_len;
2435
2436		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2437		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2438			pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
2439				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2440			rtnl_unlock();
2441			return -EINVAL;
2442		}
2443
2444		spin_lock_bh(&rx_sa->lock);
2445		prev_pn = rx_sa->next_pn_halves;
2446		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2447		spin_unlock_bh(&rx_sa->lock);
2448	}
2449
2450	was_active = rx_sa->active;
2451	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2452		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2453
2454	/* If h/w offloading is available, propagate to the device */
2455	if (macsec_is_offloaded(netdev_priv(dev))) {
2456		const struct macsec_ops *ops;
2457		struct macsec_context ctx;
2458
2459		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2460		if (!ops) {
2461			ret = -EOPNOTSUPP;
2462			goto cleanup;
2463		}
2464
2465		ctx.sa.assoc_num = assoc_num;
2466		ctx.sa.rx_sa = rx_sa;
2467		ctx.secy = secy;
2468
2469		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
2470		if (ret)
2471			goto cleanup;
2472	}
2473
2474	rtnl_unlock();
2475	return 0;
2476
2477cleanup:
2478	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2479		spin_lock_bh(&rx_sa->lock);
2480		rx_sa->next_pn_halves = prev_pn;
2481		spin_unlock_bh(&rx_sa->lock);
2482	}
2483	rx_sa->active = was_active;
2484	rtnl_unlock();
2485	return ret;
2486}
2487
2488static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
2489{
2490	struct nlattr **attrs = info->attrs;
2491	struct net_device *dev;
2492	struct macsec_secy *secy;
2493	struct macsec_rx_sc *rx_sc;
2494	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2495	unsigned int prev_n_rx_sc;
2496	bool was_active;
2497	int ret;
2498
2499	if (!attrs[MACSEC_ATTR_IFINDEX])
2500		return -EINVAL;
2501
2502	if (parse_rxsc_config(attrs, tb_rxsc))
2503		return -EINVAL;
2504
2505	if (!validate_add_rxsc(tb_rxsc))
2506		return -EINVAL;
2507
2508	rtnl_lock();
2509	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
2510	if (IS_ERR(rx_sc)) {
2511		rtnl_unlock();
2512		return PTR_ERR(rx_sc);
2513	}
2514
2515	was_active = rx_sc->active;
2516	prev_n_rx_sc = secy->n_rx_sc;
2517	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
2518		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
2519
2520		if (rx_sc->active != new)
2521			secy->n_rx_sc += new ? 1 : -1;
2522
2523		rx_sc->active = new;
2524	}
2525
2526	/* If h/w offloading is available, propagate to the device */
2527	if (macsec_is_offloaded(netdev_priv(dev))) {
2528		const struct macsec_ops *ops;
2529		struct macsec_context ctx;
2530
2531		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2532		if (!ops) {
2533			ret = -EOPNOTSUPP;
2534			goto cleanup;
2535		}
2536
2537		ctx.rx_sc = rx_sc;
2538		ctx.secy = secy;
2539
2540		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
2541		if (ret)
2542			goto cleanup;
2543	}
2544
2545	rtnl_unlock();
2546
2547	return 0;
2548
2549cleanup:
2550	secy->n_rx_sc = prev_n_rx_sc;
2551	rx_sc->active = was_active;
2552	rtnl_unlock();
2553	return ret;
2554}
2555
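/* A SecY counts as configured once it has at least one receive SC or any
 * transmit SA installed; offloading cannot be toggled in that state.
 */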
2556static bool macsec_is_configured(struct macsec_dev *macsec)
2557{
2558	struct macsec_secy *secy = &macsec->secy;
2559	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2560	int i;
2561
2562	if (secy->n_rx_sc > 0)
2563		return true;
2564
2565	for (i = 0; i < MACSEC_NUM_AN; i++)
2566		if (tx_sc->sa[i])
2567			return true;
2568
2569	return false;
2570}
2571
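/* MACSEC_CMD_UPD_OFFLOAD: switch offloading on or off for a device. Only
 * allowed while the device is down and unconfigured, since migrating
 * rules between software and hardware is not supported.
 */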
2572static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
2573{
2574	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
2575	enum macsec_offload offload, prev_offload;
2576	int (*func)(struct macsec_context *ctx);
2577	struct nlattr **attrs = info->attrs;
2578	struct net_device *dev;
2579	const struct macsec_ops *ops;
2580	struct macsec_context ctx;
2581	struct macsec_dev *macsec;
2582	int ret;
2583
2584	if (!attrs[MACSEC_ATTR_IFINDEX])
2585		return -EINVAL;
2586
2587	if (!attrs[MACSEC_ATTR_OFFLOAD])
2588		return -EINVAL;
2589
2590	if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
2591					attrs[MACSEC_ATTR_OFFLOAD],
2592					macsec_genl_offload_policy, NULL))
2593		return -EINVAL;
2594
2595	dev = get_dev_from_nl(genl_info_net(info), attrs);
2596	if (IS_ERR(dev))
2597		return PTR_ERR(dev);
2598	macsec = macsec_priv(dev);
2599
2600	if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE])
2601		return -EINVAL;
2602
2603	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
2604	if (macsec->offload == offload)
2605		return 0;
2606
2607	/* Check if the offloading mode is supported by the underlying layers */
2608	if (offload != MACSEC_OFFLOAD_OFF &&
2609	    !macsec_check_offload(offload, macsec))
2610		return -EOPNOTSUPP;
2611
2612	/* Check if the net device is busy. */
2613	if (netif_running(dev))
2614		return -EBUSY;
2615
2616	rtnl_lock();
2617
2618	prev_offload = macsec->offload;
2619	macsec->offload = offload;
2620
2621	/* Check if the device already has rules configured: we do not support
2622	 * rules migration.
2623	 */
2624	if (macsec_is_configured(macsec)) {
2625		ret = -EBUSY;
2626		goto rollback;
2627	}
2628
2629	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
2630			       macsec, &ctx);
2631	if (!ops) {
2632		ret = -EOPNOTSUPP;
2633		goto rollback;
2634	}
2635
2636	if (prev_offload == MACSEC_OFFLOAD_OFF)
2637		func = ops->mdo_add_secy;
2638	else
2639		func = ops->mdo_del_secy;
2640
2641	ctx.secy = &macsec->secy;
2642	ret = macsec_offload(func, &ctx);
2643	if (ret)
2644		goto rollback;
2645
2646	/* Force features update, since they are different for SW MACsec and
2647	 * HW offloading cases.
2648	 */
2649	netdev_update_features(dev);
2650
2651	rtnl_unlock();
2652	return 0;
2653
2654rollback:
2655	macsec->offload = prev_offload;
2656
2657	rtnl_unlock();
2658	return ret;
2659}
2660
2661static void get_tx_sa_stats(struct net_device *dev, int an,
2662			    struct macsec_tx_sa *tx_sa,
2663			    struct macsec_tx_sa_stats *sum)
2664{
2665	struct macsec_dev *macsec = macsec_priv(dev);
2666	int cpu;
2667
2668	/* If h/w offloading is available, propagate to the device */
2669	if (macsec_is_offloaded(macsec)) {
2670		const struct macsec_ops *ops;
2671		struct macsec_context ctx;
2672
2673		ops = macsec_get_ops(macsec, &ctx);
2674		if (ops) {
2675			ctx.sa.assoc_num = an;
2676			ctx.sa.tx_sa = tx_sa;
2677			ctx.stats.tx_sa_stats = sum;
2678			ctx.secy = &macsec_priv(dev)->secy;
2679			macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
2680		}
2681		return;
2682	}
2683
2684	for_each_possible_cpu(cpu) {
2685		const struct macsec_tx_sa_stats *stats =
2686			per_cpu_ptr(tx_sa->stats, cpu);
2687
2688		sum->OutPktsProtected += stats->OutPktsProtected;
2689		sum->OutPktsEncrypted += stats->OutPktsEncrypted;
2690	}
2691}
2692
2693static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
2694{
2695	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
2696			sum->OutPktsProtected) ||
2697	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2698			sum->OutPktsEncrypted))
2699		return -EMSGSIZE;
2700
2701	return 0;
2702}
2703
2704static void get_rx_sa_stats(struct net_device *dev,
2705			    struct macsec_rx_sc *rx_sc, int an,
2706			    struct macsec_rx_sa *rx_sa,
2707			    struct macsec_rx_sa_stats *sum)
2708{
2709	struct macsec_dev *macsec = macsec_priv(dev);
2710	int cpu;
2711
2712	/* If h/w offloading is available, propagate to the device */
2713	if (macsec_is_offloaded(macsec)) {
2714		const struct macsec_ops *ops;
2715		struct macsec_context ctx;
2716
2717		ops = macsec_get_ops(macsec, &ctx);
2718		if (ops) {
2719			ctx.sa.assoc_num = an;
2720			ctx.sa.rx_sa = rx_sa;
2721			ctx.stats.rx_sa_stats = sum;
2722			ctx.secy = &macsec_priv(dev)->secy;
2723			ctx.rx_sc = rx_sc;
2724			macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
2725		}
2726		return;
2727	}
2728
2729	for_each_possible_cpu(cpu) {
2730		const struct macsec_rx_sa_stats *stats =
2731			per_cpu_ptr(rx_sa->stats, cpu);
2732
2733		sum->InPktsOK         += stats->InPktsOK;
2734		sum->InPktsInvalid    += stats->InPktsInvalid;
2735		sum->InPktsNotValid   += stats->InPktsNotValid;
2736		sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
2737		sum->InPktsUnusedSA   += stats->InPktsUnusedSA;
2738	}
2739}
2740
2741static int copy_rx_sa_stats(struct sk_buff *skb,
2742			    struct macsec_rx_sa_stats *sum)
2743{
2744	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
2745	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
2746			sum->InPktsInvalid) ||
2747	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
2748			sum->InPktsNotValid) ||
2749	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2750			sum->InPktsNotUsingSA) ||
2751	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
2752			sum->InPktsUnusedSA))
2753		return -EMSGSIZE;
2754
2755	return 0;
2756}
2757
2758static void get_rx_sc_stats(struct net_device *dev,
2759			    struct macsec_rx_sc *rx_sc,
2760			    struct macsec_rx_sc_stats *sum)
2761{
2762	struct macsec_dev *macsec = macsec_priv(dev);
2763	int cpu;
2764
2765	/* If h/w offloading is available, propagate to the device */
2766	if (macsec_is_offloaded(macsec)) {
2767		const struct macsec_ops *ops;
2768		struct macsec_context ctx;
2769
2770		ops = macsec_get_ops(macsec, &ctx);
2771		if (ops) {
2772			ctx.stats.rx_sc_stats = sum;
2773			ctx.secy = &macsec_priv(dev)->secy;
2774			ctx.rx_sc = rx_sc;
2775			macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
2776		}
2777		return;
2778	}
2779
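	/* Fold the per-CPU counters; the u64_stats retry loop guarantees a
	 * consistent snapshot of each CPU's stats on 32-bit systems.
	 */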
2780	for_each_possible_cpu(cpu) {
2781		const struct pcpu_rx_sc_stats *stats;
2782		struct macsec_rx_sc_stats tmp;
2783		unsigned int start;
2784
2785		stats = per_cpu_ptr(rx_sc->stats, cpu);
2786		do {
2787			start = u64_stats_fetch_begin(&stats->syncp);
2788			memcpy(&tmp, &stats->stats, sizeof(tmp));
2789		} while (u64_stats_fetch_retry(&stats->syncp, start));
2790
2791		sum->InOctetsValidated += tmp.InOctetsValidated;
2792		sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
2793		sum->InPktsUnchecked   += tmp.InPktsUnchecked;
2794		sum->InPktsDelayed     += tmp.InPktsDelayed;
2795		sum->InPktsOK          += tmp.InPktsOK;
2796		sum->InPktsInvalid     += tmp.InPktsInvalid;
2797		sum->InPktsLate        += tmp.InPktsLate;
2798		sum->InPktsNotValid    += tmp.InPktsNotValid;
2799		sum->InPktsNotUsingSA  += tmp.InPktsNotUsingSA;
2800		sum->InPktsUnusedSA    += tmp.InPktsUnusedSA;
2801	}
2802}
2803
2804static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
2805{
2806	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
2807			      sum->InOctetsValidated,
2808			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2809	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
2810			      sum->InOctetsDecrypted,
2811			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2812	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
2813			      sum->InPktsUnchecked,
2814			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2815	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
2816			      sum->InPktsDelayed,
2817			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2818	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
2819			      sum->InPktsOK,
2820			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2821	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
2822			      sum->InPktsInvalid,
2823			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2824	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
2825			      sum->InPktsLate,
2826			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2827	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
2828			      sum->InPktsNotValid,
2829			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2830	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2831			      sum->InPktsNotUsingSA,
2832			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2833	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
2834			      sum->InPktsUnusedSA,
2835			      MACSEC_RXSC_STATS_ATTR_PAD))
2836		return -EMSGSIZE;
2837
2838	return 0;
2839}
2840
2841static void get_tx_sc_stats(struct net_device *dev,
2842			    struct macsec_tx_sc_stats *sum)
2843{
2844	struct macsec_dev *macsec = macsec_priv(dev);
2845	int cpu;
2846
2847	/* If h/w offloading is available, propagate to the device */
2848	if (macsec_is_offloaded(macsec)) {
2849		const struct macsec_ops *ops;
2850		struct macsec_context ctx;
2851
2852		ops = macsec_get_ops(macsec, &ctx);
2853		if (ops) {
2854			ctx.stats.tx_sc_stats = sum;
2855			ctx.secy = &macsec_priv(dev)->secy;
2856			macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
2857		}
2858		return;
2859	}
2860
2861	for_each_possible_cpu(cpu) {
2862		const struct pcpu_tx_sc_stats *stats;
2863		struct macsec_tx_sc_stats tmp;
2864		unsigned int start;
2865
2866		stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
2867		do {
2868			start = u64_stats_fetch_begin(&stats->syncp);
2869			memcpy(&tmp, &stats->stats, sizeof(tmp));
2870		} while (u64_stats_fetch_retry(&stats->syncp, start));
2871
2872		sum->OutPktsProtected   += tmp.OutPktsProtected;
2873		sum->OutPktsEncrypted   += tmp.OutPktsEncrypted;
2874		sum->OutOctetsProtected += tmp.OutOctetsProtected;
2875		sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
2876	}
2877}
2878
2879static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
2880{
2881	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
2882			      sum->OutPktsProtected,
2883			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2884	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2885			      sum->OutPktsEncrypted,
2886			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2887	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
2888			      sum->OutOctetsProtected,
2889			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2890	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
2891			      sum->OutOctetsEncrypted,
2892			      MACSEC_TXSC_STATS_ATTR_PAD))
2893		return -EMSGSIZE;
2894
2895	return 0;
2896}
2897
2898static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
2899{
2900	struct macsec_dev *macsec = macsec_priv(dev);
2901	int cpu;
2902
2903	/* If h/w offloading is available, propagate to the device */
2904	if (macsec_is_offloaded(macsec)) {
2905		const struct macsec_ops *ops;
2906		struct macsec_context ctx;
2907
2908		ops = macsec_get_ops(macsec, &ctx);
2909		if (ops) {
2910			ctx.stats.dev_stats = sum;
2911			ctx.secy = &macsec_priv(dev)->secy;
2912			macsec_offload(ops->mdo_get_dev_stats, &ctx);
2913		}
2914		return;
2915	}
2916
2917	for_each_possible_cpu(cpu) {
2918		const struct pcpu_secy_stats *stats;
2919		struct macsec_dev_stats tmp;
2920		unsigned int start;
2921
2922		stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
2923		do {
2924			start = u64_stats_fetch_begin(&stats->syncp);
2925			memcpy(&tmp, &stats->stats, sizeof(tmp));
2926		} while (u64_stats_fetch_retry(&stats->syncp, start));
2927
2928		sum->OutPktsUntagged  += tmp.OutPktsUntagged;
2929		sum->InPktsUntagged   += tmp.InPktsUntagged;
2930		sum->OutPktsTooLong   += tmp.OutPktsTooLong;
2931		sum->InPktsNoTag      += tmp.InPktsNoTag;
2932		sum->InPktsBadTag     += tmp.InPktsBadTag;
2933		sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
2934		sum->InPktsNoSCI      += tmp.InPktsNoSCI;
2935		sum->InPktsOverrun    += tmp.InPktsOverrun;
2936	}
2937}
2938
2939static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
2940{
2941	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
2942			      sum->OutPktsUntagged,
2943			      MACSEC_SECY_STATS_ATTR_PAD) ||
2944	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
2945			      sum->InPktsUntagged,
2946			      MACSEC_SECY_STATS_ATTR_PAD) ||
2947	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
2948			      sum->OutPktsTooLong,
2949			      MACSEC_SECY_STATS_ATTR_PAD) ||
2950	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
2951			      sum->InPktsNoTag,
2952			      MACSEC_SECY_STATS_ATTR_PAD) ||
2953	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
2954			      sum->InPktsBadTag,
2955			      MACSEC_SECY_STATS_ATTR_PAD) ||
2956	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
2957			      sum->InPktsUnknownSCI,
2958			      MACSEC_SECY_STATS_ATTR_PAD) ||
2959	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
2960			      sum->InPktsNoSCI,
2961			      MACSEC_SECY_STATS_ATTR_PAD) ||
2962	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
2963			      sum->InPktsOverrun,
2964			      MACSEC_SECY_STATS_ATTR_PAD))
2965		return -EMSGSIZE;
2966
2967	return 0;
2968}
2969
2970static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
2971{
2972	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2973	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
2974							 MACSEC_ATTR_SECY);
2975	u64 csid;
2976
2977	if (!secy_nest)
2978		return 1;
2979
2980	switch (secy->key_len) {
2981	case MACSEC_GCM_AES_128_SAK_LEN:
2982		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
2983		break;
2984	case MACSEC_GCM_AES_256_SAK_LEN:
2985		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
2986		break;
2987	default:
2988		goto cancel;
2989	}
2990
2991	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
2992			MACSEC_SECY_ATTR_PAD) ||
2993	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
2994			      csid, MACSEC_SECY_ATTR_PAD) ||
2995	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
2996	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
2997	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
2998	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
2999	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
3000	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
3001	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
3002	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
3003	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
3004	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
3005		goto cancel;
3006
3007	if (secy->replay_protect) {
3008		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
3009			goto cancel;
3010	}
3011
3012	nla_nest_end(skb, secy_nest);
3013	return 0;
3014
3015cancel:
3016	nla_nest_cancel(skb, secy_nest);
3017	return 1;
3018}
3019
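/* Emit one SecY as a MACSEC_CMD_GET_TXSC multipart message: offload state,
 * SecY config, TX SC/SA stats and the list of RX SCs with their SAs.
 * noinline_for_stack keeps the large stats structs off the dump caller's
 * stack.
 */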
3020static noinline_for_stack int
3021dump_secy(struct macsec_secy *secy, struct net_device *dev,
3022	  struct sk_buff *skb, struct netlink_callback *cb)
3023{
3024	struct macsec_tx_sc_stats tx_sc_stats = {0, };
3025	struct macsec_tx_sa_stats tx_sa_stats = {0, };
3026	struct macsec_rx_sc_stats rx_sc_stats = {0, };
3027	struct macsec_rx_sa_stats rx_sa_stats = {0, };
3028	struct macsec_dev *macsec = netdev_priv(dev);
3029	struct macsec_dev_stats dev_stats = {0, };
3030	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3031	struct nlattr *txsa_list, *rxsc_list;
3032	struct macsec_rx_sc *rx_sc;
3033	struct nlattr *attr;
3034	void *hdr;
3035	int i, j;
3036
3037	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3038			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
3039	if (!hdr)
3040		return -EMSGSIZE;
3041
3042	genl_dump_check_consistent(cb, hdr);
3043
3044	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
3045		goto nla_put_failure;
3046
3047	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
3048	if (!attr)
3049		goto nla_put_failure;
3050	if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
3051		goto nla_put_failure;
3052	nla_nest_end(skb, attr);
3053
3054	if (nla_put_secy(secy, skb))
3055		goto nla_put_failure;
3056
3057	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
3058	if (!attr)
3059		goto nla_put_failure;
3060
3061	get_tx_sc_stats(dev, &tx_sc_stats);
3062	if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
3063		nla_nest_cancel(skb, attr);
3064		goto nla_put_failure;
3065	}
3066	nla_nest_end(skb, attr);
3067
3068	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
3069	if (!attr)
3070		goto nla_put_failure;
3071	get_secy_stats(dev, &dev_stats);
3072	if (copy_secy_stats(skb, &dev_stats)) {
3073		nla_nest_cancel(skb, attr);
3074		goto nla_put_failure;
3075	}
3076	nla_nest_end(skb, attr);
3077
3078	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
3079	if (!txsa_list)
3080		goto nla_put_failure;
3081	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
3082		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
3083		struct nlattr *txsa_nest;
3084		u64 pn;
3085		int pn_len;
3086
3087		if (!tx_sa)
3088			continue;
3089
3090		txsa_nest = nla_nest_start_noflag(skb, j++);
3091		if (!txsa_nest) {
3092			nla_nest_cancel(skb, txsa_list);
3093			goto nla_put_failure;
3094		}
3095
3096		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
3097		if (!attr) {
3098			nla_nest_cancel(skb, txsa_nest);
3099			nla_nest_cancel(skb, txsa_list);
3100			goto nla_put_failure;
3101		}
3102		memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
3103		get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
3104		if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
3105			nla_nest_cancel(skb, attr);
3106			nla_nest_cancel(skb, txsa_nest);
3107			nla_nest_cancel(skb, txsa_list);
3108			goto nla_put_failure;
3109		}
3110		nla_nest_end(skb, attr);
3111
3112		if (secy->xpn) {
3113			pn = tx_sa->next_pn;
3114			pn_len = MACSEC_XPN_PN_LEN;
3115		} else {
3116			pn = tx_sa->next_pn_halves.lower;
3117			pn_len = MACSEC_DEFAULT_PN_LEN;
3118		}
3119
3120		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3121		    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3122		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
3123		    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
3124		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
3125			nla_nest_cancel(skb, txsa_nest);
3126			nla_nest_cancel(skb, txsa_list);
3127			goto nla_put_failure;
3128		}
3129
3130		nla_nest_end(skb, txsa_nest);
3131	}
3132	nla_nest_end(skb, txsa_list);
3133
3134	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
3135	if (!rxsc_list)
3136		goto nla_put_failure;
3137
3138	j = 1;
3139	for_each_rxsc_rtnl(secy, rx_sc) {
3140		int k;
3141		struct nlattr *rxsa_list;
3142		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);
3143
3144		if (!rxsc_nest) {
3145			nla_nest_cancel(skb, rxsc_list);
3146			goto nla_put_failure;
3147		}
3148
3149		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
3150		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
3151				MACSEC_RXSC_ATTR_PAD)) {
3152			nla_nest_cancel(skb, rxsc_nest);
3153			nla_nest_cancel(skb, rxsc_list);
3154			goto nla_put_failure;
3155		}
3156
3157		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
3158		if (!attr) {
3159			nla_nest_cancel(skb, rxsc_nest);
3160			nla_nest_cancel(skb, rxsc_list);
3161			goto nla_put_failure;
3162		}
3163		memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
3164		get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
3165		if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
3166			nla_nest_cancel(skb, attr);
3167			nla_nest_cancel(skb, rxsc_nest);
3168			nla_nest_cancel(skb, rxsc_list);
3169			goto nla_put_failure;
3170		}
3171		nla_nest_end(skb, attr);
3172
3173		rxsa_list = nla_nest_start_noflag(skb,
3174						  MACSEC_RXSC_ATTR_SA_LIST);
3175		if (!rxsa_list) {
3176			nla_nest_cancel(skb, rxsc_nest);
3177			nla_nest_cancel(skb, rxsc_list);
3178			goto nla_put_failure;
3179		}
3180
3181		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
3182			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
3183			struct nlattr *rxsa_nest;
3184			u64 pn;
3185			int pn_len;
3186
3187			if (!rx_sa)
3188				continue;
3189
3190			rxsa_nest = nla_nest_start_noflag(skb, k++);
3191			if (!rxsa_nest) {
3192				nla_nest_cancel(skb, rxsa_list);
3193				nla_nest_cancel(skb, rxsc_nest);
3194				nla_nest_cancel(skb, rxsc_list);
3195				goto nla_put_failure;
3196			}
3197
3198			attr = nla_nest_start_noflag(skb,
3199						     MACSEC_SA_ATTR_STATS);
3200			if (!attr) {
3201				nla_nest_cancel(skb, rxsa_list);
3202				nla_nest_cancel(skb, rxsc_nest);
3203				nla_nest_cancel(skb, rxsc_list);
3204				goto nla_put_failure;
3205			}
3206			memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
3207			get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
3208			if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
3209				nla_nest_cancel(skb, attr);
3210				nla_nest_cancel(skb, rxsa_list);
3211				nla_nest_cancel(skb, rxsc_nest);
3212				nla_nest_cancel(skb, rxsc_list);
3213				goto nla_put_failure;
3214			}
3215			nla_nest_end(skb, attr);
3216
3217			if (secy->xpn) {
3218				pn = rx_sa->next_pn;
3219				pn_len = MACSEC_XPN_PN_LEN;
3220			} else {
3221				pn = rx_sa->next_pn_halves.lower;
3222				pn_len = MACSEC_DEFAULT_PN_LEN;
3223			}
3224
3225			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3226			    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3227			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
3228			    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
3229			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
3230				nla_nest_cancel(skb, rxsa_nest);
3231				nla_nest_cancel(skb, rxsc_nest);
3232				nla_nest_cancel(skb, rxsc_list);
3233				goto nla_put_failure;
3234			}
3235			nla_nest_end(skb, rxsa_nest);
3236		}
3237
3238		nla_nest_end(skb, rxsa_list);
3239		nla_nest_end(skb, rxsc_nest);
3240	}
3241
3242	nla_nest_end(skb, rxsc_list);
3243
3244	genlmsg_end(skb, hdr);
3245
3246	return 0;
3247
3248nla_put_failure:
3249	genlmsg_cancel(skb, hdr);
3250	return -EMSGSIZE;
3251}
3252
3253static int macsec_generation = 1; /* protected by RTNL */
3254
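/* Dump every SecY in the netns, resuming from the device index saved in
 * cb->args[0]; cb->seq is set to macsec_generation so that userspace can
 * detect configuration changes between dump passes.
 */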
3255static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
3256{
3257	struct net *net = sock_net(skb->sk);
3258	struct net_device *dev;
3259	int dev_idx, d;
3260
3261	dev_idx = cb->args[0];
3262
3263	d = 0;
3264	rtnl_lock();
3265
3266	cb->seq = macsec_generation;
3267
3268	for_each_netdev(net, dev) {
3269		struct macsec_secy *secy;
3270
3271		if (d < dev_idx)
3272			goto next;
3273
3274		if (!netif_is_macsec(dev))
3275			goto next;
3276
3277		secy = &macsec_priv(dev)->secy;
3278		if (dump_secy(secy, dev, skb, cb) < 0)
3279			goto done;
3280next:
3281		d++;
3282	}
3283
3284done:
3285	rtnl_unlock();
3286	cb->args[0] = d;
3287	return skb->len;
3288}
3289
static const struct genl_ops macsec_genl_ops[] = {
	{
		.cmd = MACSEC_CMD_GET_TXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.dumpit = macsec_dump_txsc,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_OFFLOAD,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_offload,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family macsec_fam __ro_after_init = {
	.name		= MACSEC_GENL_NAME,
	.hdrsize	= 0,
	.version	= MACSEC_GENL_VERSION,
	.maxattr	= MACSEC_ATTR_MAX,
	.policy		= macsec_genl_policy,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= macsec_genl_ops,
	.n_ops		= ARRAY_SIZE(macsec_genl_ops),
};

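/* Transmit path. Three cases, in order:
 *  - the SecY is offloaded to hardware: hand the skb straight to the real
 *    device and let it insert the SecTAG/ICV;
 *  - protect_frames is disabled: count the frame as OutPktsUntagged and
 *    transmit it in the clear;
 *  - otherwise encrypt. macsec_encrypt() may return -EINPROGRESS when the
 *    AEAD implementation is asynchronous; the crypto completion callback
 *    then finishes the transmit, so -EINPROGRESS is not counted as a drop.
 */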
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	if (macsec_is_offloaded(netdev_priv(dev))) {
		skb->dev = macsec->real_dev;
		return dev_queue_xmit(skb);
	}

	/* IEEE 802.1AE-2006 10.5: protectFrames disabled, send untagged */
	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		skb->dev = macsec->real_dev;
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	if (!secy->operational) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}

#define SW_MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)

/* If h/w offloading is enabled, use the real device's features, except for:
 *   VLAN_FEATURES - they would require additional ops
 *   HW_MACSEC - no reason to report it on the virtual device
 */
#define REAL_DEV_FEATURES(dev) \
	((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC))

static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&macsec->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	if (macsec_is_offloaded(macsec)) {
		dev->features = REAL_DEV_FEATURES(real_dev);
	} else {
		dev->features = real_dev->features & SW_MACSEC_FEATURES;
		dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
	}

	dev->needed_headroom = real_dev->needed_headroom +
			       MACSEC_NEEDED_HEADROOM;
	dev->needed_tailroom = real_dev->needed_tailroom +
			       MACSEC_NEEDED_TAILROOM;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	return 0;
}

static void macsec_dev_uninit(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	gro_cells_destroy(&macsec->gro_cells);
	free_percpu(dev->tstats);
}

static netdev_features_t macsec_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	if (macsec_is_offloaded(macsec))
		return REAL_DEV_FEATURES(real_dev);

	features &= (real_dev->features & SW_MACSEC_FEATURES) |
		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
	features |= NETIF_F_LLTX;

	return features;
}

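/* Opening the macsec device programs its unicast address into the real
 * device's UC filter (and mirrors allmulti/promisc), so the lower device
 * accepts frames addressed to the SecY even when the two MAC addresses
 * differ. The error paths unwind in strict reverse order.
 */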
static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto clear_allmulti;
		}

		ctx.secy = &macsec->secy;
		err = macsec_offload(ops->mdo_dev_open, &ctx);
		if (err)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}

static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_dev_stop, &ctx);
		}
	}

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	dev_uc_del(real_dev, dev->dev_addr);

	return 0;
}

static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	if (!(dev->flags & IFF_UP))
		return;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);

	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev,
				    dev->flags & IFF_PROMISC ? 1 : -1);
}

static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}

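/* Changing the MAC address re-derives the SCI (dev_to_sci() combines the
 * address with the End Station port, 1) and, when offloading is in use,
 * pushes the updated SecY to the hardware via mdo_upd_secy.
 */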
static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	err = dev_uc_add(real_dev, addr->sa_data);
	if (err < 0)
		return err;

	dev_uc_del(real_dev, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);
	macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_upd_secy, &ctx);
		}
	}

	return 0;
}

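/* The MTU advertised to the stack must leave room for the worst-case MACsec
 * overhead on the lower device: macsec_extra_len(true) covers the MACsec
 * EtherType plus the SecTAG with an explicit SCI (2 + 6 + 8 = 16 bytes),
 * and icv_len covers the trailing ICV. As a worked example, with the
 * default 16-byte ICV a 1500-byte lower MTU leaves 1500 - 16 - 16 = 1468
 * bytes for the macsec device.
 */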
static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}

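/* Per-CPU counters are folded under the u64_stats seqcount: the
 * fetch_begin/fetch_retry pair re-reads a CPU's snapshot if a writer updated
 * it concurrently, which keeps 64-bit counters consistent on 32-bit SMP
 * without taking a lock on the hot path.
 */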
static void macsec_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *s)
{
	int cpu;

	if (!dev->tstats)
		return;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes   = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes   = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes   += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes   += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;
}

static int macsec_get_iflink(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev->ifindex;
}

static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init		= macsec_dev_init,
	.ndo_uninit		= macsec_dev_uninit,
	.ndo_open		= macsec_dev_open,
	.ndo_stop		= macsec_dev_stop,
	.ndo_fix_features	= macsec_fix_features,
	.ndo_change_mtu		= macsec_change_mtu,
	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
	.ndo_set_mac_address	= macsec_set_mac_address,
	.ndo_start_xmit		= macsec_start_xmit,
	.ndo_get_stats64	= macsec_get_stats64,
	.ndo_get_iflink		= macsec_get_iflink,
};

static const struct device_type macsec_type = {
	.name = "macsec",
};

static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
};

static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);

}

static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = macsec_free_netdev;
	SET_NETDEV_DEVTYPE(dev, &macsec_type);

	eth_zero_addr(dev->broadcast);
}

static int macsec_changelink_common(struct net_device *dev,
				    struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_WINDOW])
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);

	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
		case MACSEC_CIPHER_ID_GCM_AES_128:
		case MACSEC_DEFAULT_CIPHER_ID:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			secy->xpn = false;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			secy->xpn = false;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			secy->xpn = true;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			secy->xpn = true;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sc tx_sc;
	struct macsec_secy secy;
	int ret;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	/* Keep a copy of unmodified secy and tx_sc, in case the offload
	 * propagation fails, to revert macsec_changelink_common.
	 */
	memcpy(&secy, &macsec->secy, sizeof(secy));
	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));

	ret = macsec_changelink_common(dev, data);
	if (ret)
		goto cleanup;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.secy = &macsec->secy;
		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
		if (ret)
			goto cleanup;
	}

	return 0;

cleanup:
	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
	memcpy(&macsec->secy, &secy, sizeof(secy));

	return ret;
}

static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}

static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);
	netdev_upper_dev_unlink(real_dev, dev);

	macsec_generation++;
}

static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_del_secy, &ctx);
		}
	}

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}

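/* The first MACsec device created on a given lower device registers
 * macsec_handle_frame() as that device's rx_handler, with the shared
 * macsec_rxh_data list as its argument; later SecYs on the same lower
 * device just join the list. The converse happens in macsec_dellink():
 * when the last SecY goes away, the rx_handler is unregistered and the
 * list freed.
 */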
static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}

static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}

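/* Allocate the per-SecY state and apply the driver defaults: GCM-AES-128
 * (DEFAULT_SAK_LEN key), strict validation, frame protection on, replay
 * protection off. If no SCI was supplied, it is derived from the device's
 * MAC address and the End Station port (1), which is what "ip link add ...
 * type macsec" does when neither sci nor port is given.
 */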
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;
	secy->xpn = DEFAULT_XPN;

	secy->sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}

static struct lock_class_key macsec_netdev_addr_lock_key;

static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	rx_handler_func_t *rx_handler;
	u8 icv_len = DEFAULT_ICV_LEN;
	struct net_device *real_dev;
	int err, mtu;
	sci_t sci;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;
	if (real_dev->type != ARPHRD_ETHER)
		return -EINVAL;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	if (data && data[IFLA_MACSEC_OFFLOAD])
		macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
	else
		/* MACsec offloading is off by default */
		macsec->offload = MACSEC_OFFLOAD_OFF;

	/* Check if the offloading mode is supported by the underlying layers */
	if (macsec->offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(macsec->offload, macsec))
		return -EOPNOTSUPP;

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
	if (mtu < 0)
		dev->mtu = 0;
	else
		dev->mtu = mtu;

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	netdev_lockdep_set_classes(dev);
	lockdep_set_class(&dev->addr_list_lock,
			  &macsec_netdev_addr_lock_key);

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err < 0)
		goto unregister;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data) {
		err = macsec_changelink_common(dev, data);
		if (err)
			goto del_dev;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			err = macsec_offload(ops->mdo_add_secy, &ctx);
			if (err)
				goto del_dev;
		}
	}

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev);

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}

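/* Validate creation-time attributes. Besides range checks, this enforces the
 * SecTAG TCI encoding rules from IEEE 802.1AE 9.5: the SC bit (explicit SCI)
 * is mutually exclusive with the ES and SCB bits, and ES and SCB exclude
 * each other. A non-default ICV length is probed by allocating a throwaway
 * AEAD transform, so unsupported lengths are rejected up front. Enabling
 * replay protection without a replay window is also refused.
 */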
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es  = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		0;
}

static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	u64 csid;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

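/* rtnetlink glue for "ip link ... type macsec". For example (illustrative
 * iproute2 invocation, exact syntax may vary by version):
 *
 *   ip link add link eth0 macsec0 type macsec port 11 encrypt on
 *
 * creates a SecY on eth0 with the SCI derived from the device's MAC address
 * (inherited from eth0 by default) and port 11, with transmit-side
 * encryption enabled.
 */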
static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};

static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}

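/* React to events on the underlying device: mirror operstate changes to all
 * stacked macsec devices, tear every SecY down when the lower device is
 * unregistered, and shrink the macsec MTU when the lower MTU no longer
 * leaves room for the SecTAG and ICV.
 */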
static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};

static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
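	/* Wait for any outstanding call_rcu() callbacks (e.g. deferred
	 * SA/SC frees) to finish before the module text goes away.
	 */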
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");