   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * drivers/net/macsec.c - MACsec device
   4 *
   5 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
   6 */
   7
   8#include <linux/types.h>
   9#include <linux/skbuff.h>
  10#include <linux/socket.h>
  11#include <linux/module.h>
  12#include <crypto/aead.h>
  13#include <linux/etherdevice.h>
  14#include <linux/netdevice.h>
  15#include <linux/rtnetlink.h>
  16#include <linux/refcount.h>
  17#include <net/genetlink.h>
  18#include <net/sock.h>
  19#include <net/gro_cells.h>
  20#include <net/macsec.h>
  21#include <net/dst_metadata.h>
  22#include <linux/phy.h>
  23#include <linux/byteorder/generic.h>
  24#include <linux/if_arp.h>
  25
  26#include <uapi/linux/if_macsec.h>
  27
  28/* SecTAG length = macsec_eth_header without the optional SCI */
  29#define MACSEC_TAG_LEN 6
  30
  31struct macsec_eth_header {
  32	struct ethhdr eth;
  33	/* SecTAG */
  34	u8  tci_an;
  35#if defined(__LITTLE_ENDIAN_BITFIELD)
  36	u8  short_length:6,
  37		  unused:2;
  38#elif defined(__BIG_ENDIAN_BITFIELD)
  39	u8        unused:2,
  40	    short_length:6;
  41#else
  42#error	"Please fix <asm/byteorder.h>"
  43#endif
  44	__be32 packet_number;
  45	u8 secure_channel_id[8]; /* optional */
  46} __packed;
  47
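/* Editor's note -- illustrative, not part of the original driver: the tci_an
 * octet packs the Tag Control Information bits and the 2-bit Association
 * Number.  Assuming the MACSEC_TCI_* values used throughout this file
 * (V = 0x80, ES = 0x40, SC = 0x20, SCB = 0x10, E = 0x08, C = 0x04,
 * AN mask = 0x03, matching the IEEE 802.1AE SecTAG layout), an encrypted
 * frame that carries the SCI with AN = 2 has
 *
 *	tci_an = 0x20 | 0x08 | 0x04 | 2 = 0x2e
 *
 * and short_length encodes the user data length when it is below 48 octets.
 */
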
  48/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
  49#define MIN_NON_SHORT_LEN 48
  50
  51#define GCM_AES_IV_LEN 12
  52
  53#define for_each_rxsc(secy, sc)				\
  54	for (sc = rcu_dereference_bh(secy->rx_sc);	\
  55	     sc;					\
  56	     sc = rcu_dereference_bh(sc->next))
  57#define for_each_rxsc_rtnl(secy, sc)			\
  58	for (sc = rtnl_dereference(secy->rx_sc);	\
  59	     sc;					\
  60	     sc = rtnl_dereference(sc->next))
  61
  62#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))
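
/* Editor's note (illustrative): pn_same_half() compares bit 31 of two packet
 * numbers, i.e. whether both lie in the same 2^31-wide half of the 32-bit PN
 * space.  For example:
 *
 *	pn_same_half(0x7fffffff, 0x80000000)	-> false
 *	pn_same_half(0x80000000, 0xffffffff)	-> true
 *
 * The XPN code below uses this to detect that the low 32 bits of a 64-bit
 * packet number wrapped.
 */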
  63
  64struct gcm_iv_xpn {
  65	union {
  66		u8 short_secure_channel_id[4];
  67		ssci_t ssci;
  68	};
  69	__be64 pn;
  70} __packed;
  71
  72struct gcm_iv {
  73	union {
  74		u8 secure_channel_id[8];
  75		sci_t sci;
  76	};
  77	__be32 pn;
  78};
  79
  80#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
  81
  82struct pcpu_secy_stats {
  83	struct macsec_dev_stats stats;
  84	struct u64_stats_sync syncp;
  85};
  86
  87/**
  88 * struct macsec_dev - private data
  89 * @secy: SecY config
  90 * @real_dev: pointer to underlying netdevice
  91 * @dev_tracker: refcount tracker for @real_dev reference
  92 * @stats: MACsec device stats
   93 * @secys: linked list of SecYs on the underlying device
   94 * @gro_cells: the Generic Receive Offload cells for this device
   95 * @offload: status of offloading on the MACsec device
   96 * @insert_tx_tag: when offloading, the device requires an additional
   97 *	tag to be inserted on transmitted frames
  98 */
  99struct macsec_dev {
 100	struct macsec_secy secy;
 101	struct net_device *real_dev;
 102	netdevice_tracker dev_tracker;
 103	struct pcpu_secy_stats __percpu *stats;
 104	struct list_head secys;
 105	struct gro_cells gro_cells;
 106	enum macsec_offload offload;
 107	bool insert_tx_tag;
 108};
 109
 110/**
 111 * struct macsec_rxh_data - rx_handler private argument
  112 * @secys: linked list of SecYs on this underlying device
 113 */
 114struct macsec_rxh_data {
 115	struct list_head secys;
 116};
 117
 118static struct macsec_dev *macsec_priv(const struct net_device *dev)
 119{
 120	return (struct macsec_dev *)netdev_priv(dev);
 121}
 122
 123static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
 124{
 125	return rcu_dereference_bh(dev->rx_handler_data);
 126}
 127
 128static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
 129{
 130	return rtnl_dereference(dev->rx_handler_data);
 131}
 132
 133struct macsec_cb {
 134	struct aead_request *req;
 135	union {
 136		struct macsec_tx_sa *tx_sa;
 137		struct macsec_rx_sa *rx_sa;
 138	};
 139	u8 assoc_num;
 140	bool valid;
 141	bool has_sci;
 142};
 143
 144static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
 145{
 146	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);
 147
 148	if (!sa || !sa->active)
 149		return NULL;
 150
 151	if (!refcount_inc_not_zero(&sa->refcnt))
 152		return NULL;
 153
 154	return sa;
 155}
 156
 157static void free_rx_sc_rcu(struct rcu_head *head)
 158{
 159	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
 160
 161	free_percpu(rx_sc->stats);
 162	kfree(rx_sc);
 163}
 164
 165static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
 166{
 167	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
 168}
 169
 170static void macsec_rxsc_put(struct macsec_rx_sc *sc)
 171{
 172	if (refcount_dec_and_test(&sc->refcnt))
 173		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
 174}
 175
 176static void free_rxsa(struct rcu_head *head)
 177{
 178	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);
 179
 180	crypto_free_aead(sa->key.tfm);
 181	free_percpu(sa->stats);
 182	kfree(sa);
 183}
 184
 185static void macsec_rxsa_put(struct macsec_rx_sa *sa)
 186{
 187	if (refcount_dec_and_test(&sa->refcnt))
 188		call_rcu(&sa->rcu, free_rxsa);
 189}
 190
 191static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
 192{
 193	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);
 194
 195	if (!sa || !sa->active)
 196		return NULL;
 197
 198	if (!refcount_inc_not_zero(&sa->refcnt))
 199		return NULL;
 200
 201	return sa;
 202}
 203
 204static void free_txsa(struct rcu_head *head)
 205{
 206	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);
 207
 208	crypto_free_aead(sa->key.tfm);
 209	free_percpu(sa->stats);
 210	kfree(sa);
 211}
 212
 213static void macsec_txsa_put(struct macsec_tx_sa *sa)
 214{
 215	if (refcount_dec_and_test(&sa->refcnt))
 216		call_rcu(&sa->rcu, free_txsa);
 217}
 218
 219static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
 220{
 221	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
 222	return (struct macsec_cb *)skb->cb;
 223}
 224
 225#define MACSEC_PORT_SCB (0x0000)
 226#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
 227#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)
 228
 229#define MACSEC_GCM_AES_128_SAK_LEN 16
 230#define MACSEC_GCM_AES_256_SAK_LEN 32
 231
 232#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
 233#define DEFAULT_XPN false
 234#define DEFAULT_SEND_SCI true
 235#define DEFAULT_ENCRYPT false
 236#define DEFAULT_ENCODING_SA 0
 237#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))
 238
 239static sci_t make_sci(const u8 *addr, __be16 port)
 240{
 241	sci_t sci;
 242
 243	memcpy(&sci, addr, ETH_ALEN);
 244	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));
 245
 246	return sci;
 247}
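
/* Editor's note (illustrative): the SCI is the 8-octet concatenation of a
 * station MAC address and a 16-bit port number, kept in network byte order.
 * For example, with hypothetical values:
 *
 *	u8 addr[ETH_ALEN] = { 0x00, 0x16, 0x3e, 0xaa, 0xbb, 0xcc };
 *	sci_t sci = make_sci(addr, htons(1));
 *
 * yields the on-wire SCI 00:16:3e:aa:bb:cc:00:01.
 */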
 248
 249static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
 250{
 251	sci_t sci;
 252
 253	if (sci_present)
 254		memcpy(&sci, hdr->secure_channel_id,
 255		       sizeof(hdr->secure_channel_id));
 256	else
 257		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);
 258
 259	return sci;
 260}
 261
 262static unsigned int macsec_sectag_len(bool sci_present)
 263{
 264	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
 265}
 266
 267static unsigned int macsec_hdr_len(bool sci_present)
 268{
 269	return macsec_sectag_len(sci_present) + ETH_HLEN;
 270}
 271
 272static unsigned int macsec_extra_len(bool sci_present)
 273{
 274	return macsec_sectag_len(sci_present) + sizeof(__be16);
 275}
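
/* Editor's note (illustrative arithmetic): with the SCI present,
 * macsec_sectag_len() = 6 + 8 = 14, macsec_hdr_len() = 14 + 14 = 28 octets
 * (Ethernet header plus SecTAG), and macsec_extra_len() = 14 + 2 = 16 octets
 * of added on-wire overhead (SecTAG plus the 2-octet MACsec EtherType),
 * not counting the trailing ICV.  Without the SCI the three values are
 * 6, 20 and 8.
 */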
 276
 277/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
 278static void macsec_fill_sectag(struct macsec_eth_header *h,
 279			       const struct macsec_secy *secy, u32 pn,
 280			       bool sci_present)
 281{
 282	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
 283
 284	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
 285	h->eth.h_proto = htons(ETH_P_MACSEC);
 286
 287	if (sci_present) {
 288		h->tci_an |= MACSEC_TCI_SC;
 289		memcpy(&h->secure_channel_id, &secy->sci,
 290		       sizeof(h->secure_channel_id));
 291	} else {
 292		if (tx_sc->end_station)
 293			h->tci_an |= MACSEC_TCI_ES;
 294		if (tx_sc->scb)
 295			h->tci_an |= MACSEC_TCI_SCB;
 296	}
 297
 298	h->packet_number = htonl(pn);
 299
 300	/* with GCM, C/E clear for !encrypt, both set for encrypt */
 301	if (tx_sc->encrypt)
 302		h->tci_an |= MACSEC_TCI_CONFID;
 303	else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
 304		h->tci_an |= MACSEC_TCI_C;
 305
 306	h->tci_an |= tx_sc->encoding_sa;
 307}
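
/* Editor's note (worked examples, illustrative): for a SecY that encrypts
 * and sends the SCI with encoding_sa = 0, the resulting octet is
 * tci_an = SC | E | C = 0x2c.  For an end station that only protects
 * (no encryption) with the default 16-octet ICV and no SCI, neither E nor C
 * is set even though an ICV is present, so tci_an = ES = 0x40.
 */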
 308
 309static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
 310{
 311	if (data_len < MIN_NON_SHORT_LEN)
 312		h->short_length = data_len;
 313}
 314
  315/* Checks if a MACsec interface is being offloaded to a hardware engine */
 316static bool macsec_is_offloaded(struct macsec_dev *macsec)
 317{
 318	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
 319	    macsec->offload == MACSEC_OFFLOAD_PHY)
 320		return true;
 321
 322	return false;
 323}
 324
 325/* Checks if underlying layers implement MACsec offloading functions. */
 326static bool macsec_check_offload(enum macsec_offload offload,
 327				 struct macsec_dev *macsec)
 328{
 329	if (!macsec || !macsec->real_dev)
 330		return false;
 331
 332	if (offload == MACSEC_OFFLOAD_PHY)
 333		return macsec->real_dev->phydev &&
 334		       macsec->real_dev->phydev->macsec_ops;
 335	else if (offload == MACSEC_OFFLOAD_MAC)
 336		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
 337		       macsec->real_dev->macsec_ops;
 338
 339	return false;
 340}
 341
 342static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
 343						 struct macsec_dev *macsec,
 344						 struct macsec_context *ctx)
 345{
 346	if (ctx) {
 347		memset(ctx, 0, sizeof(*ctx));
 348		ctx->offload = offload;
 349
 350		if (offload == MACSEC_OFFLOAD_PHY)
 351			ctx->phydev = macsec->real_dev->phydev;
 352		else if (offload == MACSEC_OFFLOAD_MAC)
 353			ctx->netdev = macsec->real_dev;
 354	}
 355
 356	if (offload == MACSEC_OFFLOAD_PHY)
 357		return macsec->real_dev->phydev->macsec_ops;
 358	else
 359		return macsec->real_dev->macsec_ops;
 360}
 361
 362/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 363 * context device reference if provided.
 364 */
 365static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
 366					       struct macsec_context *ctx)
 367{
 368	if (!macsec_check_offload(macsec->offload, macsec))
 369		return NULL;
 370
 371	return __macsec_get_ops(macsec->offload, macsec, ctx);
 372}
 373
 374/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
 375static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
 376{
 377	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
 378	int len = skb->len - 2 * ETH_ALEN;
 379	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;
 380
 381	/* a) It comprises at least 17 octets */
 382	if (skb->len <= 16)
 383		return false;
 384
 385	/* b) MACsec EtherType: already checked */
 386
 387	/* c) V bit is clear */
 388	if (h->tci_an & MACSEC_TCI_VERSION)
 389		return false;
 390
 391	/* d) ES or SCB => !SC */
 392	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
 393	    (h->tci_an & MACSEC_TCI_SC))
 394		return false;
 395
 396	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
 397	if (h->unused)
 398		return false;
 399
  400	/* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
 401	if (!h->packet_number && !xpn)
 402		return false;
 403
 404	/* length check, f) g) h) i) */
 405	if (h->short_length)
 406		return len == extra_len + h->short_length;
 407	return len >= extra_len + MIN_NON_SHORT_LEN;
 408}
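
/* Editor's note (worked example, illustrative): for a received frame with
 * 40 octets of user data, no SCI and the default 16-octet ICV, the sender
 * set short_length = 40 (40 < MIN_NON_SHORT_LEN), so the check above
 * requires len == macsec_extra_len(false) + 16 + 40 = 8 + 16 + 40 = 64,
 * where len is skb->len minus the two MAC addresses.
 */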
 409
 410#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
 411#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
 412
 413static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
 414			       salt_t salt)
 415{
 416	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;
 417
 418	gcm_iv->ssci = ssci ^ salt.ssci;
 419	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
 420}
 421
 422static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
 423{
 424	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;
 425
 426	gcm_iv->sci = sci;
 427	gcm_iv->pn = htonl(pn);
 428}
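
/* Editor's note (illustrative): both helpers fill the same 96-bit GCM IV
 * (GCM_AES_IV_LEN).  In normal mode the IV is SCI (8 octets) || PN
 * (4 octets, big endian); in XPN mode it is (SSCI XOR salt octets 0-3) ||
 * (64-bit PN XOR salt octets 4-11), per IEEE 802.1AEbw-2013.  The callers
 * below pick one or the other:
 *
 *	u8 iv[GCM_AES_IV_LEN];
 *
 *	if (secy->xpn)
 *		macsec_fill_iv_xpn(iv, sa->ssci, pn.full64, sa->key.salt);
 *	else
 *		macsec_fill_iv(iv, secy->sci, pn.lower);
 */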
 429
 430static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
 431{
 432	return (struct macsec_eth_header *)skb_mac_header(skb);
 433}
 434
 435static void __macsec_pn_wrapped(struct macsec_secy *secy,
 436				struct macsec_tx_sa *tx_sa)
 437{
 438	pr_debug("PN wrapped, transitioning to !oper\n");
 439	tx_sa->active = false;
 440	if (secy->protect_frames)
 441		secy->operational = false;
 442}
 443
 444void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
 445{
 446	spin_lock_bh(&tx_sa->lock);
 447	__macsec_pn_wrapped(secy, tx_sa);
 448	spin_unlock_bh(&tx_sa->lock);
 449}
 450EXPORT_SYMBOL_GPL(macsec_pn_wrapped);
 451
 452static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
 453			    struct macsec_secy *secy)
 454{
 455	pn_t pn;
 456
 457	spin_lock_bh(&tx_sa->lock);
 458
 459	pn = tx_sa->next_pn_halves;
 460	if (secy->xpn)
 461		tx_sa->next_pn++;
 462	else
 463		tx_sa->next_pn_halves.lower++;
 464
 465	if (tx_sa->next_pn == 0)
 466		__macsec_pn_wrapped(secy, tx_sa);
 467	spin_unlock_bh(&tx_sa->lock);
 468
 469	return pn;
 470}
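
/* Editor's note (illustrative): next_pn and next_pn_halves alias the same
 * 64-bit counter, so XPN increments the full value while 32-bit PN mode only
 * increments the lower half.  The pre-increment value is returned; once an
 * increment wraps the counter to 0, __macsec_pn_wrapped() retires the SA,
 * and a subsequent caller would see pn.full64 == 0, which macsec_encrypt()
 * treats as an error.
 */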
 471
 472static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
 473{
 474	struct macsec_dev *macsec = netdev_priv(dev);
 475
 476	skb->dev = macsec->real_dev;
 477	skb_reset_mac_header(skb);
 478	skb->protocol = eth_hdr(skb)->h_proto;
 479}
 480
 481static unsigned int macsec_msdu_len(struct sk_buff *skb)
 482{
 483	struct macsec_dev *macsec = macsec_priv(skb->dev);
 484	struct macsec_secy *secy = &macsec->secy;
 485	bool sci_present = macsec_skb_cb(skb)->has_sci;
 486
 487	return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
 488}
 489
 490static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
 491			    struct macsec_tx_sa *tx_sa)
 492{
 493	unsigned int msdu_len = macsec_msdu_len(skb);
 494	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
 495
 496	u64_stats_update_begin(&txsc_stats->syncp);
 497	if (tx_sc->encrypt) {
 498		txsc_stats->stats.OutOctetsEncrypted += msdu_len;
 499		txsc_stats->stats.OutPktsEncrypted++;
 500		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
 501	} else {
 502		txsc_stats->stats.OutOctetsProtected += msdu_len;
 503		txsc_stats->stats.OutPktsProtected++;
 504		this_cpu_inc(tx_sa->stats->OutPktsProtected);
 505	}
 506	u64_stats_update_end(&txsc_stats->syncp);
 507}
 508
 509static void count_tx(struct net_device *dev, int ret, int len)
 510{
 511	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN))
 512		dev_sw_netstats_tx_add(dev, 1, len);
 513}
 514
 515static void macsec_encrypt_done(void *data, int err)
 516{
 517	struct sk_buff *skb = data;
 518	struct net_device *dev = skb->dev;
 519	struct macsec_dev *macsec = macsec_priv(dev);
 520	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
 521	int len, ret;
 522
 523	aead_request_free(macsec_skb_cb(skb)->req);
 524
 525	rcu_read_lock_bh();
 526	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
 527	/* packet is encrypted/protected so tx_bytes must be calculated */
 528	len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
 529	macsec_encrypt_finish(skb, dev);
 530	ret = dev_queue_xmit(skb);
 531	count_tx(dev, ret, len);
 532	rcu_read_unlock_bh();
 533
 534	macsec_txsa_put(sa);
 535	dev_put(dev);
 536}
 537
 538static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
 539					     unsigned char **iv,
 540					     struct scatterlist **sg,
 541					     int num_frags)
 542{
 543	size_t size, iv_offset, sg_offset;
 544	struct aead_request *req;
 545	void *tmp;
 546
 547	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
 548	iv_offset = size;
 549	size += GCM_AES_IV_LEN;
 550
 551	size = ALIGN(size, __alignof__(struct scatterlist));
 552	sg_offset = size;
 553	size += sizeof(struct scatterlist) * num_frags;
 554
 555	tmp = kmalloc(size, GFP_ATOMIC);
 556	if (!tmp)
 557		return NULL;
 558
 559	*iv = (unsigned char *)(tmp + iv_offset);
 560	*sg = (struct scatterlist *)(tmp + sg_offset);
 561	req = tmp;
 562
 563	aead_request_set_tfm(req, tfm);
 564
 565	return req;
 566}
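
/* Editor's note (illustrative): macsec_alloc_req() packs everything needed
 * for one AEAD operation into a single GFP_ATOMIC allocation, laid out as
 *
 *	[ struct aead_request | tfm reqsize | IV (12) | pad | sg[num_frags] ]
 *
 * with the scatterlist aligned via ALIGN().  Since the request sits at the
 * start of the buffer, one aead_request_free() on the returned pointer
 * releases the IV and scatterlist as well.
 */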
 567
 568static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 569				      struct net_device *dev)
 570{
 571	int ret;
 572	struct scatterlist *sg;
 573	struct sk_buff *trailer;
 574	unsigned char *iv;
 575	struct ethhdr *eth;
 576	struct macsec_eth_header *hh;
 577	size_t unprotected_len;
 578	struct aead_request *req;
 579	struct macsec_secy *secy;
 580	struct macsec_tx_sc *tx_sc;
 581	struct macsec_tx_sa *tx_sa;
 582	struct macsec_dev *macsec = macsec_priv(dev);
 583	bool sci_present;
 584	pn_t pn;
 585
 586	secy = &macsec->secy;
 587	tx_sc = &secy->tx_sc;
 588
 589	/* 10.5.1 TX SA assignment */
 590	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
 591	if (!tx_sa) {
 592		secy->operational = false;
 593		kfree_skb(skb);
 594		return ERR_PTR(-EINVAL);
 595	}
 596
 597	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
 598		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
 599		struct sk_buff *nskb = skb_copy_expand(skb,
 600						       MACSEC_NEEDED_HEADROOM,
 601						       MACSEC_NEEDED_TAILROOM,
 602						       GFP_ATOMIC);
 603		if (likely(nskb)) {
 604			consume_skb(skb);
 605			skb = nskb;
 606		} else {
 607			macsec_txsa_put(tx_sa);
 608			kfree_skb(skb);
 609			return ERR_PTR(-ENOMEM);
 610		}
 611	} else {
 612		skb = skb_unshare(skb, GFP_ATOMIC);
 613		if (!skb) {
 614			macsec_txsa_put(tx_sa);
 615			return ERR_PTR(-ENOMEM);
 616		}
 617	}
 618
 619	unprotected_len = skb->len;
 620	eth = eth_hdr(skb);
 621	sci_present = macsec_send_sci(secy);
 622	hh = skb_push(skb, macsec_extra_len(sci_present));
 623	memmove(hh, eth, 2 * ETH_ALEN);
 624
 625	pn = tx_sa_update_pn(tx_sa, secy);
 626	if (pn.full64 == 0) {
 627		macsec_txsa_put(tx_sa);
 628		kfree_skb(skb);
 629		return ERR_PTR(-ENOLINK);
 630	}
 631	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
 632	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
 633
 634	skb_put(skb, secy->icv_len);
 635
 636	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
 637		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
 638
 639		u64_stats_update_begin(&secy_stats->syncp);
 640		secy_stats->stats.OutPktsTooLong++;
 641		u64_stats_update_end(&secy_stats->syncp);
 642
 643		macsec_txsa_put(tx_sa);
 644		kfree_skb(skb);
 645		return ERR_PTR(-EINVAL);
 646	}
 647
 648	ret = skb_cow_data(skb, 0, &trailer);
 649	if (unlikely(ret < 0)) {
 650		macsec_txsa_put(tx_sa);
 651		kfree_skb(skb);
 652		return ERR_PTR(ret);
 653	}
 654
 655	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
 656	if (!req) {
 657		macsec_txsa_put(tx_sa);
 658		kfree_skb(skb);
 659		return ERR_PTR(-ENOMEM);
 660	}
 661
 662	if (secy->xpn)
 663		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
 664	else
 665		macsec_fill_iv(iv, secy->sci, pn.lower);
 666
 667	sg_init_table(sg, ret);
 668	ret = skb_to_sgvec(skb, sg, 0, skb->len);
 669	if (unlikely(ret < 0)) {
 670		aead_request_free(req);
 671		macsec_txsa_put(tx_sa);
 672		kfree_skb(skb);
 673		return ERR_PTR(ret);
 674	}
 675
 676	if (tx_sc->encrypt) {
 677		int len = skb->len - macsec_hdr_len(sci_present) -
 678			  secy->icv_len;
 679		aead_request_set_crypt(req, sg, sg, len, iv);
 680		aead_request_set_ad(req, macsec_hdr_len(sci_present));
 681	} else {
 682		aead_request_set_crypt(req, sg, sg, 0, iv);
 683		aead_request_set_ad(req, skb->len - secy->icv_len);
 684	}
 685
 686	macsec_skb_cb(skb)->req = req;
 687	macsec_skb_cb(skb)->tx_sa = tx_sa;
 688	macsec_skb_cb(skb)->has_sci = sci_present;
 689	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
 690
 691	dev_hold(skb->dev);
 692	ret = crypto_aead_encrypt(req);
 693	if (ret == -EINPROGRESS) {
 694		return ERR_PTR(ret);
 695	} else if (ret != 0) {
 696		dev_put(skb->dev);
 697		kfree_skb(skb);
 698		aead_request_free(req);
 699		macsec_txsa_put(tx_sa);
 700		return ERR_PTR(-EINVAL);
 701	}
 702
 703	dev_put(skb->dev);
 704	aead_request_free(req);
 705	macsec_txsa_put(tx_sa);
 706
 707	return skb;
 708}
 709
 710static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
 711{
 712	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
 713	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
 714	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
 715	u32 lowest_pn = 0;
 716
 717	spin_lock(&rx_sa->lock);
 718	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
 719		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;
 720
 721	/* Now perform replay protection check again
 722	 * (see IEEE 802.1AE-2006 figure 10-5)
 723	 */
 724	if (secy->replay_protect && pn < lowest_pn &&
 725	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
 726		spin_unlock(&rx_sa->lock);
 727		u64_stats_update_begin(&rxsc_stats->syncp);
 728		rxsc_stats->stats.InPktsLate++;
 729		u64_stats_update_end(&rxsc_stats->syncp);
 730		DEV_STATS_INC(secy->netdev, rx_dropped);
 731		return false;
 732	}
 733
 734	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
 735		unsigned int msdu_len = macsec_msdu_len(skb);
 736		u64_stats_update_begin(&rxsc_stats->syncp);
 737		if (hdr->tci_an & MACSEC_TCI_E)
 738			rxsc_stats->stats.InOctetsDecrypted += msdu_len;
 739		else
 740			rxsc_stats->stats.InOctetsValidated += msdu_len;
 741		u64_stats_update_end(&rxsc_stats->syncp);
 742	}
 743
 744	if (!macsec_skb_cb(skb)->valid) {
 745		spin_unlock(&rx_sa->lock);
 746
 747		/* 10.6.5 */
 748		if (hdr->tci_an & MACSEC_TCI_C ||
 749		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
 750			u64_stats_update_begin(&rxsc_stats->syncp);
 751			rxsc_stats->stats.InPktsNotValid++;
 752			u64_stats_update_end(&rxsc_stats->syncp);
 753			this_cpu_inc(rx_sa->stats->InPktsNotValid);
 754			DEV_STATS_INC(secy->netdev, rx_errors);
 755			return false;
 756		}
 757
 758		u64_stats_update_begin(&rxsc_stats->syncp);
 759		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
 760			rxsc_stats->stats.InPktsInvalid++;
 761			this_cpu_inc(rx_sa->stats->InPktsInvalid);
 762		} else if (pn < lowest_pn) {
 763			rxsc_stats->stats.InPktsDelayed++;
 764		} else {
 765			rxsc_stats->stats.InPktsUnchecked++;
 766		}
 767		u64_stats_update_end(&rxsc_stats->syncp);
 768	} else {
 769		u64_stats_update_begin(&rxsc_stats->syncp);
 770		if (pn < lowest_pn) {
 771			rxsc_stats->stats.InPktsDelayed++;
 772		} else {
 773			rxsc_stats->stats.InPktsOK++;
 774			this_cpu_inc(rx_sa->stats->InPktsOK);
 775		}
 776		u64_stats_update_end(&rxsc_stats->syncp);
 777
  778		/* use "pn + 1 >" rather than "pn >=" to handle PN overflow in XPN */
 779		if (pn + 1 > rx_sa->next_pn_halves.lower) {
 780			rx_sa->next_pn_halves.lower = pn + 1;
 781		} else if (secy->xpn &&
 782			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
 783			rx_sa->next_pn_halves.upper++;
 784			rx_sa->next_pn_halves.lower = pn + 1;
 785		}
 786
 787		spin_unlock(&rx_sa->lock);
 788	}
 789
 790	return true;
 791}
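
/* Editor's note (worked example, illustrative): with replay protection on,
 * replay_window = 64 and next_pn_halves.lower = 1000, lowest_pn is 936.
 * A frame with pn = 900 is counted InPktsLate and dropped; pn = 1005 with a
 * valid ICV is InPktsOK and advances the expected PN to 1006; pn = 950 that
 * fails validation under the "check" validation mode is InPktsInvalid but
 * is still delivered.
 */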
 792
 793static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
 794{
 795	skb->pkt_type = PACKET_HOST;
 796	skb->protocol = eth_type_trans(skb, dev);
 797
 798	skb_reset_network_header(skb);
 799	if (!skb_transport_header_was_set(skb))
 800		skb_reset_transport_header(skb);
 801	skb_reset_mac_len(skb);
 802}
 803
 804static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
 805{
 806	skb->ip_summed = CHECKSUM_NONE;
 807	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
 808	skb_pull(skb, hdr_len);
 809	pskb_trim_unique(skb, skb->len - icv_len);
 810}
 811
 812static void count_rx(struct net_device *dev, int len)
 813{
 814	dev_sw_netstats_rx_add(dev, len);
 815}
 816
 817static void macsec_decrypt_done(void *data, int err)
 818{
 819	struct sk_buff *skb = data;
 820	struct net_device *dev = skb->dev;
 821	struct macsec_dev *macsec = macsec_priv(dev);
 822	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
 823	struct macsec_rx_sc *rx_sc = rx_sa->sc;
 824	int len;
 825	u32 pn;
 826
 827	aead_request_free(macsec_skb_cb(skb)->req);
 828
 829	if (!err)
 830		macsec_skb_cb(skb)->valid = true;
 831
 832	rcu_read_lock_bh();
 833	pn = ntohl(macsec_ethhdr(skb)->packet_number);
 834	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
 835		rcu_read_unlock_bh();
 836		kfree_skb(skb);
 837		goto out;
 838	}
 839
 840	macsec_finalize_skb(skb, macsec->secy.icv_len,
 841			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
 842	len = skb->len;
 843	macsec_reset_skb(skb, macsec->secy.netdev);
 844
 845	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
 846		count_rx(dev, len);
 847
 848	rcu_read_unlock_bh();
 849
 850out:
 851	macsec_rxsa_put(rx_sa);
 852	macsec_rxsc_put(rx_sc);
 853	dev_put(dev);
 854}
 855
 856static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 857				      struct net_device *dev,
 858				      struct macsec_rx_sa *rx_sa,
 859				      sci_t sci,
 860				      struct macsec_secy *secy)
 861{
 862	int ret;
 863	struct scatterlist *sg;
 864	struct sk_buff *trailer;
 865	unsigned char *iv;
 866	struct aead_request *req;
 867	struct macsec_eth_header *hdr;
 868	u32 hdr_pn;
 869	u16 icv_len = secy->icv_len;
 870
 871	macsec_skb_cb(skb)->valid = false;
 872	skb = skb_share_check(skb, GFP_ATOMIC);
 873	if (!skb)
 874		return ERR_PTR(-ENOMEM);
 875
 876	ret = skb_cow_data(skb, 0, &trailer);
 877	if (unlikely(ret < 0)) {
 878		kfree_skb(skb);
 879		return ERR_PTR(ret);
 880	}
 881	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
 882	if (!req) {
 883		kfree_skb(skb);
 884		return ERR_PTR(-ENOMEM);
 885	}
 886
 887	hdr = (struct macsec_eth_header *)skb->data;
 888	hdr_pn = ntohl(hdr->packet_number);
 889
 890	if (secy->xpn) {
 891		pn_t recovered_pn = rx_sa->next_pn_halves;
 892
 893		recovered_pn.lower = hdr_pn;
 894		if (hdr_pn < rx_sa->next_pn_halves.lower &&
 895		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
 896			recovered_pn.upper++;
 897
 898		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
 899				   rx_sa->key.salt);
 900	} else {
 901		macsec_fill_iv(iv, sci, hdr_pn);
 902	}
 903
 904	sg_init_table(sg, ret);
 905	ret = skb_to_sgvec(skb, sg, 0, skb->len);
 906	if (unlikely(ret < 0)) {
 907		aead_request_free(req);
 908		kfree_skb(skb);
 909		return ERR_PTR(ret);
 910	}
 911
 912	if (hdr->tci_an & MACSEC_TCI_E) {
 913		/* confidentiality: ethernet + macsec header
 914		 * authenticated, encrypted payload
 915		 */
 916		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);
 917
 918		aead_request_set_crypt(req, sg, sg, len, iv);
 919		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
 920		skb = skb_unshare(skb, GFP_ATOMIC);
 921		if (!skb) {
 922			aead_request_free(req);
 923			return ERR_PTR(-ENOMEM);
 924		}
 925	} else {
 926		/* integrity only: all headers + data authenticated */
 927		aead_request_set_crypt(req, sg, sg, icv_len, iv);
 928		aead_request_set_ad(req, skb->len - icv_len);
 929	}
 930
 931	macsec_skb_cb(skb)->req = req;
 932	skb->dev = dev;
 933	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);
 934
 935	dev_hold(dev);
 936	ret = crypto_aead_decrypt(req);
 937	if (ret == -EINPROGRESS) {
 938		return ERR_PTR(ret);
 939	} else if (ret != 0) {
 940		/* decryption/authentication failed
 941		 * 10.6 if validateFrames is disabled, deliver anyway
 942		 */
 943		if (ret != -EBADMSG) {
 944			kfree_skb(skb);
 945			skb = ERR_PTR(ret);
 946		}
 947	} else {
 948		macsec_skb_cb(skb)->valid = true;
 949	}
 950	dev_put(dev);
 951
 952	aead_request_free(req);
 953
 954	return skb;
 955}
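
/* Editor's note (worked example, illustrative): the XPN branch above
 * reconstructs the full 64-bit PN from the 32 bits carried in the SecTAG.
 * If the SA expects next_pn_halves = { .upper = 2, .lower = 0xfffffff0 } and
 * a frame arrives with hdr_pn = 0x00000005, then hdr_pn < lower and the two
 * values sit in different 2^31 halves, so recovered_pn.upper is bumped to 3
 * and the IV is computed with (3ULL << 32) | 5.
 */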
 956
 957static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
 958{
 959	struct macsec_rx_sc *rx_sc;
 960
 961	for_each_rxsc(secy, rx_sc) {
 962		if (rx_sc->sci == sci)
 963			return rx_sc;
 964	}
 965
 966	return NULL;
 967}
 968
 969static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
 970{
 971	struct macsec_rx_sc *rx_sc;
 972
 973	for_each_rxsc_rtnl(secy, rx_sc) {
 974		if (rx_sc->sci == sci)
 975			return rx_sc;
 976	}
 977
 978	return NULL;
 979}
 980
 981static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
 982{
 983	/* Deliver to the uncontrolled port by default */
 984	enum rx_handler_result ret = RX_HANDLER_PASS;
 985	struct ethhdr *hdr = eth_hdr(skb);
 986	struct metadata_dst *md_dst;
 987	struct macsec_rxh_data *rxd;
 988	struct macsec_dev *macsec;
 989	bool is_macsec_md_dst;
 990
 991	rcu_read_lock();
 992	rxd = macsec_data_rcu(skb->dev);
 993	md_dst = skb_metadata_dst(skb);
 994	is_macsec_md_dst = md_dst && md_dst->type == METADATA_MACSEC;
 995
 996	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
 997		struct sk_buff *nskb;
 998		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
 999		struct net_device *ndev = macsec->secy.netdev;
1000
1001		/* If h/w offloading is enabled, HW decodes frames and strips
1002		 * the SecTAG, so we have to deduce which port to deliver to.
1003		 */
1004		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
1005			const struct macsec_ops *ops;
1006
1007			ops = macsec_get_ops(macsec, NULL);
1008
1009			if (ops->rx_uses_md_dst && !is_macsec_md_dst)
1010				continue;
1011
1012			if (is_macsec_md_dst) {
1013				struct macsec_rx_sc *rx_sc;
1014
1015				/* All drivers that implement MACsec offload
1016				 * support using skb metadata destinations must
1017				 * indicate that they do so.
1018				 */
1019				DEBUG_NET_WARN_ON_ONCE(!ops->rx_uses_md_dst);
1020				rx_sc = find_rx_sc(&macsec->secy,
1021						   md_dst->u.macsec_info.sci);
1022				if (!rx_sc)
1023					continue;
1024				/* device indicated macsec offload occurred */
1025				skb->dev = ndev;
1026				skb->pkt_type = PACKET_HOST;
1027				eth_skb_pkt_type(skb, ndev);
1028				ret = RX_HANDLER_ANOTHER;
1029				goto out;
1030			}
1031
1032			/* This datapath is insecure because it is unable to
1033			 * enforce isolation of broadcast/multicast traffic and
1034			 * unicast traffic with promiscuous mode on the macsec
1035			 * netdev. Since the core stack has no mechanism to
1036			 * check that the hardware did indeed receive MACsec
1037			 * traffic, it is possible that the response handling
1038			 * done by the MACsec port was to a plaintext packet.
1039			 * This violates the MACsec protocol standard.
1040			 */
1041			if (ether_addr_equal_64bits(hdr->h_dest,
1042						    ndev->dev_addr)) {
1043				/* exact match, divert skb to this port */
1044				skb->dev = ndev;
1045				skb->pkt_type = PACKET_HOST;
1046				ret = RX_HANDLER_ANOTHER;
1047				goto out;
1048			} else if (is_multicast_ether_addr_64bits(
1049					   hdr->h_dest)) {
1050				/* multicast frame, deliver on this port too */
1051				nskb = skb_clone(skb, GFP_ATOMIC);
1052				if (!nskb)
1053					break;
1054
1055				nskb->dev = ndev;
1056				eth_skb_pkt_type(nskb, ndev);
1057
1058				__netif_rx(nskb);
1059			} else if (ndev->flags & IFF_PROMISC) {
1060				skb->dev = ndev;
1061				skb->pkt_type = PACKET_HOST;
1062				ret = RX_HANDLER_ANOTHER;
1063				goto out;
1064			}
1065
1066			continue;
1067		}
1068
1069		/* 10.6 If the management control validateFrames is not
1070		 * Strict, frames without a SecTAG are received, counted, and
1071		 * delivered to the Controlled Port
1072		 */
1073		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
1074			u64_stats_update_begin(&secy_stats->syncp);
1075			secy_stats->stats.InPktsNoTag++;
1076			u64_stats_update_end(&secy_stats->syncp);
1077			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
1078			continue;
1079		}
1080
1081		/* deliver on this port */
1082		nskb = skb_clone(skb, GFP_ATOMIC);
1083		if (!nskb)
1084			break;
1085
1086		nskb->dev = ndev;
1087
1088		if (__netif_rx(nskb) == NET_RX_SUCCESS) {
1089			u64_stats_update_begin(&secy_stats->syncp);
1090			secy_stats->stats.InPktsUntagged++;
1091			u64_stats_update_end(&secy_stats->syncp);
1092		}
1093	}
1094
1095out:
1096	rcu_read_unlock();
1097	return ret;
1098}
1099
1100static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
1101{
1102	struct sk_buff *skb = *pskb;
1103	struct net_device *dev = skb->dev;
1104	struct macsec_eth_header *hdr;
1105	struct macsec_secy *secy = NULL;
1106	struct macsec_rx_sc *rx_sc;
1107	struct macsec_rx_sa *rx_sa;
1108	struct macsec_rxh_data *rxd;
1109	struct macsec_dev *macsec;
1110	unsigned int len;
1111	sci_t sci;
1112	u32 hdr_pn;
1113	bool cbit;
1114	struct pcpu_rx_sc_stats *rxsc_stats;
1115	struct pcpu_secy_stats *secy_stats;
1116	bool pulled_sci;
1117	int ret;
1118
1119	if (skb_headroom(skb) < ETH_HLEN)
1120		goto drop_direct;
1121
1122	hdr = macsec_ethhdr(skb);
1123	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
1124		return handle_not_macsec(skb);
1125
1126	skb = skb_unshare(skb, GFP_ATOMIC);
1127	*pskb = skb;
1128	if (!skb)
1129		return RX_HANDLER_CONSUMED;
1130
1131	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
1132	if (!pulled_sci) {
1133		if (!pskb_may_pull(skb, macsec_extra_len(false)))
1134			goto drop_direct;
1135	}
1136
1137	hdr = macsec_ethhdr(skb);
1138
1139	/* Frames with a SecTAG that has the TCI E bit set but the C
1140	 * bit clear are discarded, as this reserved encoding is used
1141	 * to identify frames with a SecTAG that are not to be
1142	 * delivered to the Controlled Port.
1143	 */
1144	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
1145		return RX_HANDLER_PASS;
1146
1147	/* now, pull the extra length */
1148	if (hdr->tci_an & MACSEC_TCI_SC) {
1149		if (!pulled_sci)
1150			goto drop_direct;
1151	}
1152
1153	/* ethernet header is part of crypto processing */
1154	skb_push(skb, ETH_HLEN);
1155
1156	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
1157	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
1158	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);
1159
1160	rcu_read_lock();
1161	rxd = macsec_data_rcu(skb->dev);
1162
1163	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1164		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
1165
1166		sc = sc ? macsec_rxsc_get(sc) : NULL;
1167
1168		if (sc) {
1169			secy = &macsec->secy;
1170			rx_sc = sc;
1171			break;
1172		}
1173	}
1174
1175	if (!secy)
1176		goto nosci;
1177
1178	dev = secy->netdev;
1179	macsec = macsec_priv(dev);
1180	secy_stats = this_cpu_ptr(macsec->stats);
1181	rxsc_stats = this_cpu_ptr(rx_sc->stats);
1182
1183	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
1184		u64_stats_update_begin(&secy_stats->syncp);
1185		secy_stats->stats.InPktsBadTag++;
1186		u64_stats_update_end(&secy_stats->syncp);
1187		DEV_STATS_INC(secy->netdev, rx_errors);
1188		goto drop_nosa;
1189	}
1190
1191	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
1192	if (!rx_sa) {
1193		/* 10.6.1 if the SA is not in use */
1194
1195		/* If validateFrames is Strict or the C bit in the
1196		 * SecTAG is set, discard
1197		 */
1198		if (hdr->tci_an & MACSEC_TCI_C ||
1199		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
1200			u64_stats_update_begin(&rxsc_stats->syncp);
1201			rxsc_stats->stats.InPktsNotUsingSA++;
1202			u64_stats_update_end(&rxsc_stats->syncp);
1203			DEV_STATS_INC(secy->netdev, rx_errors);
1204			goto drop_nosa;
1205		}
1206
1207		/* not Strict, the frame (with the SecTAG and ICV
1208		 * removed) is delivered to the Controlled Port.
1209		 */
1210		u64_stats_update_begin(&rxsc_stats->syncp);
1211		rxsc_stats->stats.InPktsUnusedSA++;
1212		u64_stats_update_end(&rxsc_stats->syncp);
1213		goto deliver;
1214	}
1215
1216	/* First, PN check to avoid decrypting obviously wrong packets */
1217	hdr_pn = ntohl(hdr->packet_number);
1218	if (secy->replay_protect) {
1219		bool late;
1220
1221		spin_lock(&rx_sa->lock);
1222		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
1223		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);
1224
1225		if (secy->xpn)
1226			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
1227		spin_unlock(&rx_sa->lock);
1228
1229		if (late) {
1230			u64_stats_update_begin(&rxsc_stats->syncp);
1231			rxsc_stats->stats.InPktsLate++;
1232			u64_stats_update_end(&rxsc_stats->syncp);
1233			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
1234			goto drop;
1235		}
1236	}
1237
1238	macsec_skb_cb(skb)->rx_sa = rx_sa;
1239
  1240	/* validateFrames Disabled && !changed text (C bit clear) => skip validation */
1241	if (hdr->tci_an & MACSEC_TCI_C ||
1242	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
1243		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
1244
1245	if (IS_ERR(skb)) {
1246		/* the decrypt callback needs the reference */
1247		if (PTR_ERR(skb) != -EINPROGRESS) {
1248			macsec_rxsa_put(rx_sa);
1249			macsec_rxsc_put(rx_sc);
1250		}
1251		rcu_read_unlock();
1252		*pskb = NULL;
1253		return RX_HANDLER_CONSUMED;
1254	}
1255
1256	if (!macsec_post_decrypt(skb, secy, hdr_pn))
1257		goto drop;
1258
1259deliver:
1260	macsec_finalize_skb(skb, secy->icv_len,
1261			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
1262	len = skb->len;
1263	macsec_reset_skb(skb, secy->netdev);
1264
1265	if (rx_sa)
1266		macsec_rxsa_put(rx_sa);
1267	macsec_rxsc_put(rx_sc);
1268
1269	skb_orphan(skb);
1270	ret = gro_cells_receive(&macsec->gro_cells, skb);
1271	if (ret == NET_RX_SUCCESS)
1272		count_rx(dev, len);
1273	else
1274		DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
1275
1276	rcu_read_unlock();
1277
1278	*pskb = NULL;
1279	return RX_HANDLER_CONSUMED;
1280
1281drop:
1282	macsec_rxsa_put(rx_sa);
1283drop_nosa:
1284	macsec_rxsc_put(rx_sc);
1285	rcu_read_unlock();
1286drop_direct:
1287	kfree_skb(skb);
1288	*pskb = NULL;
1289	return RX_HANDLER_CONSUMED;
1290
1291nosci:
1292	/* 10.6.1 if the SC is not found */
1293	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
1294	if (!cbit)
1295		macsec_finalize_skb(skb, MACSEC_DEFAULT_ICV_LEN,
1296				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
1297
1298	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1299		struct sk_buff *nskb;
1300
1301		secy_stats = this_cpu_ptr(macsec->stats);
1302
1303		/* If validateFrames is Strict or the C bit in the
1304		 * SecTAG is set, discard
1305		 */
1306		if (cbit ||
1307		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
1308			u64_stats_update_begin(&secy_stats->syncp);
1309			secy_stats->stats.InPktsNoSCI++;
1310			u64_stats_update_end(&secy_stats->syncp);
1311			DEV_STATS_INC(macsec->secy.netdev, rx_errors);
1312			continue;
1313		}
1314
  1315	/* not Strict, the frame (with the SecTAG and ICV
1316		 * removed) is delivered to the Controlled Port.
1317		 */
1318		nskb = skb_clone(skb, GFP_ATOMIC);
1319		if (!nskb)
1320			break;
1321
1322		macsec_reset_skb(nskb, macsec->secy.netdev);
1323
1324		ret = __netif_rx(nskb);
1325		if (ret == NET_RX_SUCCESS) {
1326			u64_stats_update_begin(&secy_stats->syncp);
1327			secy_stats->stats.InPktsUnknownSCI++;
1328			u64_stats_update_end(&secy_stats->syncp);
1329		} else {
1330			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
1331		}
1332	}
1333
1334	rcu_read_unlock();
1335	*pskb = skb;
1336	return RX_HANDLER_PASS;
1337}
1338
1339static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
1340{
1341	struct crypto_aead *tfm;
1342	int ret;
1343
1344	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
1345
1346	if (IS_ERR(tfm))
1347		return tfm;
1348
1349	ret = crypto_aead_setkey(tfm, key, key_len);
1350	if (ret < 0)
1351		goto fail;
1352
1353	ret = crypto_aead_setauthsize(tfm, icv_len);
1354	if (ret < 0)
1355		goto fail;
1356
1357	return tfm;
1358fail:
1359	crypto_free_aead(tfm);
1360	return ERR_PTR(ret);
1361}
1362
1363static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
1364		      int icv_len)
1365{
1366	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
1367	if (!rx_sa->stats)
1368		return -ENOMEM;
1369
1370	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
1371	if (IS_ERR(rx_sa->key.tfm)) {
1372		free_percpu(rx_sa->stats);
1373		return PTR_ERR(rx_sa->key.tfm);
1374	}
1375
1376	rx_sa->ssci = MACSEC_UNDEF_SSCI;
1377	rx_sa->active = false;
1378	rx_sa->next_pn = 1;
1379	refcount_set(&rx_sa->refcnt, 1);
1380	spin_lock_init(&rx_sa->lock);
1381
1382	return 0;
1383}
1384
1385static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
1386{
1387	rx_sa->active = false;
1388
1389	macsec_rxsa_put(rx_sa);
1390}
1391
1392static void free_rx_sc(struct macsec_rx_sc *rx_sc)
1393{
1394	int i;
1395
1396	for (i = 0; i < MACSEC_NUM_AN; i++) {
1397		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);
1398
1399		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
1400		if (sa)
1401			clear_rx_sa(sa);
1402	}
1403
1404	macsec_rxsc_put(rx_sc);
1405}
1406
1407static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
1408{
1409	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;
1410
1411	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
1412	     rx_sc;
1413	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
1414		if (rx_sc->sci == sci) {
1415			if (rx_sc->active)
1416				secy->n_rx_sc--;
1417			rcu_assign_pointer(*rx_scp, rx_sc->next);
1418			return rx_sc;
1419		}
1420	}
1421
1422	return NULL;
1423}
1424
1425static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci,
1426					 bool active)
1427{
1428	struct macsec_rx_sc *rx_sc;
1429	struct macsec_dev *macsec;
1430	struct net_device *real_dev = macsec_priv(dev)->real_dev;
1431	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
1432	struct macsec_secy *secy;
1433
1434	list_for_each_entry(macsec, &rxd->secys, secys) {
1435		if (find_rx_sc_rtnl(&macsec->secy, sci))
1436			return ERR_PTR(-EEXIST);
1437	}
1438
1439	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
1440	if (!rx_sc)
1441		return ERR_PTR(-ENOMEM);
1442
1443	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
1444	if (!rx_sc->stats) {
1445		kfree(rx_sc);
1446		return ERR_PTR(-ENOMEM);
1447	}
1448
1449	rx_sc->sci = sci;
1450	rx_sc->active = active;
1451	refcount_set(&rx_sc->refcnt, 1);
1452
1453	secy = &macsec_priv(dev)->secy;
1454	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
1455	rcu_assign_pointer(secy->rx_sc, rx_sc);
1456
1457	if (rx_sc->active)
1458		secy->n_rx_sc++;
1459
1460	return rx_sc;
1461}
1462
1463static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
1464		      int icv_len)
1465{
1466	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
1467	if (!tx_sa->stats)
1468		return -ENOMEM;
1469
1470	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
1471	if (IS_ERR(tx_sa->key.tfm)) {
1472		free_percpu(tx_sa->stats);
1473		return PTR_ERR(tx_sa->key.tfm);
1474	}
1475
1476	tx_sa->ssci = MACSEC_UNDEF_SSCI;
1477	tx_sa->active = false;
1478	refcount_set(&tx_sa->refcnt, 1);
1479	spin_lock_init(&tx_sa->lock);
1480
1481	return 0;
1482}
1483
1484static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
1485{
1486	tx_sa->active = false;
1487
1488	macsec_txsa_put(tx_sa);
1489}
1490
1491static struct genl_family macsec_fam;
1492
1493static struct net_device *get_dev_from_nl(struct net *net,
1494					  struct nlattr **attrs)
1495{
1496	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
1497	struct net_device *dev;
1498
1499	dev = __dev_get_by_index(net, ifindex);
1500	if (!dev)
1501		return ERR_PTR(-ENODEV);
1502
1503	if (!netif_is_macsec(dev))
1504		return ERR_PTR(-ENODEV);
1505
1506	return dev;
1507}
1508
1509static enum macsec_offload nla_get_offload(const struct nlattr *nla)
1510{
1511	return (__force enum macsec_offload)nla_get_u8(nla);
1512}
1513
1514static sci_t nla_get_sci(const struct nlattr *nla)
1515{
1516	return (__force sci_t)nla_get_u64(nla);
1517}
1518
1519static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
1520		       int padattr)
1521{
1522	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
1523}
1524
1525static ssci_t nla_get_ssci(const struct nlattr *nla)
1526{
1527	return (__force ssci_t)nla_get_u32(nla);
1528}
1529
1530static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
1531{
  1532	return nla_put_u32(skb, attrtype, (__force u32)value);
1533}
1534
1535static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
1536					     struct nlattr **attrs,
1537					     struct nlattr **tb_sa,
1538					     struct net_device **devp,
1539					     struct macsec_secy **secyp,
1540					     struct macsec_tx_sc **scp,
1541					     u8 *assoc_num)
1542{
1543	struct net_device *dev;
1544	struct macsec_secy *secy;
1545	struct macsec_tx_sc *tx_sc;
1546	struct macsec_tx_sa *tx_sa;
1547
1548	if (!tb_sa[MACSEC_SA_ATTR_AN])
1549		return ERR_PTR(-EINVAL);
1550
1551	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1552
1553	dev = get_dev_from_nl(net, attrs);
1554	if (IS_ERR(dev))
1555		return ERR_CAST(dev);
1556
1557	if (*assoc_num >= MACSEC_NUM_AN)
1558		return ERR_PTR(-EINVAL);
1559
1560	secy = &macsec_priv(dev)->secy;
1561	tx_sc = &secy->tx_sc;
1562
1563	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
1564	if (!tx_sa)
1565		return ERR_PTR(-ENODEV);
1566
1567	*devp = dev;
1568	*scp = tx_sc;
1569	*secyp = secy;
1570	return tx_sa;
1571}
1572
1573static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
1574					     struct nlattr **attrs,
1575					     struct nlattr **tb_rxsc,
1576					     struct net_device **devp,
1577					     struct macsec_secy **secyp)
1578{
1579	struct net_device *dev;
1580	struct macsec_secy *secy;
1581	struct macsec_rx_sc *rx_sc;
1582	sci_t sci;
1583
1584	dev = get_dev_from_nl(net, attrs);
1585	if (IS_ERR(dev))
1586		return ERR_CAST(dev);
1587
1588	secy = &macsec_priv(dev)->secy;
1589
1590	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
1591		return ERR_PTR(-EINVAL);
1592
1593	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1594	rx_sc = find_rx_sc_rtnl(secy, sci);
1595	if (!rx_sc)
1596		return ERR_PTR(-ENODEV);
1597
1598	*secyp = secy;
1599	*devp = dev;
1600
1601	return rx_sc;
1602}
1603
1604static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
1605					     struct nlattr **attrs,
1606					     struct nlattr **tb_rxsc,
1607					     struct nlattr **tb_sa,
1608					     struct net_device **devp,
1609					     struct macsec_secy **secyp,
1610					     struct macsec_rx_sc **scp,
1611					     u8 *assoc_num)
1612{
1613	struct macsec_rx_sc *rx_sc;
1614	struct macsec_rx_sa *rx_sa;
1615
1616	if (!tb_sa[MACSEC_SA_ATTR_AN])
1617		return ERR_PTR(-EINVAL);
1618
1619	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1620	if (*assoc_num >= MACSEC_NUM_AN)
1621		return ERR_PTR(-EINVAL);
1622
1623	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
1624	if (IS_ERR(rx_sc))
1625		return ERR_CAST(rx_sc);
1626
1627	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
1628	if (!rx_sa)
1629		return ERR_PTR(-ENODEV);
1630
1631	*scp = rx_sc;
1632	return rx_sa;
1633}
1634
1635static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
1636	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
1637	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
1638	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
1639	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
1640};
1641
1642static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
1643	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
1644	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
1645};
1646
1647static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
1648	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
1649	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
1650	[MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
1651	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
1652				   .len = MACSEC_KEYID_LEN, },
1653	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
1654				 .len = MACSEC_MAX_KEY_LEN, },
1655	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
1656	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
1657				  .len = MACSEC_SALT_LEN, },
1658};
1659
1660static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
1661	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
1662};
1663
1664/* Offloads an operation to a device driver */
1665static int macsec_offload(int (* const func)(struct macsec_context *),
1666			  struct macsec_context *ctx)
1667{
1668	int ret;
1669
1670	if (unlikely(!func))
1671		return 0;
1672
1673	if (ctx->offload == MACSEC_OFFLOAD_PHY)
1674		mutex_lock(&ctx->phydev->lock);
1675
1676	ret = (*func)(ctx);
1677
1678	if (ctx->offload == MACSEC_OFFLOAD_PHY)
1679		mutex_unlock(&ctx->phydev->lock);
1680
1681	return ret;
1682}
1683
1684static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
1685{
1686	if (!attrs[MACSEC_ATTR_SA_CONFIG])
1687		return -EINVAL;
1688
1689	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
1690		return -EINVAL;
1691
1692	return 0;
1693}
1694
1695static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
1696{
1697	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
1698		return -EINVAL;
1699
1700	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
1701		return -EINVAL;
1702
1703	return 0;
1704}
1705
1706static bool validate_add_rxsa(struct nlattr **attrs)
1707{
1708	if (!attrs[MACSEC_SA_ATTR_AN] ||
1709	    !attrs[MACSEC_SA_ATTR_KEY] ||
1710	    !attrs[MACSEC_SA_ATTR_KEYID])
1711		return false;
1712
1713	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1714		return false;
1715
1716	if (attrs[MACSEC_SA_ATTR_PN] &&
1717	    nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
1718		return false;
1719
1720	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1721		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1722			return false;
1723	}
1724
1725	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1726		return false;
1727
1728	return true;
1729}
1730
1731static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
1732{
1733	struct net_device *dev;
1734	struct nlattr **attrs = info->attrs;
1735	struct macsec_secy *secy;
1736	struct macsec_rx_sc *rx_sc;
1737	struct macsec_rx_sa *rx_sa;
1738	unsigned char assoc_num;
1739	int pn_len;
1740	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1741	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1742	int err;
1743
1744	if (!attrs[MACSEC_ATTR_IFINDEX])
1745		return -EINVAL;
1746
1747	if (parse_sa_config(attrs, tb_sa))
1748		return -EINVAL;
1749
1750	if (parse_rxsc_config(attrs, tb_rxsc))
1751		return -EINVAL;
1752
1753	if (!validate_add_rxsa(tb_sa))
1754		return -EINVAL;
1755
1756	rtnl_lock();
1757	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
1758	if (IS_ERR(rx_sc)) {
1759		rtnl_unlock();
1760		return PTR_ERR(rx_sc);
1761	}
1762
1763	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1764
1765	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
1766		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
1767			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
1768		rtnl_unlock();
1769		return -EINVAL;
1770	}
1771
1772	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
1773	if (tb_sa[MACSEC_SA_ATTR_PN] &&
1774	    nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
1775		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
1776			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
1777		rtnl_unlock();
1778		return -EINVAL;
1779	}
1780
1781	if (secy->xpn) {
1782		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
1783			rtnl_unlock();
1784			return -EINVAL;
1785		}
1786
1787		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
1788			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
1789				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
1790				  MACSEC_SALT_LEN);
1791			rtnl_unlock();
1792			return -EINVAL;
1793		}
1794	}
1795
1796	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
1797	if (rx_sa) {
1798		rtnl_unlock();
1799		return -EBUSY;
1800	}
1801
1802	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
1803	if (!rx_sa) {
1804		rtnl_unlock();
1805		return -ENOMEM;
1806	}
1807
1808	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1809			 secy->key_len, secy->icv_len);
1810	if (err < 0) {
1811		kfree(rx_sa);
1812		rtnl_unlock();
1813		return err;
1814	}
1815
1816	if (tb_sa[MACSEC_SA_ATTR_PN]) {
1817		spin_lock_bh(&rx_sa->lock);
1818		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
1819		spin_unlock_bh(&rx_sa->lock);
1820	}
1821
1822	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
1823		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
1824
1825	rx_sa->sc = rx_sc;
1826
1827	if (secy->xpn) {
1828		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
1829		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
1830			   MACSEC_SALT_LEN);
1831	}
1832
1833	/* If h/w offloading is available, propagate to the device */
1834	if (macsec_is_offloaded(netdev_priv(dev))) {
1835		const struct macsec_ops *ops;
1836		struct macsec_context ctx;
1837
1838		ops = macsec_get_ops(netdev_priv(dev), &ctx);
1839		if (!ops) {
1840			err = -EOPNOTSUPP;
1841			goto cleanup;
1842		}
1843
1844		ctx.sa.assoc_num = assoc_num;
1845		ctx.sa.rx_sa = rx_sa;
1846		ctx.secy = secy;
1847		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1848		       secy->key_len);
1849
1850		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
1851		memzero_explicit(ctx.sa.key, secy->key_len);
1852		if (err)
1853			goto cleanup;
1854	}
1855
1856	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
1857	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
1858
1859	rtnl_unlock();
1860
1861	return 0;
1862
1863cleanup:
1864	macsec_rxsa_put(rx_sa);
1865	rtnl_unlock();
1866	return err;
1867}
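
/* Editor's note (illustrative): this handler implements the
 * MACSEC_CMD_ADD_RXSA genetlink command.  With iproute2 the equivalent
 * configuration is roughly (interface, SCI, key id and key are example
 * values):
 *
 *	ip macsec add macsec0 rx sci 00163eaabbcc0001
 *	ip macsec add macsec0 rx sci 00163eaabbcc0001 sa 0 pn 1 on \
 *		key 01 81818181818181818181818181818181
 */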
1868
1869static bool validate_add_rxsc(struct nlattr **attrs)
1870{
1871	if (!attrs[MACSEC_RXSC_ATTR_SCI])
1872		return false;
1873
1874	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
1875		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
1876			return false;
1877	}
1878
1879	return true;
1880}
1881
1882static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
1883{
1884	struct net_device *dev;
1885	sci_t sci = MACSEC_UNDEF_SCI;
1886	struct nlattr **attrs = info->attrs;
1887	struct macsec_rx_sc *rx_sc;
1888	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1889	struct macsec_secy *secy;
1890	bool active = true;
1891	int ret;
1892
1893	if (!attrs[MACSEC_ATTR_IFINDEX])
1894		return -EINVAL;
1895
1896	if (parse_rxsc_config(attrs, tb_rxsc))
1897		return -EINVAL;
1898
1899	if (!validate_add_rxsc(tb_rxsc))
1900		return -EINVAL;
1901
1902	rtnl_lock();
1903	dev = get_dev_from_nl(genl_info_net(info), attrs);
1904	if (IS_ERR(dev)) {
1905		rtnl_unlock();
1906		return PTR_ERR(dev);
1907	}
1908
1909	secy = &macsec_priv(dev)->secy;
1910	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1911
1912	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
1913		active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
1914
1915	rx_sc = create_rx_sc(dev, sci, active);
1916	if (IS_ERR(rx_sc)) {
1917		rtnl_unlock();
1918		return PTR_ERR(rx_sc);
1919	}
1920
1921	if (macsec_is_offloaded(netdev_priv(dev))) {
1922		const struct macsec_ops *ops;
1923		struct macsec_context ctx;
1924
1925		ops = macsec_get_ops(netdev_priv(dev), &ctx);
1926		if (!ops) {
1927			ret = -EOPNOTSUPP;
1928			goto cleanup;
1929		}
1930
1931		ctx.rx_sc = rx_sc;
1932		ctx.secy = secy;
1933
1934		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
1935		if (ret)
1936			goto cleanup;
1937	}
1938
1939	rtnl_unlock();
1940
1941	return 0;
1942
1943cleanup:
1944	del_rx_sc(secy, sci);
1945	free_rx_sc(rx_sc);
1946	rtnl_unlock();
1947	return ret;
1948}
1949
1950static bool validate_add_txsa(struct nlattr **attrs)
1951{
1952	if (!attrs[MACSEC_SA_ATTR_AN] ||
1953	    !attrs[MACSEC_SA_ATTR_PN] ||
1954	    !attrs[MACSEC_SA_ATTR_KEY] ||
1955	    !attrs[MACSEC_SA_ATTR_KEYID])
1956		return false;
1957
1958	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1959		return false;
1960
1961	if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
1962		return false;
1963
1964	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1965		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1966			return false;
1967	}
1968
1969	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1970		return false;
1971
1972	return true;
1973}
1974
1975static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
1976{
1977	struct net_device *dev;
1978	struct nlattr **attrs = info->attrs;
1979	struct macsec_secy *secy;
1980	struct macsec_tx_sc *tx_sc;
1981	struct macsec_tx_sa *tx_sa;
1982	unsigned char assoc_num;
1983	int pn_len;
1984	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1985	bool was_operational;
1986	int err;
1987
1988	if (!attrs[MACSEC_ATTR_IFINDEX])
1989		return -EINVAL;
1990
1991	if (parse_sa_config(attrs, tb_sa))
1992		return -EINVAL;
1993
1994	if (!validate_add_txsa(tb_sa))
1995		return -EINVAL;
1996
1997	rtnl_lock();
1998	dev = get_dev_from_nl(genl_info_net(info), attrs);
1999	if (IS_ERR(dev)) {
2000		rtnl_unlock();
2001		return PTR_ERR(dev);
2002	}
2003
2004	secy = &macsec_priv(dev)->secy;
2005	tx_sc = &secy->tx_sc;
2006
2007	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
2008
2009	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
2010		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
2011			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
2012		rtnl_unlock();
2013		return -EINVAL;
2014	}
2015
2016	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2017	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2018		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
2019			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2020		rtnl_unlock();
2021		return -EINVAL;
2022	}
2023
2024	if (secy->xpn) {
2025		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
2026			rtnl_unlock();
2027			return -EINVAL;
2028		}
2029
2030		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
2031			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
2032				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
2033				  MACSEC_SALT_LEN);
2034			rtnl_unlock();
2035			return -EINVAL;
2036		}
2037	}
2038
2039	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
2040	if (tx_sa) {
2041		rtnl_unlock();
2042		return -EBUSY;
2043	}
2044
2045	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
2046	if (!tx_sa) {
2047		rtnl_unlock();
2048		return -ENOMEM;
2049	}
2050
2051	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2052			 secy->key_len, secy->icv_len);
2053	if (err < 0) {
2054		kfree(tx_sa);
2055		rtnl_unlock();
2056		return err;
2057	}
2058
2059	spin_lock_bh(&tx_sa->lock);
2060	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2061	spin_unlock_bh(&tx_sa->lock);
2062
2063	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2064		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2065
2066	was_operational = secy->operational;
2067	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
2068		secy->operational = true;
2069
2070	if (secy->xpn) {
2071		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
2072		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
2073			   MACSEC_SALT_LEN);
2074	}
2075
2076	/* If h/w offloading is available, propagate to the device */
2077	if (macsec_is_offloaded(netdev_priv(dev))) {
2078		const struct macsec_ops *ops;
2079		struct macsec_context ctx;
2080
2081		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2082		if (!ops) {
2083			err = -EOPNOTSUPP;
2084			goto cleanup;
2085		}
2086
2087		ctx.sa.assoc_num = assoc_num;
2088		ctx.sa.tx_sa = tx_sa;
2089		ctx.secy = secy;
2090		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2091		       secy->key_len);
2092
2093		err = macsec_offload(ops->mdo_add_txsa, &ctx);
2094		memzero_explicit(ctx.sa.key, secy->key_len);
2095		if (err)
2096			goto cleanup;
2097	}
2098
2099	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
2100	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
2101
2102	rtnl_unlock();
2103
2104	return 0;
2105
2106cleanup:
2107	secy->operational = was_operational;
2108	macsec_txsa_put(tx_sa);
2109	rtnl_unlock();
2110	return err;
2111}
2112
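/* MACSEC_CMD_DEL_RXSA handler: an SA must be deactivated before it can
 * be deleted (-EBUSY otherwise). The RCU pointer is cleared first and
 * clear_rx_sa() then deactivates the SA and drops its reference, so
 * in-flight readers finish safely before it is freed.
 */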
2113static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
2114{
2115	struct nlattr **attrs = info->attrs;
2116	struct net_device *dev;
2117	struct macsec_secy *secy;
2118	struct macsec_rx_sc *rx_sc;
2119	struct macsec_rx_sa *rx_sa;
2120	u8 assoc_num;
2121	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2122	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2123	int ret;
2124
2125	if (!attrs[MACSEC_ATTR_IFINDEX])
2126		return -EINVAL;
2127
2128	if (parse_sa_config(attrs, tb_sa))
2129		return -EINVAL;
2130
2131	if (parse_rxsc_config(attrs, tb_rxsc))
2132		return -EINVAL;
2133
2134	rtnl_lock();
2135	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2136				 &dev, &secy, &rx_sc, &assoc_num);
2137	if (IS_ERR(rx_sa)) {
2138		rtnl_unlock();
2139		return PTR_ERR(rx_sa);
2140	}
2141
2142	if (rx_sa->active) {
2143		rtnl_unlock();
2144		return -EBUSY;
2145	}
2146
2147	/* If h/w offloading is available, propagate to the device */
2148	if (macsec_is_offloaded(netdev_priv(dev))) {
2149		const struct macsec_ops *ops;
2150		struct macsec_context ctx;
2151
2152		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2153		if (!ops) {
2154			ret = -EOPNOTSUPP;
2155			goto cleanup;
2156		}
2157
2158		ctx.sa.assoc_num = assoc_num;
2159		ctx.sa.rx_sa = rx_sa;
2160		ctx.secy = secy;
2161
2162		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
2163		if (ret)
2164			goto cleanup;
2165	}
2166
2167	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
2168	clear_rx_sa(rx_sa);
2169
2170	rtnl_unlock();
2171
2172	return 0;
2173
2174cleanup:
2175	rtnl_unlock();
2176	return ret;
2177}
2178
2179static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
2180{
2181	struct nlattr **attrs = info->attrs;
2182	struct net_device *dev;
2183	struct macsec_secy *secy;
2184	struct macsec_rx_sc *rx_sc;
2185	sci_t sci;
2186	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2187	int ret;
2188
2189	if (!attrs[MACSEC_ATTR_IFINDEX])
2190		return -EINVAL;
2191
2192	if (parse_rxsc_config(attrs, tb_rxsc))
2193		return -EINVAL;
2194
2195	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
2196		return -EINVAL;
2197
2198	rtnl_lock();
2199	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
2200	if (IS_ERR(dev)) {
2201		rtnl_unlock();
2202		return PTR_ERR(dev);
2203	}
2204
2205	secy = &macsec_priv(dev)->secy;
2206	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
2207
2208	rx_sc = del_rx_sc(secy, sci);
2209	if (!rx_sc) {
2210		rtnl_unlock();
2211		return -ENODEV;
2212	}
2213
2214	/* If h/w offloading is available, propagate to the device */
2215	if (macsec_is_offloaded(netdev_priv(dev))) {
2216		const struct macsec_ops *ops;
2217		struct macsec_context ctx;
2218
2219		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2220		if (!ops) {
2221			ret = -EOPNOTSUPP;
2222			goto cleanup;
2223		}
2224
2225		ctx.rx_sc = rx_sc;
2226		ctx.secy = secy;
2227		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
2228		if (ret)
2229			goto cleanup;
2230	}
2231
2232	free_rx_sc(rx_sc);
2233	rtnl_unlock();
2234
2235	return 0;
2236
2237cleanup:
2238	rtnl_unlock();
2239	return ret;
2240}
2241
2242static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
2243{
2244	struct nlattr **attrs = info->attrs;
2245	struct net_device *dev;
2246	struct macsec_secy *secy;
2247	struct macsec_tx_sc *tx_sc;
2248	struct macsec_tx_sa *tx_sa;
2249	u8 assoc_num;
2250	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2251	int ret;
2252
2253	if (!attrs[MACSEC_ATTR_IFINDEX])
2254		return -EINVAL;
2255
2256	if (parse_sa_config(attrs, tb_sa))
2257		return -EINVAL;
2258
2259	rtnl_lock();
2260	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2261				 &dev, &secy, &tx_sc, &assoc_num);
2262	if (IS_ERR(tx_sa)) {
2263		rtnl_unlock();
2264		return PTR_ERR(tx_sa);
2265	}
2266
2267	if (tx_sa->active) {
2268		rtnl_unlock();
2269		return -EBUSY;
2270	}
2271
2272	/* If h/w offloading is available, propagate to the device */
2273	if (macsec_is_offloaded(netdev_priv(dev))) {
2274		const struct macsec_ops *ops;
2275		struct macsec_context ctx;
2276
2277		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2278		if (!ops) {
2279			ret = -EOPNOTSUPP;
2280			goto cleanup;
2281		}
2282
2283		ctx.sa.assoc_num = assoc_num;
2284		ctx.sa.tx_sa = tx_sa;
2285		ctx.secy = secy;
2286
2287		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
2288		if (ret)
2289			goto cleanup;
2290	}
2291
2292	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
2293	clear_tx_sa(tx_sa);
2294
2295	rtnl_unlock();
2296
2297	return 0;
2298
2299cleanup:
2300	rtnl_unlock();
2301	return ret;
2302}
2303
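/* Validate the netlink attributes for an SA update: only the PN and the
 * ACTIVE flag may change on an existing SA. Key material (KEY, KEYID,
 * SSCI, SALT) is rejected here, so rekeying means installing a new SA
 * under a different AN.
 */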
2304static bool validate_upd_sa(struct nlattr **attrs)
2305{
2306	if (!attrs[MACSEC_SA_ATTR_AN] ||
2307	    attrs[MACSEC_SA_ATTR_KEY] ||
2308	    attrs[MACSEC_SA_ATTR_KEYID] ||
2309	    attrs[MACSEC_SA_ATTR_SSCI] ||
2310	    attrs[MACSEC_SA_ATTR_SALT])
2311		return false;
2312
2313	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
2314		return false;
2315
2316	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
2317		return false;
2318
2319	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
2320		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
2321			return false;
2322	}
2323
2324	return true;
2325}
2326
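/* MACSEC_CMD_UPD_TXSA handler: the previous PN, active flag and
 * operational state are snapshotted before anything is modified, so a
 * failed offload propagation can roll all of them back in the cleanup
 * path.
 */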
2327static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
2328{
2329	struct nlattr **attrs = info->attrs;
2330	struct net_device *dev;
2331	struct macsec_secy *secy;
2332	struct macsec_tx_sc *tx_sc;
2333	struct macsec_tx_sa *tx_sa;
2334	u8 assoc_num;
2335	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2336	bool was_operational, was_active;
2337	pn_t prev_pn;
2338	int ret = 0;
2339
2340	prev_pn.full64 = 0;
2341
2342	if (!attrs[MACSEC_ATTR_IFINDEX])
2343		return -EINVAL;
2344
2345	if (parse_sa_config(attrs, tb_sa))
2346		return -EINVAL;
2347
2348	if (!validate_upd_sa(tb_sa))
2349		return -EINVAL;
2350
2351	rtnl_lock();
2352	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2353				 &dev, &secy, &tx_sc, &assoc_num);
2354	if (IS_ERR(tx_sa)) {
2355		rtnl_unlock();
2356		return PTR_ERR(tx_sa);
2357	}
2358
2359	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2360		int pn_len;
2361
2362		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2363		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2364			pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
2365				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2366			rtnl_unlock();
2367			return -EINVAL;
2368		}
2369
2370		spin_lock_bh(&tx_sa->lock);
2371		prev_pn = tx_sa->next_pn_halves;
2372		tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2373		spin_unlock_bh(&tx_sa->lock);
2374	}
2375
2376	was_active = tx_sa->active;
2377	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2378		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2379
2380	was_operational = secy->operational;
2381	if (assoc_num == tx_sc->encoding_sa)
2382		secy->operational = tx_sa->active;
2383
2384	/* If h/w offloading is available, propagate to the device */
2385	if (macsec_is_offloaded(netdev_priv(dev))) {
2386		const struct macsec_ops *ops;
2387		struct macsec_context ctx;
2388
2389		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2390		if (!ops) {
2391			ret = -EOPNOTSUPP;
2392			goto cleanup;
2393		}
2394
2395		ctx.sa.assoc_num = assoc_num;
2396		ctx.sa.tx_sa = tx_sa;
2397		ctx.sa.update_pn = !!prev_pn.full64;
2398		ctx.secy = secy;
2399
2400		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
2401		if (ret)
2402			goto cleanup;
2403	}
2404
2405	rtnl_unlock();
2406
2407	return 0;
2408
2409cleanup:
2410	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2411		spin_lock_bh(&tx_sa->lock);
2412		tx_sa->next_pn_halves = prev_pn;
2413		spin_unlock_bh(&tx_sa->lock);
2414	}
2415	tx_sa->active = was_active;
2416	secy->operational = was_operational;
2417	rtnl_unlock();
2418	return ret;
2419}
2420
2421static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
2422{
2423	struct nlattr **attrs = info->attrs;
2424	struct net_device *dev;
2425	struct macsec_secy *secy;
2426	struct macsec_rx_sc *rx_sc;
2427	struct macsec_rx_sa *rx_sa;
2428	u8 assoc_num;
2429	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2430	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2431	bool was_active;
2432	pn_t prev_pn;
2433	int ret = 0;
2434
2435	prev_pn.full64 = 0;
2436
2437	if (!attrs[MACSEC_ATTR_IFINDEX])
2438		return -EINVAL;
2439
2440	if (parse_rxsc_config(attrs, tb_rxsc))
2441		return -EINVAL;
2442
2443	if (parse_sa_config(attrs, tb_sa))
2444		return -EINVAL;
2445
2446	if (!validate_upd_sa(tb_sa))
2447		return -EINVAL;
2448
2449	rtnl_lock();
2450	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2451				 &dev, &secy, &rx_sc, &assoc_num);
2452	if (IS_ERR(rx_sa)) {
2453		rtnl_unlock();
2454		return PTR_ERR(rx_sa);
2455	}
2456
2457	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2458		int pn_len;
2459
2460		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2461		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2462			pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
2463				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2464			rtnl_unlock();
2465			return -EINVAL;
2466		}
2467
2468		spin_lock_bh(&rx_sa->lock);
2469		prev_pn = rx_sa->next_pn_halves;
2470		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2471		spin_unlock_bh(&rx_sa->lock);
2472	}
2473
2474	was_active = rx_sa->active;
2475	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2476		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2477
2478	/* If h/w offloading is available, propagate to the device */
2479	if (macsec_is_offloaded(netdev_priv(dev))) {
2480		const struct macsec_ops *ops;
2481		struct macsec_context ctx;
2482
2483		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2484		if (!ops) {
2485			ret = -EOPNOTSUPP;
2486			goto cleanup;
2487		}
2488
2489		ctx.sa.assoc_num = assoc_num;
2490		ctx.sa.rx_sa = rx_sa;
2491		ctx.sa.update_pn = !!prev_pn.full64;
2492		ctx.secy = secy;
2493
2494		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
2495		if (ret)
2496			goto cleanup;
2497	}
2498
2499	rtnl_unlock();
2500	return 0;
2501
2502cleanup:
2503	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2504		spin_lock_bh(&rx_sa->lock);
2505		rx_sa->next_pn_halves = prev_pn;
2506		spin_unlock_bh(&rx_sa->lock);
2507	}
2508	rx_sa->active = was_active;
2509	rtnl_unlock();
2510	return ret;
2511}
2512
2513static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
2514{
2515	struct nlattr **attrs = info->attrs;
2516	struct net_device *dev;
2517	struct macsec_secy *secy;
2518	struct macsec_rx_sc *rx_sc;
2519	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2520	unsigned int prev_n_rx_sc;
2521	bool was_active;
2522	int ret;
2523
2524	if (!attrs[MACSEC_ATTR_IFINDEX])
2525		return -EINVAL;
2526
2527	if (parse_rxsc_config(attrs, tb_rxsc))
2528		return -EINVAL;
2529
2530	if (!validate_add_rxsc(tb_rxsc))
2531		return -EINVAL;
2532
2533	rtnl_lock();
2534	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
2535	if (IS_ERR(rx_sc)) {
2536		rtnl_unlock();
2537		return PTR_ERR(rx_sc);
2538	}
2539
2540	was_active = rx_sc->active;
2541	prev_n_rx_sc = secy->n_rx_sc;
2542	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
2543		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
2544
2545		if (rx_sc->active != new)
2546			secy->n_rx_sc += new ? 1 : -1;
2547
2548		rx_sc->active = new;
2549	}
2550
2551	/* If h/w offloading is available, propagate to the device */
2552	if (macsec_is_offloaded(netdev_priv(dev))) {
2553		const struct macsec_ops *ops;
2554		struct macsec_context ctx;
2555
2556		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2557		if (!ops) {
2558			ret = -EOPNOTSUPP;
2559			goto cleanup;
2560		}
2561
2562		ctx.rx_sc = rx_sc;
2563		ctx.secy = secy;
2564
2565		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
2566		if (ret)
2567			goto cleanup;
2568	}
2569
2570	rtnl_unlock();
2571
2572	return 0;
2573
2574cleanup:
2575	secy->n_rx_sc = prev_n_rx_sc;
2576	rx_sc->active = was_active;
2577	rtnl_unlock();
2578	return ret;
2579}
2580
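/* A SecY counts as "configured" once at least one RX SC or TX SA is
 * installed. Used to refuse switching the offload mode on a live
 * configuration, since rule migration is not supported.
 */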
2581static bool macsec_is_configured(struct macsec_dev *macsec)
2582{
2583	struct macsec_secy *secy = &macsec->secy;
2584	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2585	int i;
2586
2587	if (secy->rx_sc)
2588		return true;
2589
2590	for (i = 0; i < MACSEC_NUM_AN; i++)
2591		if (tx_sc->sa[i])
2592			return true;
2593
2594	return false;
2595}
2596
2597static bool macsec_needs_tx_tag(struct macsec_dev *macsec,
2598				const struct macsec_ops *ops)
2599{
2600	return macsec->offload == MACSEC_OFFLOAD_PHY &&
2601		ops->mdo_insert_tx_tag;
2602}
2603
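/* The macsec device's headroom/tailroom needs stack on top of the
 * underlying device's: an offloading driver advertises its own
 * requirements through macsec_ops, otherwise the software defaults
 * (room for the SecTAG up front, the ICV at the tail) apply.
 */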
2604static void macsec_set_head_tail_room(struct net_device *dev)
2605{
2606	struct macsec_dev *macsec = macsec_priv(dev);
2607	struct net_device *real_dev = macsec->real_dev;
2608	int needed_headroom, needed_tailroom;
2609	const struct macsec_ops *ops;
2610
2611	ops = macsec_get_ops(macsec, NULL);
2612	if (ops) {
2613		needed_headroom = ops->needed_headroom;
2614		needed_tailroom = ops->needed_tailroom;
2615	} else {
2616		needed_headroom = MACSEC_NEEDED_HEADROOM;
2617		needed_tailroom = MACSEC_NEEDED_TAILROOM;
2618	}
2619
2620	dev->needed_headroom = real_dev->needed_headroom + needed_headroom;
2621	dev->needed_tailroom = real_dev->needed_tailroom + needed_tailroom;
2622}
2623
2624static void macsec_inherit_tso_max(struct net_device *dev)
2625{
2626	struct macsec_dev *macsec = macsec_priv(dev);
2627
2628	/* If macsec is offloaded, we need to follow the lower
2629	 * device's capabilities. Otherwise, we can ignore them.
2630	 */
2631	if (macsec_is_offloaded(macsec))
2632		netif_inherit_tso_max(dev, macsec->real_dev);
2633}
2634
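/* Switch the offload mode of an existing macsec device. The device must
 * be down and must not have any SCs/SAs configured yet. Turning offload
 * off deletes the SecY from the previous provider (mdo_del_secy),
 * turning it on registers the SecY with the new one (mdo_add_secy); on
 * failure the previous mode is restored. From userspace this maps to
 * something like the following iproute2 command (illustrative):
 *
 *	ip macsec offload macsec0 mac
 */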
2635static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload)
2636{
2637	enum macsec_offload prev_offload;
2638	const struct macsec_ops *ops;
2639	struct macsec_context ctx;
2640	struct macsec_dev *macsec;
2641	int ret = 0;
2642
2643	macsec = macsec_priv(dev);
2644
2645	/* Check if the offloading mode is supported by the underlying layers */
2646	if (offload != MACSEC_OFFLOAD_OFF &&
2647	    !macsec_check_offload(offload, macsec))
2648		return -EOPNOTSUPP;
2649
2650	/* Check if the net device is busy. */
2651	if (netif_running(dev))
2652		return -EBUSY;
2653
2654	/* Check if the device already has rules configured: we do not support
2655	 * rules migration.
2656	 */
2657	if (macsec_is_configured(macsec))
2658		return -EBUSY;
2659
2660	prev_offload = macsec->offload;
2661
2662	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
2663			       macsec, &ctx);
2664	if (!ops)
2665		return -EOPNOTSUPP;
2666
2667	macsec->offload = offload;
2668
2669	ctx.secy = &macsec->secy;
2670	ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload(ops->mdo_del_secy, &ctx)
2671					    : macsec_offload(ops->mdo_add_secy, &ctx);
2672	if (ret) {
2673		macsec->offload = prev_offload;
2674		return ret;
2675	}
2676
2677	macsec_set_head_tail_room(dev);
2678	macsec->insert_tx_tag = macsec_needs_tx_tag(macsec, ops);
2679
2680	macsec_inherit_tso_max(dev);
2681
2682	netdev_update_features(dev);
2683
2684	return ret;
2685}
2686
2687static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
2688{
2689	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
2690	struct nlattr **attrs = info->attrs;
2691	enum macsec_offload offload;
2692	struct macsec_dev *macsec;
2693	struct net_device *dev;
2694	int ret = 0;
2695
2696	if (!attrs[MACSEC_ATTR_IFINDEX])
2697		return -EINVAL;
2698
2699	if (!attrs[MACSEC_ATTR_OFFLOAD])
2700		return -EINVAL;
2701
2702	if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
2703					attrs[MACSEC_ATTR_OFFLOAD],
2704					macsec_genl_offload_policy, NULL))
2705		return -EINVAL;
2706
2707	rtnl_lock();
2708
2709	dev = get_dev_from_nl(genl_info_net(info), attrs);
2710	if (IS_ERR(dev)) {
2711		ret = PTR_ERR(dev);
2712		goto out;
2713	}
2714	macsec = macsec_priv(dev);
2715
2716	if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) {
2717		ret = -EINVAL;
2718		goto out;
2719	}
2720
2721	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
2722
2723	if (macsec->offload != offload)
2724		ret = macsec_update_offload(dev, offload);
2725out:
2726	rtnl_unlock();
2727	return ret;
2728}
2729
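/* SA statistics: when offloaded, the counters live in the hardware and
 * are fetched through mdo_get_tx_sa_stats; in the software path they
 * are plain per-CPU u32 counters, simply summed over all possible CPUs.
 */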
2730static void get_tx_sa_stats(struct net_device *dev, int an,
2731			    struct macsec_tx_sa *tx_sa,
2732			    struct macsec_tx_sa_stats *sum)
2733{
2734	struct macsec_dev *macsec = macsec_priv(dev);
2735	int cpu;
2736
2737	/* If h/w offloading is available, propagate to the device */
2738	if (macsec_is_offloaded(macsec)) {
2739		const struct macsec_ops *ops;
2740		struct macsec_context ctx;
2741
2742		ops = macsec_get_ops(macsec, &ctx);
2743		if (ops) {
2744			ctx.sa.assoc_num = an;
2745			ctx.sa.tx_sa = tx_sa;
2746			ctx.stats.tx_sa_stats = sum;
2747			ctx.secy = &macsec_priv(dev)->secy;
2748			macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
2749		}
2750		return;
2751	}
2752
2753	for_each_possible_cpu(cpu) {
2754		const struct macsec_tx_sa_stats *stats =
2755			per_cpu_ptr(tx_sa->stats, cpu);
2756
2757		sum->OutPktsProtected += stats->OutPktsProtected;
2758		sum->OutPktsEncrypted += stats->OutPktsEncrypted;
2759	}
2760}
2761
2762static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
2763{
2764	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
2765			sum->OutPktsProtected) ||
2766	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2767			sum->OutPktsEncrypted))
2768		return -EMSGSIZE;
2769
2770	return 0;
2771}
2772
2773static void get_rx_sa_stats(struct net_device *dev,
2774			    struct macsec_rx_sc *rx_sc, int an,
2775			    struct macsec_rx_sa *rx_sa,
2776			    struct macsec_rx_sa_stats *sum)
2777{
2778	struct macsec_dev *macsec = macsec_priv(dev);
2779	int cpu;
2780
2781	/* If h/w offloading is available, propagate to the device */
2782	if (macsec_is_offloaded(macsec)) {
2783		const struct macsec_ops *ops;
2784		struct macsec_context ctx;
2785
2786		ops = macsec_get_ops(macsec, &ctx);
2787		if (ops) {
2788			ctx.sa.assoc_num = an;
2789			ctx.sa.rx_sa = rx_sa;
2790			ctx.stats.rx_sa_stats = sum;
2791			ctx.secy = &macsec_priv(dev)->secy;
2792			ctx.rx_sc = rx_sc;
2793			macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
2794		}
2795		return;
2796	}
2797
2798	for_each_possible_cpu(cpu) {
2799		const struct macsec_rx_sa_stats *stats =
2800			per_cpu_ptr(rx_sa->stats, cpu);
2801
2802		sum->InPktsOK         += stats->InPktsOK;
2803		sum->InPktsInvalid    += stats->InPktsInvalid;
2804		sum->InPktsNotValid   += stats->InPktsNotValid;
2805		sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
2806		sum->InPktsUnusedSA   += stats->InPktsUnusedSA;
2807	}
2808}
2809
2810static int copy_rx_sa_stats(struct sk_buff *skb,
2811			    struct macsec_rx_sa_stats *sum)
2812{
2813	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
2814	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
2815			sum->InPktsInvalid) ||
2816	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
2817			sum->InPktsNotValid) ||
2818	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2819			sum->InPktsNotUsingSA) ||
2820	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
2821			sum->InPktsUnusedSA))
2822		return -EMSGSIZE;
2823
2824	return 0;
2825}
2826
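/* SC and SecY statistics use 64-bit per-CPU counters, so each per-CPU
 * copy is snapshotted under the u64_stats seqcount: if a writer updated
 * the counters during the read, u64_stats_fetch_retry() forces a
 * re-read, guaranteeing a consistent snapshot even on 32-bit hosts.
 */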
2827static void get_rx_sc_stats(struct net_device *dev,
2828			    struct macsec_rx_sc *rx_sc,
2829			    struct macsec_rx_sc_stats *sum)
2830{
2831	struct macsec_dev *macsec = macsec_priv(dev);
2832	int cpu;
2833
2834	/* If h/w offloading is available, propagate to the device */
2835	if (macsec_is_offloaded(macsec)) {
2836		const struct macsec_ops *ops;
2837		struct macsec_context ctx;
2838
2839		ops = macsec_get_ops(macsec, &ctx);
2840		if (ops) {
2841			ctx.stats.rx_sc_stats = sum;
2842			ctx.secy = &macsec_priv(dev)->secy;
2843			ctx.rx_sc = rx_sc;
2844			macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
2845		}
2846		return;
2847	}
2848
2849	for_each_possible_cpu(cpu) {
2850		const struct pcpu_rx_sc_stats *stats;
2851		struct macsec_rx_sc_stats tmp;
2852		unsigned int start;
2853
2854		stats = per_cpu_ptr(rx_sc->stats, cpu);
2855		do {
2856			start = u64_stats_fetch_begin(&stats->syncp);
2857			memcpy(&tmp, &stats->stats, sizeof(tmp));
2858		} while (u64_stats_fetch_retry(&stats->syncp, start));
2859
2860		sum->InOctetsValidated += tmp.InOctetsValidated;
2861		sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
2862		sum->InPktsUnchecked   += tmp.InPktsUnchecked;
2863		sum->InPktsDelayed     += tmp.InPktsDelayed;
2864		sum->InPktsOK          += tmp.InPktsOK;
2865		sum->InPktsInvalid     += tmp.InPktsInvalid;
2866		sum->InPktsLate        += tmp.InPktsLate;
2867		sum->InPktsNotValid    += tmp.InPktsNotValid;
2868		sum->InPktsNotUsingSA  += tmp.InPktsNotUsingSA;
2869		sum->InPktsUnusedSA    += tmp.InPktsUnusedSA;
2870	}
2871}
2872
2873static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
2874{
2875	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
2876			      sum->InOctetsValidated,
2877			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2878	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
2879			      sum->InOctetsDecrypted,
2880			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2881	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
2882			      sum->InPktsUnchecked,
2883			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2884	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
2885			      sum->InPktsDelayed,
2886			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2887	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
2888			      sum->InPktsOK,
2889			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2890	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
2891			      sum->InPktsInvalid,
2892			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2893	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
2894			      sum->InPktsLate,
2895			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2896	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
2897			      sum->InPktsNotValid,
2898			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2899	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2900			      sum->InPktsNotUsingSA,
2901			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2902	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
2903			      sum->InPktsUnusedSA,
2904			      MACSEC_RXSC_STATS_ATTR_PAD))
2905		return -EMSGSIZE;
2906
2907	return 0;
2908}
2909
2910static void get_tx_sc_stats(struct net_device *dev,
2911			    struct macsec_tx_sc_stats *sum)
2912{
2913	struct macsec_dev *macsec = macsec_priv(dev);
2914	int cpu;
2915
2916	/* If h/w offloading is available, propagate to the device */
2917	if (macsec_is_offloaded(macsec)) {
2918		const struct macsec_ops *ops;
2919		struct macsec_context ctx;
2920
2921		ops = macsec_get_ops(macsec, &ctx);
2922		if (ops) {
2923			ctx.stats.tx_sc_stats = sum;
2924			ctx.secy = &macsec_priv(dev)->secy;
2925			macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
2926		}
2927		return;
2928	}
2929
2930	for_each_possible_cpu(cpu) {
2931		const struct pcpu_tx_sc_stats *stats;
2932		struct macsec_tx_sc_stats tmp;
2933		unsigned int start;
2934
2935		stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
2936		do {
2937			start = u64_stats_fetch_begin(&stats->syncp);
2938			memcpy(&tmp, &stats->stats, sizeof(tmp));
2939		} while (u64_stats_fetch_retry(&stats->syncp, start));
2940
2941		sum->OutPktsProtected   += tmp.OutPktsProtected;
2942		sum->OutPktsEncrypted   += tmp.OutPktsEncrypted;
2943		sum->OutOctetsProtected += tmp.OutOctetsProtected;
2944		sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
2945	}
2946}
2947
2948static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
2949{
2950	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
2951			      sum->OutPktsProtected,
2952			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2953	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2954			      sum->OutPktsEncrypted,
2955			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2956	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
2957			      sum->OutOctetsProtected,
2958			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2959	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
2960			      sum->OutOctetsEncrypted,
2961			      MACSEC_TXSC_STATS_ATTR_PAD))
2962		return -EMSGSIZE;
2963
2964	return 0;
2965}
2966
2967static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
2968{
2969	struct macsec_dev *macsec = macsec_priv(dev);
2970	int cpu;
2971
2972	/* If h/w offloading is available, propagate to the device */
2973	if (macsec_is_offloaded(macsec)) {
2974		const struct macsec_ops *ops;
2975		struct macsec_context ctx;
2976
2977		ops = macsec_get_ops(macsec, &ctx);
2978		if (ops) {
2979			ctx.stats.dev_stats = sum;
2980			ctx.secy = &macsec_priv(dev)->secy;
2981			macsec_offload(ops->mdo_get_dev_stats, &ctx);
2982		}
2983		return;
2984	}
2985
2986	for_each_possible_cpu(cpu) {
2987		const struct pcpu_secy_stats *stats;
2988		struct macsec_dev_stats tmp;
2989		unsigned int start;
2990
2991		stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
2992		do {
2993			start = u64_stats_fetch_begin(&stats->syncp);
2994			memcpy(&tmp, &stats->stats, sizeof(tmp));
2995		} while (u64_stats_fetch_retry(&stats->syncp, start));
2996
2997		sum->OutPktsUntagged  += tmp.OutPktsUntagged;
2998		sum->InPktsUntagged   += tmp.InPktsUntagged;
2999		sum->OutPktsTooLong   += tmp.OutPktsTooLong;
3000		sum->InPktsNoTag      += tmp.InPktsNoTag;
3001		sum->InPktsBadTag     += tmp.InPktsBadTag;
3002		sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
3003		sum->InPktsNoSCI      += tmp.InPktsNoSCI;
3004		sum->InPktsOverrun    += tmp.InPktsOverrun;
3005	}
3006}
3007
3008static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
3009{
3010	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
3011			      sum->OutPktsUntagged,
3012			      MACSEC_SECY_STATS_ATTR_PAD) ||
3013	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
3014			      sum->InPktsUntagged,
3015			      MACSEC_SECY_STATS_ATTR_PAD) ||
3016	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
3017			      sum->OutPktsTooLong,
3018			      MACSEC_SECY_STATS_ATTR_PAD) ||
3019	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
3020			      sum->InPktsNoTag,
3021			      MACSEC_SECY_STATS_ATTR_PAD) ||
3022	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
3023			      sum->InPktsBadTag,
3024			      MACSEC_SECY_STATS_ATTR_PAD) ||
3025	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
3026			      sum->InPktsUnknownSCI,
3027			      MACSEC_SECY_STATS_ATTR_PAD) ||
3028	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
3029			      sum->InPktsNoSCI,
3030			      MACSEC_SECY_STATS_ATTR_PAD) ||
3031	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
3032			      sum->InPktsOverrun,
3033			      MACSEC_SECY_STATS_ATTR_PAD))
3034		return -EMSGSIZE;
3035
3036	return 0;
3037}
3038
3039static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
3040{
3041	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3042	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
3043							 MACSEC_ATTR_SECY);
3044	u64 csid;
3045
3046	if (!secy_nest)
3047		return 1;
3048
3049	switch (secy->key_len) {
3050	case MACSEC_GCM_AES_128_SAK_LEN:
3051		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
3052		break;
3053	case MACSEC_GCM_AES_256_SAK_LEN:
3054		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
3055		break;
3056	default:
3057		goto cancel;
3058	}
3059
3060	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
3061			MACSEC_SECY_ATTR_PAD) ||
3062	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
3063			      csid, MACSEC_SECY_ATTR_PAD) ||
3064	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
3065	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
3066	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
3067	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
3068	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
3069	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
3070	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
3071	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
3072	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
3073	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
3074		goto cancel;
3075
3076	if (secy->replay_protect) {
3077		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
3078			goto cancel;
3079	}
3080
3081	nla_nest_end(skb, secy_nest);
3082	return 0;
3083
3084cancel:
3085	nla_nest_cancel(skb, secy_nest);
3086	return 1;
3087}
3088
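/* Fill one dump message for a SecY: ifindex and offload mode, the SecY
 * parameters, aggregated statistics, then a nested list of TX SAs and a
 * nested list of RX SCs (each with its own SA list). Any overflow
 * cancels the partially written nests and returns -EMSGSIZE so the dump
 * can resume in a fresh skb.
 */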
3089static noinline_for_stack int
3090dump_secy(struct macsec_secy *secy, struct net_device *dev,
3091	  struct sk_buff *skb, struct netlink_callback *cb)
3092{
3093	struct macsec_tx_sc_stats tx_sc_stats = {0, };
3094	struct macsec_tx_sa_stats tx_sa_stats = {0, };
3095	struct macsec_rx_sc_stats rx_sc_stats = {0, };
3096	struct macsec_rx_sa_stats rx_sa_stats = {0, };
3097	struct macsec_dev *macsec = netdev_priv(dev);
3098	struct macsec_dev_stats dev_stats = {0, };
3099	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3100	struct nlattr *txsa_list, *rxsc_list;
3101	struct macsec_rx_sc *rx_sc;
3102	struct nlattr *attr;
3103	void *hdr;
3104	int i, j;
3105
3106	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3107			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
3108	if (!hdr)
3109		return -EMSGSIZE;
3110
3111	genl_dump_check_consistent(cb, hdr);
3112
3113	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
3114		goto nla_put_failure;
3115
3116	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
3117	if (!attr)
3118		goto nla_put_failure;
3119	if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
3120		goto nla_put_failure;
3121	nla_nest_end(skb, attr);
3122
3123	if (nla_put_secy(secy, skb))
3124		goto nla_put_failure;
3125
3126	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
3127	if (!attr)
3128		goto nla_put_failure;
3129
3130	get_tx_sc_stats(dev, &tx_sc_stats);
3131	if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
3132		nla_nest_cancel(skb, attr);
3133		goto nla_put_failure;
3134	}
3135	nla_nest_end(skb, attr);
3136
3137	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
3138	if (!attr)
3139		goto nla_put_failure;
3140	get_secy_stats(dev, &dev_stats);
3141	if (copy_secy_stats(skb, &dev_stats)) {
3142		nla_nest_cancel(skb, attr);
3143		goto nla_put_failure;
3144	}
3145	nla_nest_end(skb, attr);
3146
3147	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
3148	if (!txsa_list)
3149		goto nla_put_failure;
3150	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
3151		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
3152		struct nlattr *txsa_nest;
3153		u64 pn;
3154		int pn_len;
3155
3156		if (!tx_sa)
3157			continue;
3158
3159		txsa_nest = nla_nest_start_noflag(skb, j++);
3160		if (!txsa_nest) {
3161			nla_nest_cancel(skb, txsa_list);
3162			goto nla_put_failure;
3163		}
3164
3165		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
3166		if (!attr) {
3167			nla_nest_cancel(skb, txsa_nest);
3168			nla_nest_cancel(skb, txsa_list);
3169			goto nla_put_failure;
3170		}
3171		memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
3172		get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
3173		if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
3174			nla_nest_cancel(skb, attr);
3175			nla_nest_cancel(skb, txsa_nest);
3176			nla_nest_cancel(skb, txsa_list);
3177			goto nla_put_failure;
3178		}
3179		nla_nest_end(skb, attr);
3180
3181		if (secy->xpn) {
3182			pn = tx_sa->next_pn;
3183			pn_len = MACSEC_XPN_PN_LEN;
3184		} else {
3185			pn = tx_sa->next_pn_halves.lower;
3186			pn_len = MACSEC_DEFAULT_PN_LEN;
3187		}
3188
3189		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3190		    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3191		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
3192		    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
3193		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
3194			nla_nest_cancel(skb, txsa_nest);
3195			nla_nest_cancel(skb, txsa_list);
3196			goto nla_put_failure;
3197		}
3198
3199		nla_nest_end(skb, txsa_nest);
3200	}
3201	nla_nest_end(skb, txsa_list);
3202
3203	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
3204	if (!rxsc_list)
3205		goto nla_put_failure;
3206
3207	j = 1;
3208	for_each_rxsc_rtnl(secy, rx_sc) {
3209		int k;
3210		struct nlattr *rxsa_list;
3211		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);
3212
3213		if (!rxsc_nest) {
3214			nla_nest_cancel(skb, rxsc_list);
3215			goto nla_put_failure;
3216		}
3217
3218		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
3219		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
3220				MACSEC_RXSC_ATTR_PAD)) {
3221			nla_nest_cancel(skb, rxsc_nest);
3222			nla_nest_cancel(skb, rxsc_list);
3223			goto nla_put_failure;
3224		}
3225
3226		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
3227		if (!attr) {
3228			nla_nest_cancel(skb, rxsc_nest);
3229			nla_nest_cancel(skb, rxsc_list);
3230			goto nla_put_failure;
3231		}
3232		memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
3233		get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
3234		if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
3235			nla_nest_cancel(skb, attr);
3236			nla_nest_cancel(skb, rxsc_nest);
3237			nla_nest_cancel(skb, rxsc_list);
3238			goto nla_put_failure;
3239		}
3240		nla_nest_end(skb, attr);
3241
3242		rxsa_list = nla_nest_start_noflag(skb,
3243						  MACSEC_RXSC_ATTR_SA_LIST);
3244		if (!rxsa_list) {
3245			nla_nest_cancel(skb, rxsc_nest);
3246			nla_nest_cancel(skb, rxsc_list);
3247			goto nla_put_failure;
3248		}
3249
3250		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
3251			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
3252			struct nlattr *rxsa_nest;
3253			u64 pn;
3254			int pn_len;
3255
3256			if (!rx_sa)
3257				continue;
3258
3259			rxsa_nest = nla_nest_start_noflag(skb, k++);
3260			if (!rxsa_nest) {
3261				nla_nest_cancel(skb, rxsa_list);
3262				nla_nest_cancel(skb, rxsc_nest);
3263				nla_nest_cancel(skb, rxsc_list);
3264				goto nla_put_failure;
3265			}
3266
3267			attr = nla_nest_start_noflag(skb,
3268						     MACSEC_SA_ATTR_STATS);
3269			if (!attr) {
3270				nla_nest_cancel(skb, rxsa_list);
3271				nla_nest_cancel(skb, rxsc_nest);
3272				nla_nest_cancel(skb, rxsc_list);
3273				goto nla_put_failure;
3274			}
3275			memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
3276			get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
3277			if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
3278				nla_nest_cancel(skb, attr);
3279				nla_nest_cancel(skb, rxsa_list);
3280				nla_nest_cancel(skb, rxsc_nest);
3281				nla_nest_cancel(skb, rxsc_list);
3282				goto nla_put_failure;
3283			}
3284			nla_nest_end(skb, attr);
3285
3286			if (secy->xpn) {
3287				pn = rx_sa->next_pn;
3288				pn_len = MACSEC_XPN_PN_LEN;
3289			} else {
3290				pn = rx_sa->next_pn_halves.lower;
3291				pn_len = MACSEC_DEFAULT_PN_LEN;
3292			}
3293
3294			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3295			    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3296			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
3297			    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
3298			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
3299				nla_nest_cancel(skb, rxsa_nest);
3300				nla_nest_cancel(skb, rxsc_nest);
3301				nla_nest_cancel(skb, rxsc_list);
3302				goto nla_put_failure;
3303			}
3304			nla_nest_end(skb, rxsa_nest);
3305		}
3306
3307		nla_nest_end(skb, rxsa_list);
3308		nla_nest_end(skb, rxsc_nest);
3309	}
3310
3311	nla_nest_end(skb, rxsc_list);
3312
3313	genlmsg_end(skb, hdr);
3314
3315	return 0;
3316
3317nla_put_failure:
3318	genlmsg_cancel(skb, hdr);
3319	return -EMSGSIZE;
3320}
3321
3322static int macsec_generation = 1; /* protected by RTNL */
3323
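/* Dump callback for MACSEC_CMD_GET_TXSC: cb->args[0] remembers how many
 * netdevs were already processed so a multi-message dump resumes where
 * the previous skb filled up, and cb->seq is set to macsec_generation
 * so userspace can detect configuration changes between messages.
 */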
3324static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
3325{
3326	struct net *net = sock_net(skb->sk);
3327	struct net_device *dev;
3328	int dev_idx, d;
3329
3330	dev_idx = cb->args[0];
3331
3332	d = 0;
3333	rtnl_lock();
3334
3335	cb->seq = macsec_generation;
3336
3337	for_each_netdev(net, dev) {
3338		struct macsec_secy *secy;
3339
3340		if (d < dev_idx)
3341			goto next;
3342
3343		if (!netif_is_macsec(dev))
3344			goto next;
3345
3346		secy = &macsec_priv(dev)->secy;
3347		if (dump_secy(secy, dev, skb, cb) < 0)
3348			goto done;
3349next:
3350		d++;
3351	}
3352
3353done:
3354	rtnl_unlock();
3355	cb->args[0] = d;
3356	return skb->len;
3357}
3358
3359static const struct genl_small_ops macsec_genl_ops[] = {
3360	{
3361		.cmd = MACSEC_CMD_GET_TXSC,
3362		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3363		.dumpit = macsec_dump_txsc,
3364	},
3365	{
3366		.cmd = MACSEC_CMD_ADD_RXSC,
3367		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3368		.doit = macsec_add_rxsc,
3369		.flags = GENL_ADMIN_PERM,
3370	},
3371	{
3372		.cmd = MACSEC_CMD_DEL_RXSC,
3373		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3374		.doit = macsec_del_rxsc,
3375		.flags = GENL_ADMIN_PERM,
3376	},
3377	{
3378		.cmd = MACSEC_CMD_UPD_RXSC,
3379		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3380		.doit = macsec_upd_rxsc,
3381		.flags = GENL_ADMIN_PERM,
3382	},
3383	{
3384		.cmd = MACSEC_CMD_ADD_TXSA,
3385		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3386		.doit = macsec_add_txsa,
3387		.flags = GENL_ADMIN_PERM,
3388	},
3389	{
3390		.cmd = MACSEC_CMD_DEL_TXSA,
3391		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3392		.doit = macsec_del_txsa,
3393		.flags = GENL_ADMIN_PERM,
3394	},
3395	{
3396		.cmd = MACSEC_CMD_UPD_TXSA,
3397		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3398		.doit = macsec_upd_txsa,
3399		.flags = GENL_ADMIN_PERM,
3400	},
3401	{
3402		.cmd = MACSEC_CMD_ADD_RXSA,
3403		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3404		.doit = macsec_add_rxsa,
3405		.flags = GENL_ADMIN_PERM,
3406	},
3407	{
3408		.cmd = MACSEC_CMD_DEL_RXSA,
3409		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3410		.doit = macsec_del_rxsa,
3411		.flags = GENL_ADMIN_PERM,
3412	},
3413	{
3414		.cmd = MACSEC_CMD_UPD_RXSA,
3415		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3416		.doit = macsec_upd_rxsa,
3417		.flags = GENL_ADMIN_PERM,
3418	},
3419	{
3420		.cmd = MACSEC_CMD_UPD_OFFLOAD,
3421		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3422		.doit = macsec_upd_offload,
3423		.flags = GENL_ADMIN_PERM,
3424	},
3425};
3426
3427static struct genl_family macsec_fam __ro_after_init = {
3428	.name		= MACSEC_GENL_NAME,
3429	.hdrsize	= 0,
3430	.version	= MACSEC_GENL_VERSION,
3431	.maxattr	= MACSEC_ATTR_MAX,
3432	.policy		= macsec_genl_policy,
3433	.netnsok	= true,
3434	.module		= THIS_MODULE,
3435	.small_ops	= macsec_genl_ops,
3436	.n_small_ops	= ARRAY_SIZE(macsec_genl_ops),
3437	.resv_start_op	= MACSEC_CMD_UPD_OFFLOAD + 1,
3438};
3439
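/* For PHY offload providers that need an extra tag in front of the
 * frame (mdo_insert_tx_tag): check that the final frame still fits the
 * lower device's MTU and that the skb's head and tail are writable
 * before letting the driver insert its tag. On error the skb is
 * consumed and an ERR_PTR is returned.
 */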
3440static struct sk_buff *macsec_insert_tx_tag(struct sk_buff *skb,
3441					    struct net_device *dev)
3442{
3443	struct macsec_dev *macsec = macsec_priv(dev);
3444	const struct macsec_ops *ops;
3445	struct phy_device *phydev;
3446	struct macsec_context ctx;
3447	int skb_final_len;
3448	int err;
3449
3450	ops = macsec_get_ops(macsec, &ctx);
3451	skb_final_len = skb->len - ETH_HLEN + ops->needed_headroom +
3452		ops->needed_tailroom;
3453	if (unlikely(skb_final_len > macsec->real_dev->mtu)) {
3454		err = -EINVAL;
3455		goto cleanup;
3456	}
3457
3458	phydev = macsec->real_dev->phydev;
3459
3460	err = skb_ensure_writable_head_tail(skb, dev);
3461	if (unlikely(err < 0))
3462		goto cleanup;
3463
3464	err = ops->mdo_insert_tx_tag(phydev, skb);
3465	if (unlikely(err))
3466		goto cleanup;
3467
3468	return skb;
3469cleanup:
3470	kfree_skb(skb);
3471	return ERR_PTR(err);
3472}
3473
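/* Transmit path, three cases:
 *  - offloaded: attach the SecY's metadata_dst so the lower driver can
 *    recover the TX SC, optionally insert the provider's tag, and hand
 *    the skb straight to the real device;
 *  - protect_frames disabled: account the frame as untagged and pass it
 *    through in the clear;
 *  - software MACsec: encrypt, possibly asynchronously via the AEAD
 *    API (in which case -EINPROGRESS is not an error), and transmit.
 */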
3474static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
3475				     struct net_device *dev)
3476{
3477	struct macsec_dev *macsec = netdev_priv(dev);
3478	struct macsec_secy *secy = &macsec->secy;
3479	struct pcpu_secy_stats *secy_stats;
3480	int ret, len;
3481
3482	if (macsec_is_offloaded(netdev_priv(dev))) {
3483		struct metadata_dst *md_dst = secy->tx_sc.md_dst;
3484
3485		skb_dst_drop(skb);
3486		dst_hold(&md_dst->dst);
3487		skb_dst_set(skb, &md_dst->dst);
3488
3489		if (macsec->insert_tx_tag) {
3490			skb = macsec_insert_tx_tag(skb, dev);
3491			if (IS_ERR(skb)) {
3492				DEV_STATS_INC(dev, tx_dropped);
3493				return NETDEV_TX_OK;
3494			}
3495		}
3496
3497		skb->dev = macsec->real_dev;
3498		return dev_queue_xmit(skb);
3499	}
3500
3501	/* IEEE 802.1AE-2006 10.5 */
3502	if (!secy->protect_frames) {
3503		secy_stats = this_cpu_ptr(macsec->stats);
3504		u64_stats_update_begin(&secy_stats->syncp);
3505		secy_stats->stats.OutPktsUntagged++;
3506		u64_stats_update_end(&secy_stats->syncp);
3507		skb->dev = macsec->real_dev;
3508		len = skb->len;
3509		ret = dev_queue_xmit(skb);
3510		count_tx(dev, ret, len);
3511		return ret;
3512	}
3513
3514	if (!secy->operational) {
3515		kfree_skb(skb);
3516		DEV_STATS_INC(dev, tx_dropped);
3517		return NETDEV_TX_OK;
3518	}
3519
3520	len = skb->len;
3521	skb = macsec_encrypt(skb, dev);
3522	if (IS_ERR(skb)) {
3523		if (PTR_ERR(skb) != -EINPROGRESS)
3524			DEV_STATS_INC(dev, tx_dropped);
3525		return NETDEV_TX_OK;
3526	}
3527
3528	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
3529
3530	macsec_encrypt_finish(skb, dev);
3531	ret = dev_queue_xmit(skb);
3532	count_tx(dev, ret, len);
3533	return ret;
3534}
3535
3536#define MACSEC_FEATURES \
3537	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
3538
3539#define MACSEC_OFFLOAD_FEATURES \
3540	(MACSEC_FEATURES | NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES | \
3541	 NETIF_F_LRO | NETIF_F_RXHASH | NETIF_F_CSUM_MASK | NETIF_F_RXCSUM)
3542
3543static int macsec_dev_init(struct net_device *dev)
3544{
3545	struct macsec_dev *macsec = macsec_priv(dev);
3546	struct net_device *real_dev = macsec->real_dev;
3547	int err;
3548
3549	err = gro_cells_init(&macsec->gro_cells, dev);
3550	if (err)
3551		return err;
3552
3553	macsec_inherit_tso_max(dev);
3554
3555	dev->hw_features = real_dev->hw_features & MACSEC_OFFLOAD_FEATURES;
3556	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
3557
3558	dev->features = real_dev->features & MACSEC_OFFLOAD_FEATURES;
3559	dev->features |= NETIF_F_GSO_SOFTWARE;
3560	dev->lltx = true;
3561	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
3562
3563	macsec_set_head_tail_room(dev);
3564
3565	if (is_zero_ether_addr(dev->dev_addr))
3566		eth_hw_addr_inherit(dev, real_dev);
3567	if (is_zero_ether_addr(dev->broadcast))
3568		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
3569
3570	/* Get macsec's reference to real_dev */
3571	netdev_hold(real_dev, &macsec->dev_tracker, GFP_KERNEL);
3572
3573	return 0;
3574}
3575
3576static void macsec_dev_uninit(struct net_device *dev)
3577{
3578	struct macsec_dev *macsec = macsec_priv(dev);
3579
3580	gro_cells_destroy(&macsec->gro_cells);
3581}
3582
3583static netdev_features_t macsec_fix_features(struct net_device *dev,
3584					     netdev_features_t features)
3585{
3586	struct macsec_dev *macsec = macsec_priv(dev);
3587	struct net_device *real_dev = macsec->real_dev;
3588	netdev_features_t mask;
3589
3590	mask = macsec_is_offloaded(macsec) ? MACSEC_OFFLOAD_FEATURES
3591					   : MACSEC_FEATURES;
3592
3593	features &= (real_dev->features & mask) |
3594		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
3595
3596	return features;
3597}
3598
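/* Opening the macsec device mirrors its address and rx-mode flags onto
 * the real device (unicast filter, allmulti, promiscuity), gives an
 * offload provider a chance to prepare through mdo_dev_open, and then
 * reflects the lower device's carrier state. Each step is unwound in
 * reverse order on failure.
 */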
3599static int macsec_dev_open(struct net_device *dev)
3600{
3601	struct macsec_dev *macsec = macsec_priv(dev);
3602	struct net_device *real_dev = macsec->real_dev;
3603	int err;
3604
3605	err = dev_uc_add(real_dev, dev->dev_addr);
3606	if (err < 0)
3607		return err;
3608
3609	if (dev->flags & IFF_ALLMULTI) {
3610		err = dev_set_allmulti(real_dev, 1);
3611		if (err < 0)
3612			goto del_unicast;
3613	}
3614
3615	if (dev->flags & IFF_PROMISC) {
3616		err = dev_set_promiscuity(real_dev, 1);
3617		if (err < 0)
3618			goto clear_allmulti;
3619	}
3620
3621	/* If h/w offloading is available, propagate to the device */
3622	if (macsec_is_offloaded(macsec)) {
3623		const struct macsec_ops *ops;
3624		struct macsec_context ctx;
3625
3626		ops = macsec_get_ops(netdev_priv(dev), &ctx);
3627		if (!ops) {
3628			err = -EOPNOTSUPP;
3629			goto clear_allmulti;
3630		}
3631
3632		ctx.secy = &macsec->secy;
3633		err = macsec_offload(ops->mdo_dev_open, &ctx);
3634		if (err)
3635			goto clear_allmulti;
3636	}
3637
3638	if (netif_carrier_ok(real_dev))
3639		netif_carrier_on(dev);
3640
3641	return 0;
3642clear_allmulti:
3643	if (dev->flags & IFF_ALLMULTI)
3644		dev_set_allmulti(real_dev, -1);
3645del_unicast:
3646	dev_uc_del(real_dev, dev->dev_addr);
3647	netif_carrier_off(dev);
3648	return err;
3649}
3650
3651static int macsec_dev_stop(struct net_device *dev)
3652{
3653	struct macsec_dev *macsec = macsec_priv(dev);
3654	struct net_device *real_dev = macsec->real_dev;
3655
3656	netif_carrier_off(dev);
3657
3658	/* If h/w offloading is available, propagate to the device */
3659	if (macsec_is_offloaded(macsec)) {
3660		const struct macsec_ops *ops;
3661		struct macsec_context ctx;
3662
3663		ops = macsec_get_ops(macsec, &ctx);
3664		if (ops) {
3665			ctx.secy = &macsec->secy;
3666			macsec_offload(ops->mdo_dev_stop, &ctx);
3667		}
3668	}
3669
3670	dev_mc_unsync(real_dev, dev);
3671	dev_uc_unsync(real_dev, dev);
3672
3673	if (dev->flags & IFF_ALLMULTI)
3674		dev_set_allmulti(real_dev, -1);
3675
3676	if (dev->flags & IFF_PROMISC)
3677		dev_set_promiscuity(real_dev, -1);
3678
3679	dev_uc_del(real_dev, dev->dev_addr);
3680
3681	return 0;
3682}
3683
3684static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
3685{
3686	struct net_device *real_dev = macsec_priv(dev)->real_dev;
3687
3688	if (!(dev->flags & IFF_UP))
3689		return;
3690
3691	if (change & IFF_ALLMULTI)
3692		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
3693
3694	if (change & IFF_PROMISC)
3695		dev_set_promiscuity(real_dev,
3696				    dev->flags & IFF_PROMISC ? 1 : -1);
3697}
3698
3699static void macsec_dev_set_rx_mode(struct net_device *dev)
3700{
3701	struct net_device *real_dev = macsec_priv(dev)->real_dev;
3702
3703	dev_mc_sync(real_dev, dev);
3704	dev_uc_sync(real_dev, dev);
3705}
3706
3707static int macsec_set_mac_address(struct net_device *dev, void *p)
3708{
3709	struct macsec_dev *macsec = macsec_priv(dev);
3710	struct net_device *real_dev = macsec->real_dev;
3711	struct sockaddr *addr = p;
3712	u8  old_addr[ETH_ALEN];
3713	int err;
3714
3715	if (!is_valid_ether_addr(addr->sa_data))
3716		return -EADDRNOTAVAIL;
3717
3718	if (dev->flags & IFF_UP) {
3719		err = dev_uc_add(real_dev, addr->sa_data);
3720		if (err < 0)
3721			return err;
3722	}
3723
3724	ether_addr_copy(old_addr, dev->dev_addr);
3725	eth_hw_addr_set(dev, addr->sa_data);
3726
3727	/* If h/w offloading is available, propagate to the device */
3728	if (macsec_is_offloaded(macsec)) {
3729		const struct macsec_ops *ops;
3730		struct macsec_context ctx;
3731
3732		ops = macsec_get_ops(macsec, &ctx);
3733		if (!ops) {
3734			err = -EOPNOTSUPP;
3735			goto restore_old_addr;
3736		}
3737
3738		ctx.secy = &macsec->secy;
3739		err = macsec_offload(ops->mdo_upd_secy, &ctx);
3740		if (err)
3741			goto restore_old_addr;
3742	}
3743
3744	if (dev->flags & IFF_UP)
3745		dev_uc_del(real_dev, old_addr);
3746
3747	return 0;
3748
3749restore_old_addr:
3750	if (dev->flags & IFF_UP)
3751		dev_uc_del(real_dev, addr->sa_data);
3752
3753	eth_hw_addr_set(dev, old_addr);
3754
3755	return err;
3756}
3757
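/* The MTU is bounded by the underlying device's MTU minus the MACsec
 * overhead: the ICV plus macsec_extra_len(true), i.e. the MACsec
 * EtherType, the SecTAG and the optional SCI (2 + 6 + 8 = 16 bytes).
 * E.g. with a 16-byte ICV, a 1500-byte lower MTU leaves at most
 * 1500 - 16 - 16 = 1468 bytes.
 */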
3758static int macsec_change_mtu(struct net_device *dev, int new_mtu)
3759{
3760	struct macsec_dev *macsec = macsec_priv(dev);
3761	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);
3762
3763	if (macsec->real_dev->mtu - extra < new_mtu)
3764		return -ERANGE;
3765
3766	WRITE_ONCE(dev->mtu, new_mtu);
3767
3768	return 0;
3769}
3770
3771static void macsec_get_stats64(struct net_device *dev,
3772			       struct rtnl_link_stats64 *s)
3773{
3774	if (!dev->tstats)
3775		return;
3776
3777	dev_fetch_sw_netstats(s, dev->tstats);
3778
3779	s->rx_dropped = DEV_STATS_READ(dev, rx_dropped);
3780	s->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
3781	s->rx_errors = DEV_STATS_READ(dev, rx_errors);
3782}
3783
3784static int macsec_get_iflink(const struct net_device *dev)
3785{
3786	return READ_ONCE(macsec_priv(dev)->real_dev->ifindex);
3787}
3788
3789static const struct net_device_ops macsec_netdev_ops = {
3790	.ndo_init		= macsec_dev_init,
3791	.ndo_uninit		= macsec_dev_uninit,
3792	.ndo_open		= macsec_dev_open,
3793	.ndo_stop		= macsec_dev_stop,
3794	.ndo_fix_features	= macsec_fix_features,
3795	.ndo_change_mtu		= macsec_change_mtu,
3796	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
3797	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
3798	.ndo_set_mac_address	= macsec_set_mac_address,
3799	.ndo_start_xmit		= macsec_start_xmit,
3800	.ndo_get_stats64	= macsec_get_stats64,
3801	.ndo_get_iflink		= macsec_get_iflink,
3802};
3803
3804static const struct device_type macsec_type = {
3805	.name = "macsec",
3806};
3807
3808static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
3809	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
3810	[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
3811	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
3812	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
3813	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
3814	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
3815	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
3816	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
3817	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
3818	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
3819	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
3820	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
3821	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
3822	[IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
3823};
3824
3825static void macsec_free_netdev(struct net_device *dev)
3826{
3827	struct macsec_dev *macsec = macsec_priv(dev);
3828
3829	dst_release(&macsec->secy.tx_sc.md_dst->dst);
3830	free_percpu(macsec->stats);
3831	free_percpu(macsec->secy.tx_sc.stats);
3832
3833	/* Drop macsec's reference to real_dev */
3834	netdev_put(macsec->real_dev, &macsec->dev_tracker);
3835}
3836
3837static void macsec_setup(struct net_device *dev)
3838{
3839	ether_setup(dev);
3840	dev->min_mtu = 0;
3841	dev->max_mtu = ETH_MAX_MTU;
3842	dev->priv_flags |= IFF_NO_QUEUE;
3843	dev->netdev_ops = &macsec_netdev_ops;
3844	dev->needs_free_netdev = true;
3845	dev->priv_destructor = macsec_free_netdev;
3846	SET_NETDEV_DEVTYPE(dev, &macsec_type);
3847
3848	eth_zero_addr(dev->broadcast);
3849}
3850
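/* Apply the IFLA_MACSEC_* attributes shared by newlink and changelink.
 * Changing the encoding SA re-evaluates the SecY's operational state,
 * and with an XPN cipher suite the replay window is additionally capped
 * at MACSEC_XPN_MAX_REPLAY_WINDOW per IEEE 802.1AEbw-2013 10.7.8.
 */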
3851static int macsec_changelink_common(struct net_device *dev,
3852				    struct nlattr *data[])
3853{
3854	struct macsec_secy *secy;
3855	struct macsec_tx_sc *tx_sc;
3856
3857	secy = &macsec_priv(dev)->secy;
3858	tx_sc = &secy->tx_sc;
3859
3860	if (data[IFLA_MACSEC_ENCODING_SA]) {
3861		struct macsec_tx_sa *tx_sa;
3862
3863		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
3864		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);
3865
3866		secy->operational = tx_sa && tx_sa->active;
3867	}
3868
3869	if (data[IFLA_MACSEC_ENCRYPT])
3870		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
3871
3872	if (data[IFLA_MACSEC_PROTECT])
3873		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);
3874
3875	if (data[IFLA_MACSEC_INC_SCI])
3876		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
3877
3878	if (data[IFLA_MACSEC_ES])
3879		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);
3880
3881	if (data[IFLA_MACSEC_SCB])
3882		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);
3883
3884	if (data[IFLA_MACSEC_REPLAY_PROTECT])
3885		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);
3886
3887	if (data[IFLA_MACSEC_VALIDATION])
3888		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
3889
3890	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
3891		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
3892		case MACSEC_CIPHER_ID_GCM_AES_128:
3893		case MACSEC_DEFAULT_CIPHER_ID:
3894			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
3895			secy->xpn = false;
3896			break;
3897		case MACSEC_CIPHER_ID_GCM_AES_256:
3898			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
3899			secy->xpn = false;
3900			break;
3901		case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
3902			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
3903			secy->xpn = true;
3904			break;
3905		case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
3906			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
3907			secy->xpn = true;
3908			break;
3909		default:
3910			return -EINVAL;
3911		}
3912	}
3913
3914	if (data[IFLA_MACSEC_WINDOW]) {
3915		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
3916
3917		/* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
3918		 * for XPN cipher suites */
3919		if (secy->xpn &&
3920		    secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
3921			return -EINVAL;
3922	}
3923
3924	return 0;
3925}
3926
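/* Illustrative summary of the cipher-suite switch above: the four GCM-AES
 * suites differ only in SAK length and in whether 64-bit extended packet
 * numbering (XPN) is used:
 *
 *   MACSEC_CIPHER_ID_GCM_AES_128       16-byte SAK   xpn = false
 *   MACSEC_CIPHER_ID_GCM_AES_256       32-byte SAK   xpn = false
 *   MACSEC_CIPHER_ID_GCM_AES_XPN_128   16-byte SAK   xpn = true
 *   MACSEC_CIPHER_ID_GCM_AES_XPN_256   32-byte SAK   xpn = true
 *
 * MACSEC_DEFAULT_CIPHER_ID is an alias for plain GCM-AES-128, and for the
 * XPN suites the replay window is additionally capped at
 * MACSEC_XPN_MAX_REPLAY_WINDOW just below.
 */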
3927static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
3928			     struct nlattr *data[],
3929			     struct netlink_ext_ack *extack)
3930{
3931	struct macsec_dev *macsec = macsec_priv(dev);
3932	bool macsec_offload_state_change = false;
3933	enum macsec_offload offload;
3934	struct macsec_tx_sc tx_sc;
3935	struct macsec_secy secy;
3936	int ret;
3937
3938	if (!data)
3939		return 0;
3940
3941	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
3942	    data[IFLA_MACSEC_ICV_LEN] ||
3943	    data[IFLA_MACSEC_SCI] ||
3944	    data[IFLA_MACSEC_PORT])
3945		return -EINVAL;
3946
3947	/* Keep a copy of unmodified secy and tx_sc, in case the offload
3948	 * propagation fails, to revert macsec_changelink_common.
3949	 */
3950	memcpy(&secy, &macsec->secy, sizeof(secy));
3951	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));
3952
3953	ret = macsec_changelink_common(dev, data);
3954	if (ret)
3955		goto cleanup;
3956
3957	if (data[IFLA_MACSEC_OFFLOAD]) {
3958		offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]);
3959		if (macsec->offload != offload) {
3960			macsec_offload_state_change = true;
3961			ret = macsec_update_offload(dev, offload);
3962			if (ret)
3963				goto cleanup;
3964		}
3965	}
3966
3967	/* If h/w offloading is available, propagate to the device */
3968	if (!macsec_offload_state_change && macsec_is_offloaded(macsec)) {
3969		const struct macsec_ops *ops;
3970		struct macsec_context ctx;
3971
3972		ops = macsec_get_ops(netdev_priv(dev), &ctx);
3973		if (!ops) {
3974			ret = -EOPNOTSUPP;
3975			goto cleanup;
3976		}
3977
3978		ctx.secy = &macsec->secy;
3979		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
3980		if (ret)
3981			goto cleanup;
3982	}
3983
3984	return 0;
3985
3986cleanup:
3987	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
3988	memcpy(&macsec->secy, &secy, sizeof(secy));
3989
3990	return ret;
3991}
3992
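/* Illustrative aside on the rollback above: macsec_changelink() snapshots
 * secy and tx_sc by value before touching them; on failure, the memcpy()s
 * under the cleanup label restore the snapshots. This is safe because
 * macsec_changelink_common() only rewrites scalar fields (encrypt,
 * protect_frames, key_len, replay_window, ...), so copying the embedded
 * structs back does not clobber any pointer member.
 */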
3993static void macsec_del_dev(struct macsec_dev *macsec)
3994{
3995	int i;
3996
3997	while (macsec->secy.rx_sc) {
3998		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);
3999
4000		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
4001		free_rx_sc(rx_sc);
4002	}
4003
4004	for (i = 0; i < MACSEC_NUM_AN; i++) {
4005		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);
4006
4007		if (sa) {
4008			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
4009			clear_tx_sa(sa);
4010		}
4011	}
4012}
4013
4014static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
4015{
4016	struct macsec_dev *macsec = macsec_priv(dev);
4017	struct net_device *real_dev = macsec->real_dev;
4018
4019	/* If h/w offloading is available, propagate to the device */
4020	if (macsec_is_offloaded(macsec)) {
4021		const struct macsec_ops *ops;
4022		struct macsec_context ctx;
4023
4024		ops = macsec_get_ops(netdev_priv(dev), &ctx);
4025		if (ops) {
4026			ctx.secy = &macsec->secy;
4027			macsec_offload(ops->mdo_del_secy, &ctx);
4028		}
4029	}
4030
4031	unregister_netdevice_queue(dev, head);
4032	list_del_rcu(&macsec->secys);
4033	macsec_del_dev(macsec);
4034	netdev_upper_dev_unlink(real_dev, dev);
4035
4036	macsec_generation++;
4037}
4038
4039static void macsec_dellink(struct net_device *dev, struct list_head *head)
4040{
4041	struct macsec_dev *macsec = macsec_priv(dev);
4042	struct net_device *real_dev = macsec->real_dev;
4043	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
4044
4045	macsec_common_dellink(dev, head);
4046
4047	if (list_empty(&rxd->secys)) {
4048		netdev_rx_handler_unregister(real_dev);
4049		kfree(rxd);
4050	}
4051}
4052
4053static int register_macsec_dev(struct net_device *real_dev,
4054			       struct net_device *dev)
4055{
4056	struct macsec_dev *macsec = macsec_priv(dev);
4057	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
4058
4059	if (!rxd) {
4060		int err;
4061
4062		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
4063		if (!rxd)
4064			return -ENOMEM;
4065
4066		INIT_LIST_HEAD(&rxd->secys);
4067
4068		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
4069						 rxd);
4070		if (err < 0) {
4071			kfree(rxd);
4072			return err;
4073		}
4074	}
4075
4076	list_add_tail_rcu(&macsec->secys, &rxd->secys);
4077	return 0;
4078}
4079
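/* Illustrative aside: all macsec devices stacked on one real device share
 * its single rx_handler slot. The first SecY allocates struct
 * macsec_rxh_data and registers macsec_handle_frame(); later SecYs merely
 * join rxd->secys. macsec_dellink() unregisters the handler and frees rxd
 * only once that list drains.
 */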
4080static bool sci_exists(struct net_device *dev, sci_t sci)
4081{
4082	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
4083	struct macsec_dev *macsec;
4084
4085	list_for_each_entry(macsec, &rxd->secys, secys) {
4086		if (macsec->secy.sci == sci)
4087			return true;
4088	}
4089
4090	return false;
4091}
4092
4093static sci_t dev_to_sci(struct net_device *dev, __be16 port)
4094{
4095	return make_sci(dev->dev_addr, port);
4096}
4097
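/* Worked example: make_sci() lays out the SCI as the port's 6-byte MAC
 * address followed by a 2-byte port number. For a hypothetical device
 * address 52:54:00:12:34:56 and the default port MACSEC_PORT_ES (0x0001),
 * dev_to_sci() produces the 8-byte SCI 52-54-00-12-34-56-00-01.
 */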
4098static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
4099{
4100	struct macsec_dev *macsec = macsec_priv(dev);
4101	struct macsec_secy *secy = &macsec->secy;
4102
4103	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
4104	if (!macsec->stats)
4105		return -ENOMEM;
4106
4107	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
4108	if (!secy->tx_sc.stats)
4109		return -ENOMEM;
4110
4111	secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
4112	if (!secy->tx_sc.md_dst)
4113		/* macsec and secy percpu stats will be freed when unregistering
4114		 * net_device in macsec_free_netdev()
4115		 */
4116		return -ENOMEM;
4117
4118	if (sci == MACSEC_UNDEF_SCI)
4119		sci = dev_to_sci(dev, MACSEC_PORT_ES);
4120
4121	secy->netdev = dev;
4122	secy->operational = true;
4123	secy->key_len = DEFAULT_SAK_LEN;
4124	secy->icv_len = icv_len;
4125	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
4126	secy->protect_frames = true;
4127	secy->replay_protect = false;
4128	secy->xpn = DEFAULT_XPN;
4129
4130	secy->sci = sci;
4131	secy->tx_sc.md_dst->u.macsec_info.sci = sci;
4132	secy->tx_sc.active = true;
4133	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
4134	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
4135	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
4136	secy->tx_sc.end_station = false;
4137	secy->tx_sc.scb = false;
4138
4139	return 0;
4140}
4141
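/* Illustrative aside: on an -ENOMEM partway through macsec_add_dev(), the
 * allocations that did succeed are deliberately left in place; as the
 * in-code comment notes, macsec_free_netdev() reclaims them when the
 * net_device is torn down, so no explicit unwind is needed here.
 */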
4142static struct lock_class_key macsec_netdev_addr_lock_key;
4143
4144static int macsec_newlink(struct net *net, struct net_device *dev,
4145			  struct nlattr *tb[], struct nlattr *data[],
4146			  struct netlink_ext_ack *extack)
4147{
4148	struct macsec_dev *macsec = macsec_priv(dev);
4149	rx_handler_func_t *rx_handler;
4150	u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
4151	struct net_device *real_dev;
4152	int err, mtu;
4153	sci_t sci;
4154
4155	if (!tb[IFLA_LINK])
4156		return -EINVAL;
4157	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
4158	if (!real_dev)
4159		return -ENODEV;
4160	if (real_dev->type != ARPHRD_ETHER)
4161		return -EINVAL;
4162
4163	dev->priv_flags |= IFF_MACSEC;
4164
4165	macsec->real_dev = real_dev;
4166
4167	if (data && data[IFLA_MACSEC_OFFLOAD])
4168		macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
4169	else
4170		/* MACsec offloading is off by default */
4171		macsec->offload = MACSEC_OFFLOAD_OFF;
4172
4173	/* Check if the offloading mode is supported by the underlying layers */
4174	if (macsec->offload != MACSEC_OFFLOAD_OFF &&
4175	    !macsec_check_offload(macsec->offload, macsec))
4176		return -EOPNOTSUPP;
4177
4178	/* send_sci must be set to true when the SCI to transmit is set explicitly */
4179	if ((data && data[IFLA_MACSEC_SCI]) &&
4180	    (data && data[IFLA_MACSEC_INC_SCI])) {
4181		u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
4182
4183		if (!send_sci)
4184			return -EINVAL;
4185	}
4186
4187	if (data && data[IFLA_MACSEC_ICV_LEN])
4188		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
4189	mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
4190	if (mtu < 0)
4191		dev->mtu = 0;
4192	else
4193		dev->mtu = mtu;
4194
4195	rx_handler = rtnl_dereference(real_dev->rx_handler);
4196	if (rx_handler && rx_handler != macsec_handle_frame)
4197		return -EBUSY;
4198
4199	err = register_netdevice(dev);
4200	if (err < 0)
4201		return err;
4202
4203	netdev_lockdep_set_classes(dev);
4204	lockdep_set_class(&dev->addr_list_lock,
4205			  &macsec_netdev_addr_lock_key);
4206
4207	err = netdev_upper_dev_link(real_dev, dev, extack);
4208	if (err < 0)
4209		goto unregister;
4210
4211	/* the device must already be registered so that ->init has run
4212	 * and the MAC address is set
4213	 */
4214	if (data && data[IFLA_MACSEC_SCI])
4215		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
4216	else if (data && data[IFLA_MACSEC_PORT])
4217		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
4218	else
4219		sci = dev_to_sci(dev, MACSEC_PORT_ES);
4220
4221	if (rx_handler && sci_exists(real_dev, sci)) {
4222		err = -EBUSY;
4223		goto unlink;
4224	}
4225
4226	err = macsec_add_dev(dev, sci, icv_len);
4227	if (err)
4228		goto unlink;
4229
4230	if (data) {
4231		err = macsec_changelink_common(dev, data);
4232		if (err)
4233			goto del_dev;
4234	}
4235
4236	/* If h/w offloading is available, propagate to the device */
4237	if (macsec_is_offloaded(macsec)) {
4238		const struct macsec_ops *ops;
4239		struct macsec_context ctx;
4240
4241		ops = macsec_get_ops(macsec, &ctx);
4242		if (ops) {
4243			ctx.secy = &macsec->secy;
4244			err = macsec_offload(ops->mdo_add_secy, &ctx);
4245			if (err)
4246				goto del_dev;
4247
4248			macsec->insert_tx_tag =
4249				macsec_needs_tx_tag(macsec, ops);
4250		}
4251	}
4252
4253	err = register_macsec_dev(real_dev, dev);
4254	if (err < 0)
4255		goto del_dev;
4256
4257	netif_stacked_transfer_operstate(real_dev, dev);
4258	linkwatch_fire_event(dev);
4259
4260	macsec_generation++;
4261
4262	return 0;
4263
4264del_dev:
4265	macsec_del_dev(macsec);
4266unlink:
4267	netdev_upper_dev_unlink(real_dev, dev);
4268unregister:
4269	unregister_netdevice(dev);
4270	return err;
4271}
4272
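/* Worked example for the MTU computation above: with the default 16-byte
 * ICV and the SCI pessimistically assumed present, the per-frame overhead
 * is macsec_extra_len(true) + icv_len = (6-byte SecTAG + 8-byte SCI +
 * 2-byte EtherType) + 16 = 32 octets, so a macsec device created on a
 * 1500-byte-MTU Ethernet link gets an MTU of 1468.
 */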
4273static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
4274				struct netlink_ext_ack *extack)
4275{
4276	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
4277	u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
4278	int flag;
4279	bool es, scb, sci;
4280
4281	if (!data)
4282		return 0;
4283
4284	if (data[IFLA_MACSEC_CIPHER_SUITE])
4285		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);
4286
4287	if (data[IFLA_MACSEC_ICV_LEN]) {
4288		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
4289		if (icv_len != MACSEC_DEFAULT_ICV_LEN) {
4290			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
4291			struct crypto_aead *dummy_tfm;
4292
4293			dummy_tfm = macsec_alloc_tfm(dummy_key,
4294						     DEFAULT_SAK_LEN,
4295						     icv_len);
4296			if (IS_ERR(dummy_tfm))
4297				return PTR_ERR(dummy_tfm);
4298			crypto_free_aead(dummy_tfm);
4299		}
4300	}
4301
4302	switch (csid) {
4303	case MACSEC_CIPHER_ID_GCM_AES_128:
4304	case MACSEC_CIPHER_ID_GCM_AES_256:
4305	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
4306	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
4307	case MACSEC_DEFAULT_CIPHER_ID:
4308		if (icv_len < MACSEC_MIN_ICV_LEN ||
4309		    icv_len > MACSEC_STD_ICV_LEN)
4310			return -EINVAL;
4311		break;
4312	default:
4313		return -EINVAL;
4314	}
4315
4316	if (data[IFLA_MACSEC_ENCODING_SA]) {
4317		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
4318			return -EINVAL;
4319	}
4320
4321	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
4322	     flag < IFLA_MACSEC_VALIDATION;
4323	     flag++) {
4324		if (data[flag]) {
4325			if (nla_get_u8(data[flag]) > 1)
4326				return -EINVAL;
4327		}
4328	}
4329
4330	es  = nla_get_u8_default(data[IFLA_MACSEC_ES], false);
4331	sci = nla_get_u8_default(data[IFLA_MACSEC_INC_SCI], false);
4332	scb = nla_get_u8_default(data[IFLA_MACSEC_SCB], false);
4333
4334	if ((sci && (scb || es)) || (scb && es))
4335		return -EINVAL;
4336
4337	if (data[IFLA_MACSEC_VALIDATION] &&
4338	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
4339		return -EINVAL;
4340
4341	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
4342	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
4343	    !data[IFLA_MACSEC_WINDOW])
4344		return -EINVAL;
4345
4346	return 0;
4347}
4348
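/* Illustrative aside on the flag check above: the SecTAG encoding rules
 * forbid carrying an explicit SCI together with the ES (end station) or
 * SCB (EPON single-copy broadcast) bits, and ES and SCB exclude each
 * other, so (sci && (scb || es)) || (scb && es) rejects exactly the
 * invalid combinations.
 */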
4349static struct net *macsec_get_link_net(const struct net_device *dev)
4350{
4351	return dev_net(macsec_priv(dev)->real_dev);
4352}
4353
4354struct net_device *macsec_get_real_dev(const struct net_device *dev)
4355{
4356	return macsec_priv(dev)->real_dev;
4357}
4358EXPORT_SYMBOL_GPL(macsec_get_real_dev);
4359
4360bool macsec_netdev_is_offloaded(struct net_device *dev)
4361{
4362	return macsec_is_offloaded(macsec_priv(dev));
4363}
4364EXPORT_SYMBOL_GPL(macsec_netdev_is_offloaded);
4365
4366static size_t macsec_get_size(const struct net_device *dev)
4367{
4368	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
4369		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
4370		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
4371		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
4372		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
4373		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
4374		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
4375		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
4376		nla_total_size(1) + /* IFLA_MACSEC_ES */
4377		nla_total_size(1) + /* IFLA_MACSEC_SCB */
4378		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
4379		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
4380		nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */
4381		0;
4382}
4383
4384static int macsec_fill_info(struct sk_buff *skb,
4385			    const struct net_device *dev)
4386{
4387	struct macsec_tx_sc *tx_sc;
4388	struct macsec_dev *macsec;
4389	struct macsec_secy *secy;
4390	u64 csid;
4391
4392	macsec = macsec_priv(dev);
4393	secy = &macsec->secy;
4394	tx_sc = &secy->tx_sc;
4395
4396	switch (secy->key_len) {
4397	case MACSEC_GCM_AES_128_SAK_LEN:
4398		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
4399		break;
4400	case MACSEC_GCM_AES_256_SAK_LEN:
4401		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
4402		break;
4403	default:
4404		goto nla_put_failure;
4405	}
4406
4407	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
4408			IFLA_MACSEC_PAD) ||
4409	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
4410	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
4411			      csid, IFLA_MACSEC_PAD) ||
4412	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
4413	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
4414	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
4415	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
4416	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
4417	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
4418	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
4419	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
4420	    nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) ||
4421	    0)
4422		goto nla_put_failure;
4423
4424	if (secy->replay_protect) {
4425		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
4426			goto nla_put_failure;
4427	}
4428
4429	return 0;
4430
4431nla_put_failure:
4432	return -EMSGSIZE;
4433}
4434
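/* Illustrative aside: macsec_fill_info() derives the dumped cipher ID from
 * (key_len, xpn) instead of storing the ID that was configured, so a SecY
 * created with MACSEC_CIPHER_ID_GCM_AES_128 is reported back as the
 * equivalent MACSEC_DEFAULT_CIPHER_ID alias.
 */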
4435static struct rtnl_link_ops macsec_link_ops __read_mostly = {
4436	.kind		= "macsec",
4437	.priv_size	= sizeof(struct macsec_dev),
4438	.maxtype	= IFLA_MACSEC_MAX,
4439	.policy		= macsec_rtnl_policy,
4440	.setup		= macsec_setup,
4441	.validate	= macsec_validate_attr,
4442	.newlink	= macsec_newlink,
4443	.changelink	= macsec_changelink,
4444	.dellink	= macsec_dellink,
4445	.get_size	= macsec_get_size,
4446	.fill_info	= macsec_fill_info,
4447	.get_link_net	= macsec_get_link_net,
4448};
4449
4450static bool is_macsec_master(struct net_device *dev)
4451{
4452	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
4453}
4454
4455static int macsec_notify(struct notifier_block *this, unsigned long event,
4456			 void *ptr)
4457{
4458	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
4459	struct macsec_rxh_data *rxd;
4460	struct macsec_dev *m, *n;
4461	LIST_HEAD(head);
4462
4463	if (!is_macsec_master(real_dev))
4464		return NOTIFY_DONE;
4465
4466	rxd = macsec_data_rtnl(real_dev);
4467
4468	switch (event) {
4469	case NETDEV_DOWN:
4470	case NETDEV_UP:
4471	case NETDEV_CHANGE:
4472		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
4473			struct net_device *dev = m->secy.netdev;
4474
4475			netif_stacked_transfer_operstate(real_dev, dev);
4476		}
4477		break;
4478	case NETDEV_UNREGISTER:
4479		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
4480			macsec_common_dellink(m->secy.netdev, &head);
4481		}
4482
4483		netdev_rx_handler_unregister(real_dev);
4484		kfree(rxd);
4485
4486		unregister_netdevice_many(&head);
4487		break;
4488	case NETDEV_CHANGEMTU:
4489		list_for_each_entry(m, &rxd->secys, secys) {
4490			struct net_device *dev = m->secy.netdev;
4491			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
4492							    macsec_extra_len(true));
4493
4494			if (dev->mtu > mtu)
4495				dev_set_mtu(dev, mtu);
4496		}
4497		break;
4498	case NETDEV_FEAT_CHANGE:
4499		list_for_each_entry(m, &rxd->secys, secys) {
4500			macsec_inherit_tso_max(m->secy.netdev);
4501			netdev_update_features(m->secy.netdev);
4502		}
4503		break;
4504	}
4505
4506	return NOTIFY_OK;
4507}
4508
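/* Illustrative aside: for NETDEV_UNREGISTER the loop above queues every
 * stacked device on the local list head via macsec_common_dellink() and
 * tears them all down with one call to unregister_netdevice_many(),
 * batching the expensive synchronization instead of paying for it once per
 * SecY.
 */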
4509static struct notifier_block macsec_notifier = {
4510	.notifier_call = macsec_notify,
4511};
4512
4513static int __init macsec_init(void)
4514{
4515	int err;
4516
4517	pr_info("MACsec IEEE 802.1AE\n");
4518	err = register_netdevice_notifier(&macsec_notifier);
4519	if (err)
4520		return err;
4521
4522	err = rtnl_link_register(&macsec_link_ops);
4523	if (err)
4524		goto notifier;
4525
4526	err = genl_register_family(&macsec_fam);
4527	if (err)
4528		goto rtnl;
4529
4530	return 0;
4531
4532rtnl:
4533	rtnl_link_unregister(&macsec_link_ops);
4534notifier:
4535	unregister_netdevice_notifier(&macsec_notifier);
4536	return err;
4537}
4538
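/* Illustrative aside: macsec_init() registers in dependency order (the
 * netdevice notifier, then the rtnl link ops, then the generic netlink
 * family), and the error labels unwind in exactly the reverse order, the
 * standard goto-based cleanup idiom for kernel init paths; macsec_exit()
 * mirrors the same order.
 */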
4539static void __exit macsec_exit(void)
4540{
4541	genl_unregister_family(&macsec_fam);
4542	rtnl_link_unregister(&macsec_link_ops);
4543	unregister_netdevice_notifier(&macsec_notifier);
4544	rcu_barrier();
4545}
4546
4547module_init(macsec_init);
4548module_exit(macsec_exit);
4549
4550MODULE_ALIAS_RTNL_LINK("macsec");
4551MODULE_ALIAS_GENL_FAMILY("macsec");
4552
4553MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
4554MODULE_LICENSE("GPL v2");
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * drivers/net/macsec.c - MACsec device
   4 *
   5 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
   6 */
   7
   8#include <linux/types.h>
   9#include <linux/skbuff.h>
  10#include <linux/socket.h>
  11#include <linux/module.h>
  12#include <crypto/aead.h>
  13#include <linux/etherdevice.h>
  14#include <linux/netdevice.h>
  15#include <linux/rtnetlink.h>
  16#include <linux/refcount.h>
  17#include <net/genetlink.h>
  18#include <net/sock.h>
  19#include <net/gro_cells.h>
  20#include <net/macsec.h>
 
  21#include <linux/phy.h>
  22#include <linux/byteorder/generic.h>
  23#include <linux/if_arp.h>
  24
  25#include <uapi/linux/if_macsec.h>
  26
  27#define MACSEC_SCI_LEN 8
  28
  29/* SecTAG length = macsec_eth_header without the optional SCI */
  30#define MACSEC_TAG_LEN 6
  31
  32struct macsec_eth_header {
  33	struct ethhdr eth;
  34	/* SecTAG */
  35	u8  tci_an;
  36#if defined(__LITTLE_ENDIAN_BITFIELD)
  37	u8  short_length:6,
  38		  unused:2;
  39#elif defined(__BIG_ENDIAN_BITFIELD)
  40	u8        unused:2,
  41	    short_length:6;
  42#else
  43#error	"Please fix <asm/byteorder.h>"
  44#endif
  45	__be32 packet_number;
  46	u8 secure_channel_id[8]; /* optional */
  47} __packed;
  48
  49#define MACSEC_TCI_VERSION 0x80
  50#define MACSEC_TCI_ES      0x40 /* end station */
  51#define MACSEC_TCI_SC      0x20 /* SCI present */
  52#define MACSEC_TCI_SCB     0x10 /* epon */
  53#define MACSEC_TCI_E       0x08 /* encryption */
  54#define MACSEC_TCI_C       0x04 /* changed text */
  55#define MACSEC_AN_MASK     0x03 /* association number */
  56#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)
  57
  58/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
  59#define MIN_NON_SHORT_LEN 48
  60
  61#define GCM_AES_IV_LEN 12
  62#define DEFAULT_ICV_LEN 16
  63
  64#define for_each_rxsc(secy, sc)				\
  65	for (sc = rcu_dereference_bh(secy->rx_sc);	\
  66	     sc;					\
  67	     sc = rcu_dereference_bh(sc->next))
  68#define for_each_rxsc_rtnl(secy, sc)			\
  69	for (sc = rtnl_dereference(secy->rx_sc);	\
  70	     sc;					\
  71	     sc = rtnl_dereference(sc->next))
  72
  73#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))
  74
  75struct gcm_iv_xpn {
  76	union {
  77		u8 short_secure_channel_id[4];
  78		ssci_t ssci;
  79	};
  80	__be64 pn;
  81} __packed;
  82
  83struct gcm_iv {
  84	union {
  85		u8 secure_channel_id[8];
  86		sci_t sci;
  87	};
  88	__be32 pn;
  89};
  90
  91#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
  92
  93struct pcpu_secy_stats {
  94	struct macsec_dev_stats stats;
  95	struct u64_stats_sync syncp;
  96};
  97
  98/**
  99 * struct macsec_dev - private data
 100 * @secy: SecY config
 101 * @real_dev: pointer to underlying netdevice
 
 102 * @stats: MACsec device stats
 103 * @secys: linked list of SecY's on the underlying device
 104 * @gro_cells: pointer to the Generic Receive Offload cell
 105 * @offload: status of offloading on the MACsec device
 
 
 106 */
 107struct macsec_dev {
 108	struct macsec_secy secy;
 109	struct net_device *real_dev;
 
 110	struct pcpu_secy_stats __percpu *stats;
 111	struct list_head secys;
 112	struct gro_cells gro_cells;
 113	enum macsec_offload offload;
 
 114};
 115
 116/**
 117 * struct macsec_rxh_data - rx_handler private argument
 118 * @secys: linked list of SecY's on this underlying device
 119 */
 120struct macsec_rxh_data {
 121	struct list_head secys;
 122};
 123
 124static struct macsec_dev *macsec_priv(const struct net_device *dev)
 125{
 126	return (struct macsec_dev *)netdev_priv(dev);
 127}
 128
 129static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
 130{
 131	return rcu_dereference_bh(dev->rx_handler_data);
 132}
 133
 134static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
 135{
 136	return rtnl_dereference(dev->rx_handler_data);
 137}
 138
 139struct macsec_cb {
 140	struct aead_request *req;
 141	union {
 142		struct macsec_tx_sa *tx_sa;
 143		struct macsec_rx_sa *rx_sa;
 144	};
 145	u8 assoc_num;
 146	bool valid;
 147	bool has_sci;
 148};
 149
 150static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
 151{
 152	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);
 153
 154	if (!sa || !sa->active)
 155		return NULL;
 156
 157	if (!refcount_inc_not_zero(&sa->refcnt))
 158		return NULL;
 159
 160	return sa;
 161}
 162
 163static void free_rx_sc_rcu(struct rcu_head *head)
 164{
 165	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
 166
 167	free_percpu(rx_sc->stats);
 168	kfree(rx_sc);
 169}
 170
 171static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
 172{
 173	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
 174}
 175
 176static void macsec_rxsc_put(struct macsec_rx_sc *sc)
 177{
 178	if (refcount_dec_and_test(&sc->refcnt))
 179		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
 180}
 181
 182static void free_rxsa(struct rcu_head *head)
 183{
 184	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);
 185
 186	crypto_free_aead(sa->key.tfm);
 187	free_percpu(sa->stats);
 188	kfree(sa);
 189}
 190
 191static void macsec_rxsa_put(struct macsec_rx_sa *sa)
 192{
 193	if (refcount_dec_and_test(&sa->refcnt))
 194		call_rcu(&sa->rcu, free_rxsa);
 195}
 196
 197static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
 198{
 199	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);
 200
 201	if (!sa || !sa->active)
 202		return NULL;
 203
 204	if (!refcount_inc_not_zero(&sa->refcnt))
 205		return NULL;
 206
 207	return sa;
 208}
 209
 210static void free_txsa(struct rcu_head *head)
 211{
 212	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);
 213
 214	crypto_free_aead(sa->key.tfm);
 215	free_percpu(sa->stats);
 216	kfree(sa);
 217}
 218
 219static void macsec_txsa_put(struct macsec_tx_sa *sa)
 220{
 221	if (refcount_dec_and_test(&sa->refcnt))
 222		call_rcu(&sa->rcu, free_txsa);
 223}
 224
 225static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
 226{
 227	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
 228	return (struct macsec_cb *)skb->cb;
 229}
 230
 231#define MACSEC_PORT_ES (htons(0x0001))
 232#define MACSEC_PORT_SCB (0x0000)
 233#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
 234#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)
 235
 236#define MACSEC_GCM_AES_128_SAK_LEN 16
 237#define MACSEC_GCM_AES_256_SAK_LEN 32
 238
 239#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
 240#define DEFAULT_XPN false
 241#define DEFAULT_SEND_SCI true
 242#define DEFAULT_ENCRYPT false
 243#define DEFAULT_ENCODING_SA 0
 
 244
 245static bool send_sci(const struct macsec_secy *secy)
 246{
 247	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
 248
 249	return tx_sc->send_sci ||
 250		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
 251}
 252
 253static sci_t make_sci(u8 *addr, __be16 port)
 254{
 255	sci_t sci;
 256
 257	memcpy(&sci, addr, ETH_ALEN);
 258	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));
 259
 260	return sci;
 261}
 262
 263static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
 264{
 265	sci_t sci;
 266
 267	if (sci_present)
 268		memcpy(&sci, hdr->secure_channel_id,
 269		       sizeof(hdr->secure_channel_id));
 270	else
 271		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);
 272
 273	return sci;
 274}
 275
 276static unsigned int macsec_sectag_len(bool sci_present)
 277{
 278	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
 279}
 280
 281static unsigned int macsec_hdr_len(bool sci_present)
 282{
 283	return macsec_sectag_len(sci_present) + ETH_HLEN;
 284}
 285
 286static unsigned int macsec_extra_len(bool sci_present)
 287{
 288	return macsec_sectag_len(sci_present) + sizeof(__be16);
 289}
 290
 291/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
 292static void macsec_fill_sectag(struct macsec_eth_header *h,
 293			       const struct macsec_secy *secy, u32 pn,
 294			       bool sci_present)
 295{
 296	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
 297
 298	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
 299	h->eth.h_proto = htons(ETH_P_MACSEC);
 300
 301	if (sci_present) {
 302		h->tci_an |= MACSEC_TCI_SC;
 303		memcpy(&h->secure_channel_id, &secy->sci,
 304		       sizeof(h->secure_channel_id));
 305	} else {
 306		if (tx_sc->end_station)
 307			h->tci_an |= MACSEC_TCI_ES;
 308		if (tx_sc->scb)
 309			h->tci_an |= MACSEC_TCI_SCB;
 310	}
 311
 312	h->packet_number = htonl(pn);
 313
 314	/* with GCM, C/E clear for !encrypt, both set for encrypt */
 315	if (tx_sc->encrypt)
 316		h->tci_an |= MACSEC_TCI_CONFID;
 317	else if (secy->icv_len != DEFAULT_ICV_LEN)
 318		h->tci_an |= MACSEC_TCI_C;
 319
 320	h->tci_an |= tx_sc->encoding_sa;
 321}
 322
 323static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
 324{
 325	if (data_len < MIN_NON_SHORT_LEN)
 326		h->short_length = data_len;
 327}
 328
 329/* Checks if a MACsec interface is being offloaded to an hardware engine */
 330static bool macsec_is_offloaded(struct macsec_dev *macsec)
 331{
 332	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
 333	    macsec->offload == MACSEC_OFFLOAD_PHY)
 334		return true;
 335
 336	return false;
 337}
 338
 339/* Checks if underlying layers implement MACsec offloading functions. */
 340static bool macsec_check_offload(enum macsec_offload offload,
 341				 struct macsec_dev *macsec)
 342{
 343	if (!macsec || !macsec->real_dev)
 344		return false;
 345
 346	if (offload == MACSEC_OFFLOAD_PHY)
 347		return macsec->real_dev->phydev &&
 348		       macsec->real_dev->phydev->macsec_ops;
 349	else if (offload == MACSEC_OFFLOAD_MAC)
 350		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
 351		       macsec->real_dev->macsec_ops;
 352
 353	return false;
 354}
 355
 356static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
 357						 struct macsec_dev *macsec,
 358						 struct macsec_context *ctx)
 359{
 360	if (ctx) {
 361		memset(ctx, 0, sizeof(*ctx));
 362		ctx->offload = offload;
 363
 364		if (offload == MACSEC_OFFLOAD_PHY)
 365			ctx->phydev = macsec->real_dev->phydev;
 366		else if (offload == MACSEC_OFFLOAD_MAC)
 367			ctx->netdev = macsec->real_dev;
 368	}
 369
 370	if (offload == MACSEC_OFFLOAD_PHY)
 371		return macsec->real_dev->phydev->macsec_ops;
 372	else
 373		return macsec->real_dev->macsec_ops;
 374}
 375
 376/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 377 * context device reference if provided.
 378 */
 379static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
 380					       struct macsec_context *ctx)
 381{
 382	if (!macsec_check_offload(macsec->offload, macsec))
 383		return NULL;
 384
 385	return __macsec_get_ops(macsec->offload, macsec, ctx);
 386}
 387
 388/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
 389static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
 390{
 391	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
 392	int len = skb->len - 2 * ETH_ALEN;
 393	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;
 394
 395	/* a) It comprises at least 17 octets */
 396	if (skb->len <= 16)
 397		return false;
 398
 399	/* b) MACsec EtherType: already checked */
 400
 401	/* c) V bit is clear */
 402	if (h->tci_an & MACSEC_TCI_VERSION)
 403		return false;
 404
 405	/* d) ES or SCB => !SC */
 406	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
 407	    (h->tci_an & MACSEC_TCI_SC))
 408		return false;
 409
 410	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
 411	if (h->unused)
 412		return false;
 413
 414	/* rx.pn != 0 if not XPN (figure 10-5 with 802.11AEbw-2013 amendment) */
 415	if (!h->packet_number && !xpn)
 416		return false;
 417
 418	/* length check, f) g) h) i) */
 419	if (h->short_length)
 420		return len == extra_len + h->short_length;
 421	return len >= extra_len + MIN_NON_SHORT_LEN;
 422}
 423
 424#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
 425#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
 426
 427static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
 428			       salt_t salt)
 429{
 430	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;
 431
 432	gcm_iv->ssci = ssci ^ salt.ssci;
 433	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
 434}
 435
 436static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
 437{
 438	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;
 439
 440	gcm_iv->sci = sci;
 441	gcm_iv->pn = htonl(pn);
 442}
 443
 444static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
 445{
 446	return (struct macsec_eth_header *)skb_mac_header(skb);
 447}
 448
 449static sci_t dev_to_sci(struct net_device *dev, __be16 port)
 450{
 451	return make_sci(dev->dev_addr, port);
 452}
 453
 454static void __macsec_pn_wrapped(struct macsec_secy *secy,
 455				struct macsec_tx_sa *tx_sa)
 456{
 457	pr_debug("PN wrapped, transitioning to !oper\n");
 458	tx_sa->active = false;
 459	if (secy->protect_frames)
 460		secy->operational = false;
 461}
 462
 463void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
 464{
 465	spin_lock_bh(&tx_sa->lock);
 466	__macsec_pn_wrapped(secy, tx_sa);
 467	spin_unlock_bh(&tx_sa->lock);
 468}
 469EXPORT_SYMBOL_GPL(macsec_pn_wrapped);
 470
 471static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
 472			    struct macsec_secy *secy)
 473{
 474	pn_t pn;
 475
 476	spin_lock_bh(&tx_sa->lock);
 477
 478	pn = tx_sa->next_pn_halves;
 479	if (secy->xpn)
 480		tx_sa->next_pn++;
 481	else
 482		tx_sa->next_pn_halves.lower++;
 483
 484	if (tx_sa->next_pn == 0)
 485		__macsec_pn_wrapped(secy, tx_sa);
 486	spin_unlock_bh(&tx_sa->lock);
 487
 488	return pn;
 489}
 490
 491static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
 492{
 493	struct macsec_dev *macsec = netdev_priv(dev);
 494
 495	skb->dev = macsec->real_dev;
 496	skb_reset_mac_header(skb);
 497	skb->protocol = eth_hdr(skb)->h_proto;
 498}
 499
 
 
 
 
 
 
 
 
 
 500static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
 501			    struct macsec_tx_sa *tx_sa)
 502{
 
 503	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
 504
 505	u64_stats_update_begin(&txsc_stats->syncp);
 506	if (tx_sc->encrypt) {
 507		txsc_stats->stats.OutOctetsEncrypted += skb->len;
 508		txsc_stats->stats.OutPktsEncrypted++;
 509		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
 510	} else {
 511		txsc_stats->stats.OutOctetsProtected += skb->len;
 512		txsc_stats->stats.OutPktsProtected++;
 513		this_cpu_inc(tx_sa->stats->OutPktsProtected);
 514	}
 515	u64_stats_update_end(&txsc_stats->syncp);
 516}
 517
 518static void count_tx(struct net_device *dev, int ret, int len)
 519{
 520	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
 521		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
 522
 523		u64_stats_update_begin(&stats->syncp);
 524		stats->tx_packets++;
 525		stats->tx_bytes += len;
 526		u64_stats_update_end(&stats->syncp);
 527	}
 528}
 529
 530static void macsec_encrypt_done(struct crypto_async_request *base, int err)
 531{
 532	struct sk_buff *skb = base->data;
 533	struct net_device *dev = skb->dev;
 534	struct macsec_dev *macsec = macsec_priv(dev);
 535	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
 536	int len, ret;
 537
 538	aead_request_free(macsec_skb_cb(skb)->req);
 539
 540	rcu_read_lock_bh();
 
 
 
 541	macsec_encrypt_finish(skb, dev);
 542	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
 543	len = skb->len;
 544	ret = dev_queue_xmit(skb);
 545	count_tx(dev, ret, len);
 546	rcu_read_unlock_bh();
 547
 548	macsec_txsa_put(sa);
 549	dev_put(dev);
 550}
 551
 552static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
 553					     unsigned char **iv,
 554					     struct scatterlist **sg,
 555					     int num_frags)
 556{
 557	size_t size, iv_offset, sg_offset;
 558	struct aead_request *req;
 559	void *tmp;
 560
 561	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
 562	iv_offset = size;
 563	size += GCM_AES_IV_LEN;
 564
 565	size = ALIGN(size, __alignof__(struct scatterlist));
 566	sg_offset = size;
 567	size += sizeof(struct scatterlist) * num_frags;
 568
 569	tmp = kmalloc(size, GFP_ATOMIC);
 570	if (!tmp)
 571		return NULL;
 572
 573	*iv = (unsigned char *)(tmp + iv_offset);
 574	*sg = (struct scatterlist *)(tmp + sg_offset);
 575	req = tmp;
 576
 577	aead_request_set_tfm(req, tfm);
 578
 579	return req;
 580}
 581
 582static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 583				      struct net_device *dev)
 584{
 585	int ret;
 586	struct scatterlist *sg;
 587	struct sk_buff *trailer;
 588	unsigned char *iv;
 589	struct ethhdr *eth;
 590	struct macsec_eth_header *hh;
 591	size_t unprotected_len;
 592	struct aead_request *req;
 593	struct macsec_secy *secy;
 594	struct macsec_tx_sc *tx_sc;
 595	struct macsec_tx_sa *tx_sa;
 596	struct macsec_dev *macsec = macsec_priv(dev);
 597	bool sci_present;
 598	pn_t pn;
 599
 600	secy = &macsec->secy;
 601	tx_sc = &secy->tx_sc;
 602
 603	/* 10.5.1 TX SA assignment */
 604	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
 605	if (!tx_sa) {
 606		secy->operational = false;
 607		kfree_skb(skb);
 608		return ERR_PTR(-EINVAL);
 609	}
 610
 611	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
 612		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
 613		struct sk_buff *nskb = skb_copy_expand(skb,
 614						       MACSEC_NEEDED_HEADROOM,
 615						       MACSEC_NEEDED_TAILROOM,
 616						       GFP_ATOMIC);
 617		if (likely(nskb)) {
 618			consume_skb(skb);
 619			skb = nskb;
 620		} else {
 621			macsec_txsa_put(tx_sa);
 622			kfree_skb(skb);
 623			return ERR_PTR(-ENOMEM);
 624		}
 625	} else {
 626		skb = skb_unshare(skb, GFP_ATOMIC);
 627		if (!skb) {
 628			macsec_txsa_put(tx_sa);
 629			return ERR_PTR(-ENOMEM);
 630		}
 631	}
 632
 633	unprotected_len = skb->len;
 634	eth = eth_hdr(skb);
 635	sci_present = send_sci(secy);
 636	hh = skb_push(skb, macsec_extra_len(sci_present));
 637	memmove(hh, eth, 2 * ETH_ALEN);
 638
 639	pn = tx_sa_update_pn(tx_sa, secy);
 640	if (pn.full64 == 0) {
 641		macsec_txsa_put(tx_sa);
 642		kfree_skb(skb);
 643		return ERR_PTR(-ENOLINK);
 644	}
 645	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
 646	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
 647
 648	skb_put(skb, secy->icv_len);
 649
 650	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
 651		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
 652
 653		u64_stats_update_begin(&secy_stats->syncp);
 654		secy_stats->stats.OutPktsTooLong++;
 655		u64_stats_update_end(&secy_stats->syncp);
 656
 657		macsec_txsa_put(tx_sa);
 658		kfree_skb(skb);
 659		return ERR_PTR(-EINVAL);
 660	}
 661
 662	ret = skb_cow_data(skb, 0, &trailer);
 663	if (unlikely(ret < 0)) {
 664		macsec_txsa_put(tx_sa);
 665		kfree_skb(skb);
 666		return ERR_PTR(ret);
 667	}
 668
 669	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
 670	if (!req) {
 671		macsec_txsa_put(tx_sa);
 672		kfree_skb(skb);
 673		return ERR_PTR(-ENOMEM);
 674	}
 675
 676	if (secy->xpn)
 677		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
 678	else
 679		macsec_fill_iv(iv, secy->sci, pn.lower);
 680
 681	sg_init_table(sg, ret);
 682	ret = skb_to_sgvec(skb, sg, 0, skb->len);
 683	if (unlikely(ret < 0)) {
 684		aead_request_free(req);
 685		macsec_txsa_put(tx_sa);
 686		kfree_skb(skb);
 687		return ERR_PTR(ret);
 688	}
 689
 690	if (tx_sc->encrypt) {
 691		int len = skb->len - macsec_hdr_len(sci_present) -
 692			  secy->icv_len;
 693		aead_request_set_crypt(req, sg, sg, len, iv);
 694		aead_request_set_ad(req, macsec_hdr_len(sci_present));
 695	} else {
 696		aead_request_set_crypt(req, sg, sg, 0, iv);
 697		aead_request_set_ad(req, skb->len - secy->icv_len);
 698	}
 699
 700	macsec_skb_cb(skb)->req = req;
 701	macsec_skb_cb(skb)->tx_sa = tx_sa;
 
 702	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
 703
 704	dev_hold(skb->dev);
 705	ret = crypto_aead_encrypt(req);
 706	if (ret == -EINPROGRESS) {
 707		return ERR_PTR(ret);
 708	} else if (ret != 0) {
 709		dev_put(skb->dev);
 710		kfree_skb(skb);
 711		aead_request_free(req);
 712		macsec_txsa_put(tx_sa);
 713		return ERR_PTR(-EINVAL);
 714	}
 715
 716	dev_put(skb->dev);
 717	aead_request_free(req);
 718	macsec_txsa_put(tx_sa);
 719
 720	return skb;
 721}
 722
 723static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
 724{
 725	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
 726	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
 727	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
 728	u32 lowest_pn = 0;
 729
 730	spin_lock(&rx_sa->lock);
 731	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
 732		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;
 733
 734	/* Now perform replay protection check again
 735	 * (see IEEE 802.1AE-2006 figure 10-5)
 736	 */
 737	if (secy->replay_protect && pn < lowest_pn &&
 738	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
 739		spin_unlock(&rx_sa->lock);
 740		u64_stats_update_begin(&rxsc_stats->syncp);
 741		rxsc_stats->stats.InPktsLate++;
 742		u64_stats_update_end(&rxsc_stats->syncp);
 
 743		return false;
 744	}
 745
 746	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
 
 747		u64_stats_update_begin(&rxsc_stats->syncp);
 748		if (hdr->tci_an & MACSEC_TCI_E)
 749			rxsc_stats->stats.InOctetsDecrypted += skb->len;
 750		else
 751			rxsc_stats->stats.InOctetsValidated += skb->len;
 752		u64_stats_update_end(&rxsc_stats->syncp);
 753	}
 754
 755	if (!macsec_skb_cb(skb)->valid) {
 756		spin_unlock(&rx_sa->lock);
 757
 758		/* 10.6.5 */
 759		if (hdr->tci_an & MACSEC_TCI_C ||
 760		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
 761			u64_stats_update_begin(&rxsc_stats->syncp);
 762			rxsc_stats->stats.InPktsNotValid++;
 763			u64_stats_update_end(&rxsc_stats->syncp);
 
 
 764			return false;
 765		}
 766
 767		u64_stats_update_begin(&rxsc_stats->syncp);
 768		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
 769			rxsc_stats->stats.InPktsInvalid++;
 770			this_cpu_inc(rx_sa->stats->InPktsInvalid);
 771		} else if (pn < lowest_pn) {
 772			rxsc_stats->stats.InPktsDelayed++;
 773		} else {
 774			rxsc_stats->stats.InPktsUnchecked++;
 775		}
 776		u64_stats_update_end(&rxsc_stats->syncp);
 777	} else {
 778		u64_stats_update_begin(&rxsc_stats->syncp);
 779		if (pn < lowest_pn) {
 780			rxsc_stats->stats.InPktsDelayed++;
 781		} else {
 782			rxsc_stats->stats.InPktsOK++;
 783			this_cpu_inc(rx_sa->stats->InPktsOK);
 784		}
 785		u64_stats_update_end(&rxsc_stats->syncp);
 786
 787		// Instead of "pn >=" - to support pn overflow in xpn
 788		if (pn + 1 > rx_sa->next_pn_halves.lower) {
 789			rx_sa->next_pn_halves.lower = pn + 1;
 790		} else if (secy->xpn &&
 791			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
 792			rx_sa->next_pn_halves.upper++;
 793			rx_sa->next_pn_halves.lower = pn + 1;
 794		}
 795
 796		spin_unlock(&rx_sa->lock);
 797	}
 798
 799	return true;
 800}
 801
 802static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
 803{
 804	skb->pkt_type = PACKET_HOST;
 805	skb->protocol = eth_type_trans(skb, dev);
 806
 807	skb_reset_network_header(skb);
 808	if (!skb_transport_header_was_set(skb))
 809		skb_reset_transport_header(skb);
 810	skb_reset_mac_len(skb);
 811}
 812
 813static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
 814{
 815	skb->ip_summed = CHECKSUM_NONE;
 816	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
 817	skb_pull(skb, hdr_len);
 818	pskb_trim_unique(skb, skb->len - icv_len);
 819}
 820
 821static void count_rx(struct net_device *dev, int len)
 822{
 823	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
 824
 825	u64_stats_update_begin(&stats->syncp);
 826	stats->rx_packets++;
 827	stats->rx_bytes += len;
 828	u64_stats_update_end(&stats->syncp);
 829}
 830
 831static void macsec_decrypt_done(struct crypto_async_request *base, int err)
 832{
 833	struct sk_buff *skb = base->data;
 834	struct net_device *dev = skb->dev;
 835	struct macsec_dev *macsec = macsec_priv(dev);
 836	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
 837	struct macsec_rx_sc *rx_sc = rx_sa->sc;
 838	int len;
 839	u32 pn;
 840
 841	aead_request_free(macsec_skb_cb(skb)->req);
 842
 843	if (!err)
 844		macsec_skb_cb(skb)->valid = true;
 845
 846	rcu_read_lock_bh();
 847	pn = ntohl(macsec_ethhdr(skb)->packet_number);
 848	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
 849		rcu_read_unlock_bh();
 850		kfree_skb(skb);
 851		goto out;
 852	}
 853
 854	macsec_finalize_skb(skb, macsec->secy.icv_len,
 855			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
 
 856	macsec_reset_skb(skb, macsec->secy.netdev);
 857
 858	len = skb->len;
 859	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
 860		count_rx(dev, len);
 861
 862	rcu_read_unlock_bh();
 863
 864out:
 865	macsec_rxsa_put(rx_sa);
 866	macsec_rxsc_put(rx_sc);
 867	dev_put(dev);
 868}
 869
 870static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 871				      struct net_device *dev,
 872				      struct macsec_rx_sa *rx_sa,
 873				      sci_t sci,
 874				      struct macsec_secy *secy)
 875{
 876	int ret;
 877	struct scatterlist *sg;
 878	struct sk_buff *trailer;
 879	unsigned char *iv;
 880	struct aead_request *req;
 881	struct macsec_eth_header *hdr;
 882	u32 hdr_pn;
 883	u16 icv_len = secy->icv_len;
 884
 885	macsec_skb_cb(skb)->valid = false;
 886	skb = skb_share_check(skb, GFP_ATOMIC);
 887	if (!skb)
 888		return ERR_PTR(-ENOMEM);
 889
 890	ret = skb_cow_data(skb, 0, &trailer);
 891	if (unlikely(ret < 0)) {
 892		kfree_skb(skb);
 893		return ERR_PTR(ret);
 894	}
 895	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
 896	if (!req) {
 897		kfree_skb(skb);
 898		return ERR_PTR(-ENOMEM);
 899	}
 900
 901	hdr = (struct macsec_eth_header *)skb->data;
 902	hdr_pn = ntohl(hdr->packet_number);
 903
 904	if (secy->xpn) {
 905		pn_t recovered_pn = rx_sa->next_pn_halves;
 906
 907		recovered_pn.lower = hdr_pn;
 908		if (hdr_pn < rx_sa->next_pn_halves.lower &&
 909		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
 910			recovered_pn.upper++;
 911
 912		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
 913				   rx_sa->key.salt);
 914	} else {
 915		macsec_fill_iv(iv, sci, hdr_pn);
 916	}
 917
 918	sg_init_table(sg, ret);
 919	ret = skb_to_sgvec(skb, sg, 0, skb->len);
 920	if (unlikely(ret < 0)) {
 921		aead_request_free(req);
 922		kfree_skb(skb);
 923		return ERR_PTR(ret);
 924	}
 925
 926	if (hdr->tci_an & MACSEC_TCI_E) {
 927		/* confidentiality: ethernet + macsec header
 928		 * authenticated, encrypted payload
 929		 */
 930		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);
 931
 932		aead_request_set_crypt(req, sg, sg, len, iv);
 933		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
 934		skb = skb_unshare(skb, GFP_ATOMIC);
 935		if (!skb) {
 936			aead_request_free(req);
 937			return ERR_PTR(-ENOMEM);
 938		}
 939	} else {
 940		/* integrity only: all headers + data authenticated */
 941		aead_request_set_crypt(req, sg, sg, icv_len, iv);
 942		aead_request_set_ad(req, skb->len - icv_len);
 943	}
 944
 945	macsec_skb_cb(skb)->req = req;
 946	skb->dev = dev;
 947	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);
 948
 949	dev_hold(dev);
 950	ret = crypto_aead_decrypt(req);
 951	if (ret == -EINPROGRESS) {
 952		return ERR_PTR(ret);
 953	} else if (ret != 0) {
 954		/* decryption/authentication failed
 955		 * 10.6 if validateFrames is disabled, deliver anyway
 956		 */
 957		if (ret != -EBADMSG) {
 958			kfree_skb(skb);
 959			skb = ERR_PTR(ret);
 960		}
 961	} else {
 962		macsec_skb_cb(skb)->valid = true;
 963	}
 964	dev_put(dev);
 965
 966	aead_request_free(req);
 967
 968	return skb;
 969}
 970
 971static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
 972{
 973	struct macsec_rx_sc *rx_sc;
 974
 975	for_each_rxsc(secy, rx_sc) {
 976		if (rx_sc->sci == sci)
 977			return rx_sc;
 978	}
 979
 980	return NULL;
 981}
 982
 983static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
 984{
 985	struct macsec_rx_sc *rx_sc;
 986
 987	for_each_rxsc_rtnl(secy, rx_sc) {
 988		if (rx_sc->sci == sci)
 989			return rx_sc;
 990	}
 991
 992	return NULL;
 993}
 994
 995static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
 996{
 997	/* Deliver to the uncontrolled port by default */
 998	enum rx_handler_result ret = RX_HANDLER_PASS;
 999	struct ethhdr *hdr = eth_hdr(skb);
 
1000	struct macsec_rxh_data *rxd;
1001	struct macsec_dev *macsec;
 
1002
1003	rcu_read_lock();
1004	rxd = macsec_data_rcu(skb->dev);
 
 
1005
1006	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1007		struct sk_buff *nskb;
1008		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
1009		struct net_device *ndev = macsec->secy.netdev;
1010
1011		/* If h/w offloading is enabled, HW decodes frames and strips
1012		 * the SecTAG, so we have to deduce which port to deliver to.
1013		 */
1014		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1015			if (ether_addr_equal_64bits(hdr->h_dest,
1016						    ndev->dev_addr)) {
1017				/* exact match, divert skb to this port */
1018				skb->dev = ndev;
1019				skb->pkt_type = PACKET_HOST;
1020				ret = RX_HANDLER_ANOTHER;
1021				goto out;
1022			} else if (is_multicast_ether_addr_64bits(
1023					   hdr->h_dest)) {
1024				/* multicast frame, deliver on this port too */
1025				nskb = skb_clone(skb, GFP_ATOMIC);
1026				if (!nskb)
1027					break;
1028
1029				nskb->dev = ndev;
1030				if (ether_addr_equal_64bits(hdr->h_dest,
1031							    ndev->broadcast))
1032					nskb->pkt_type = PACKET_BROADCAST;
1033				else
1034					nskb->pkt_type = PACKET_MULTICAST;
1035
1036				netif_rx(nskb);
 
 
 
 
 
1037			}
 
1038			continue;
1039		}
1040
1041		/* 10.6 If the management control validateFrames is not
1042		 * Strict, frames without a SecTAG are received, counted, and
1043		 * delivered to the Controlled Port
1044		 */
1045		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
1046			u64_stats_update_begin(&secy_stats->syncp);
1047			secy_stats->stats.InPktsNoTag++;
1048			u64_stats_update_end(&secy_stats->syncp);
 
1049			continue;
1050		}
1051
1052		/* deliver on this port */
1053		nskb = skb_clone(skb, GFP_ATOMIC);
1054		if (!nskb)
1055			break;
1056
1057		nskb->dev = ndev;
1058
1059		if (netif_rx(nskb) == NET_RX_SUCCESS) {
1060			u64_stats_update_begin(&secy_stats->syncp);
1061			secy_stats->stats.InPktsUntagged++;
1062			u64_stats_update_end(&secy_stats->syncp);
1063		}
1064	}
1065
1066out:
1067	rcu_read_unlock();
1068	return ret;
1069}
1070
1071static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
1072{
1073	struct sk_buff *skb = *pskb;
1074	struct net_device *dev = skb->dev;
1075	struct macsec_eth_header *hdr;
1076	struct macsec_secy *secy = NULL;
1077	struct macsec_rx_sc *rx_sc;
1078	struct macsec_rx_sa *rx_sa;
1079	struct macsec_rxh_data *rxd;
1080	struct macsec_dev *macsec;
1081	unsigned int len;
1082	sci_t sci;
1083	u32 hdr_pn;
1084	bool cbit;
1085	struct pcpu_rx_sc_stats *rxsc_stats;
1086	struct pcpu_secy_stats *secy_stats;
1087	bool pulled_sci;
1088	int ret;
1089
1090	if (skb_headroom(skb) < ETH_HLEN)
1091		goto drop_direct;
1092
1093	hdr = macsec_ethhdr(skb);
1094	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
1095		return handle_not_macsec(skb);
1096
1097	skb = skb_unshare(skb, GFP_ATOMIC);
1098	*pskb = skb;
1099	if (!skb)
1100		return RX_HANDLER_CONSUMED;
1101
1102	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
1103	if (!pulled_sci) {
1104		if (!pskb_may_pull(skb, macsec_extra_len(false)))
1105			goto drop_direct;
1106	}
1107
1108	hdr = macsec_ethhdr(skb);
1109
1110	/* Frames with a SecTAG that has the TCI E bit set but the C
1111	 * bit clear are discarded, as this reserved encoding is used
1112	 * to identify frames with a SecTAG that are not to be
1113	 * delivered to the Controlled Port.
1114	 */
1115	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
1116		return RX_HANDLER_PASS;
1117
1118	/* now, pull the extra length */
1119	if (hdr->tci_an & MACSEC_TCI_SC) {
1120		if (!pulled_sci)
1121			goto drop_direct;
1122	}
1123
1124	/* ethernet header is part of crypto processing */
1125	skb_push(skb, ETH_HLEN);
1126
1127	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
1128	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
1129	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);
1130
1131	rcu_read_lock();
1132	rxd = macsec_data_rcu(skb->dev);
1133
1134	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1135		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
1136
1137		sc = sc ? macsec_rxsc_get(sc) : NULL;
1138
1139		if (sc) {
1140			secy = &macsec->secy;
1141			rx_sc = sc;
1142			break;
1143		}
1144	}
1145
1146	if (!secy)
1147		goto nosci;
1148
1149	dev = secy->netdev;
1150	macsec = macsec_priv(dev);
1151	secy_stats = this_cpu_ptr(macsec->stats);
1152	rxsc_stats = this_cpu_ptr(rx_sc->stats);
1153
1154	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
1155		u64_stats_update_begin(&secy_stats->syncp);
1156		secy_stats->stats.InPktsBadTag++;
1157		u64_stats_update_end(&secy_stats->syncp);
 
1158		goto drop_nosa;
1159	}
1160
1161	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
1162	if (!rx_sa) {
1163		/* 10.6.1 if the SA is not in use */
1164
1165		/* If validateFrames is Strict or the C bit in the
1166		 * SecTAG is set, discard
1167		 */
1168		if (hdr->tci_an & MACSEC_TCI_C ||
1169		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
1170			u64_stats_update_begin(&rxsc_stats->syncp);
1171			rxsc_stats->stats.InPktsNotUsingSA++;
1172			u64_stats_update_end(&rxsc_stats->syncp);
 
1173			goto drop_nosa;
1174		}
1175
1176		/* not Strict, the frame (with the SecTAG and ICV
1177		 * removed) is delivered to the Controlled Port.
1178		 */
1179		u64_stats_update_begin(&rxsc_stats->syncp);
1180		rxsc_stats->stats.InPktsUnusedSA++;
1181		u64_stats_update_end(&rxsc_stats->syncp);
1182		goto deliver;
1183	}
1184
1185	/* First, PN check to avoid decrypting obviously wrong packets */
1186	hdr_pn = ntohl(hdr->packet_number);
1187	if (secy->replay_protect) {
1188		bool late;
1189
1190		spin_lock(&rx_sa->lock);
1191		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
1192		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);
1193
1194		if (secy->xpn)
1195			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
1196		spin_unlock(&rx_sa->lock);
1197
1198		if (late) {
1199			u64_stats_update_begin(&rxsc_stats->syncp);
1200			rxsc_stats->stats.InPktsLate++;
1201			u64_stats_update_end(&rxsc_stats->syncp);
 
1202			goto drop;
1203		}
1204	}
1205
1206	macsec_skb_cb(skb)->rx_sa = rx_sa;
1207
1208	/* Disabled && !changed text => skip validation */
1209	if (hdr->tci_an & MACSEC_TCI_C ||
1210	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
1211		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
1212
1213	if (IS_ERR(skb)) {
1214		/* the decrypt callback needs the reference */
1215		if (PTR_ERR(skb) != -EINPROGRESS) {
1216			macsec_rxsa_put(rx_sa);
1217			macsec_rxsc_put(rx_sc);
1218		}
1219		rcu_read_unlock();
1220		*pskb = NULL;
1221		return RX_HANDLER_CONSUMED;
1222	}
1223
1224	if (!macsec_post_decrypt(skb, secy, hdr_pn))
1225		goto drop;
1226
1227deliver:
1228	macsec_finalize_skb(skb, secy->icv_len,
1229			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
 
1230	macsec_reset_skb(skb, secy->netdev);
1231
1232	if (rx_sa)
1233		macsec_rxsa_put(rx_sa);
1234	macsec_rxsc_put(rx_sc);
1235
1236	skb_orphan(skb);
1237	len = skb->len;
1238	ret = gro_cells_receive(&macsec->gro_cells, skb);
1239	if (ret == NET_RX_SUCCESS)
1240		count_rx(dev, len);
1241	else
1242		macsec->secy.netdev->stats.rx_dropped++;
1243
1244	rcu_read_unlock();
1245
1246	*pskb = NULL;
1247	return RX_HANDLER_CONSUMED;
1248
1249drop:
1250	macsec_rxsa_put(rx_sa);
1251drop_nosa:
1252	macsec_rxsc_put(rx_sc);
1253	rcu_read_unlock();
1254drop_direct:
1255	kfree_skb(skb);
1256	*pskb = NULL;
1257	return RX_HANDLER_CONSUMED;
1258
1259nosci:
1260	/* 10.6.1 if the SC is not found */
1261	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
1262	if (!cbit)
1263		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
1264				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
1265
1266	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1267		struct sk_buff *nskb;
1268
1269		secy_stats = this_cpu_ptr(macsec->stats);
1270
1271		/* If validateFrames is Strict or the C bit in the
1272		 * SecTAG is set, discard
1273		 */
1274		if (cbit ||
1275		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
1276			u64_stats_update_begin(&secy_stats->syncp);
1277			secy_stats->stats.InPktsNoSCI++;
1278			u64_stats_update_end(&secy_stats->syncp);
 
1279			continue;
1280		}
1281
1282		/* not strict, the frame (with the SecTAG and ICV
1283		 * removed) is delivered to the Controlled Port.
1284		 */
1285		nskb = skb_clone(skb, GFP_ATOMIC);
1286		if (!nskb)
1287			break;
1288
1289		macsec_reset_skb(nskb, macsec->secy.netdev);
1290
1291		ret = netif_rx(nskb);
1292		if (ret == NET_RX_SUCCESS) {
1293			u64_stats_update_begin(&secy_stats->syncp);
1294			secy_stats->stats.InPktsUnknownSCI++;
1295			u64_stats_update_end(&secy_stats->syncp);
1296		} else {
1297			macsec->secy.netdev->stats.rx_dropped++;
1298		}
1299	}
1300
1301	rcu_read_unlock();
1302	*pskb = skb;
1303	return RX_HANDLER_PASS;
1304}
1305
1306static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
1307{
1308	struct crypto_aead *tfm;
1309	int ret;
1310
1311	/* Pick a sync gcm(aes) cipher to ensure order is preserved. */
1312	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
1313
1314	if (IS_ERR(tfm))
1315		return tfm;
1316
1317	ret = crypto_aead_setkey(tfm, key, key_len);
1318	if (ret < 0)
1319		goto fail;
1320
1321	ret = crypto_aead_setauthsize(tfm, icv_len);
1322	if (ret < 0)
1323		goto fail;
1324
1325	return tfm;
1326fail:
1327	crypto_free_aead(tfm);
1328	return ERR_PTR(ret);
1329}
1330
1331static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
1332		      int icv_len)
1333{
1334	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
1335	if (!rx_sa->stats)
1336		return -ENOMEM;
1337
1338	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
1339	if (IS_ERR(rx_sa->key.tfm)) {
1340		free_percpu(rx_sa->stats);
1341		return PTR_ERR(rx_sa->key.tfm);
1342	}
1343
1344	rx_sa->ssci = MACSEC_UNDEF_SSCI;
1345	rx_sa->active = false;
1346	rx_sa->next_pn = 1;
1347	refcount_set(&rx_sa->refcnt, 1);
1348	spin_lock_init(&rx_sa->lock);
1349
1350	return 0;
1351}
1352
1353static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
1354{
1355	rx_sa->active = false;
1356
1357	macsec_rxsa_put(rx_sa);
1358}
1359
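/* Drop every SA of the SC and then the SC's own reference; called under
 * RTNL once the SC has been unlinked from its SecY.
 */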
1360static void free_rx_sc(struct macsec_rx_sc *rx_sc)
1361{
1362	int i;
1363
1364	for (i = 0; i < MACSEC_NUM_AN; i++) {
1365		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);
1366
1367		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
1368		if (sa)
1369			clear_rx_sa(sa);
1370	}
1371
1372	macsec_rxsc_put(rx_sc);
1373}
1374
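/* Unlink the RX SC with the matching SCI from the SecY's list (RTNL held)
 * and return it; the caller keeps the reference and is responsible for
 * freeing the SC.
 */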
1375static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
1376{
1377	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;
1378
1379	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
1380	     rx_sc;
1381	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
1382		if (rx_sc->sci == sci) {
1383			if (rx_sc->active)
1384				secy->n_rx_sc--;
1385			rcu_assign_pointer(*rx_scp, rx_sc->next);
1386			return rx_sc;
1387		}
1388	}
1389
1390	return NULL;
1391}
1392
1393static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
1394{
1395	struct macsec_rx_sc *rx_sc;
1396	struct macsec_dev *macsec;
1397	struct net_device *real_dev = macsec_priv(dev)->real_dev;
1398	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
1399	struct macsec_secy *secy;
1400
1401	list_for_each_entry(macsec, &rxd->secys, secys) {
1402		if (find_rx_sc_rtnl(&macsec->secy, sci))
1403			return ERR_PTR(-EEXIST);
1404	}
1405
1406	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
1407	if (!rx_sc)
1408		return ERR_PTR(-ENOMEM);
1409
1410	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
1411	if (!rx_sc->stats) {
1412		kfree(rx_sc);
1413		return ERR_PTR(-ENOMEM);
1414	}
1415
1416	rx_sc->sci = sci;
1417	rx_sc->active = true;
1418	refcount_set(&rx_sc->refcnt, 1);
1419
1420	secy = &macsec_priv(dev)->secy;
1421	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
1422	rcu_assign_pointer(secy->rx_sc, rx_sc);
1423
1424	if (rx_sc->active)
1425		secy->n_rx_sc++;
1426
1427	return rx_sc;
1428}
1429
1430static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
1431		      int icv_len)
1432{
1433	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
1434	if (!tx_sa->stats)
1435		return -ENOMEM;
1436
1437	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
1438	if (IS_ERR(tx_sa->key.tfm)) {
1439		free_percpu(tx_sa->stats);
1440		return PTR_ERR(tx_sa->key.tfm);
1441	}
1442
1443	tx_sa->ssci = MACSEC_UNDEF_SSCI;
1444	tx_sa->active = false;
1445	refcount_set(&tx_sa->refcnt, 1);
1446	spin_lock_init(&tx_sa->lock);
1447
1448	return 0;
1449}
1450
1451static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
1452{
1453	tx_sa->active = false;
1454
1455	macsec_txsa_put(tx_sa);
1456}
1457
1458static struct genl_family macsec_fam;
1459
1460static struct net_device *get_dev_from_nl(struct net *net,
1461					  struct nlattr **attrs)
1462{
1463	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
1464	struct net_device *dev;
1465
1466	dev = __dev_get_by_index(net, ifindex);
1467	if (!dev)
1468		return ERR_PTR(-ENODEV);
1469
1470	if (!netif_is_macsec(dev))
1471		return ERR_PTR(-ENODEV);
1472
1473	return dev;
1474}
1475
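/* sci_t and ssci_t are __bitwise types; the __force casts below move them
 * through the untyped netlink payload without tripping sparse.
 */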
1476static enum macsec_offload nla_get_offload(const struct nlattr *nla)
1477{
1478	return (__force enum macsec_offload)nla_get_u8(nla);
1479}
1480
1481static sci_t nla_get_sci(const struct nlattr *nla)
1482{
1483	return (__force sci_t)nla_get_u64(nla);
1484}
1485
1486static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
1487		       int padattr)
1488{
1489	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
1490}
1491
1492static ssci_t nla_get_ssci(const struct nlattr *nla)
1493{
1494	return (__force ssci_t)nla_get_u32(nla);
1495}
1496
1497static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
1498{
1499	return nla_put_u32(skb, attrtype, (__force u32)value);
1500}
1501
1502static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
1503					     struct nlattr **attrs,
1504					     struct nlattr **tb_sa,
1505					     struct net_device **devp,
1506					     struct macsec_secy **secyp,
1507					     struct macsec_tx_sc **scp,
1508					     u8 *assoc_num)
1509{
1510	struct net_device *dev;
1511	struct macsec_secy *secy;
1512	struct macsec_tx_sc *tx_sc;
1513	struct macsec_tx_sa *tx_sa;
1514
1515	if (!tb_sa[MACSEC_SA_ATTR_AN])
1516		return ERR_PTR(-EINVAL);
1517
1518	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1519
1520	dev = get_dev_from_nl(net, attrs);
1521	if (IS_ERR(dev))
1522		return ERR_CAST(dev);
1523
1524	if (*assoc_num >= MACSEC_NUM_AN)
1525		return ERR_PTR(-EINVAL);
1526
1527	secy = &macsec_priv(dev)->secy;
1528	tx_sc = &secy->tx_sc;
1529
1530	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
1531	if (!tx_sa)
1532		return ERR_PTR(-ENODEV);
1533
1534	*devp = dev;
1535	*scp = tx_sc;
1536	*secyp = secy;
1537	return tx_sa;
1538}
1539
1540static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
1541					     struct nlattr **attrs,
1542					     struct nlattr **tb_rxsc,
1543					     struct net_device **devp,
1544					     struct macsec_secy **secyp)
1545{
1546	struct net_device *dev;
1547	struct macsec_secy *secy;
1548	struct macsec_rx_sc *rx_sc;
1549	sci_t sci;
1550
1551	dev = get_dev_from_nl(net, attrs);
1552	if (IS_ERR(dev))
1553		return ERR_CAST(dev);
1554
1555	secy = &macsec_priv(dev)->secy;
1556
1557	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
1558		return ERR_PTR(-EINVAL);
1559
1560	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1561	rx_sc = find_rx_sc_rtnl(secy, sci);
1562	if (!rx_sc)
1563		return ERR_PTR(-ENODEV);
1564
1565	*secyp = secy;
1566	*devp = dev;
1567
1568	return rx_sc;
1569}
1570
1571static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
1572					     struct nlattr **attrs,
1573					     struct nlattr **tb_rxsc,
1574					     struct nlattr **tb_sa,
1575					     struct net_device **devp,
1576					     struct macsec_secy **secyp,
1577					     struct macsec_rx_sc **scp,
1578					     u8 *assoc_num)
1579{
1580	struct macsec_rx_sc *rx_sc;
1581	struct macsec_rx_sa *rx_sa;
1582
1583	if (!tb_sa[MACSEC_SA_ATTR_AN])
1584		return ERR_PTR(-EINVAL);
1585
1586	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1587	if (*assoc_num >= MACSEC_NUM_AN)
1588		return ERR_PTR(-EINVAL);
1589
1590	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
1591	if (IS_ERR(rx_sc))
1592		return ERR_CAST(rx_sc);
1593
1594	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
1595	if (!rx_sa)
1596		return ERR_PTR(-ENODEV);
1597
1598	*scp = rx_sc;
1599	return rx_sa;
1600}
1601
1602static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
1603	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
1604	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
1605	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
1606	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
1607};
1608
1609static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
1610	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
1611	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
1612};
1613
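/* MACSEC_SA_ATTR_PN is variable-length: 4 bytes for 32-bit PNs, 8 bytes
 * under XPN. The policy only enforces the minimum; the exact length is
 * checked against the SecY's mode when the attribute is used.
 */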
1614static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
1615	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
1616	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
1617	[MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
1618	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
1619				   .len = MACSEC_KEYID_LEN, },
1620	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
1621				 .len = MACSEC_MAX_KEY_LEN, },
1622	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
1623	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
1624				  .len = MACSEC_SALT_LEN, },
1625};
1626
1627static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
1628	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
1629};
1630
1631/* Offloads an operation to a device driver */
1632static int macsec_offload(int (* const func)(struct macsec_context *),
1633			  struct macsec_context *ctx)
1634{
1635	int ret;
1636
1637	if (unlikely(!func))
1638		return 0;
1639
1640	if (ctx->offload == MACSEC_OFFLOAD_PHY)
1641		mutex_lock(&ctx->phydev->lock);
1642
1643	/* Phase I: prepare. The driver should fail here if there are going to be
1644	 * issues in the commit phase.
1645	 */
1646	ctx->prepare = true;
1647	ret = (*func)(ctx);
1648	if (ret)
1649		goto phy_unlock;
1650
1651	/* Phase II: commit. This step cannot fail. */
1652	ctx->prepare = false;
1653	ret = (*func)(ctx);
1654	/* This should never happen: commit is not allowed to fail */
1655	if (unlikely(ret))
1656		WARN(1, "MACsec offloading commit failed (%d)\n", ret);
1657
1658phy_unlock:
1659	if (ctx->offload == MACSEC_OFFLOAD_PHY)
1660		mutex_unlock(&ctx->phydev->lock);
1661
1662	return ret;
1663}
1664
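/* A minimal sketch of the driver side of this contract (hypothetical
 * driver, illustrative names -- not a real mdo_* implementation):
 *
 *	static int foo_mdo_add_rxsa(struct macsec_context *ctx)
 *	{
 *		if (ctx->prepare)		// phase I: only validate
 *			return foo_hw_rxsa_fits(ctx) ? 0 : -ENOSPC;
 *		foo_hw_write_rxsa(ctx);		// phase II: must not fail
 *		return 0;
 *	}
 */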
1665static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
1666{
1667	if (!attrs[MACSEC_ATTR_SA_CONFIG])
1668		return -EINVAL;
1669
1670	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
1671		return -EINVAL;
1672
1673	return 0;
1674}
1675
1676static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
1677{
1678	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
1679		return -EINVAL;
1680
1681	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
1682		return -EINVAL;
1683
1684	return 0;
1685}
1686
1687static bool validate_add_rxsa(struct nlattr **attrs)
1688{
1689	if (!attrs[MACSEC_SA_ATTR_AN] ||
1690	    !attrs[MACSEC_SA_ATTR_KEY] ||
1691	    !attrs[MACSEC_SA_ATTR_KEYID])
1692		return false;
1693
1694	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1695		return false;
1696
1697	if (attrs[MACSEC_SA_ATTR_PN] &&
1698	    *(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0)
1699		return false;
1700
1701	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1702		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1703			return false;
1704	}
1705
1706	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1707		return false;
1708
1709	return true;
1710}
1711
1712static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
1713{
1714	struct net_device *dev;
1715	struct nlattr **attrs = info->attrs;
1716	struct macsec_secy *secy;
1717	struct macsec_rx_sc *rx_sc;
1718	struct macsec_rx_sa *rx_sa;
1719	unsigned char assoc_num;
1720	int pn_len;
1721	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1722	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1723	int err;
1724
1725	if (!attrs[MACSEC_ATTR_IFINDEX])
1726		return -EINVAL;
1727
1728	if (parse_sa_config(attrs, tb_sa))
1729		return -EINVAL;
1730
1731	if (parse_rxsc_config(attrs, tb_rxsc))
1732		return -EINVAL;
1733
1734	if (!validate_add_rxsa(tb_sa))
1735		return -EINVAL;
1736
1737	rtnl_lock();
1738	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
1739	if (IS_ERR(rx_sc)) {
1740		rtnl_unlock();
1741		return PTR_ERR(rx_sc);
1742	}
1743
1744	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1745
1746	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
1747		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
1748			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
1749		rtnl_unlock();
1750		return -EINVAL;
1751	}
1752
1753	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
1754	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
1755		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
1756			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
1757		rtnl_unlock();
1758		return -EINVAL;
1759	}
1760
1761	if (secy->xpn) {
1762		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
1763			rtnl_unlock();
1764			return -EINVAL;
1765		}
1766
1767		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
1768			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
1769				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
1770				  MACSEC_SALT_LEN);
1771			rtnl_unlock();
1772			return -EINVAL;
1773		}
1774	}
1775
1776	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
1777	if (rx_sa) {
1778		rtnl_unlock();
1779		return -EBUSY;
1780	}
1781
1782	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
1783	if (!rx_sa) {
1784		rtnl_unlock();
1785		return -ENOMEM;
1786	}
1787
1788	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1789			 secy->key_len, secy->icv_len);
1790	if (err < 0) {
1791		kfree(rx_sa);
1792		rtnl_unlock();
1793		return err;
1794	}
1795
1796	if (tb_sa[MACSEC_SA_ATTR_PN]) {
1797		spin_lock_bh(&rx_sa->lock);
1798		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
1799		spin_unlock_bh(&rx_sa->lock);
1800	}
1801
1802	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
1803		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
1804
1805	rx_sa->sc = rx_sc;
1806
1807	/* If h/w offloading is available, propagate to the device */
1808	if (macsec_is_offloaded(netdev_priv(dev))) {
1809		const struct macsec_ops *ops;
1810		struct macsec_context ctx;
1811
1812		ops = macsec_get_ops(netdev_priv(dev), &ctx);
1813		if (!ops) {
1814			err = -EOPNOTSUPP;
1815			goto cleanup;
1816		}
1817
1818		ctx.sa.assoc_num = assoc_num;
1819		ctx.sa.rx_sa = rx_sa;
1820		ctx.secy = secy;
1821		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1822		       secy->key_len);
1823
1824		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
1825		if (err)
1826			goto cleanup;
1827	}
1828
1829	if (secy->xpn) {
1830		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
1831		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
1832			   MACSEC_SALT_LEN);
1833	}
1834
1835	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
1836	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
1837
1838	rtnl_unlock();
1839
1840	return 0;
1841
1842cleanup:
1843	kfree(rx_sa);
1844	rtnl_unlock();
1845	return err;
1846}
1847
1848static bool validate_add_rxsc(struct nlattr **attrs)
1849{
1850	if (!attrs[MACSEC_RXSC_ATTR_SCI])
1851		return false;
1852
1853	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
1854		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
1855			return false;
1856	}
1857
1858	return true;
1859}
1860
1861static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
1862{
1863	struct net_device *dev;
1864	sci_t sci = MACSEC_UNDEF_SCI;
1865	struct nlattr **attrs = info->attrs;
1866	struct macsec_rx_sc *rx_sc;
1867	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1868	struct macsec_secy *secy;
1869	bool was_active;
1870	int ret;
1871
1872	if (!attrs[MACSEC_ATTR_IFINDEX])
1873		return -EINVAL;
1874
1875	if (parse_rxsc_config(attrs, tb_rxsc))
1876		return -EINVAL;
1877
1878	if (!validate_add_rxsc(tb_rxsc))
1879		return -EINVAL;
1880
1881	rtnl_lock();
1882	dev = get_dev_from_nl(genl_info_net(info), attrs);
1883	if (IS_ERR(dev)) {
1884		rtnl_unlock();
1885		return PTR_ERR(dev);
1886	}
1887
1888	secy = &macsec_priv(dev)->secy;
1889	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1890
1891	rx_sc = create_rx_sc(dev, sci);
1892	if (IS_ERR(rx_sc)) {
1893		rtnl_unlock();
1894		return PTR_ERR(rx_sc);
1895	}
1896
1897	was_active = rx_sc->active;
1898	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
1899		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
1900
1901	if (macsec_is_offloaded(netdev_priv(dev))) {
1902		const struct macsec_ops *ops;
1903		struct macsec_context ctx;
1904
1905		ops = macsec_get_ops(netdev_priv(dev), &ctx);
1906		if (!ops) {
1907			ret = -EOPNOTSUPP;
1908			goto cleanup;
1909		}
1910
1911		ctx.rx_sc = rx_sc;
1912		ctx.secy = secy;
1913
1914		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
1915		if (ret)
1916			goto cleanup;
1917	}
1918
1919	rtnl_unlock();
1920
1921	return 0;
1922
1923cleanup:
1924	rx_sc->active = was_active;
1925	rtnl_unlock();
1926	return ret;
1927}
1928
1929static bool validate_add_txsa(struct nlattr **attrs)
1930{
1931	if (!attrs[MACSEC_SA_ATTR_AN] ||
1932	    !attrs[MACSEC_SA_ATTR_PN] ||
1933	    !attrs[MACSEC_SA_ATTR_KEY] ||
1934	    !attrs[MACSEC_SA_ATTR_KEYID])
1935		return false;
1936
1937	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1938		return false;
1939
1940	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
1941		return false;
1942
1943	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1944		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1945			return false;
1946	}
1947
1948	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1949		return false;
1950
1951	return true;
1952}
1953
1954static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
1955{
1956	struct net_device *dev;
1957	struct nlattr **attrs = info->attrs;
1958	struct macsec_secy *secy;
1959	struct macsec_tx_sc *tx_sc;
1960	struct macsec_tx_sa *tx_sa;
1961	unsigned char assoc_num;
1962	int pn_len;
1963	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1964	bool was_operational;
1965	int err;
1966
1967	if (!attrs[MACSEC_ATTR_IFINDEX])
1968		return -EINVAL;
1969
1970	if (parse_sa_config(attrs, tb_sa))
1971		return -EINVAL;
1972
1973	if (!validate_add_txsa(tb_sa))
1974		return -EINVAL;
1975
1976	rtnl_lock();
1977	dev = get_dev_from_nl(genl_info_net(info), attrs);
1978	if (IS_ERR(dev)) {
1979		rtnl_unlock();
1980		return PTR_ERR(dev);
1981	}
1982
1983	secy = &macsec_priv(dev)->secy;
1984	tx_sc = &secy->tx_sc;
1985
1986	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1987
1988	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
1989		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
1990			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
1991		rtnl_unlock();
1992		return -EINVAL;
1993	}
1994
1995	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
1996	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
1997		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
1998			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
1999		rtnl_unlock();
2000		return -EINVAL;
2001	}
2002
2003	if (secy->xpn) {
2004		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
2005			rtnl_unlock();
2006			return -EINVAL;
2007		}
2008
2009		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
2010			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
2011				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
2012				  MACSEC_SALT_LEN);
2013			rtnl_unlock();
2014			return -EINVAL;
2015		}
2016	}
2017
2018	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
2019	if (tx_sa) {
2020		rtnl_unlock();
2021		return -EBUSY;
2022	}
2023
2024	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
2025	if (!tx_sa) {
2026		rtnl_unlock();
2027		return -ENOMEM;
2028	}
2029
2030	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2031			 secy->key_len, secy->icv_len);
2032	if (err < 0) {
2033		kfree(tx_sa);
2034		rtnl_unlock();
2035		return err;
2036	}
2037
2038	spin_lock_bh(&tx_sa->lock);
2039	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2040	spin_unlock_bh(&tx_sa->lock);
2041
2042	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2043		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2044
2045	was_operational = secy->operational;
2046	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
2047		secy->operational = true;
2048
2049	/* If h/w offloading is available, propagate to the device */
2050	if (macsec_is_offloaded(netdev_priv(dev))) {
2051		const struct macsec_ops *ops;
2052		struct macsec_context ctx;
2053
2054		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2055		if (!ops) {
2056			err = -EOPNOTSUPP;
2057			goto cleanup;
2058		}
2059
2060		ctx.sa.assoc_num = assoc_num;
2061		ctx.sa.tx_sa = tx_sa;
2062		ctx.secy = secy;
2063		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2064		       secy->key_len);
2065
2066		err = macsec_offload(ops->mdo_add_txsa, &ctx);
2067		if (err)
2068			goto cleanup;
2069	}
2070
2071	if (secy->xpn) {
2072		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
2073		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
2074			   MACSEC_SALT_LEN);
2075	}
2076
2077	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
2078	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
2079
2080	rtnl_unlock();
2081
2082	return 0;
2083
2084cleanup:
2085	secy->operational = was_operational;
2086	kfree(tx_sa);
2087	rtnl_unlock();
2088	return err;
2089}
2090
2091static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
2092{
2093	struct nlattr **attrs = info->attrs;
2094	struct net_device *dev;
2095	struct macsec_secy *secy;
2096	struct macsec_rx_sc *rx_sc;
2097	struct macsec_rx_sa *rx_sa;
2098	u8 assoc_num;
2099	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2100	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2101	int ret;
2102
2103	if (!attrs[MACSEC_ATTR_IFINDEX])
2104		return -EINVAL;
2105
2106	if (parse_sa_config(attrs, tb_sa))
2107		return -EINVAL;
2108
2109	if (parse_rxsc_config(attrs, tb_rxsc))
2110		return -EINVAL;
2111
2112	rtnl_lock();
2113	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2114				 &dev, &secy, &rx_sc, &assoc_num);
2115	if (IS_ERR(rx_sa)) {
2116		rtnl_unlock();
2117		return PTR_ERR(rx_sa);
2118	}
2119
2120	if (rx_sa->active) {
2121		rtnl_unlock();
2122		return -EBUSY;
2123	}
2124
2125	/* If h/w offloading is available, propagate to the device */
2126	if (macsec_is_offloaded(netdev_priv(dev))) {
2127		const struct macsec_ops *ops;
2128		struct macsec_context ctx;
2129
2130		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2131		if (!ops) {
2132			ret = -EOPNOTSUPP;
2133			goto cleanup;
2134		}
2135
2136		ctx.sa.assoc_num = assoc_num;
2137		ctx.sa.rx_sa = rx_sa;
2138		ctx.secy = secy;
2139
2140		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
2141		if (ret)
2142			goto cleanup;
2143	}
2144
2145	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
2146	clear_rx_sa(rx_sa);
2147
2148	rtnl_unlock();
2149
2150	return 0;
2151
2152cleanup:
2153	rtnl_unlock();
2154	return ret;
2155}
2156
2157static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
2158{
2159	struct nlattr **attrs = info->attrs;
2160	struct net_device *dev;
2161	struct macsec_secy *secy;
2162	struct macsec_rx_sc *rx_sc;
2163	sci_t sci;
2164	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2165	int ret;
2166
2167	if (!attrs[MACSEC_ATTR_IFINDEX])
2168		return -EINVAL;
2169
2170	if (parse_rxsc_config(attrs, tb_rxsc))
2171		return -EINVAL;
2172
2173	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
2174		return -EINVAL;
2175
2176	rtnl_lock();
2177	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
2178	if (IS_ERR(dev)) {
2179		rtnl_unlock();
2180		return PTR_ERR(dev);
2181	}
2182
2183	secy = &macsec_priv(dev)->secy;
2184	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
2185
2186	rx_sc = del_rx_sc(secy, sci);
2187	if (!rx_sc) {
2188		rtnl_unlock();
2189		return -ENODEV;
2190	}
2191
2192	/* If h/w offloading is available, propagate to the device */
2193	if (macsec_is_offloaded(netdev_priv(dev))) {
2194		const struct macsec_ops *ops;
2195		struct macsec_context ctx;
2196
2197		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2198		if (!ops) {
2199			ret = -EOPNOTSUPP;
2200			goto cleanup;
2201		}
2202
2203		ctx.rx_sc = rx_sc;
2204		ctx.secy = secy;
2205		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
2206		if (ret)
2207			goto cleanup;
2208	}
2209
2210	free_rx_sc(rx_sc);
2211	rtnl_unlock();
2212
2213	return 0;
2214
2215cleanup:
2216	rtnl_unlock();
2217	return ret;
2218}
2219
2220static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
2221{
2222	struct nlattr **attrs = info->attrs;
2223	struct net_device *dev;
2224	struct macsec_secy *secy;
2225	struct macsec_tx_sc *tx_sc;
2226	struct macsec_tx_sa *tx_sa;
2227	u8 assoc_num;
2228	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2229	int ret;
2230
2231	if (!attrs[MACSEC_ATTR_IFINDEX])
2232		return -EINVAL;
2233
2234	if (parse_sa_config(attrs, tb_sa))
2235		return -EINVAL;
2236
2237	rtnl_lock();
2238	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2239				 &dev, &secy, &tx_sc, &assoc_num);
2240	if (IS_ERR(tx_sa)) {
2241		rtnl_unlock();
2242		return PTR_ERR(tx_sa);
2243	}
2244
2245	if (tx_sa->active) {
2246		rtnl_unlock();
2247		return -EBUSY;
2248	}
2249
2250	/* If h/w offloading is available, propagate to the device */
2251	if (macsec_is_offloaded(netdev_priv(dev))) {
2252		const struct macsec_ops *ops;
2253		struct macsec_context ctx;
2254
2255		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2256		if (!ops) {
2257			ret = -EOPNOTSUPP;
2258			goto cleanup;
2259		}
2260
2261		ctx.sa.assoc_num = assoc_num;
2262		ctx.sa.tx_sa = tx_sa;
2263		ctx.secy = secy;
2264
2265		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
2266		if (ret)
2267			goto cleanup;
2268	}
2269
2270	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
2271	clear_tx_sa(tx_sa);
2272
2273	rtnl_unlock();
2274
2275	return 0;
2276
2277cleanup:
2278	rtnl_unlock();
2279	return ret;
2280}
2281
2282static bool validate_upd_sa(struct nlattr **attrs)
2283{
2284	if (!attrs[MACSEC_SA_ATTR_AN] ||
2285	    attrs[MACSEC_SA_ATTR_KEY] ||
2286	    attrs[MACSEC_SA_ATTR_KEYID] ||
2287	    attrs[MACSEC_SA_ATTR_SSCI] ||
2288	    attrs[MACSEC_SA_ATTR_SALT])
2289		return false;
2290
2291	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
2292		return false;
2293
2294	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
2295		return false;
2296
2297	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
2298		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
2299			return false;
2300	}
2301
2302	return true;
2303}
2304
2305static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
2306{
2307	struct nlattr **attrs = info->attrs;
2308	struct net_device *dev;
2309	struct macsec_secy *secy;
2310	struct macsec_tx_sc *tx_sc;
2311	struct macsec_tx_sa *tx_sa;
2312	u8 assoc_num;
2313	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2314	bool was_operational, was_active;
2315	pn_t prev_pn;
2316	int ret = 0;
2317
2318	prev_pn.full64 = 0;
2319
2320	if (!attrs[MACSEC_ATTR_IFINDEX])
2321		return -EINVAL;
2322
2323	if (parse_sa_config(attrs, tb_sa))
2324		return -EINVAL;
2325
2326	if (!validate_upd_sa(tb_sa))
2327		return -EINVAL;
2328
2329	rtnl_lock();
2330	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2331				 &dev, &secy, &tx_sc, &assoc_num);
2332	if (IS_ERR(tx_sa)) {
2333		rtnl_unlock();
2334		return PTR_ERR(tx_sa);
2335	}
2336
2337	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2338		int pn_len;
2339
2340		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2341		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2342			pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
2343				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2344			rtnl_unlock();
2345			return -EINVAL;
2346		}
2347
2348		spin_lock_bh(&tx_sa->lock);
2349		prev_pn = tx_sa->next_pn_halves;
2350		tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2351		spin_unlock_bh(&tx_sa->lock);
2352	}
2353
2354	was_active = tx_sa->active;
2355	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2356		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2357
2358	was_operational = secy->operational;
2359	if (assoc_num == tx_sc->encoding_sa)
2360		secy->operational = tx_sa->active;
2361
2362	/* If h/w offloading is available, propagate to the device */
2363	if (macsec_is_offloaded(netdev_priv(dev))) {
2364		const struct macsec_ops *ops;
2365		struct macsec_context ctx;
2366
2367		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2368		if (!ops) {
2369			ret = -EOPNOTSUPP;
2370			goto cleanup;
2371		}
2372
2373		ctx.sa.assoc_num = assoc_num;
2374		ctx.sa.tx_sa = tx_sa;
2375		ctx.secy = secy;
2376
2377		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
2378		if (ret)
2379			goto cleanup;
2380	}
2381
2382	rtnl_unlock();
2383
2384	return 0;
2385
2386cleanup:
2387	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2388		spin_lock_bh(&tx_sa->lock);
2389		tx_sa->next_pn_halves = prev_pn;
2390		spin_unlock_bh(&tx_sa->lock);
2391	}
2392	tx_sa->active = was_active;
2393	secy->operational = was_operational;
2394	rtnl_unlock();
2395	return ret;
2396}
2397
2398static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
2399{
2400	struct nlattr **attrs = info->attrs;
2401	struct net_device *dev;
2402	struct macsec_secy *secy;
2403	struct macsec_rx_sc *rx_sc;
2404	struct macsec_rx_sa *rx_sa;
2405	u8 assoc_num;
2406	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2407	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2408	bool was_active;
2409	pn_t prev_pn;
2410	int ret = 0;
2411
2412	prev_pn.full64 = 0;
2413
2414	if (!attrs[MACSEC_ATTR_IFINDEX])
2415		return -EINVAL;
2416
2417	if (parse_rxsc_config(attrs, tb_rxsc))
2418		return -EINVAL;
2419
2420	if (parse_sa_config(attrs, tb_sa))
2421		return -EINVAL;
2422
2423	if (!validate_upd_sa(tb_sa))
2424		return -EINVAL;
2425
2426	rtnl_lock();
2427	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2428				 &dev, &secy, &rx_sc, &assoc_num);
2429	if (IS_ERR(rx_sa)) {
2430		rtnl_unlock();
2431		return PTR_ERR(rx_sa);
2432	}
2433
2434	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2435		int pn_len;
2436
2437		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2438		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2439			pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
2440				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2441			rtnl_unlock();
2442			return -EINVAL;
2443		}
2444
2445		spin_lock_bh(&rx_sa->lock);
2446		prev_pn = rx_sa->next_pn_halves;
2447		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2448		spin_unlock_bh(&rx_sa->lock);
2449	}
2450
2451	was_active = rx_sa->active;
2452	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2453		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2454
2455	/* If h/w offloading is available, propagate to the device */
2456	if (macsec_is_offloaded(netdev_priv(dev))) {
2457		const struct macsec_ops *ops;
2458		struct macsec_context ctx;
2459
2460		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2461		if (!ops) {
2462			ret = -EOPNOTSUPP;
2463			goto cleanup;
2464		}
2465
2466		ctx.sa.assoc_num = assoc_num;
2467		ctx.sa.rx_sa = rx_sa;
2468		ctx.secy = secy;
2469
2470		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
2471		if (ret)
2472			goto cleanup;
2473	}
2474
2475	rtnl_unlock();
2476	return 0;
2477
2478cleanup:
2479	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2480		spin_lock_bh(&rx_sa->lock);
2481		rx_sa->next_pn_halves = prev_pn;
2482		spin_unlock_bh(&rx_sa->lock);
2483	}
2484	rx_sa->active = was_active;
2485	rtnl_unlock();
2486	return ret;
2487}
2488
2489static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
2490{
2491	struct nlattr **attrs = info->attrs;
2492	struct net_device *dev;
2493	struct macsec_secy *secy;
2494	struct macsec_rx_sc *rx_sc;
2495	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2496	unsigned int prev_n_rx_sc;
2497	bool was_active;
2498	int ret;
2499
2500	if (!attrs[MACSEC_ATTR_IFINDEX])
2501		return -EINVAL;
2502
2503	if (parse_rxsc_config(attrs, tb_rxsc))
2504		return -EINVAL;
2505
2506	if (!validate_add_rxsc(tb_rxsc))
2507		return -EINVAL;
2508
2509	rtnl_lock();
2510	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
2511	if (IS_ERR(rx_sc)) {
2512		rtnl_unlock();
2513		return PTR_ERR(rx_sc);
2514	}
2515
2516	was_active = rx_sc->active;
2517	prev_n_rx_sc = secy->n_rx_sc;
2518	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
2519		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
2520
2521		if (rx_sc->active != new)
2522			secy->n_rx_sc += new ? 1 : -1;
2523
2524		rx_sc->active = new;
2525	}
2526
2527	/* If h/w offloading is available, propagate to the device */
2528	if (macsec_is_offloaded(netdev_priv(dev))) {
2529		const struct macsec_ops *ops;
2530		struct macsec_context ctx;
2531
2532		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2533		if (!ops) {
2534			ret = -EOPNOTSUPP;
2535			goto cleanup;
2536		}
2537
2538		ctx.rx_sc = rx_sc;
2539		ctx.secy = secy;
2540
2541		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
2542		if (ret)
2543			goto cleanup;
2544	}
2545
2546	rtnl_unlock();
2547
2548	return 0;
2549
2550cleanup:
2551	secy->n_rx_sc = prev_n_rx_sc;
2552	rx_sc->active = was_active;
2553	rtnl_unlock();
2554	return ret;
2555}
2556
2557static bool macsec_is_configured(struct macsec_dev *macsec)
2558{
2559	struct macsec_secy *secy = &macsec->secy;
2560	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2561	int i;
2562
2563	if (secy->n_rx_sc > 0)
2564		return true;
2565
2566	for (i = 0; i < MACSEC_NUM_AN; i++)
2567		if (tx_sc->sa[i])
2568			return true;
2569
2570	return false;
2571}
2572
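/* Switching the offload mode requires the device to be down and not yet
 * configured (no RX SCs, no TX SAs): rules are not migrated between the
 * software and hardware datapaths.
 */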
2573static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
2574{
2575	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
2576	enum macsec_offload offload, prev_offload;
2577	int (*func)(struct macsec_context *ctx);
2578	struct nlattr **attrs = info->attrs;
2579	struct net_device *dev;
2580	const struct macsec_ops *ops;
2581	struct macsec_context ctx;
2582	struct macsec_dev *macsec;
2583	int ret;
2584
2585	if (!attrs[MACSEC_ATTR_IFINDEX])
2586		return -EINVAL;
2587
2588	if (!attrs[MACSEC_ATTR_OFFLOAD])
2589		return -EINVAL;
2590
2591	if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
2592					attrs[MACSEC_ATTR_OFFLOAD],
2593					macsec_genl_offload_policy, NULL))
2594		return -EINVAL;
2595
2596	dev = get_dev_from_nl(genl_info_net(info), attrs);
2597	if (IS_ERR(dev))
2598		return PTR_ERR(dev);
2599	macsec = macsec_priv(dev);
2600
2601	if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE])
2602		return -EINVAL;
2603
2604	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
2605	if (macsec->offload == offload)
2606		return 0;
2607
2608	/* Check if the offloading mode is supported by the underlying layers */
2609	if (offload != MACSEC_OFFLOAD_OFF &&
2610	    !macsec_check_offload(offload, macsec))
2611		return -EOPNOTSUPP;
2612
2613	/* Check if the net device is busy. */
2614	if (netif_running(dev))
2615		return -EBUSY;
2616
2617	rtnl_lock();
2618
2619	prev_offload = macsec->offload;
2620	macsec->offload = offload;
2621
2622	/* Check if the device already has rules configured: we do not support
2623	 * migrating rules between datapaths.
2624	 */
2625	if (macsec_is_configured(macsec)) {
2626		ret = -EBUSY;
2627		goto rollback;
2628	}
2629
2630	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
2631			       macsec, &ctx);
2632	if (!ops) {
2633		ret = -EOPNOTSUPP;
2634		goto rollback;
2635	}
2636
2637	if (prev_offload == MACSEC_OFFLOAD_OFF)
2638		func = ops->mdo_add_secy;
2639	else
2640		func = ops->mdo_del_secy;
2641
2642	ctx.secy = &macsec->secy;
2643	ret = macsec_offload(func, &ctx);
2644	if (ret)
2645		goto rollback;
2646
2647	/* Force features update, since they are different for SW MACSec and
2648	 * HW offloading cases.
2649	 */
2650	netdev_update_features(dev);
2651
2652	rtnl_unlock();
2653	return 0;
2654
2655rollback:
2656	macsec->offload = prev_offload;
2657
2658	rtnl_unlock();
2659	return ret;
2660}
2661
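/* The get_*_stats helpers below either query the offloading device or
 * fold the per-CPU counters into *sum; the u64_stats seqcount gives
 * readers a consistent 64-bit snapshot on 32-bit hosts.
 */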
2662static void get_tx_sa_stats(struct net_device *dev, int an,
2663			    struct macsec_tx_sa *tx_sa,
2664			    struct macsec_tx_sa_stats *sum)
2665{
2666	struct macsec_dev *macsec = macsec_priv(dev);
2667	int cpu;
2668
2669	/* If h/w offloading is available, propagate to the device */
2670	if (macsec_is_offloaded(macsec)) {
2671		const struct macsec_ops *ops;
2672		struct macsec_context ctx;
2673
2674		ops = macsec_get_ops(macsec, &ctx);
2675		if (ops) {
2676			ctx.sa.assoc_num = an;
2677			ctx.sa.tx_sa = tx_sa;
2678			ctx.stats.tx_sa_stats = sum;
2679			ctx.secy = &macsec_priv(dev)->secy;
2680			macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
2681		}
2682		return;
2683	}
2684
2685	for_each_possible_cpu(cpu) {
2686		const struct macsec_tx_sa_stats *stats =
2687			per_cpu_ptr(tx_sa->stats, cpu);
2688
2689		sum->OutPktsProtected += stats->OutPktsProtected;
2690		sum->OutPktsEncrypted += stats->OutPktsEncrypted;
2691	}
2692}
2693
2694static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
2695{
2696	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
2697			sum->OutPktsProtected) ||
2698	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2699			sum->OutPktsEncrypted))
2700		return -EMSGSIZE;
2701
2702	return 0;
2703}
2704
2705static void get_rx_sa_stats(struct net_device *dev,
2706			    struct macsec_rx_sc *rx_sc, int an,
2707			    struct macsec_rx_sa *rx_sa,
2708			    struct macsec_rx_sa_stats *sum)
2709{
2710	struct macsec_dev *macsec = macsec_priv(dev);
2711	int cpu;
2712
2713	/* If h/w offloading is available, propagate to the device */
2714	if (macsec_is_offloaded(macsec)) {
2715		const struct macsec_ops *ops;
2716		struct macsec_context ctx;
2717
2718		ops = macsec_get_ops(macsec, &ctx);
2719		if (ops) {
2720			ctx.sa.assoc_num = an;
2721			ctx.sa.rx_sa = rx_sa;
2722			ctx.stats.rx_sa_stats = sum;
2723			ctx.secy = &macsec_priv(dev)->secy;
2724			ctx.rx_sc = rx_sc;
2725			macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
2726		}
2727		return;
2728	}
2729
2730	for_each_possible_cpu(cpu) {
2731		const struct macsec_rx_sa_stats *stats =
2732			per_cpu_ptr(rx_sa->stats, cpu);
2733
2734		sum->InPktsOK         += stats->InPktsOK;
2735		sum->InPktsInvalid    += stats->InPktsInvalid;
2736		sum->InPktsNotValid   += stats->InPktsNotValid;
2737		sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
2738		sum->InPktsUnusedSA   += stats->InPktsUnusedSA;
2739	}
2740}
2741
2742static int copy_rx_sa_stats(struct sk_buff *skb,
2743			    struct macsec_rx_sa_stats *sum)
2744{
2745	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
2746	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
2747			sum->InPktsInvalid) ||
2748	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
2749			sum->InPktsNotValid) ||
2750	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2751			sum->InPktsNotUsingSA) ||
2752	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
2753			sum->InPktsUnusedSA))
2754		return -EMSGSIZE;
2755
2756	return 0;
2757}
2758
2759static void get_rx_sc_stats(struct net_device *dev,
2760			    struct macsec_rx_sc *rx_sc,
2761			    struct macsec_rx_sc_stats *sum)
2762{
2763	struct macsec_dev *macsec = macsec_priv(dev);
2764	int cpu;
2765
2766	/* If h/w offloading is available, propagate to the device */
2767	if (macsec_is_offloaded(macsec)) {
2768		const struct macsec_ops *ops;
2769		struct macsec_context ctx;
2770
2771		ops = macsec_get_ops(macsec, &ctx);
2772		if (ops) {
2773			ctx.stats.rx_sc_stats = sum;
2774			ctx.secy = &macsec_priv(dev)->secy;
2775			ctx.rx_sc = rx_sc;
2776			macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
2777		}
2778		return;
2779	}
2780
2781	for_each_possible_cpu(cpu) {
2782		const struct pcpu_rx_sc_stats *stats;
2783		struct macsec_rx_sc_stats tmp;
2784		unsigned int start;
2785
2786		stats = per_cpu_ptr(rx_sc->stats, cpu);
2787		do {
2788			start = u64_stats_fetch_begin(&stats->syncp);
2789			memcpy(&tmp, &stats->stats, sizeof(tmp));
2790		} while (u64_stats_fetch_retry(&stats->syncp, start));
2791
2792		sum->InOctetsValidated += tmp.InOctetsValidated;
2793		sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
2794		sum->InPktsUnchecked   += tmp.InPktsUnchecked;
2795		sum->InPktsDelayed     += tmp.InPktsDelayed;
2796		sum->InPktsOK          += tmp.InPktsOK;
2797		sum->InPktsInvalid     += tmp.InPktsInvalid;
2798		sum->InPktsLate        += tmp.InPktsLate;
2799		sum->InPktsNotValid    += tmp.InPktsNotValid;
2800		sum->InPktsNotUsingSA  += tmp.InPktsNotUsingSA;
2801		sum->InPktsUnusedSA    += tmp.InPktsUnusedSA;
2802	}
2803}
2804
2805static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
2806{
2807	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
2808			      sum->InOctetsValidated,
2809			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2810	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
2811			      sum->InOctetsDecrypted,
2812			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2813	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
2814			      sum->InPktsUnchecked,
2815			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2816	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
2817			      sum->InPktsDelayed,
2818			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2819	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
2820			      sum->InPktsOK,
2821			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2822	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
2823			      sum->InPktsInvalid,
2824			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2825	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
2826			      sum->InPktsLate,
2827			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2828	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
2829			      sum->InPktsNotValid,
2830			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2831	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2832			      sum->InPktsNotUsingSA,
2833			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2834	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
2835			      sum->InPktsUnusedSA,
2836			      MACSEC_RXSC_STATS_ATTR_PAD))
2837		return -EMSGSIZE;
2838
2839	return 0;
2840}
2841
2842static void get_tx_sc_stats(struct net_device *dev,
2843			    struct macsec_tx_sc_stats *sum)
2844{
2845	struct macsec_dev *macsec = macsec_priv(dev);
2846	int cpu;
2847
2848	/* If h/w offloading is available, propagate to the device */
2849	if (macsec_is_offloaded(macsec)) {
2850		const struct macsec_ops *ops;
2851		struct macsec_context ctx;
2852
2853		ops = macsec_get_ops(macsec, &ctx);
2854		if (ops) {
2855			ctx.stats.tx_sc_stats = sum;
2856			ctx.secy = &macsec_priv(dev)->secy;
2857			macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
2858		}
2859		return;
2860	}
2861
2862	for_each_possible_cpu(cpu) {
2863		const struct pcpu_tx_sc_stats *stats;
2864		struct macsec_tx_sc_stats tmp;
2865		unsigned int start;
2866
2867		stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
2868		do {
2869			start = u64_stats_fetch_begin(&stats->syncp);
2870			memcpy(&tmp, &stats->stats, sizeof(tmp));
2871		} while (u64_stats_fetch_retry(&stats->syncp, start));
2872
2873		sum->OutPktsProtected   += tmp.OutPktsProtected;
2874		sum->OutPktsEncrypted   += tmp.OutPktsEncrypted;
2875		sum->OutOctetsProtected += tmp.OutOctetsProtected;
2876		sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
2877	}
2878}
2879
2880static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
2881{
2882	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
2883			      sum->OutPktsProtected,
2884			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2885	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2886			      sum->OutPktsEncrypted,
2887			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2888	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
2889			      sum->OutOctetsProtected,
2890			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2891	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
2892			      sum->OutOctetsEncrypted,
2893			      MACSEC_TXSC_STATS_ATTR_PAD))
2894		return -EMSGSIZE;
2895
2896	return 0;
2897}
2898
2899static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
2900{
2901	struct macsec_dev *macsec = macsec_priv(dev);
2902	int cpu;
2903
2904	/* If h/w offloading is available, propagate to the device */
2905	if (macsec_is_offloaded(macsec)) {
2906		const struct macsec_ops *ops;
2907		struct macsec_context ctx;
2908
2909		ops = macsec_get_ops(macsec, &ctx);
2910		if (ops) {
2911			ctx.stats.dev_stats = sum;
2912			ctx.secy = &macsec_priv(dev)->secy;
2913			macsec_offload(ops->mdo_get_dev_stats, &ctx);
2914		}
2915		return;
2916	}
2917
2918	for_each_possible_cpu(cpu) {
2919		const struct pcpu_secy_stats *stats;
2920		struct macsec_dev_stats tmp;
2921		unsigned int start;
2922
2923		stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
2924		do {
2925			start = u64_stats_fetch_begin(&stats->syncp);
2926			memcpy(&tmp, &stats->stats, sizeof(tmp));
2927		} while (u64_stats_fetch_retry(&stats->syncp, start));
2928
2929		sum->OutPktsUntagged  += tmp.OutPktsUntagged;
2930		sum->InPktsUntagged   += tmp.InPktsUntagged;
2931		sum->OutPktsTooLong   += tmp.OutPktsTooLong;
2932		sum->InPktsNoTag      += tmp.InPktsNoTag;
2933		sum->InPktsBadTag     += tmp.InPktsBadTag;
2934		sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
2935		sum->InPktsNoSCI      += tmp.InPktsNoSCI;
2936		sum->InPktsOverrun    += tmp.InPktsOverrun;
2937	}
2938}
2939
2940static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
2941{
2942	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
2943			      sum->OutPktsUntagged,
2944			      MACSEC_SECY_STATS_ATTR_PAD) ||
2945	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
2946			      sum->InPktsUntagged,
2947			      MACSEC_SECY_STATS_ATTR_PAD) ||
2948	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
2949			      sum->OutPktsTooLong,
2950			      MACSEC_SECY_STATS_ATTR_PAD) ||
2951	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
2952			      sum->InPktsNoTag,
2953			      MACSEC_SECY_STATS_ATTR_PAD) ||
2954	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
2955			      sum->InPktsBadTag,
2956			      MACSEC_SECY_STATS_ATTR_PAD) ||
2957	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
2958			      sum->InPktsUnknownSCI,
2959			      MACSEC_SECY_STATS_ATTR_PAD) ||
2960	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
2961			      sum->InPktsNoSCI,
2962			      MACSEC_SECY_STATS_ATTR_PAD) ||
2963	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
2964			      sum->InPktsOverrun,
2965			      MACSEC_SECY_STATS_ATTR_PAD))
2966		return -EMSGSIZE;
2967
2968	return 0;
2969}
2970
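/* Serialize the SecY and its TX SC configuration into a MACSEC_ATTR_SECY
 * nest; returns nonzero on failure so dump_secy can cancel the message.
 */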
2971static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
2972{
2973	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2974	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
2975							 MACSEC_ATTR_SECY);
2976	u64 csid;
2977
2978	if (!secy_nest)
2979		return 1;
2980
2981	switch (secy->key_len) {
2982	case MACSEC_GCM_AES_128_SAK_LEN:
2983		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
2984		break;
2985	case MACSEC_GCM_AES_256_SAK_LEN:
2986		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
2987		break;
2988	default:
2989		goto cancel;
2990	}
2991
2992	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
2993			MACSEC_SECY_ATTR_PAD) ||
2994	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
2995			      csid, MACSEC_SECY_ATTR_PAD) ||
2996	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
2997	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
2998	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
2999	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
3000	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
3001	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
3002	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
3003	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
3004	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
3005	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
3006		goto cancel;
3007
3008	if (secy->replay_protect) {
3009		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
3010			goto cancel;
3011	}
3012
3013	nla_nest_end(skb, secy_nest);
3014	return 0;
3015
3016cancel:
3017	nla_nest_cancel(skb, secy_nest);
3018	return 1;
3019}
3020
3021static noinline_for_stack int
3022dump_secy(struct macsec_secy *secy, struct net_device *dev,
3023	  struct sk_buff *skb, struct netlink_callback *cb)
3024{
3025	struct macsec_tx_sc_stats tx_sc_stats = {0, };
3026	struct macsec_tx_sa_stats tx_sa_stats = {0, };
3027	struct macsec_rx_sc_stats rx_sc_stats = {0, };
3028	struct macsec_rx_sa_stats rx_sa_stats = {0, };
3029	struct macsec_dev *macsec = netdev_priv(dev);
3030	struct macsec_dev_stats dev_stats = {0, };
3031	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3032	struct nlattr *txsa_list, *rxsc_list;
3033	struct macsec_rx_sc *rx_sc;
3034	struct nlattr *attr;
3035	void *hdr;
3036	int i, j;
3037
3038	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3039			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
3040	if (!hdr)
3041		return -EMSGSIZE;
3042
3043	genl_dump_check_consistent(cb, hdr);
3044
3045	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
3046		goto nla_put_failure;
3047
3048	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
3049	if (!attr)
3050		goto nla_put_failure;
3051	if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
3052		goto nla_put_failure;
3053	nla_nest_end(skb, attr);
3054
3055	if (nla_put_secy(secy, skb))
3056		goto nla_put_failure;
3057
3058	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
3059	if (!attr)
3060		goto nla_put_failure;
3061
3062	get_tx_sc_stats(dev, &tx_sc_stats);
3063	if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
3064		nla_nest_cancel(skb, attr);
3065		goto nla_put_failure;
3066	}
3067	nla_nest_end(skb, attr);
3068
3069	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
3070	if (!attr)
3071		goto nla_put_failure;
3072	get_secy_stats(dev, &dev_stats);
3073	if (copy_secy_stats(skb, &dev_stats)) {
3074		nla_nest_cancel(skb, attr);
3075		goto nla_put_failure;
3076	}
3077	nla_nest_end(skb, attr);
3078
3079	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
3080	if (!txsa_list)
3081		goto nla_put_failure;
3082	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
3083		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
3084		struct nlattr *txsa_nest;
3085		u64 pn;
3086		int pn_len;
3087
3088		if (!tx_sa)
3089			continue;
3090
3091		txsa_nest = nla_nest_start_noflag(skb, j++);
3092		if (!txsa_nest) {
3093			nla_nest_cancel(skb, txsa_list);
3094			goto nla_put_failure;
3095		}
3096
3097		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
3098		if (!attr) {
3099			nla_nest_cancel(skb, txsa_nest);
3100			nla_nest_cancel(skb, txsa_list);
3101			goto nla_put_failure;
3102		}
3103		memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
3104		get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
3105		if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
3106			nla_nest_cancel(skb, attr);
3107			nla_nest_cancel(skb, txsa_nest);
3108			nla_nest_cancel(skb, txsa_list);
3109			goto nla_put_failure;
3110		}
3111		nla_nest_end(skb, attr);
3112
3113		if (secy->xpn) {
3114			pn = tx_sa->next_pn;
3115			pn_len = MACSEC_XPN_PN_LEN;
3116		} else {
3117			pn = tx_sa->next_pn_halves.lower;
3118			pn_len = MACSEC_DEFAULT_PN_LEN;
3119		}
3120
3121		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3122		    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3123		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
3124		    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
3125		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
3126			nla_nest_cancel(skb, txsa_nest);
3127			nla_nest_cancel(skb, txsa_list);
3128			goto nla_put_failure;
3129		}
3130
3131		nla_nest_end(skb, txsa_nest);
3132	}
3133	nla_nest_end(skb, txsa_list);
3134
3135	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
3136	if (!rxsc_list)
3137		goto nla_put_failure;
3138
3139	j = 1;
3140	for_each_rxsc_rtnl(secy, rx_sc) {
3141		int k;
3142		struct nlattr *rxsa_list;
3143		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);
3144
3145		if (!rxsc_nest) {
3146			nla_nest_cancel(skb, rxsc_list);
3147			goto nla_put_failure;
3148		}
3149
3150		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
3151		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
3152				MACSEC_RXSC_ATTR_PAD)) {
3153			nla_nest_cancel(skb, rxsc_nest);
3154			nla_nest_cancel(skb, rxsc_list);
3155			goto nla_put_failure;
3156		}
3157
3158		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
3159		if (!attr) {
3160			nla_nest_cancel(skb, rxsc_nest);
3161			nla_nest_cancel(skb, rxsc_list);
3162			goto nla_put_failure;
3163		}
3164		memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
3165		get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
3166		if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
3167			nla_nest_cancel(skb, attr);
3168			nla_nest_cancel(skb, rxsc_nest);
3169			nla_nest_cancel(skb, rxsc_list);
3170			goto nla_put_failure;
3171		}
3172		nla_nest_end(skb, attr);
3173
3174		rxsa_list = nla_nest_start_noflag(skb,
3175						  MACSEC_RXSC_ATTR_SA_LIST);
3176		if (!rxsa_list) {
3177			nla_nest_cancel(skb, rxsc_nest);
3178			nla_nest_cancel(skb, rxsc_list);
3179			goto nla_put_failure;
3180		}
3181
3182		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
3183			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
3184			struct nlattr *rxsa_nest;
3185			u64 pn;
3186			int pn_len;
3187
3188			if (!rx_sa)
3189				continue;
3190
3191			rxsa_nest = nla_nest_start_noflag(skb, k++);
3192			if (!rxsa_nest) {
3193				nla_nest_cancel(skb, rxsa_list);
3194				nla_nest_cancel(skb, rxsc_nest);
3195				nla_nest_cancel(skb, rxsc_list);
3196				goto nla_put_failure;
3197			}
3198
3199			attr = nla_nest_start_noflag(skb,
3200						     MACSEC_SA_ATTR_STATS);
3201			if (!attr) {
3202				nla_nest_cancel(skb, rxsa_list);
3203				nla_nest_cancel(skb, rxsc_nest);
3204				nla_nest_cancel(skb, rxsc_list);
3205				goto nla_put_failure;
3206			}
3207			memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
3208			get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
3209			if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
3210				nla_nest_cancel(skb, attr);
3211				nla_nest_cancel(skb, rxsa_list);
3212				nla_nest_cancel(skb, rxsc_nest);
3213				nla_nest_cancel(skb, rxsc_list);
3214				goto nla_put_failure;
3215			}
3216			nla_nest_end(skb, attr);
3217
3218			if (secy->xpn) {
3219				pn = rx_sa->next_pn;
3220				pn_len = MACSEC_XPN_PN_LEN;
3221			} else {
3222				pn = rx_sa->next_pn_halves.lower;
3223				pn_len = MACSEC_DEFAULT_PN_LEN;
3224			}
3225
3226			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3227			    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3228			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
3229			    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
3230			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
3231				nla_nest_cancel(skb, rxsa_nest);
3232				nla_nest_cancel(skb, rxsc_nest);
3233				nla_nest_cancel(skb, rxsc_list);
3234				goto nla_put_failure;
3235			}
3236			nla_nest_end(skb, rxsa_nest);
3237		}
3238
3239		nla_nest_end(skb, rxsa_list);
3240		nla_nest_end(skb, rxsc_nest);
3241	}
3242
3243	nla_nest_end(skb, rxsc_list);
3244
3245	genlmsg_end(skb, hdr);
3246
3247	return 0;
3248
3249nla_put_failure:
3250	genlmsg_cancel(skb, hdr);
3251	return -EMSGSIZE;
3252}
3253
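/* Bumped under RTNL when MACsec devices are added or removed; feeds
 * genl_dump_check_consistent() so userspace can detect a dump that raced
 * with configuration changes.
 */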
3254static int macsec_generation = 1; /* protected by RTNL */
3255
3256static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
3257{
3258	struct net *net = sock_net(skb->sk);
3259	struct net_device *dev;
3260	int dev_idx, d;
3261
3262	dev_idx = cb->args[0];
3263
3264	d = 0;
3265	rtnl_lock();
3266
3267	cb->seq = macsec_generation;
3268
3269	for_each_netdev(net, dev) {
3270		struct macsec_secy *secy;
3271
3272		if (d < dev_idx)
3273			goto next;
3274
3275		if (!netif_is_macsec(dev))
3276			goto next;
3277
3278		secy = &macsec_priv(dev)->secy;
3279		if (dump_secy(secy, dev, skb, cb) < 0)
3280			goto done;
3281next:
3282		d++;
3283	}
3284
3285done:
3286	rtnl_unlock();
3287	cb->args[0] = d;
3288	return skb->len;
3289}
3290
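/* These commands are usually driven through iproute2's "ip macsec" front
 * end. An illustrative session (hypothetical device names, SCI and keys):
 *
 *	ip link add link eth0 macsec0 type macsec encrypt on
 *	ip macsec add macsec0 tx sa 0 pn 1 on \
 *		key 01 0123456789abcdef0123456789abcdef	(ADD_TXSA)
 *	ip macsec add macsec0 rx sci 5254001234560001	(ADD_RXSC)
 *	ip macsec add macsec0 rx sci 5254001234560001 sa 0 pn 1 on \
 *		key 02 fedcba9876543210fedcba9876543210	(ADD_RXSA)
 *	ip macsec show					(GET_TXSC dump)
 */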
3291static const struct genl_small_ops macsec_genl_ops[] = {
3292	{
3293		.cmd = MACSEC_CMD_GET_TXSC,
3294		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3295		.dumpit = macsec_dump_txsc,
3296	},
3297	{
3298		.cmd = MACSEC_CMD_ADD_RXSC,
3299		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3300		.doit = macsec_add_rxsc,
3301		.flags = GENL_ADMIN_PERM,
3302	},
3303	{
3304		.cmd = MACSEC_CMD_DEL_RXSC,
3305		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3306		.doit = macsec_del_rxsc,
3307		.flags = GENL_ADMIN_PERM,
3308	},
3309	{
3310		.cmd = MACSEC_CMD_UPD_RXSC,
3311		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3312		.doit = macsec_upd_rxsc,
3313		.flags = GENL_ADMIN_PERM,
3314	},
3315	{
3316		.cmd = MACSEC_CMD_ADD_TXSA,
3317		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3318		.doit = macsec_add_txsa,
3319		.flags = GENL_ADMIN_PERM,
3320	},
3321	{
3322		.cmd = MACSEC_CMD_DEL_TXSA,
3323		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3324		.doit = macsec_del_txsa,
3325		.flags = GENL_ADMIN_PERM,
3326	},
3327	{
3328		.cmd = MACSEC_CMD_UPD_TXSA,
3329		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3330		.doit = macsec_upd_txsa,
3331		.flags = GENL_ADMIN_PERM,
3332	},
3333	{
3334		.cmd = MACSEC_CMD_ADD_RXSA,
3335		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3336		.doit = macsec_add_rxsa,
3337		.flags = GENL_ADMIN_PERM,
3338	},
3339	{
3340		.cmd = MACSEC_CMD_DEL_RXSA,
3341		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3342		.doit = macsec_del_rxsa,
3343		.flags = GENL_ADMIN_PERM,
3344	},
3345	{
3346		.cmd = MACSEC_CMD_UPD_RXSA,
3347		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3348		.doit = macsec_upd_rxsa,
3349		.flags = GENL_ADMIN_PERM,
3350	},
3351	{
3352		.cmd = MACSEC_CMD_UPD_OFFLOAD,
3353		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3354		.doit = macsec_upd_offload,
3355		.flags = GENL_ADMIN_PERM,
3356	},
3357};
3358
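/* Every state-changing command above carries GENL_ADMIN_PERM and thus
 * requires CAP_NET_ADMIN; only the MACSEC_CMD_GET_TXSC dump is
 * unprivileged. In practice these ops are driven from userspace by
 * iproute2. A minimal sketch, assuming iproute2's "ip macsec" syntax:
 *
 *	ip macsec add macsec0 tx sa 0 pn 1 on key 01 <16-byte-hex-key>
 *	ip macsec add macsec0 rx port 1 address 00:11:22:33:44:55
 *	ip macsec show
 */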
3359static struct genl_family macsec_fam __ro_after_init = {
3360	.name		= MACSEC_GENL_NAME,
3361	.hdrsize	= 0,
3362	.version	= MACSEC_GENL_VERSION,
3363	.maxattr	= MACSEC_ATTR_MAX,
3364	.policy		= macsec_genl_policy,
3365	.netnsok	= true,
3366	.module		= THIS_MODULE,
3367	.small_ops	= macsec_genl_ops,
3368	.n_small_ops	= ARRAY_SIZE(macsec_genl_ops),
3369};
3370
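/* Transmit path; three cases, in order:
 *  - offloaded SecY: hand the skb straight to the real device and let the
 *    hardware insert the SecTAG and ICV;
 *  - protect_frames disabled: account the frame as OutPktsUntagged and
 *    send it in the clear;
 *  - otherwise protect in software via macsec_encrypt(); -EINPROGRESS
 *    means the crypto completion callback will finish the transmit
 *    asynchronously.
 */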
3371static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
3372				     struct net_device *dev)
3373{
3374	struct macsec_dev *macsec = netdev_priv(dev);
3375	struct macsec_secy *secy = &macsec->secy;
3376	struct pcpu_secy_stats *secy_stats;
3377	int ret, len;
3378
3379	if (macsec_is_offloaded(netdev_priv(dev))) {
3380		skb->dev = macsec->real_dev;
3381		return dev_queue_xmit(skb);
3382	}
3383
3384	/* IEEE 802.1AE-2006 10.5: secure frame generation */
3385	if (!secy->protect_frames) {
3386		secy_stats = this_cpu_ptr(macsec->stats);
3387		u64_stats_update_begin(&secy_stats->syncp);
3388		secy_stats->stats.OutPktsUntagged++;
3389		u64_stats_update_end(&secy_stats->syncp);
3390		skb->dev = macsec->real_dev;
3391		len = skb->len;
3392		ret = dev_queue_xmit(skb);
3393		count_tx(dev, ret, len);
3394		return ret;
3395	}
3396
3397	if (!secy->operational) {
3398		kfree_skb(skb);
3399		dev->stats.tx_dropped++;
3400		return NETDEV_TX_OK;
3401	}
3402
3403	skb = macsec_encrypt(skb, dev);
3404	if (IS_ERR(skb)) {
3405		if (PTR_ERR(skb) != -EINPROGRESS)
3406			dev->stats.tx_dropped++;
3407		return NETDEV_TX_OK;
3408	}
3409
3410	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
3411
3412	macsec_encrypt_finish(skb, dev);
3413	len = skb->len;
3414	ret = dev_queue_xmit(skb);
3415	count_tx(dev, ret, len);
3416	return ret;
3417}
3418
3419#define SW_MACSEC_FEATURES \
3420	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
3421
3422	/* If h/w offloading is enabled, use the real device's features, except:
3423	 *   VLAN_FEATURES - they require additional ops
3424	 *   HW_MACSEC - no reason to report it on the MACsec device itself
3425	 */
3426#define REAL_DEV_FEATURES(dev) \
3427	((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC))
3428
3429static int macsec_dev_init(struct net_device *dev)
3430{
3431	struct macsec_dev *macsec = macsec_priv(dev);
3432	struct net_device *real_dev = macsec->real_dev;
3433	int err;
3434
3435	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
3436	if (!dev->tstats)
3437		return -ENOMEM;
3438
3439	err = gro_cells_init(&macsec->gro_cells, dev);
3440	if (err) {
3441		free_percpu(dev->tstats);
3442		return err;
3443	}
3444
3445	if (macsec_is_offloaded(macsec)) {
3446		dev->features = REAL_DEV_FEATURES(real_dev);
3447	} else {
3448		dev->features = real_dev->features & SW_MACSEC_FEATURES;
3449		dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
3450	}
3451
3452	dev->needed_headroom = real_dev->needed_headroom +
3453			       MACSEC_NEEDED_HEADROOM;
3454	dev->needed_tailroom = real_dev->needed_tailroom +
3455			       MACSEC_NEEDED_TAILROOM;
3456
3457	if (is_zero_ether_addr(dev->dev_addr))
3458		eth_hw_addr_inherit(dev, real_dev);
3459	if (is_zero_ether_addr(dev->broadcast))
3460		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
3461
3462	return 0;
3463}
3464
3465static void macsec_dev_uninit(struct net_device *dev)
3466{
3467	struct macsec_dev *macsec = macsec_priv(dev);
3468
3469	gro_cells_destroy(&macsec->gro_cells);
3470	free_percpu(dev->tstats);
3471}
3472
3473static netdev_features_t macsec_fix_features(struct net_device *dev,
3474					     netdev_features_t features)
3475{
3476	struct macsec_dev *macsec = macsec_priv(dev);
3477	struct net_device *real_dev = macsec->real_dev;
3478
3479	if (macsec_is_offloaded(macsec))
3480		return REAL_DEV_FEATURES(real_dev);
3481
3482	features &= (real_dev->features & SW_MACSEC_FEATURES) |
3483		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
3484	features |= NETIF_F_LLTX;
3485
3486	return features;
3487}
3488
3489static int macsec_dev_open(struct net_device *dev)
3490{
3491	struct macsec_dev *macsec = macsec_priv(dev);
3492	struct net_device *real_dev = macsec->real_dev;
3493	int err;
3494
3495	err = dev_uc_add(real_dev, dev->dev_addr);
3496	if (err < 0)
3497		return err;
3498
3499	if (dev->flags & IFF_ALLMULTI) {
3500		err = dev_set_allmulti(real_dev, 1);
3501		if (err < 0)
3502			goto del_unicast;
3503	}
3504
3505	if (dev->flags & IFF_PROMISC) {
3506		err = dev_set_promiscuity(real_dev, 1);
3507		if (err < 0)
3508			goto clear_allmulti;
3509	}
3510
3511	/* If h/w offloading is available, propagate to the device */
3512	if (macsec_is_offloaded(macsec)) {
3513		const struct macsec_ops *ops;
3514		struct macsec_context ctx;
3515
3516		ops = macsec_get_ops(netdev_priv(dev), &ctx);
3517		if (!ops) {
3518			err = -EOPNOTSUPP;
3519			goto clear_allmulti;
3520		}
3521
3522		ctx.secy = &macsec->secy;
3523		err = macsec_offload(ops->mdo_dev_open, &ctx);
3524		if (err)
3525			goto clear_allmulti;
3526	}
3527
3528	if (netif_carrier_ok(real_dev))
3529		netif_carrier_on(dev);
3530
3531	return 0;
3532clear_allmulti:
3533	if (dev->flags & IFF_ALLMULTI)
3534		dev_set_allmulti(real_dev, -1);
3535del_unicast:
3536	dev_uc_del(real_dev, dev->dev_addr);
3537	netif_carrier_off(dev);
3538	return err;
3539}
3540
3541static int macsec_dev_stop(struct net_device *dev)
3542{
3543	struct macsec_dev *macsec = macsec_priv(dev);
3544	struct net_device *real_dev = macsec->real_dev;
3545
3546	netif_carrier_off(dev);
3547
3548	/* If h/w offloading is available, propagate to the device */
3549	if (macsec_is_offloaded(macsec)) {
3550		const struct macsec_ops *ops;
3551		struct macsec_context ctx;
3552
3553		ops = macsec_get_ops(macsec, &ctx);
3554		if (ops) {
3555			ctx.secy = &macsec->secy;
3556			macsec_offload(ops->mdo_dev_stop, &ctx);
3557		}
3558	}
3559
3560	dev_mc_unsync(real_dev, dev);
3561	dev_uc_unsync(real_dev, dev);
3562
3563	if (dev->flags & IFF_ALLMULTI)
3564		dev_set_allmulti(real_dev, -1);
3565
3566	if (dev->flags & IFF_PROMISC)
3567		dev_set_promiscuity(real_dev, -1);
3568
3569	dev_uc_del(real_dev, dev->dev_addr);
3570
3571	return 0;
3572}
3573
3574static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
3575{
3576	struct net_device *real_dev = macsec_priv(dev)->real_dev;
3577
3578	if (!(dev->flags & IFF_UP))
3579		return;
3580
3581	if (change & IFF_ALLMULTI)
3582		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
3583
3584	if (change & IFF_PROMISC)
3585		dev_set_promiscuity(real_dev,
3586				    dev->flags & IFF_PROMISC ? 1 : -1);
3587}
3588
3589static void macsec_dev_set_rx_mode(struct net_device *dev)
3590{
3591	struct net_device *real_dev = macsec_priv(dev)->real_dev;
3592
3593	dev_mc_sync(real_dev, dev);
3594	dev_uc_sync(real_dev, dev);
3595}
3596
3597static int macsec_set_mac_address(struct net_device *dev, void *p)
3598{
3599	struct macsec_dev *macsec = macsec_priv(dev);
3600	struct net_device *real_dev = macsec->real_dev;
3601	struct sockaddr *addr = p;
3602	int err;
3603
3604	if (!is_valid_ether_addr(addr->sa_data))
3605		return -EADDRNOTAVAIL;
3606
3607	if (!(dev->flags & IFF_UP))
3608		goto out;
3609
3610	err = dev_uc_add(real_dev, addr->sa_data);
3611	if (err < 0)
3612		return err;
3613
3614	dev_uc_del(real_dev, dev->dev_addr);
3615
3616out:
3617	ether_addr_copy(dev->dev_addr, addr->sa_data);
3618	macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
3619
3620	/* If h/w offloading is available, propagate to the device */
3621	if (macsec_is_offloaded(macsec)) {
3622		const struct macsec_ops *ops;
3623		struct macsec_context ctx;
3624
3625		ops = macsec_get_ops(macsec, &ctx);
3626		if (ops) {
3627			ctx.secy = &macsec->secy;
3628			macsec_offload(ops->mdo_upd_secy, &ctx);
3629		}
3630	}
3631
3632	return 0;
3633}
3634
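/* The MACsec MTU must leave room on the real device for the SecTAG and
 * the ICV. As a worked example (assuming the defaults used elsewhere in
 * this file): with a 16-byte ICV, and macsec_extra_len(true) covering the
 * 6-byte SecTAG, 8-byte SCI and 2-byte MACsec EtherType, the overhead is
 * 32 bytes, so a 1500-byte lower MTU allows at most a 1468-byte MTU here.
 */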
3635static int macsec_change_mtu(struct net_device *dev, int new_mtu)
3636{
3637	struct macsec_dev *macsec = macsec_priv(dev);
3638	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);
3639
3640	if (macsec->real_dev->mtu - extra < new_mtu)
3641		return -ERANGE;
3642
3643	dev->mtu = new_mtu;
3644
3645	return 0;
3646}
3647
3648static void macsec_get_stats64(struct net_device *dev,
3649			       struct rtnl_link_stats64 *s)
3650{
3651	if (!dev->tstats)
3652		return;
3653
3654	dev_fetch_sw_netstats(s, dev->tstats);
3655
3656	s->rx_dropped = dev->stats.rx_dropped;
3657	s->tx_dropped = dev->stats.tx_dropped;
3658}
3659
3660static int macsec_get_iflink(const struct net_device *dev)
3661{
3662	return macsec_priv(dev)->real_dev->ifindex;
3663}
3664
3665static const struct net_device_ops macsec_netdev_ops = {
3666	.ndo_init		= macsec_dev_init,
3667	.ndo_uninit		= macsec_dev_uninit,
3668	.ndo_open		= macsec_dev_open,
3669	.ndo_stop		= macsec_dev_stop,
3670	.ndo_fix_features	= macsec_fix_features,
3671	.ndo_change_mtu		= macsec_change_mtu,
3672	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
3673	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
3674	.ndo_set_mac_address	= macsec_set_mac_address,
3675	.ndo_start_xmit		= macsec_start_xmit,
3676	.ndo_get_stats64	= macsec_get_stats64,
3677	.ndo_get_iflink		= macsec_get_iflink,
3678};
3679
3680static const struct device_type macsec_type = {
3681	.name = "macsec",
3682};
3683
3684static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
3685	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
3686	[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
3687	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
3688	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
3689	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
3690	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
3691	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
3692	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
3693	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
3694	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
3695	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
3696	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
3697	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
3698};
3699
3700static void macsec_free_netdev(struct net_device *dev)
3701{
3702	struct macsec_dev *macsec = macsec_priv(dev);
3703
3704	free_percpu(macsec->stats);
3705	free_percpu(macsec->secy.tx_sc.stats);
3706
3707}
3708
3709static void macsec_setup(struct net_device *dev)
3710{
3711	ether_setup(dev);
3712	dev->min_mtu = 0;
3713	dev->max_mtu = ETH_MAX_MTU;
3714	dev->priv_flags |= IFF_NO_QUEUE;
3715	dev->netdev_ops = &macsec_netdev_ops;
3716	dev->needs_free_netdev = true;
3717	dev->priv_destructor = macsec_free_netdev;
3718	SET_NETDEV_DEVTYPE(dev, &macsec_type);
3719
3720	eth_zero_addr(dev->broadcast);
3721}
3722
3723static int macsec_changelink_common(struct net_device *dev,
3724				    struct nlattr *data[])
3725{
3726	struct macsec_secy *secy;
3727	struct macsec_tx_sc *tx_sc;
3728
3729	secy = &macsec_priv(dev)->secy;
3730	tx_sc = &secy->tx_sc;
3731
3732	if (data[IFLA_MACSEC_ENCODING_SA]) {
3733		struct macsec_tx_sa *tx_sa;
3734
3735		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
3736		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);
3737
3738		secy->operational = tx_sa && tx_sa->active;
3739	}
3740
3741	if (data[IFLA_MACSEC_WINDOW])
3742		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
3743
3744	if (data[IFLA_MACSEC_ENCRYPT])
3745		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
3746
3747	if (data[IFLA_MACSEC_PROTECT])
3748		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);
3749
3750	if (data[IFLA_MACSEC_INC_SCI])
3751		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
3752
3753	if (data[IFLA_MACSEC_ES])
3754		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);
3755
3756	if (data[IFLA_MACSEC_SCB])
3757		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);
3758
3759	if (data[IFLA_MACSEC_REPLAY_PROTECT])
3760		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);
3761
3762	if (data[IFLA_MACSEC_VALIDATION])
3763		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
3764
3765	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
3766		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
3767		case MACSEC_CIPHER_ID_GCM_AES_128:
3768		case MACSEC_DEFAULT_CIPHER_ID:
3769			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
3770			secy->xpn = false;
3771			break;
3772		case MACSEC_CIPHER_ID_GCM_AES_256:
3773			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
3774			secy->xpn = false;
3775			break;
3776		case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
3777			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
3778			secy->xpn = true;
3779			break;
3780		case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
3781			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
3782			secy->xpn = true;
3783			break;
3784		default:
3785			return -EINVAL;
3786		}
3787	}
3788
3789	return 0;
3790}
3791
3792static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
3793			     struct nlattr *data[],
3794			     struct netlink_ext_ack *extack)
3795{
3796	struct macsec_dev *macsec = macsec_priv(dev);
3797	struct macsec_tx_sc tx_sc;
3798	struct macsec_secy secy;
3799	int ret;
3800
3801	if (!data)
3802		return 0;
3803
3804	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
3805	    data[IFLA_MACSEC_ICV_LEN] ||
3806	    data[IFLA_MACSEC_SCI] ||
3807	    data[IFLA_MACSEC_PORT])
3808		return -EINVAL;
3809
3810	/* Keep a copy of the unmodified secy and tx_sc, so that if the offload
3811	 * propagation fails, macsec_changelink_common() can be reverted below.
3812	 */
3813	memcpy(&secy, &macsec->secy, sizeof(secy));
3814	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));
3815
3816	ret = macsec_changelink_common(dev, data);
3817	if (ret)
3818		return ret;
3819
3820	/* If h/w offloading is available, propagate to the device */
3821	if (macsec_is_offloaded(macsec)) {
3822		const struct macsec_ops *ops;
3823		struct macsec_context ctx;
3824		int ret;
3825
3826		ops = macsec_get_ops(netdev_priv(dev), &ctx);
3827		if (!ops) {
3828			ret = -EOPNOTSUPP;
3829			goto cleanup;
3830		}
3831
3832		ctx.secy = &macsec->secy;
3833		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
3834		if (ret)
3835			goto cleanup;
3836	}
3837
3838	return 0;
3839
3840cleanup:
3841	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
3842	memcpy(&macsec->secy, &secy, sizeof(secy));
3843
3844	return ret;
3845}
3846
3847static void macsec_del_dev(struct macsec_dev *macsec)
3848{
3849	int i;
3850
3851	while (macsec->secy.rx_sc) {
3852		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);
3853
3854		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
3855		free_rx_sc(rx_sc);
3856	}
3857
3858	for (i = 0; i < MACSEC_NUM_AN; i++) {
3859		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);
3860
3861		if (sa) {
3862			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
3863			clear_tx_sa(sa);
3864		}
3865	}
3866}
3867
3868static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
3869{
3870	struct macsec_dev *macsec = macsec_priv(dev);
3871	struct net_device *real_dev = macsec->real_dev;
3872
3873	unregister_netdevice_queue(dev, head);
3874	list_del_rcu(&macsec->secys);
3875	macsec_del_dev(macsec);
3876	netdev_upper_dev_unlink(real_dev, dev);
3877
3878	macsec_generation++;
3879}
3880
3881static void macsec_dellink(struct net_device *dev, struct list_head *head)
3882{
3883	struct macsec_dev *macsec = macsec_priv(dev);
3884	struct net_device *real_dev = macsec->real_dev;
3885	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
3886
3887	/* If h/w offloading is available, propagate to the device */
3888	if (macsec_is_offloaded(macsec)) {
3889		const struct macsec_ops *ops;
3890		struct macsec_context ctx;
3891
3892		ops = macsec_get_ops(netdev_priv(dev), &ctx);
3893		if (ops) {
3894			ctx.secy = &macsec->secy;
3895			macsec_offload(ops->mdo_del_secy, &ctx);
3896		}
3897	}
3898
3899	macsec_common_dellink(dev, head);
3900
3901	if (list_empty(&rxd->secys)) {
3902		netdev_rx_handler_unregister(real_dev);
3903		kfree(rxd);
3904	}
3905}
3906
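/* The first MACsec device created on a given real device registers
 * macsec_handle_frame() as that device's rx_handler and allocates the
 * shared macsec_rxh_data; later MACsec devices on the same lower device
 * simply join rxd->secys. Both are torn down again in macsec_dellink()
 * once the last SecY is gone.
 */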
3907static int register_macsec_dev(struct net_device *real_dev,
3908			       struct net_device *dev)
3909{
3910	struct macsec_dev *macsec = macsec_priv(dev);
3911	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
3912
3913	if (!rxd) {
3914		int err;
3915
3916		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
3917		if (!rxd)
3918			return -ENOMEM;
3919
3920		INIT_LIST_HEAD(&rxd->secys);
3921
3922		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
3923						 rxd);
3924		if (err < 0) {
3925			kfree(rxd);
3926			return err;
3927		}
3928	}
3929
3930	list_add_tail_rcu(&macsec->secys, &rxd->secys);
3931	return 0;
3932}
3933
3934static bool sci_exists(struct net_device *dev, sci_t sci)
3935{
3936	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
3937	struct macsec_dev *macsec;
3938
3939	list_for_each_entry(macsec, &rxd->secys, secys) {
3940		if (macsec->secy.sci == sci)
3941			return true;
3942	}
3943
3944	return false;
3945}
3946
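/* An SCI must be unique among the SecYs sharing a real device. Per IEEE
 * 802.1AE an SCI is the 6-byte MAC address of the port followed by a
 * 2-byte port identifier, which is what dev_to_sci() (defined earlier in
 * this file) derives from the netdevice address.
 */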
3947static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
3948{
3949	struct macsec_dev *macsec = macsec_priv(dev);
3950	struct macsec_secy *secy = &macsec->secy;
3951
3952	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
3953	if (!macsec->stats)
3954		return -ENOMEM;
3955
3956	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
3957	if (!secy->tx_sc.stats) {
3958		free_percpu(macsec->stats);
3959		return -ENOMEM;
3960	}
3961
3962	if (sci == MACSEC_UNDEF_SCI)
3963		sci = dev_to_sci(dev, MACSEC_PORT_ES);
3964
3965	secy->netdev = dev;
3966	secy->operational = true;
3967	secy->key_len = DEFAULT_SAK_LEN;
3968	secy->icv_len = icv_len;
3969	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
3970	secy->protect_frames = true;
3971	secy->replay_protect = false;
3972	secy->xpn = DEFAULT_XPN;
3973
3974	secy->sci = sci;
3975	secy->tx_sc.active = true;
3976	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
3977	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
3978	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
3979	secy->tx_sc.end_station = false;
3980	secy->tx_sc.scb = false;
3981
3982	return 0;
3983}
3984
3985static struct lock_class_key macsec_netdev_addr_lock_key;
3986
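/* A minimal sketch of exercising macsec_newlink() from userspace,
 * assuming iproute2's "ip link" syntax for the macsec kind:
 *
 *	ip link add link eth0 macsec0 type macsec port 11 encrypt on
 *
 * IFLA_LINK selects eth0, IFLA_MACSEC_PORT feeds dev_to_sci(), and
 * IFLA_MACSEC_ENCRYPT is applied by macsec_changelink_common().
 */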
3987static int macsec_newlink(struct net *net, struct net_device *dev,
3988			  struct nlattr *tb[], struct nlattr *data[],
3989			  struct netlink_ext_ack *extack)
3990{
3991	struct macsec_dev *macsec = macsec_priv(dev);
3992	rx_handler_func_t *rx_handler;
3993	u8 icv_len = DEFAULT_ICV_LEN;
3994	struct net_device *real_dev;
3995	int err, mtu;
3996	sci_t sci;
3997
3998	if (!tb[IFLA_LINK])
3999		return -EINVAL;
4000	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
4001	if (!real_dev)
4002		return -ENODEV;
4003	if (real_dev->type != ARPHRD_ETHER)
4004		return -EINVAL;
4005
4006	dev->priv_flags |= IFF_MACSEC;
4007
4008	macsec->real_dev = real_dev;
4009
4010	if (data && data[IFLA_MACSEC_OFFLOAD])
4011		macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
4012	else
4013		/* MACsec offloading is off by default */
4014		macsec->offload = MACSEC_OFFLOAD_OFF;
4015
4016	/* Check if the offloading mode is supported by the underlying layers */
4017	if (macsec->offload != MACSEC_OFFLOAD_OFF &&
4018	    !macsec_check_offload(macsec->offload, macsec))
4019		return -EOPNOTSUPP;
4020
4021	if (data && data[IFLA_MACSEC_ICV_LEN])
4022		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
4023	mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
4024	if (mtu < 0)
4025		dev->mtu = 0;
4026	else
4027		dev->mtu = mtu;
4028
4029	rx_handler = rtnl_dereference(real_dev->rx_handler);
4030	if (rx_handler && rx_handler != macsec_handle_frame)
4031		return -EBUSY;
4032
4033	err = register_netdevice(dev);
4034	if (err < 0)
4035		return err;
4036
4037	netdev_lockdep_set_classes(dev);
4038	lockdep_set_class(&dev->addr_list_lock,
4039			  &macsec_netdev_addr_lock_key);
4040
4041	err = netdev_upper_dev_link(real_dev, dev, extack);
4042	if (err < 0)
4043		goto unregister;
4044
4045	/* the device needs to be registered already, so that ->init has run
4046	 * and the MAC address is set
4047	 */
4048	if (data && data[IFLA_MACSEC_SCI])
4049		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
4050	else if (data && data[IFLA_MACSEC_PORT])
4051		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
4052	else
4053		sci = dev_to_sci(dev, MACSEC_PORT_ES);
4054
4055	if (rx_handler && sci_exists(real_dev, sci)) {
4056		err = -EBUSY;
4057		goto unlink;
4058	}
4059
4060	err = macsec_add_dev(dev, sci, icv_len);
4061	if (err)
4062		goto unlink;
4063
4064	if (data) {
4065		err = macsec_changelink_common(dev, data);
4066		if (err)
4067			goto del_dev;
4068	}
4069
4070	/* If h/w offloading is available, propagate to the device */
4071	if (macsec_is_offloaded(macsec)) {
4072		const struct macsec_ops *ops;
4073		struct macsec_context ctx;
4074
4075		ops = macsec_get_ops(macsec, &ctx);
4076		if (ops) {
4077			ctx.secy = &macsec->secy;
4078			err = macsec_offload(ops->mdo_add_secy, &ctx);
4079			if (err)
4080				goto del_dev;
4081		}
4082	}
4083
4084	err = register_macsec_dev(real_dev, dev);
4085	if (err < 0)
4086		goto del_dev;
4087
4088	netif_stacked_transfer_operstate(real_dev, dev);
4089	linkwatch_fire_event(dev);
4090
4091	macsec_generation++;
4092
4093	return 0;
4094
4095del_dev:
4096	macsec_del_dev(macsec);
4097unlink:
4098	netdev_upper_dev_unlink(real_dev, dev);
4099unregister:
4100	unregister_netdevice(dev);
4101	return err;
4102}
4103
4104static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
4105				struct netlink_ext_ack *extack)
4106{
4107	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
4108	u8 icv_len = DEFAULT_ICV_LEN;
4109	int flag;
4110	bool es, scb, sci;
4111
4112	if (!data)
4113		return 0;
4114
4115	if (data[IFLA_MACSEC_CIPHER_SUITE])
4116		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);
4117
4118	if (data[IFLA_MACSEC_ICV_LEN]) {
4119		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
4120		if (icv_len != DEFAULT_ICV_LEN) {
4121			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
4122			struct crypto_aead *dummy_tfm;
4123
4124			dummy_tfm = macsec_alloc_tfm(dummy_key,
4125						     DEFAULT_SAK_LEN,
4126						     icv_len);
4127			if (IS_ERR(dummy_tfm))
4128				return PTR_ERR(dummy_tfm);
4129			crypto_free_aead(dummy_tfm);
4130		}
4131	}
4132
4133	switch (csid) {
4134	case MACSEC_CIPHER_ID_GCM_AES_128:
4135	case MACSEC_CIPHER_ID_GCM_AES_256:
4136	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
4137	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
4138	case MACSEC_DEFAULT_CIPHER_ID:
4139		if (icv_len < MACSEC_MIN_ICV_LEN ||
4140		    icv_len > MACSEC_STD_ICV_LEN)
4141			return -EINVAL;
4142		break;
4143	default:
4144		return -EINVAL;
4145	}
4146
4147	if (data[IFLA_MACSEC_ENCODING_SA]) {
4148		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
4149			return -EINVAL;
4150	}
4151
4152	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
4153	     flag < IFLA_MACSEC_VALIDATION;
4154	     flag++) {
4155		if (data[flag]) {
4156			if (nla_get_u8(data[flag]) > 1)
4157				return -EINVAL;
4158		}
4159	}
4160
4161	es  = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
4162	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
4163	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;
4164
4165	if ((sci && (scb || es)) || (scb && es))
4166		return -EINVAL;
4167
4168	if (data[IFLA_MACSEC_VALIDATION] &&
4169	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
4170		return -EINVAL;
4171
4172	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
4173	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
4174	    !data[IFLA_MACSEC_WINDOW])
4175		return -EINVAL;
4176
4177	return 0;
4178}
4179
4180static struct net *macsec_get_link_net(const struct net_device *dev)
4181{
4182	return dev_net(macsec_priv(dev)->real_dev);
4183}
4184
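/* macsec_get_size() must account for every attribute macsec_fill_info()
 * can emit; an undersized estimate would make the rtnetlink core allocate
 * too small a message, and fill_info would then fail with -EMSGSIZE.
 */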
4185static size_t macsec_get_size(const struct net_device *dev)
4186{
4187	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
4188		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
4189		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
4190		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
4191		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
4192		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
4193		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
4194		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
4195		nla_total_size(1) + /* IFLA_MACSEC_ES */
4196		nla_total_size(1) + /* IFLA_MACSEC_SCB */
4197		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
4198		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
4199		0;
4200}
4201
4202static int macsec_fill_info(struct sk_buff *skb,
4203			    const struct net_device *dev)
4204{
4205	struct macsec_secy *secy = &macsec_priv(dev)->secy;
4206	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
4207	u64 csid;
4208
4209	switch (secy->key_len) {
4210	case MACSEC_GCM_AES_128_SAK_LEN:
4211		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
4212		break;
4213	case MACSEC_GCM_AES_256_SAK_LEN:
4214		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
4215		break;
4216	default:
4217		goto nla_put_failure;
4218	}
4219
4220	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
4221			IFLA_MACSEC_PAD) ||
4222	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
4223	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
4224			      csid, IFLA_MACSEC_PAD) ||
4225	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
4226	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
4227	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
4228	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
4229	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
4230	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
4231	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
4232	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
4233	    0)
4234		goto nla_put_failure;
4235
4236	if (secy->replay_protect) {
4237		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
4238			goto nla_put_failure;
4239	}
4240
4241	return 0;
4242
4243nla_put_failure:
4244	return -EMSGSIZE;
4245}
4246
4247static struct rtnl_link_ops macsec_link_ops __read_mostly = {
4248	.kind		= "macsec",
4249	.priv_size	= sizeof(struct macsec_dev),
4250	.maxtype	= IFLA_MACSEC_MAX,
4251	.policy		= macsec_rtnl_policy,
4252	.setup		= macsec_setup,
4253	.validate	= macsec_validate_attr,
4254	.newlink	= macsec_newlink,
4255	.changelink	= macsec_changelink,
4256	.dellink	= macsec_dellink,
4257	.get_size	= macsec_get_size,
4258	.fill_info	= macsec_fill_info,
4259	.get_link_net	= macsec_get_link_net,
4260};
4261
4262static bool is_macsec_master(struct net_device *dev)
4263{
4264	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
4265}
4266
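/* React to events on the real device: mirror operstate changes onto the
 * stacked MACsec devices, tear them all down when the lower device
 * unregisters, and clamp their MTU when the lower MTU shrinks.
 */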
4267static int macsec_notify(struct notifier_block *this, unsigned long event,
4268			 void *ptr)
4269{
4270	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
4271	LIST_HEAD(head);
4272
4273	if (!is_macsec_master(real_dev))
4274		return NOTIFY_DONE;
4275
4276	switch (event) {
4277	case NETDEV_DOWN:
4278	case NETDEV_UP:
4279	case NETDEV_CHANGE: {
4280		struct macsec_dev *m, *n;
4281		struct macsec_rxh_data *rxd;
4282
4283		rxd = macsec_data_rtnl(real_dev);
4284		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
4285			struct net_device *dev = m->secy.netdev;
4286
4287			netif_stacked_transfer_operstate(real_dev, dev);
4288		}
4289		break;
4290	}
4291	case NETDEV_UNREGISTER: {
4292		struct macsec_dev *m, *n;
4293		struct macsec_rxh_data *rxd;
4294
4295		rxd = macsec_data_rtnl(real_dev);
4296		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
4297			macsec_common_dellink(m->secy.netdev, &head);
4298		}
4299
4300		netdev_rx_handler_unregister(real_dev);
4301		kfree(rxd);
4302
4303		unregister_netdevice_many(&head);
4304		break;
4305	}
4306	case NETDEV_CHANGEMTU: {
4307		struct macsec_dev *m;
4308		struct macsec_rxh_data *rxd;
4309
4310		rxd = macsec_data_rtnl(real_dev);
4311		list_for_each_entry(m, &rxd->secys, secys) {
4312			struct net_device *dev = m->secy.netdev;
4313			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
4314							    macsec_extra_len(true));
4315
4316			if (dev->mtu > mtu)
4317				dev_set_mtu(dev, mtu);
4318		}
4319	}
4320	}
4321
4322	return NOTIFY_OK;
4323}
4324
4325static struct notifier_block macsec_notifier = {
4326	.notifier_call = macsec_notify,
4327};
4328
4329static int __init macsec_init(void)
4330{
4331	int err;
4332
4333	pr_info("MACsec IEEE 802.1AE\n");
4334	err = register_netdevice_notifier(&macsec_notifier);
4335	if (err)
4336		return err;
4337
4338	err = rtnl_link_register(&macsec_link_ops);
4339	if (err)
4340		goto notifier;
4341
4342	err = genl_register_family(&macsec_fam);
4343	if (err)
4344		goto rtnl;
4345
4346	return 0;
4347
4348rtnl:
4349	rtnl_link_unregister(&macsec_link_ops);
4350notifier:
4351	unregister_netdevice_notifier(&macsec_notifier);
4352	return err;
4353}
4354
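/* macsec_exit() unwinds in the reverse order of macsec_init(): the genl
 * family goes first so no new requests arrive, then the rtnl link ops,
 * then the notifier. The final rcu_barrier() waits for pending call_rcu()
 * callbacks (e.g. RX SC/SA frees) to complete before the module goes away.
 */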
4355static void __exit macsec_exit(void)
4356{
4357	genl_unregister_family(&macsec_fam);
4358	rtnl_link_unregister(&macsec_link_ops);
4359	unregister_netdevice_notifier(&macsec_notifier);
4360	rcu_barrier();
4361}
4362
4363module_init(macsec_init);
4364module_exit(macsec_exit);
4365
4366MODULE_ALIAS_RTNL_LINK("macsec");
4367MODULE_ALIAS_GENL_FAMILY("macsec");
4368
4369MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
4370MODULE_LICENSE("GPL v2");