v6.2 (drivers/net/ethernet/intel/ice/ice_lag.c)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2021, Intel Corporation. */

/* Link Aggregation code */

#include "ice.h"
#include "ice_lag.h"

/**
 * ice_lag_nop_handler - no-op Rx handler to disable LAG
 * @pskb: pointer to skb pointer
 */
rx_handler_result_t ice_lag_nop_handler(struct sk_buff __always_unused **pskb)
{
	return RX_HANDLER_PASS;
}

/**
 * ice_lag_set_primary - set PF LAG state as Primary
 * @lag: LAG info struct
 */
static void ice_lag_set_primary(struct ice_lag *lag)
{
	struct ice_pf *pf = lag->pf;

	if (!pf)
		return;

	if (lag->role != ICE_LAG_UNSET && lag->role != ICE_LAG_BACKUP) {
		dev_warn(ice_pf_to_dev(pf), "%s: Attempt to be Primary, but incompatible state.\n",
			 netdev_name(lag->netdev));
		return;
	}

	lag->role = ICE_LAG_PRIMARY;
}

/**
 * ice_lag_set_backup - set PF LAG state to Backup
 * @lag: LAG info struct
 */
static void ice_lag_set_backup(struct ice_lag *lag)
{
	struct ice_pf *pf = lag->pf;

	if (!pf)
		return;

	if (lag->role != ICE_LAG_UNSET && lag->role != ICE_LAG_PRIMARY) {
		dev_dbg(ice_pf_to_dev(pf), "%s: Attempt to be Backup, but incompatible state\n",
			netdev_name(lag->netdev));
		return;
	}

	lag->role = ICE_LAG_BACKUP;
}

/**
 * ice_display_lag_info - print LAG info
 * @lag: LAG info struct
 */
static void ice_display_lag_info(struct ice_lag *lag)
{
	const char *name, *peer, *upper, *role, *bonded, *primary;
	struct device *dev = &lag->pf->pdev->dev;

	name = lag->netdev ? netdev_name(lag->netdev) : "unset";
	peer = lag->peer_netdev ? netdev_name(lag->peer_netdev) : "unset";
	upper = lag->upper_netdev ? netdev_name(lag->upper_netdev) : "unset";
	primary = lag->primary ? "TRUE" : "FALSE";
	bonded = lag->bonded ? "BONDED" : "UNBONDED";

	switch (lag->role) {
	case ICE_LAG_NONE:
		role = "NONE";
		break;
	case ICE_LAG_PRIMARY:
		role = "PRIMARY";
		break;
	case ICE_LAG_BACKUP:
		role = "BACKUP";
		break;
	case ICE_LAG_UNSET:
		role = "UNSET";
		break;
	default:
		role = "ERROR";
	}

	dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, primary:%s\n", name,
		bonded, peer, upper, role, primary);
}

/**
 * ice_lag_info_event - handle NETDEV_BONDING_INFO event
 * @lag: LAG info struct
 * @ptr: opaque data pointer
 *
 * ptr is to be cast to (netdev_notifier_bonding_info *)
 */
static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
{
	struct netdev_notifier_bonding_info *info;
	struct netdev_bonding_info *bonding_info;
	struct net_device *event_netdev;
	const char *lag_netdev_name;

	event_netdev = netdev_notifier_info_to_dev(ptr);
	info = ptr;
	lag_netdev_name = netdev_name(lag->netdev);
	bonding_info = &info->bonding_info;

	if (event_netdev != lag->netdev || !lag->bonded || !lag->upper_netdev)
		return;

	if (bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) {
		netdev_dbg(lag->netdev, "Bonding event recv, but mode not active/backup\n");
		goto lag_out;
	}

	if (strcmp(bonding_info->slave.slave_name, lag_netdev_name)) {
		netdev_dbg(lag->netdev, "Bonding event recv, but secondary info not for us\n");
		goto lag_out;
	}

	if (bonding_info->slave.state)
		ice_lag_set_backup(lag);
	else
		ice_lag_set_primary(lag);

lag_out:
	ice_display_lag_info(lag);
}

/**
 * ice_lag_link - handle LAG link event
 * @lag: LAG info struct
 * @info: info from the netdev notifier
 */
static void
ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info)
{
	struct net_device *netdev_tmp, *upper = info->upper_dev;
	struct ice_pf *pf = lag->pf;
	int peers = 0;

	if (lag->bonded)
		dev_warn(ice_pf_to_dev(pf), "%s Already part of a bond\n",
			 netdev_name(lag->netdev));

	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, netdev_tmp)
		peers++;
	rcu_read_unlock();

	if (lag->upper_netdev != upper) {
		dev_hold(upper);
		lag->upper_netdev = upper;
	}
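
	/* Being part of a bond disables SR-IOV and RDMA on this port; the
	 * capabilities are restored in ice_lag_unlink()/ice_lag_unregister()
	 * once the bond is dissolved.
	 */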
	ice_clear_sriov_cap(pf);
	ice_clear_rdma_cap(pf);

	lag->bonded = true;
	lag->role = ICE_LAG_UNSET;

	/* if this is the first element in an LAG mark as primary */
	lag->primary = !!(peers == 1);
}

/**
 * ice_lag_unlink - handle unlink event
 * @lag: LAG info struct
 * @info: info from netdev notification
 */
static void
ice_lag_unlink(struct ice_lag *lag,
	       struct netdev_notifier_changeupper_info *info)
{
	struct net_device *netdev_tmp, *upper = info->upper_dev;
	struct ice_pf *pf = lag->pf;
	bool found = false;

	if (!lag->bonded) {
		netdev_dbg(lag->netdev, "bonding unlink event on non-LAG netdev\n");
		return;
	}

	/* determine if we are in the new LAG config or not */
	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, netdev_tmp) {
		if (netdev_tmp == lag->netdev) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	if (found)
		return;

	if (lag->upper_netdev) {
		dev_put(lag->upper_netdev);
		lag->upper_netdev = NULL;
	}

	lag->peer_netdev = NULL;
	ice_set_sriov_cap(pf);
	ice_set_rdma_cap(pf);
	lag->bonded = false;
	lag->role = ICE_LAG_NONE;
}

/**
 * ice_lag_unregister - handle netdev unregister events
 * @lag: LAG info struct
 * @netdev: netdev reporting the event
 */
static void ice_lag_unregister(struct ice_lag *lag, struct net_device *netdev)
{
	struct ice_pf *pf = lag->pf;

	/* check to see if this event is for this netdev
	 * check that we are in an aggregate
	 */
	if (netdev != lag->netdev || !lag->bonded)
		return;

	if (lag->upper_netdev) {
		dev_put(lag->upper_netdev);
		lag->upper_netdev = NULL;
		ice_set_sriov_cap(pf);
		ice_set_rdma_cap(pf);
	}
	/* perform some cleanup in case we come back */
	lag->bonded = false;
	lag->role = ICE_LAG_NONE;
}

/**
 * ice_lag_changeupper_event - handle LAG changeupper event
 * @lag: LAG info struct
 * @ptr: opaque pointer data
 *
 * ptr is to be cast into netdev_notifier_changeupper_info
 */
static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *netdev;

	info = ptr;
	netdev = netdev_notifier_info_to_dev(ptr);

	/* not for this netdev */
	if (netdev != lag->netdev)
		return;

	if (!info->upper_dev) {
		netdev_dbg(netdev, "changeupper rcvd, but no upper defined\n");
		return;
	}

	netdev_dbg(netdev, "bonding %s\n", info->linking ? "LINK" : "UNLINK");

	if (!netif_is_lag_master(info->upper_dev)) {
		netdev_dbg(netdev, "changeupper rcvd, but not primary. bail\n");
		return;
	}

	if (info->linking)
		ice_lag_link(lag, info);
	else
		ice_lag_unlink(lag, info);

	ice_display_lag_info(lag);
}

/**
 * ice_lag_changelower_event - handle LAG changelower event
 * @lag: LAG info struct
 * @ptr: opaque data pointer
 *
 * ptr to be cast to netdev_notifier_changelowerstate_info
 */
static void ice_lag_changelower_event(struct ice_lag *lag, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	if (netdev != lag->netdev)
		return;

	netdev_dbg(netdev, "bonding info\n");

	if (!netif_is_lag_port(netdev))
		netdev_dbg(netdev, "CHANGELOWER rcvd, but netdev not in LAG. Bail\n");
}

/**
 * ice_lag_event_handler - handle LAG events from netdev
 * @notif_blk: notifier block registered by this netdev
 * @event: event type
 * @ptr: opaque data containing notifier event
 */
static int
ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
		      void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct ice_lag *lag;

	lag = container_of(notif_blk, struct ice_lag, notif_block);

	if (!lag->netdev)
		return NOTIFY_DONE;

	/* Check that the netdev is in the working namespace */
	if (!net_eq(dev_net(netdev), &init_net))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		ice_lag_changeupper_event(lag, ptr);
		break;
	case NETDEV_CHANGELOWERSTATE:
		ice_lag_changelower_event(lag, ptr);
		break;
	case NETDEV_BONDING_INFO:
		ice_lag_info_event(lag, ptr);
		break;
	case NETDEV_UNREGISTER:
		ice_lag_unregister(lag, netdev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

/**
 * ice_register_lag_handler - register LAG handler on netdev
 * @lag: LAG struct
 */
static int ice_register_lag_handler(struct ice_lag *lag)
{
	struct device *dev = ice_pf_to_dev(lag->pf);
	struct notifier_block *notif_blk;

	notif_blk = &lag->notif_block;

	if (!notif_blk->notifier_call) {
		notif_blk->notifier_call = ice_lag_event_handler;
		if (register_netdevice_notifier(notif_blk)) {
			notif_blk->notifier_call = NULL;
			dev_err(dev, "FAIL register LAG event handler!\n");
			return -EINVAL;
		}
		dev_dbg(dev, "LAG event handler registered\n");
	}
	return 0;
}

/**
 * ice_unregister_lag_handler - unregister LAG handler on netdev
 * @lag: LAG struct
 */
static void ice_unregister_lag_handler(struct ice_lag *lag)
{
	struct device *dev = ice_pf_to_dev(lag->pf);
	struct notifier_block *notif_blk;

	notif_blk = &lag->notif_block;
	if (notif_blk->notifier_call) {
		unregister_netdevice_notifier(notif_blk);
		dev_dbg(dev, "LAG event handler unregistered\n");
	}
}

/**
 * ice_init_lag - initialize support for LAG
 * @pf: PF struct
 *
 * Alloc memory for LAG structs and initialize the elements.
 * Memory will be freed in ice_deinit_lag
 */
int ice_init_lag(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_lag *lag;
	struct ice_vsi *vsi;
	int err;

	pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL);
	if (!pf->lag)
		return -ENOMEM;
	lag = pf->lag;

	vsi = ice_get_main_vsi(pf);
	if (!vsi) {
		dev_err(dev, "couldn't get main vsi, link aggregation init fail\n");
		err = -EIO;
		goto lag_error;
	}

	lag->pf = pf;
	lag->netdev = vsi->netdev;
	lag->role = ICE_LAG_NONE;
	lag->bonded = false;
	lag->peer_netdev = NULL;
	lag->upper_netdev = NULL;
	lag->notif_block.notifier_call = NULL;

	err = ice_register_lag_handler(lag);
	if (err) {
		dev_warn(dev, "INIT LAG: Failed to register event handler\n");
		goto lag_error;
	}

	ice_display_lag_info(lag);

	dev_dbg(dev, "INIT LAG complete\n");
	return 0;

lag_error:
	kfree(lag);
	pf->lag = NULL;
	return err;
}

/**
 * ice_deinit_lag - Clean up LAG
 * @pf: PF struct
 *
 * Clean up kernel LAG info and free memory
 * This function is meant to only be called on driver remove/shutdown
 */
void ice_deinit_lag(struct ice_pf *pf)
{
	struct ice_lag *lag;

	lag = pf->lag;

	if (!lag)
		return;

	if (lag->pf)
		ice_unregister_lag_handler(lag);
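
	/* dev_put() is a no-op when passed NULL, so these references can be
	 * released without NULL checks (compare the v5.14.15 version below,
	 * which still checks).
	 */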
	dev_put(lag->upper_netdev);

	dev_put(lag->peer_netdev);

	kfree(lag);

	pf->lag = NULL;
}
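
The registration done in ice_register_lag_handler() uses the generic netdevice notifier chain. For reference, a minimal standalone sketch of the same pattern is shown below; the lag_watch_* names are hypothetical and not part of the ice driver, and the sketch only logs NETDEV_CHANGEUPPER events rather than tracking bond state.

#include <linux/module.h>
#include <linux/netdevice.h>

/* Hypothetical example module: log the CHANGEUPPER events that the ice LAG
 * handler above reacts to.
 */
static int lag_watch_event(struct notifier_block *nb, unsigned long event,
			   void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	if (event != NETDEV_CHANGEUPPER)
		return NOTIFY_DONE;

	if (info->upper_dev && netif_is_lag_master(info->upper_dev))
		netdev_info(netdev, "%s bond %s\n",
			    info->linking ? "linked to" : "unlinked from",
			    netdev_name(info->upper_dev));

	return NOTIFY_DONE;
}

static struct notifier_block lag_watch_nb = {
	.notifier_call = lag_watch_event,
};

static int __init lag_watch_init(void)
{
	/* same call the driver makes in ice_register_lag_handler() */
	return register_netdevice_notifier(&lag_watch_nb);
}

static void __exit lag_watch_exit(void)
{
	unregister_netdevice_notifier(&lag_watch_nb);
}

module_init(lag_watch_init);
module_exit(lag_watch_exit);
MODULE_LICENSE("GPL");

The driver's ice_lag_event_handler() follows the same shape, but dispatches on several event types and records the result in struct ice_lag instead of just logging.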
v5.14.15 (drivers/net/ethernet/intel/ice/ice_lag.c)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2021, Intel Corporation. */

/* Link Aggregation code */

#include "ice.h"
#include "ice_lag.h"

/**
 * ice_lag_nop_handler - no-op Rx handler to disable LAG
 * @pskb: pointer to skb pointer
 */
rx_handler_result_t ice_lag_nop_handler(struct sk_buff __always_unused **pskb)
{
	return RX_HANDLER_PASS;
}

/**
 * ice_lag_set_primary - set PF LAG state as Primary
 * @lag: LAG info struct
 */
static void ice_lag_set_primary(struct ice_lag *lag)
{
	struct ice_pf *pf = lag->pf;

	if (!pf)
		return;

	if (lag->role != ICE_LAG_UNSET && lag->role != ICE_LAG_BACKUP) {
		dev_warn(ice_pf_to_dev(pf), "%s: Attempt to be Primary, but incompatible state.\n",
			 netdev_name(lag->netdev));
		return;
	}

	lag->role = ICE_LAG_PRIMARY;
}

/**
 * ice_lag_set_backup - set PF LAG state to Backup
 * @lag: LAG info struct
 */
static void ice_lag_set_backup(struct ice_lag *lag)
{
	struct ice_pf *pf = lag->pf;

	if (!pf)
		return;

	if (lag->role != ICE_LAG_UNSET && lag->role != ICE_LAG_PRIMARY) {
		dev_dbg(ice_pf_to_dev(pf), "%s: Attempt to be Backup, but incompatible state\n",
			netdev_name(lag->netdev));
		return;
	}

	lag->role = ICE_LAG_BACKUP;
}

/**
 * ice_display_lag_info - print LAG info
 * @lag: LAG info struct
 */
static void ice_display_lag_info(struct ice_lag *lag)
{
	const char *name, *peer, *upper, *role, *bonded, *master;
	struct device *dev = &lag->pf->pdev->dev;

	name = lag->netdev ? netdev_name(lag->netdev) : "unset";
	peer = lag->peer_netdev ? netdev_name(lag->peer_netdev) : "unset";
	upper = lag->upper_netdev ? netdev_name(lag->upper_netdev) : "unset";
	master = lag->master ? "TRUE" : "FALSE";
	bonded = lag->bonded ? "BONDED" : "UNBONDED";

	switch (lag->role) {
	case ICE_LAG_NONE:
		role = "NONE";
		break;
	case ICE_LAG_PRIMARY:
		role = "PRIMARY";
		break;
	case ICE_LAG_BACKUP:
		role = "BACKUP";
		break;
	case ICE_LAG_UNSET:
		role = "UNSET";
		break;
	default:
		role = "ERROR";
	}

	dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, master:%s\n", name,
		bonded, peer, upper, role, master);
}

/**
 * ice_lag_info_event - handle NETDEV_BONDING_INFO event
 * @lag: LAG info struct
 * @ptr: opaque data pointer
 *
 * ptr is to be cast to (netdev_notifier_bonding_info *)
 */
static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
{
	struct net_device *event_netdev, *netdev_tmp;
	struct netdev_notifier_bonding_info *info;
	struct netdev_bonding_info *bonding_info;
	const char *lag_netdev_name;

	event_netdev = netdev_notifier_info_to_dev(ptr);
	info = ptr;
	lag_netdev_name = netdev_name(lag->netdev);
	bonding_info = &info->bonding_info;

	if (event_netdev != lag->netdev || !lag->bonded || !lag->upper_netdev)
		return;

	if (bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) {
		netdev_dbg(lag->netdev, "Bonding event recv, but mode not active/backup\n");
		goto lag_out;
	}

	if (strcmp(bonding_info->slave.slave_name, lag_netdev_name)) {
		netdev_dbg(lag->netdev, "Bonding event recv, but slave info not for us\n");
		goto lag_out;
	}

	rcu_read_lock();
	for_each_netdev_in_bond_rcu(lag->upper_netdev, netdev_tmp) {
		if (!netif_is_ice(netdev_tmp))
			continue;

		if (netdev_tmp && netdev_tmp != lag->netdev &&
		    lag->peer_netdev != netdev_tmp) {
			dev_hold(netdev_tmp);
			lag->peer_netdev = netdev_tmp;
		}
	}
	rcu_read_unlock();

	if (bonding_info->slave.state)
		ice_lag_set_backup(lag);
	else
		ice_lag_set_primary(lag);

lag_out:
	ice_display_lag_info(lag);
}

/**
 * ice_lag_link - handle LAG link event
 * @lag: LAG info struct
 * @info: info from the netdev notifier
 */
static void
ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info)
{
	struct net_device *netdev_tmp, *upper = info->upper_dev;
	struct ice_pf *pf = lag->pf;
	int peers = 0;

	if (lag->bonded)
		dev_warn(ice_pf_to_dev(pf), "%s Already part of a bond\n",
			 netdev_name(lag->netdev));

	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, netdev_tmp)
		peers++;
	rcu_read_unlock();

	if (lag->upper_netdev != upper) {
		dev_hold(upper);
		lag->upper_netdev = upper;
	}

	ice_clear_sriov_cap(pf);
	ice_clear_rdma_cap(pf);

	lag->bonded = true;
	lag->role = ICE_LAG_UNSET;

	/* if this is the first element in an LAG mark as master */
	lag->master = !!(peers == 1);
}

/**
 * ice_lag_unlink - handle unlink event
 * @lag: LAG info struct
 * @info: info from netdev notification
 */
static void
ice_lag_unlink(struct ice_lag *lag,
	       struct netdev_notifier_changeupper_info *info)
{
	struct net_device *netdev_tmp, *upper = info->upper_dev;
	struct ice_pf *pf = lag->pf;
	bool found = false;

	if (!lag->bonded) {
		netdev_dbg(lag->netdev, "bonding unlink event on non-LAG netdev\n");
		return;
	}

	/* determine if we are in the new LAG config or not */
	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, netdev_tmp) {
		if (netdev_tmp == lag->netdev) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	if (found)
		return;

	if (lag->upper_netdev) {
		dev_put(lag->upper_netdev);
		lag->upper_netdev = NULL;
	}

	if (lag->peer_netdev) {
		dev_put(lag->peer_netdev);
		lag->peer_netdev = NULL;
	}

	ice_set_sriov_cap(pf);
	ice_set_rdma_cap(pf);
	lag->bonded = false;
	lag->role = ICE_LAG_NONE;
}

/**
 * ice_lag_changeupper_event - handle LAG changeupper event
 * @lag: LAG info struct
 * @ptr: opaque pointer data
 *
 * ptr is to be cast into netdev_notifier_changeupper_info
 */
static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *netdev;

	info = ptr;
	netdev = netdev_notifier_info_to_dev(ptr);

	/* not for this netdev */
	if (netdev != lag->netdev)
		return;

	if (!info->upper_dev) {
		netdev_dbg(netdev, "changeupper rcvd, but no upper defined\n");
		return;
	}

	netdev_dbg(netdev, "bonding %s\n", info->linking ? "LINK" : "UNLINK");

	if (!netif_is_lag_master(info->upper_dev)) {
		netdev_dbg(netdev, "changeupper rcvd, but not master. bail\n");
		return;
	}

	if (info->linking)
		ice_lag_link(lag, info);
	else
		ice_lag_unlink(lag, info);

	ice_display_lag_info(lag);
}

/**
 * ice_lag_changelower_event - handle LAG changelower event
 * @lag: LAG info struct
 * @ptr: opaque data pointer
 *
 * ptr to be cast to netdev_notifier_changelowerstate_info
 */
static void ice_lag_changelower_event(struct ice_lag *lag, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	if (netdev != lag->netdev)
		return;

	netdev_dbg(netdev, "bonding info\n");

	if (!netif_is_lag_port(netdev))
		netdev_dbg(netdev, "CHANGELOWER rcvd, but netdev not in LAG. Bail\n");
}

/**
 * ice_lag_event_handler - handle LAG events from netdev
 * @notif_blk: notifier block registered by this netdev
 * @event: event type
 * @ptr: opaque data containing notifier event
 */
static int
ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
		      void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct ice_lag *lag;

	lag = container_of(notif_blk, struct ice_lag, notif_block);

	if (!lag->netdev)
		return NOTIFY_DONE;

	/* Check that the netdev is in the working namespace */
	if (!net_eq(dev_net(netdev), &init_net))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		ice_lag_changeupper_event(lag, ptr);
		break;
	case NETDEV_CHANGELOWERSTATE:
		ice_lag_changelower_event(lag, ptr);
		break;
	case NETDEV_BONDING_INFO:
		ice_lag_info_event(lag, ptr);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

/**
 * ice_register_lag_handler - register LAG handler on netdev
 * @lag: LAG struct
 */
static int ice_register_lag_handler(struct ice_lag *lag)
{
	struct device *dev = ice_pf_to_dev(lag->pf);
	struct notifier_block *notif_blk;

	notif_blk = &lag->notif_block;

	if (!notif_blk->notifier_call) {
		notif_blk->notifier_call = ice_lag_event_handler;
		if (register_netdevice_notifier(notif_blk)) {
			notif_blk->notifier_call = NULL;
			dev_err(dev, "FAIL register LAG event handler!\n");
			return -EINVAL;
		}
		dev_dbg(dev, "LAG event handler registered\n");
	}
	return 0;
}

/**
 * ice_unregister_lag_handler - unregister LAG handler on netdev
 * @lag: LAG struct
 */
static void ice_unregister_lag_handler(struct ice_lag *lag)
{
	struct device *dev = ice_pf_to_dev(lag->pf);
	struct notifier_block *notif_blk;

	notif_blk = &lag->notif_block;
	if (notif_blk->notifier_call) {
		unregister_netdevice_notifier(notif_blk);
		dev_dbg(dev, "LAG event handler unregistered\n");
	}
}

/**
 * ice_init_lag - initialize support for LAG
 * @pf: PF struct
 *
 * Alloc memory for LAG structs and initialize the elements.
 * Memory will be freed in ice_deinit_lag
 */
int ice_init_lag(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_lag *lag;
	struct ice_vsi *vsi;
	int err;

	pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL);
	if (!pf->lag)
		return -ENOMEM;
	lag = pf->lag;

	vsi = ice_get_main_vsi(pf);
	if (!vsi) {
		dev_err(dev, "couldn't get main vsi, link aggregation init fail\n");
		err = -EIO;
		goto lag_error;
	}

	lag->pf = pf;
	lag->netdev = vsi->netdev;
	lag->role = ICE_LAG_NONE;
	lag->bonded = false;
	lag->peer_netdev = NULL;
	lag->upper_netdev = NULL;
	lag->notif_block.notifier_call = NULL;

	err = ice_register_lag_handler(lag);
	if (err) {
		dev_warn(dev, "INIT LAG: Failed to register event handler\n");
		goto lag_error;
	}

	ice_display_lag_info(lag);

	dev_dbg(dev, "INIT LAG complete\n");
	return 0;

lag_error:
	kfree(lag);
	pf->lag = NULL;
	return err;
}

/**
 * ice_deinit_lag - Clean up LAG
 * @pf: PF struct
 *
 * Clean up kernel LAG info and free memory
 * This function is meant to only be called on driver remove/shutdown
 */
void ice_deinit_lag(struct ice_pf *pf)
{
	struct ice_lag *lag;

	lag = pf->lag;

	if (!lag)
		return;

	if (lag->pf)
		ice_unregister_lag_handler(lag);

	if (lag->upper_netdev)
		dev_put(lag->upper_netdev);

	if (lag->peer_netdev)
		dev_put(lag->peer_netdev);

	kfree(lag);

	pf->lag = NULL;
}
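
In both versions, ice_lag_info_event() derives the port's role from bonding_info->slave.state. The state values come from the bonding UAPI in include/uapi/linux/if_bonding.h, where BOND_STATE_ACTIVE is 0 and BOND_STATE_BACKUP is 1, so a non-zero state means the port is currently the backup slave. The handler also only acts when the bond is in active-backup mode (BOND_MODE_ACTIVEBACKUP). Restating the check with those constants spelled out (an annotated view, not driver source):

	/* bonding UAPI: BOND_STATE_ACTIVE == 0, BOND_STATE_BACKUP == 1 */
	if (bonding_info->slave.state)		/* backup slave */
		ice_lag_set_backup(lag);
	else					/* active slave */
		ice_lag_set_primary(lag);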