Linux Audio

Check our new training course

Loading...
v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright(c) 2013 - 2019 Intel Corporation. */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  3
  4#include "fm10k.h"
  5#include "fm10k_vf.h"
  6#include "fm10k_pf.h"
  7
  8static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results,
  9			       struct fm10k_mbx_info *mbx)
 10{
 11	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
 12	struct fm10k_intfc *interface = hw->back;
 13	struct pci_dev *pdev = interface->pdev;
 14
 15	dev_err(&pdev->dev, "Unknown message ID %u on VF %d\n",
 16		**results & FM10K_TLV_ID_MASK, vf_info->vf_idx);
 17
 18	return fm10k_tlv_msg_error(hw, results, mbx);
 19}
 20
/**
 *  fm10k_iov_msg_queue_mac_vlan - Message handler for MAC/VLAN request from VF
 *  @hw: Pointer to hardware structure
 *  @results: Pointer array to message, results[0] is pointer to message
 *  @mbx: Pointer to mailbox information structure
 *
 *  This function is a custom handler for MAC/VLAN requests from the VF. The
 *  assumption is that it is acceptable to directly hand off the message from
 *  the VF to the PF's switch manager. However, we use a MAC/VLAN message
 *  queue to avoid overloading the mailbox when a large number of requests
 *  come in.
 *
 *  Returns 0 on success, otherwise a negative FM10K error code.
 **/
static s32 fm10k_iov_msg_queue_mac_vlan(struct fm10k_hw *hw, u32 **results,
					struct fm10k_mbx_info *mbx)
{
	/* the mbx pointer doubles as the start of the VF info structure */
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	struct fm10k_intfc *interface = hw->back;
	u8 mac[ETH_ALEN];
	u32 *result;
	int err = 0;
	bool set;
	u16 vlan;
	u32 vid;

	/* we shouldn't be updating rules on a disabled interface */
	if (!FM10K_VF_FLAG_ENABLED(vf_info))
		err = FM10K_ERR_PARAM;

	if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
		result = results[FM10K_MAC_VLAN_MSG_VLAN];

		/* record VLAN id requested */
		err = fm10k_tlv_attr_get_u32(result, &vid);
		if (err)
			return err;

		/* FM10K_VLAN_CLEAR distinguishes a clear from a set request */
		set = !(vid & FM10K_VLAN_CLEAR);
		vid &= ~FM10K_VLAN_CLEAR;

		/* if the length field has been set, this is a multi-bit
		 * update request. For multi-bit requests, simply disallow
		 * them when the pf_vid has been set. In this case, the PF
		 * should have already cleared the VLAN_TABLE, and if we
		 * allowed them, it could allow a rogue VF to receive traffic
		 * on a VLAN it was not assigned. In the single-bit case, we
		 * need to modify requests for VLAN 0 to use the default PF or
		 * SW vid when assigned.
		 */

		if (vid >> 16) {
			/* prevent multi-bit requests when PF has
			 * administratively set the VLAN for this VF
			 */
			if (vf_info->pf_vid)
				return FM10K_ERR_PARAM;
		} else {
			err = fm10k_iov_select_vid(vf_info, (u16)vid);
			if (err < 0)
				return err;

			/* non-negative return is the VID to actually use */
			vid = err;
		}

		/* update VSI info for VF in regards to VLAN table */
		err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
	}

	if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
		result = results[FM10K_MAC_VLAN_MSG_MAC];

		/* record unicast MAC address requested */
		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
		if (err)
			return err;

		/* block attempts to set MAC for a locked device */
		if (is_valid_ether_addr(vf_info->mac) &&
		    !ether_addr_equal(mac, vf_info->mac))
			return FM10K_ERR_PARAM;

		set = !(vlan & FM10K_VLAN_CLEAR);
		vlan &= ~FM10K_VLAN_CLEAR;

		err = fm10k_iov_select_vid(vf_info, vlan);
		if (err < 0)
			return err;

		vlan = (u16)err;

		/* Add this request to the MAC/VLAN queue */
		err = fm10k_queue_mac_request(interface, vf_info->glort,
					      mac, vlan, set);
	}

	if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
		result = results[FM10K_MAC_VLAN_MSG_MULTICAST];

		/* record multicast MAC address requested */
		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
		if (err)
			return err;

		/* verify that the VF is allowed to request multicast */
		if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
			return FM10K_ERR_PARAM;

		set = !(vlan & FM10K_VLAN_CLEAR);
		vlan &= ~FM10K_VLAN_CLEAR;

		err = fm10k_iov_select_vid(vf_info, vlan);
		if (err < 0)
			return err;

		vlan = (u16)err;

		/* Add this request to the MAC/VLAN queue */
		err = fm10k_queue_mac_request(interface, vf_info->glort,
					      mac, vlan, set);
	}

	return err;
}
143
/* dispatch table mapping VF mailbox message IDs to their PF-side handlers;
 * unknown IDs fall through to fm10k_iov_msg_error
 */
static const struct fm10k_msg_data iov_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_queue_mac_vlan),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error),
};
151
/**
 * fm10k_iov_event - Handle VF function-level reset (VFLR) events
 * @interface: device private structure
 *
 * Checks the EICR register for a pending VFLR indication, reads the 64-bit
 * PFVFLRE bitmap, and for every VF whose reset bit is set reclaims its
 * hardware resources and reconnects its mailbox.
 *
 * Always returns 0.
 */
s32 fm10k_iov_event(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	s64 vflre;
	int i;

	/* if there is no iov_data then there is no mailbox to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR))
		goto read_unlock;

	/* read VFLRE to determine if any VFs have been reset */
	vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(1));
	vflre <<= 32;
	vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0));

	i = iov_data->num_vfs;

	/* align VF (num_vfs - 1)'s bit with the sign bit, then scan from the
	 * highest VF index down: vflre += vflre shifts left by one each pass,
	 * and a negative value means the current VF's reset bit is set
	 */
	for (vflre <<= 64 - i; vflre && i--; vflre += vflre) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		if (vflre >= 0)
			continue;

		hw->iov.ops.reset_resources(hw, vf_info);
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

read_unlock:
	rcu_read_unlock();

	return 0;
}
196
/**
 * fm10k_iov_mbx - Process all VF mailboxes
 * @interface: device private structure
 *
 * Walks the per-VF mailboxes, resetting any VF whose port mapping is no
 * longer valid or whose mailbox has timed out, and processing received
 * messages. Processing stops early when the SM mailbox has no free space,
 * and resumes from the same VF on the next invocation so no VF is starved.
 *
 * Always returns 0.
 */
s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	int i;

	/* if there is no iov_data then there is no mailbox to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	/* Most VF messages sent to the PF cause the PF to respond by
	 * requesting from the SM mailbox. This means that too many VF
	 * messages processed at once could cause a mailbox timeout on the PF.
	 * To prevent this, store a pointer to the next VF mbx to process. Use
	 * that as the start of the loop so that we don't starve whichever VF
	 * got ignored on the previous run.
	 */
process_mbx:
	for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
		struct fm10k_mbx_info *mbx = &vf_info->mbx;
		u16 glort = vf_info->glort;

		/* process the SM mailbox first to drain outgoing messages */
		hw->mbx.ops.process(hw, &hw->mbx);

		/* verify port mapping is valid, if not reset port */
		if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort)) {
			hw->iov.ops.reset_lport(hw, vf_info);
			fm10k_clear_macvlan_queue(interface, glort, false);
		}

		/* reset VFs that have mailbox timed out */
		if (!mbx->timeout) {
			hw->iov.ops.reset_resources(hw, vf_info);
			mbx->ops.connect(hw, mbx);
		}

		/* guarantee we have free space in the SM mailbox */
		if (hw->mbx.state == FM10K_STATE_OPEN &&
		    !hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
			/* keep track of how many times this occurs */
			interface->hw_sm_mbx_full++;

			/* make sure we try again momentarily */
			fm10k_service_event_schedule(interface);

			break;
		}

		/* cleanup mailbox and process received messages */
		mbx->ops.process(hw, mbx);
	}

	/* if we stopped processing mailboxes early, update next_vf_mbx.
	 * Otherwise, reset next_vf_mbx, and restart loop so that we process
	 * the remaining mailboxes we skipped at the start.
	 */
	if (i >= 0) {
		iov_data->next_vf_mbx = i + 1;
	} else if (iov_data->next_vf_mbx) {
		iov_data->next_vf_mbx = 0;
		goto process_mbx;
	}

	/* free the lock */
	fm10k_mbx_unlock(interface);

read_unlock:
	rcu_read_unlock();

	return 0;
}
281
282void fm10k_iov_suspend(struct pci_dev *pdev)
283{
284	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
285	struct fm10k_iov_data *iov_data = interface->iov_data;
286	struct fm10k_hw *hw = &interface->hw;
287	int num_vfs, i;
288
289	/* pull out num_vfs from iov_data */
290	num_vfs = iov_data ? iov_data->num_vfs : 0;
291
292	/* shut down queue mapping for VFs */
293	fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_vf_rss),
294			FM10K_DGLORTMAP_NONE);
295
296	/* Stop any active VFs and reset their resources */
297	for (i = 0; i < num_vfs; i++) {
298		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
299
300		hw->iov.ops.reset_resources(hw, vf_info);
301		hw->iov.ops.reset_lport(hw, vf_info);
302		fm10k_clear_macvlan_queue(interface, vf_info->glort, false);
303	}
304}
305
306static void fm10k_mask_aer_comp_abort(struct pci_dev *pdev)
307{
308	u32 err_mask;
309	int pos;
310
311	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
312	if (!pos)
313		return;
314
315	/* Mask the completion abort bit in the ERR_UNCOR_MASK register,
316	 * preventing the device from reporting these errors to the upstream
317	 * PCIe root device. This avoids bringing down platforms which upgrade
318	 * non-fatal completer aborts into machine check exceptions. Completer
319	 * aborts can occur whenever a VF reads a queue it doesn't own.
320	 */
321	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &err_mask);
322	err_mask |= PCI_ERR_UNC_COMP_ABORT;
323	pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, err_mask);
324}
325
/**
 * fm10k_iov_resume - Reassign hardware resources and reconnect all VFs
 * @pdev: PCI device owning the VFs
 *
 * Masks completer-abort AER reporting, assigns queue resources to the VFs,
 * restores the DGLORT mapping for VF RSS, and then for each VF assigns its
 * GLORT, default MAC/VLAN, and reconnects its mailbox.
 *
 * Returns 0 on success, -ENOMEM if iov_data has not been allocated.
 */
int fm10k_iov_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* return error if iov_data is not already populated */
	if (!iov_data)
		return -ENOMEM;

	/* Lower severity of completer abort error reporting as
	 * the VFs can trigger this any time they read a queue
	 * that they don't own.
	 */
	fm10k_mask_aer_comp_abort(pdev);

	/* allocate hardware resources for the VFs */
	hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);

	/* configure DGLORT mapping for RSS */
	dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	dglort.idx = fm10k_dglort_vf_rss;
	dglort.inner_rss = 1;
	dglort.rss_l = fls(fm10k_queues_per_pool(hw) - 1);
	dglort.queue_b = fm10k_vf_queue_index(hw, 0);
	dglort.vsi_l = fls(hw->iov.total_vfs - 1);
	dglort.vsi_b = 1;

	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign resources to the device */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* allocate all but the last GLORT to the VFs */
		if (i == (~hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT))
			break;

		/* assign GLORT to VF, and restrict it to multicast */
		hw->iov.ops.set_lport(hw, vf_info, i,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* mailbox is disconnected so we don't send a message */
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

		/* now we are ready so we can connect */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

	return 0;
}
382
383s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
384{
385	struct fm10k_iov_data *iov_data = interface->iov_data;
386	struct fm10k_hw *hw = &interface->hw;
387	struct fm10k_vf_info *vf_info;
388	u16 vf_idx = (glort - hw->mac.dglort_map) & FM10K_DGLORTMAP_NONE;
389
390	/* no IOV support, not our message to process */
391	if (!iov_data)
392		return FM10K_ERR_PARAM;
393
394	/* glort outside our range, not our message to process */
395	if (vf_idx >= iov_data->num_vfs)
396		return FM10K_ERR_PARAM;
397
398	/* determine if an update has occurred and if so notify the VF */
399	vf_info = &iov_data->vf_info[vf_idx];
400	if (vf_info->sw_vid != pvid) {
401		vf_info->sw_vid = pvid;
402		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
403	}
404
405	return 0;
406}
407
/* Release all SR-IOV state: reclaim hardware resources via suspend, then
 * free iov_data after an RCU grace period so concurrent readers inside
 * rcu_read_lock sections (fm10k_iov_event/fm10k_iov_mbx) stay safe.
 */
static void fm10k_iov_free_data(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);

	if (!interface->iov_data)
		return;

	/* reclaim hardware resources */
	fm10k_iov_suspend(pdev);

	/* drop iov_data from interface */
	kfree_rcu(interface->iov_data, rcu);
	interface->iov_data = NULL;
}
422
/* Allocate and initialize per-VF bookkeeping for num_vfs VFs, then bring
 * the hardware resources online via fm10k_iov_resume. Returns 0 on success,
 * -EBUSY if already allocated, -ENODEV when the hardware cannot assign
 * resources, or -ENOMEM/mailbox-init errors on failure.
 */
static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	size_t size;
	int i;

	/* return error if iov_data is already populated */
	if (iov_data)
		return -EBUSY;

	/* The PF should always be able to assign resources */
	if (!hw->iov.ops.assign_resources)
		return -ENODEV;

	/* nothing to do if no VFs are requested */
	if (!num_vfs)
		return 0;

	/* allocate memory for VF storage; vf_info is a trailing
	 * flexible array sized for num_vfs entries
	 */
	size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]);
	iov_data = kzalloc(size, GFP_KERNEL);
	if (!iov_data)
		return -ENOMEM;

	/* record number of VFs */
	iov_data->num_vfs = num_vfs;

	/* loop through vf_info structures initializing each entry */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
		int err;

		/* Record VF VSI value (VSI 0 belongs to the PF) */
		vf_info->vsi = i + 1;
		vf_info->vf_idx = i;

		/* initialize mailbox memory */
		err = fm10k_pfvf_mbx_init(hw, &vf_info->mbx, iov_mbx_data, i);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to initialize SR-IOV mailbox\n");
			kfree(iov_data);
			return err;
		}
	}

	/* assign iov_data to interface */
	interface->iov_data = iov_data;

	/* allocate hardware resources for the VFs */
	fm10k_iov_resume(pdev);

	return 0;
}
479
480void fm10k_iov_disable(struct pci_dev *pdev)
481{
482	if (pci_num_vf(pdev) && pci_vfs_assigned(pdev))
483		dev_err(&pdev->dev,
484			"Cannot disable SR-IOV while VFs are assigned\n");
485	else
486		pci_disable_sriov(pdev);
487
488	fm10k_iov_free_data(pdev);
489}
490
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* sriov_configure callback: change the number of enabled VFs to num_vfs.
 * When VFs are assigned to guests the current count is kept; otherwise the
 * existing VFs are torn down and the requested number is (re)enabled.
 * Returns the resulting VF count on success or a negative errno.
 */
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	int current_vfs = pci_num_vf(pdev);
	int err = 0;

	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		num_vfs = current_vfs;
	} else {
		pci_disable_sriov(pdev);
		fm10k_iov_free_data(pdev);
	}

	/* allocate resources for the VFs */
	err = fm10k_iov_alloc_data(pdev, num_vfs);
	if (err)
		return err;

	/* allocate VFs if not already allocated */
	if (num_vfs && num_vfs != current_vfs) {
		err = pci_enable_sriov(pdev, num_vfs);
		if (err) {
			dev_err(&pdev->dev,
				"Enable PCI SR-IOV failed: %d\n", err);
			return err;
		}
	}

	return num_vfs;
}
522
523/**
524 * fm10k_iov_update_stats - Update stats for all VFs
525 * @interface: device private structure
526 *
527 * Updates the VF statistics for all enabled VFs. Expects to be called by
528 * fm10k_update_stats and assumes that locking via the __FM10K_UPDATING_STATS
529 * bit is already handled.
530 */
531void fm10k_iov_update_stats(struct fm10k_intfc *interface)
532{
533	struct fm10k_iov_data *iov_data = interface->iov_data;
534	struct fm10k_hw *hw = &interface->hw;
535	int i;
536
537	if (!iov_data)
538		return;
539
540	for (i = 0; i < iov_data->num_vfs; i++)
541		hw->iov.ops.update_stats(hw, iov_data->vf_info[i].stats, i);
542}
543
/* Reset a VF's logical port under the mailbox lock: disable its LPORT
 * (clearing switch rules), drop any queued MAC/VLAN requests, assign the
 * VF's new default MAC+VLAN, and re-enable the LPORT. The ordering here is
 * deliberate — rules are cleared before the new address is pushed.
 */
static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
				       struct fm10k_vf_info *vf_info)
{
	struct fm10k_hw *hw = &interface->hw;

	/* assigning the MAC address will send a mailbox message */
	fm10k_mbx_lock(interface);

	/* disable LPORT for this VF which clears switch rules */
	hw->iov.ops.reset_lport(hw, vf_info);

	fm10k_clear_macvlan_queue(interface, vf_info->glort, false);

	/* assign new MAC+VLAN for this VF */
	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

	/* re-enable the LPORT for this VF */
	hw->iov.ops.set_lport(hw, vf_info, vf_info->vf_idx,
			      FM10K_VF_FLAG_MULTI_CAPABLE);

	fm10k_mbx_unlock(interface);
}
566
567int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
568{
569	struct fm10k_intfc *interface = netdev_priv(netdev);
570	struct fm10k_iov_data *iov_data = interface->iov_data;
571	struct fm10k_vf_info *vf_info;
572
573	/* verify SR-IOV is active and that vf idx is valid */
574	if (!iov_data || vf_idx >= iov_data->num_vfs)
575		return -EINVAL;
576
577	/* verify MAC addr is valid */
578	if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac))
579		return -EINVAL;
580
581	/* record new MAC address */
582	vf_info = &iov_data->vf_info[vf_idx];
583	ether_addr_copy(vf_info->mac, mac);
584
585	fm10k_reset_vf_info(interface, vf_info);
586
587	return 0;
588}
589
/* ndo_set_vf_vlan callback: administratively set the port VLAN of a VF.
 * QoS priorities and non-802.1Q protocols are unsupported. On change, the
 * VF's VLAN table is cleared and the VF is reset to apply the new VLAN.
 * Returns 0 on success, -EINVAL or -EPROTONOSUPPORT on bad arguments.
 */
int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
			  u8 qos, __be16 vlan_proto)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* QOS is unsupported and VLAN IDs accepted range 0-4094 */
	if (qos || (vid > (VLAN_VID_MASK - 1)))
		return -EINVAL;

	/* VF VLAN Protocol part to default is unsupported */
	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	vf_info = &iov_data->vf_info[vf_idx];

	/* exit if there is nothing to do */
	if (vf_info->pf_vid == vid)
		return 0;

	/* record default VLAN ID for VF */
	vf_info->pf_vid = vid;

	/* Clear the VLAN table for the VF */
	hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false);

	fm10k_reset_vf_info(interface, vf_info);

	return 0;
}
626
627int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
628			int __always_unused min_rate, int max_rate)
629{
630	struct fm10k_intfc *interface = netdev_priv(netdev);
631	struct fm10k_iov_data *iov_data = interface->iov_data;
632	struct fm10k_hw *hw = &interface->hw;
633
634	/* verify SR-IOV is active and that vf idx is valid */
635	if (!iov_data || vf_idx >= iov_data->num_vfs)
636		return -EINVAL;
637
638	/* rate limit cannot be less than 10Mbs or greater than link speed */
639	if (max_rate &&
640	    (max_rate < FM10K_VF_TC_MIN || max_rate > FM10K_VF_TC_MAX))
641		return -EINVAL;
642
643	/* store values */
644	iov_data->vf_info[vf_idx].rate = max_rate;
645
646	/* update hardware configuration */
647	hw->iov.ops.configure_tc(hw, vf_idx, max_rate);
648
649	return 0;
650}
651
652int fm10k_ndo_get_vf_config(struct net_device *netdev,
653			    int vf_idx, struct ifla_vf_info *ivi)
654{
655	struct fm10k_intfc *interface = netdev_priv(netdev);
656	struct fm10k_iov_data *iov_data = interface->iov_data;
657	struct fm10k_vf_info *vf_info;
658
659	/* verify SR-IOV is active and that vf idx is valid */
660	if (!iov_data || vf_idx >= iov_data->num_vfs)
661		return -EINVAL;
662
663	vf_info = &iov_data->vf_info[vf_idx];
664
665	ivi->vf = vf_idx;
666	ivi->max_tx_rate = vf_info->rate;
667	ivi->min_tx_rate = 0;
668	ether_addr_copy(ivi->mac, vf_info->mac);
669	ivi->vlan = vf_info->pf_vid;
670	ivi->qos = 0;
671
672	return 0;
673}
674
/* ndo_get_vf_stats callback: sum the per-queue hardware counters of the
 * VF's queue pool into the caller-supplied ifla_vf_stats. Note the totals
 * are accumulated into *stats, which the caller is expected to have zeroed.
 * Returns 0 or -EINVAL for a bad index.
 */
int fm10k_ndo_get_vf_stats(struct net_device *netdev,
			   int vf_idx, struct ifla_vf_stats *stats)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_hw_stats_q *hw_stats;
	u32 idx, qpp;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* every VF owns the same number of queues */
	qpp = fm10k_queues_per_pool(hw);
	hw_stats = iov_data->vf_info[vf_idx].stats;

	for (idx = 0; idx < qpp; idx++) {
		stats->rx_packets += hw_stats[idx].rx_packets.count;
		stats->tx_packets += hw_stats[idx].tx_packets.count;
		stats->rx_bytes += hw_stats[idx].rx_bytes.count;
		stats->tx_bytes += hw_stats[idx].tx_bytes.count;
		stats->rx_dropped += hw_stats[idx].rx_drops.count;
	}

	return 0;
}
v4.6
  1/* Intel Ethernet Switch Host Interface Driver
  2 * Copyright(c) 2013 - 2015 Intel Corporation.
  3 *
  4 * This program is free software; you can redistribute it and/or modify it
  5 * under the terms and conditions of the GNU General Public License,
  6 * version 2, as published by the Free Software Foundation.
  7 *
  8 * This program is distributed in the hope it will be useful, but WITHOUT
  9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 10 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 11 * more details.
 12 *
 13 * The full GNU General Public License is included in this distribution in
 14 * the file called "COPYING".
 15 *
 16 * Contact Information:
 17 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 18 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 19 */
 20
 21#include "fm10k.h"
 22#include "fm10k_vf.h"
 23#include "fm10k_pf.h"
 24
/* Log an unknown VF mailbox message ID, then defer to the generic TLV
 * error handler. (Older pre-SPDX copy of the file — kept as-is.)
 */
static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results,
			       struct fm10k_mbx_info *mbx)
{
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	struct fm10k_intfc *interface = hw->back;
	struct pci_dev *pdev = interface->pdev;

	dev_err(&pdev->dev, "Unknown message ID %u on VF %d\n",
		**results & FM10K_TLV_ID_MASK, vf_info->vf_idx);

	return fm10k_tlv_msg_error(hw, results, mbx);
}
 37
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* dispatch table mapping VF mailbox message IDs to PF-side handlers;
 * this older copy routes MAC/VLAN requests directly to the PF handler
 */
static const struct fm10k_msg_data iov_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error),
};
 45
/* Handle VF function-level reset (VFLR) events: re-read the 64-bit PFVFLRE
 * bitmap in a loop until a full pass finds no set bits, resetting resources
 * and reconnecting the mailbox of each flagged VF. Always returns 0.
 */
s32 fm10k_iov_event(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	s64 vflre;
	int i;

	/* if there is no iov_data then there is no mailboxes to process */
	if (!ACCESS_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR))
		goto read_unlock;

	/* read VFLRE to determine if any VFs have been reset */
	do {
		vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(0));
		vflre <<= 32;
		vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(1));
		/* swap halves so PFVFLRE(1) occupies the high 32 bits */
		vflre = (vflre << 32) | (vflre >> 32);
		vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0));

		i = iov_data->num_vfs;

		/* scan from the highest VF index down; a negative value
		 * means the current VF's reset bit is in the sign position
		 */
		for (vflre <<= 64 - i; vflre && i--; vflre += vflre) {
			struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

			if (vflre >= 0)
				continue;

			hw->iov.ops.reset_resources(hw, vf_info);
			vf_info->mbx.ops.connect(hw, &vf_info->mbx);
		}
	} while (i != iov_data->num_vfs);

read_unlock:
	rcu_read_unlock();

	return 0;
}
 94
/* Process all VF mailboxes, resetting VFs with invalid port mappings or
 * timed-out mailboxes. Stops early when the SM mailbox is full and resumes
 * from the same VF next time so no VF is starved. Always returns 0.
 */
s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	int i;

	/* if there is no iov_data then there is no mailboxes to process */
	if (!ACCESS_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	/* Most VF messages sent to the PF cause the PF to respond by
	 * requesting from the SM mailbox. This means that too many VF
	 * messages processed at once could cause a mailbox timeout on the PF.
	 * To prevent this, store a pointer to the next VF mbx to process. Use
	 * that as the start of the loop so that we don't starve whichever VF
	 * got ignored on the previous run.
	 */
process_mbx:
	for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
		struct fm10k_mbx_info *mbx = &vf_info->mbx;
		u16 glort = vf_info->glort;

		/* verify port mapping is valid, if not reset port */
		if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort))
			hw->iov.ops.reset_lport(hw, vf_info);

		/* reset VFs that have mailbox timed out */
		if (!mbx->timeout) {
			hw->iov.ops.reset_resources(hw, vf_info);
			mbx->ops.connect(hw, mbx);
		}

		/* guarantee we have free space in the SM mailbox */
		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
			/* keep track of how many times this occurs */
			interface->hw_sm_mbx_full++;
			break;
		}

		/* cleanup mailbox and process received messages */
		mbx->ops.process(hw, mbx);
	}

	/* if we stopped processing mailboxes early, update next_vf_mbx.
	 * Otherwise, reset next_vf_mbx, and restart loop so that we process
	 * the remaining mailboxes we skipped at the start.
	 */
	if (i >= 0) {
		iov_data->next_vf_mbx = i + 1;
	} else if (iov_data->next_vf_mbx) {
		iov_data->next_vf_mbx = 0;
		goto process_mbx;
	}

	/* free the lock */
	fm10k_mbx_unlock(interface);

read_unlock:
	rcu_read_unlock();

	return 0;
}
169
/* Tear down the VF queue mapping and reset the hardware resources and
 * logical port of every VF. Safe to call with no iov_data (num_vfs == 0).
 */
void fm10k_iov_suspend(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* shut down queue mapping for VFs */
	fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_vf_rss),
			FM10K_DGLORTMAP_NONE);

	/* Stop any active VFs and reset their resources */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		hw->iov.ops.reset_resources(hw, vf_info);
		hw->iov.ops.reset_lport(hw, vf_info);
	}
}
192
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Reassign hardware resources to the VFs, restore the DGLORT mapping for
 * VF RSS, and reconnect each VF's mailbox with its default MAC/VLAN.
 * Returns 0 on success, -ENOMEM if iov_data has not been allocated.
 */
int fm10k_iov_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* return error if iov_data is not already populated */
	if (!iov_data)
		return -ENOMEM;

	/* allocate hardware resources for the VFs */
	hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);

	/* configure DGLORT mapping for RSS */
	dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	dglort.idx = fm10k_dglort_vf_rss;
	dglort.inner_rss = 1;
	dglort.rss_l = fls(fm10k_queues_per_pool(hw) - 1);
	dglort.queue_b = fm10k_vf_queue_index(hw, 0);
	dglort.vsi_l = fls(hw->iov.total_vfs - 1);
	dglort.vsi_b = 1;

	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign resources to the device */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* allocate all but the last GLORT to the VFs */
		if (i == ((~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT))
			break;

		/* assign GLORT to VF, and restrict it to multicast */
		hw->iov.ops.set_lport(hw, vf_info, i,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* mailbox is disconnected so we don't send a message */
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

		/* now we are ready so we can connect */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

	return 0;
}
243
/* Record a new switch-assigned port VLAN for the VF addressed by glort and
 * notify the VF when it changed. Returns 0 when the glort maps to one of
 * our VFs, FM10K_ERR_PARAM when the message is not ours to process.
 */
s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
{
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;
	u16 vf_idx = (glort - hw->mac.dglort_map) & FM10K_DGLORTMAP_NONE;

	/* no IOV support, not our message to process */
	if (!iov_data)
		return FM10K_ERR_PARAM;

	/* glort outside our range, not our message to process */
	if (vf_idx >= iov_data->num_vfs)
		return FM10K_ERR_PARAM;

	/* determine if an update has occurred and if so notify the VF */
	vf_info = &iov_data->vf_info[vf_idx];
	if (vf_info->sw_vid != pvid) {
		vf_info->sw_vid = pvid;
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
	}

	return 0;
}
268
/* Release all SR-IOV state: reclaim hardware resources via suspend, then
 * free iov_data after an RCU grace period to protect concurrent readers.
 */
static void fm10k_iov_free_data(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);

	if (!interface->iov_data)
		return;

	/* reclaim hardware resources */
	fm10k_iov_suspend(pdev);

	/* drop iov_data from interface */
	kfree_rcu(interface->iov_data, rcu);
	interface->iov_data = NULL;
}
283
/* Allocate and initialize per-VF bookkeeping for num_vfs VFs, then bring
 * the hardware resources online via fm10k_iov_resume. Returns 0 on success,
 * -EBUSY if already allocated, -ENODEV when the hardware cannot assign
 * resources, or -ENOMEM/mailbox-init errors on failure.
 */
static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	size_t size;
	int i, err;

	/* return error if iov_data is already populated */
	if (iov_data)
		return -EBUSY;

	/* The PF should always be able to assign resources */
	if (!hw->iov.ops.assign_resources)
		return -ENODEV;

	/* nothing to do if no VFs are requested */
	if (!num_vfs)
		return 0;

	/* allocate memory for VF storage; vf_info is a trailing
	 * flexible array sized for num_vfs entries
	 */
	size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]);
	iov_data = kzalloc(size, GFP_KERNEL);
	if (!iov_data)
		return -ENOMEM;

	/* record number of VFs */
	iov_data->num_vfs = num_vfs;

	/* loop through vf_info structures initializing each entry */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* Record VF VSI value (VSI 0 belongs to the PF) */
		vf_info->vsi = i + 1;
		vf_info->vf_idx = i;

		/* initialize mailbox memory */
		err = fm10k_pfvf_mbx_init(hw, &vf_info->mbx, iov_mbx_data, i);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to initialize SR-IOV mailbox\n");
			kfree(iov_data);
			return err;
		}
	}

	/* assign iov_data to interface */
	interface->iov_data = iov_data;

	/* allocate hardware resources for the VFs */
	fm10k_iov_resume(pdev);

	return 0;
}
339
340void fm10k_iov_disable(struct pci_dev *pdev)
341{
342	if (pci_num_vf(pdev) && pci_vfs_assigned(pdev))
343		dev_err(&pdev->dev,
344			"Cannot disable SR-IOV while VFs are assigned\n");
345	else
346		pci_disable_sriov(pdev);
347
348	fm10k_iov_free_data(pdev);
349}
350
351static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev)
352{
353	u32 err_sev;
354	int pos;
355
356	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
357	if (!pos)
358		return;
359
360	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev);
361	err_sev &= ~PCI_ERR_UNC_COMP_ABORT;
362	pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev);
363}
364
/**
 *  fm10k_iov_configure - sriov_configure callback to change the VF count
 *  @pdev: PCI device to reconfigure
 *  @num_vfs: requested number of VFs (0 disables SR-IOV)
 *
 *  Tears down any existing SR-IOV state and rebuilds it for the requested
 *  VF count.  Returns the number of VFs enabled on success or a negative
 *  error code.
 *
 *  NOTE(review): when VFs are currently assigned to guests, num_vfs is
 *  forced back to the current count and the teardown is skipped; the
 *  subsequent fm10k_iov_alloc_data() then sees the still-populated
 *  iov_data and the call fails — the sequencing below is deliberate.
 **/
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	int current_vfs = pci_num_vf(pdev);
	int err = 0;

	if (current_vfs && pci_vfs_assigned(pdev)) {
		/* cannot change the count while guests own the VFs; keep
		 * the current count so the request is rejected below
		 */
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		num_vfs = current_vfs;
	} else {
		pci_disable_sriov(pdev);
		fm10k_iov_free_data(pdev);
	}

	/* allocate resources for the VFs */
	err = fm10k_iov_alloc_data(pdev, num_vfs);
	if (err)
		return err;

	/* allocate VFs if not already allocated */
	if (num_vfs && (num_vfs != current_vfs)) {
		/* Disable completer abort error reporting as
		 * the VFs can trigger this any time they read a queue
		 * that they don't own.
		 */
		fm10k_disable_aer_comp_abort(pdev);

		err = pci_enable_sriov(pdev, num_vfs);
		if (err) {
			dev_err(&pdev->dev,
				"Enable PCI SR-IOV failed: %d\n", err);
			return err;
		}
	}

	return num_vfs;
}
402
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
403static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
404				       struct fm10k_vf_info *vf_info)
405{
406	struct fm10k_hw *hw = &interface->hw;
407
408	/* assigning the MAC address will send a mailbox message */
409	fm10k_mbx_lock(interface);
410
411	/* disable LPORT for this VF which clears switch rules */
412	hw->iov.ops.reset_lport(hw, vf_info);
413
 
 
414	/* assign new MAC+VLAN for this VF */
415	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
416
417	/* re-enable the LPORT for this VF */
418	hw->iov.ops.set_lport(hw, vf_info, vf_info->vf_idx,
419			      FM10K_VF_FLAG_MULTI_CAPABLE);
420
421	fm10k_mbx_unlock(interface);
422}
423
424int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
425{
426	struct fm10k_intfc *interface = netdev_priv(netdev);
427	struct fm10k_iov_data *iov_data = interface->iov_data;
428	struct fm10k_vf_info *vf_info;
429
430	/* verify SR-IOV is active and that vf idx is valid */
431	if (!iov_data || vf_idx >= iov_data->num_vfs)
432		return -EINVAL;
433
434	/* verify MAC addr is valid */
435	if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac))
436		return -EINVAL;
437
438	/* record new MAC address */
439	vf_info = &iov_data->vf_info[vf_idx];
440	ether_addr_copy(vf_info->mac, mac);
441
442	fm10k_reset_vf_info(interface, vf_info);
443
444	return 0;
445}
446
447int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
448			  u8 qos)
449{
450	struct fm10k_intfc *interface = netdev_priv(netdev);
451	struct fm10k_iov_data *iov_data = interface->iov_data;
452	struct fm10k_hw *hw = &interface->hw;
453	struct fm10k_vf_info *vf_info;
454
455	/* verify SR-IOV is active and that vf idx is valid */
456	if (!iov_data || vf_idx >= iov_data->num_vfs)
457		return -EINVAL;
458
459	/* QOS is unsupported and VLAN IDs accepted range 0-4094 */
460	if (qos || (vid > (VLAN_VID_MASK - 1)))
461		return -EINVAL;
462
 
 
 
 
463	vf_info = &iov_data->vf_info[vf_idx];
464
465	/* exit if there is nothing to do */
466	if (vf_info->pf_vid == vid)
467		return 0;
468
469	/* record default VLAN ID for VF */
470	vf_info->pf_vid = vid;
471
472	/* Clear the VLAN table for the VF */
473	hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false);
474
475	fm10k_reset_vf_info(interface, vf_info);
476
477	return 0;
478}
479
480int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
481			int __always_unused unused, int rate)
482{
483	struct fm10k_intfc *interface = netdev_priv(netdev);
484	struct fm10k_iov_data *iov_data = interface->iov_data;
485	struct fm10k_hw *hw = &interface->hw;
486
487	/* verify SR-IOV is active and that vf idx is valid */
488	if (!iov_data || vf_idx >= iov_data->num_vfs)
489		return -EINVAL;
490
491	/* rate limit cannot be less than 10Mbs or greater than link speed */
492	if (rate && ((rate < FM10K_VF_TC_MIN) || rate > FM10K_VF_TC_MAX))
 
493		return -EINVAL;
494
495	/* store values */
496	iov_data->vf_info[vf_idx].rate = rate;
497
498	/* update hardware configuration */
499	hw->iov.ops.configure_tc(hw, vf_idx, rate);
500
501	return 0;
502}
503
504int fm10k_ndo_get_vf_config(struct net_device *netdev,
505			    int vf_idx, struct ifla_vf_info *ivi)
506{
507	struct fm10k_intfc *interface = netdev_priv(netdev);
508	struct fm10k_iov_data *iov_data = interface->iov_data;
509	struct fm10k_vf_info *vf_info;
510
511	/* verify SR-IOV is active and that vf idx is valid */
512	if (!iov_data || vf_idx >= iov_data->num_vfs)
513		return -EINVAL;
514
515	vf_info = &iov_data->vf_info[vf_idx];
516
517	ivi->vf = vf_idx;
518	ivi->max_tx_rate = vf_info->rate;
519	ivi->min_tx_rate = 0;
520	ether_addr_copy(ivi->mac, vf_info->mac);
521	ivi->vlan = vf_info->pf_vid;
522	ivi->qos = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
523
524	return 0;
525}