/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40evf.h"
#include "i40e_prototype.h"

/* busy wait delay in msec */
#define I40EVF_BUSY_WAIT_DELAY 10
#define I40EVF_BUSY_WAIT_COUNT 50

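/* Illustrative sketch (not part of this file): a caller that needs a
 * synchronous reply might poll the admin queue using the constants above,
 * for a total timeout of 50 * 10 = 500 msec. The helper name check_reply()
 * is hypothetical.
 *
 *	int i;
 *
 *	for (i = 0; i < I40EVF_BUSY_WAIT_COUNT; i++) {
 *		if (check_reply(adapter) == 0)
 *			break;
 *		msleep(I40EVF_BUSY_WAIT_DELAY);
 *	}
 *	if (i == I40EVF_BUSY_WAIT_COUNT)
 *		return -EIO;	// timed out waiting for the PF
 */
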
/**
 * i40evf_send_pf_msg
 * @adapter: adapter structure
 * @op: virtual channel opcode
 * @msg: pointer to message buffer
 * @len: message length
 *
 * Send message to PF; log an error if the send fails.
 **/
static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
			      enum i40e_virtchnl_ops op, u8 *msg, u16 len)
{
	struct i40e_hw *hw = &adapter->hw;
	i40e_status err;

	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
		return 0; /* nothing to see here, move along */

	err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
	if (err)
		dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
			op, err, hw->aq.asq_last_status);
	return err;
}

/**
 * i40evf_send_api_ver
 * @adapter: adapter structure
 *
 * Send API version admin queue message to the PF. The reply is not checked
 * in this function. Returns 0 if the message was successfully
 * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int i40evf_send_api_ver(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_version_info vvi;

	vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
	vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;

	return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION, (u8 *)&vvi,
				  sizeof(vvi));
}

/**
 * i40evf_verify_api_ver
 * @adapter: adapter structure
 *
 * Compare API versions with the PF. Must be called after admin queue is
 * initialized. Returns 0 if API versions match, -EIO if
 * they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
 **/
int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_version_info *pf_vvi;
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	i40e_status err;

	event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
	if (!event.msg_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = i40evf_clean_arq_element(hw, &event, NULL);
	if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
		goto out_alloc;

	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
	if (err) {
		err = -EIO;
		goto out_alloc;
	}

	if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
	    I40E_VIRTCHNL_OP_VERSION) {
		err = -EIO;
		goto out_alloc;
	}

	pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
	if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
	    (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
		err = -EIO;

out_alloc:
	kfree(event.msg_buf);
out:
	return err;
}
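
/* Illustrative sketch (not part of this file): the version handshake as a
 * caller such as the init task might drive it. The retry loop and error
 * handling here are assumptions, not the driver's actual init path.
 *
 *	err = i40evf_send_api_ver(adapter);
 *	if (err)
 *		goto err_out;
 *	do {
 *		msleep(I40EVF_BUSY_WAIT_DELAY);
 *		err = i40evf_verify_api_ver(adapter);
 *	} while (err == I40E_ERR_ADMIN_QUEUE_NO_WORK);
 *	if (err)
 *		goto err_out;	// PF speaks an incompatible API version
 */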

/**
 * i40evf_send_vf_config_msg
 * @adapter: adapter structure
 *
 * Send VF configuration request admin queue message to the PF. The reply
 * is not checked in this function. Returns 0 if the message was
 * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
{
	return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				  NULL, 0);
}

/**
 * i40evf_get_vf_config
 * @adapter: adapter structure
 *
 * Get VF configuration from PF and populate hw structure. Must be called
 * after admin queue is initialized. The caller busy waits, with a maximum
 * timeout, until a response is received from the PF; the response is copied
 * into adapter->vf_res for further processing by the caller.
 **/
int i40evf_get_vf_config(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	u16 len;
	i40e_status err;

	len = sizeof(struct i40e_virtchnl_vf_resource) +
		I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
	event.msg_size = len;
	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
	if (!event.msg_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = i40evf_clean_arq_element(hw, &event, NULL);
	if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
		goto out_alloc;

	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"%s: Error returned from PF, %d, %d\n", __func__,
			le32_to_cpu(event.desc.cookie_high),
			le32_to_cpu(event.desc.cookie_low));
		err = -EIO;
		goto out_alloc;
	}

	if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
	    I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
		dev_err(&adapter->pdev->dev,
			"%s: Invalid response from PF, %d, %d\n", __func__,
			le32_to_cpu(event.desc.cookie_high),
			le32_to_cpu(event.desc.cookie_low));
		err = -EIO;
		goto out_alloc;
	}
	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_size, len));

	i40e_vf_parse_hw_config(hw, adapter->vf_res);
out_alloc:
	kfree(event.msg_buf);
out:
	return err;
}
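
/* Illustrative sketch (not part of this file): how a caller might obtain the
 * VF configuration. The buffer sizing and retry loop are assumptions modeled
 * on the two functions above, not the driver's actual init code.
 *
 *	int bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
 *		    (I40E_MAX_VF_VSI *
 *		     sizeof(struct i40e_virtchnl_vsi_resource));
 *
 *	adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
 *	if (!adapter->vf_res)
 *		goto err_alloc;
 *	if (i40evf_send_vf_config_msg(adapter))
 *		goto err_alloc;
 *	do {
 *		msleep(I40EVF_BUSY_WAIT_DELAY);
 *		err = i40evf_get_vf_config(adapter);
 *	} while (err == I40E_ERR_ADMIN_QUEUE_NO_WORK);
 */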

/**
 * i40evf_configure_queues
 * @adapter: adapter structure
 *
 * Request that the PF set up our (previously allocated) queues.
 **/
void i40evf_configure_queues(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_vsi_queue_config_info *vqci;
	struct i40e_virtchnl_queue_pair_info *vqpi;
	int pairs = adapter->vsi_res->num_queue_pairs;
	int i, len;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
		       (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
	vqci = kzalloc(len, GFP_ATOMIC);
	if (!vqci) {
		dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
			__func__);
		return;
	}
	vqci->vsi_id = adapter->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	for (i = 0; i < pairs; i++) {
		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = adapter->tx_rings[i]->count;
		vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;

		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = adapter->rx_rings[i]->count;
		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma;
		vqpi->rxq.max_pkt_size = adapter->netdev->mtu
					+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
		vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len;
		vqpi++;
	}

	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
			   (u8 *)vqci, len);
	kfree(vqci);
	adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
}
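
/* Worked example (informational): for a standard 1500-byte MTU the
 * max_pkt_size advertised above is 1500 + ETH_HLEN (14) + VLAN_HLEN (4) +
 * ETH_FCS_LEN (4) = 1522 bytes, the classic maximum VLAN-tagged frame.
 */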

/**
 * i40evf_enable_queues
 * @adapter: adapter structure
 *
 * Request that the PF enable all of our queues.
 **/
void i40evf_enable_queues(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_queue_select vqs;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
	vqs.rx_queues = vqs.tx_queues;
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
			   (u8 *)&vqs, sizeof(vqs));
	adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
}
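
/* Worked example (informational): the queue selects above are dense bitmaps,
 * one bit per queue. With num_queue_pairs == 4, (1 << 4) - 1 == 0xf, i.e.
 * queues 0-3 are selected on both the Tx and Rx sides.
 */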

/**
 * i40evf_disable_queues
 * @adapter: adapter structure
 *
 * Request that the PF disable all of our queues.
 **/
void i40evf_disable_queues(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_queue_select vqs;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
	vqs.rx_queues = vqs.tx_queues;
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
			   (u8 *)&vqs, sizeof(vqs));
	adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
}

/**
 * i40evf_map_queues
 * @adapter: adapter structure
 *
 * Request that the PF map queues to interrupt vectors. Misc causes, including
 * admin queue, are always mapped to vector 0.
 **/
void i40evf_map_queues(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_irq_map_info *vimi;
	int v_idx, q_vectors, len;
	struct i40e_q_vector *q_vector;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	len = sizeof(struct i40e_virtchnl_irq_map_info) +
	      (adapter->num_msix_vectors *
		sizeof(struct i40e_virtchnl_vector_map));
	vimi = kzalloc(len, GFP_ATOMIC);
	if (!vimi) {
		dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
			__func__);
		return;
	}

	vimi->num_vectors = adapter->num_msix_vectors;
	/* Queue vectors first */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
		vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS;
		vimi->vecmap[v_idx].txq_map = q_vector->ring_mask;
		vimi->vecmap[v_idx].rxq_map = q_vector->ring_mask;
	}
	/* Misc vector last - this is only for AdminQ messages */
	vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
	vimi->vecmap[v_idx].vector_id = 0;
	vimi->vecmap[v_idx].txq_map = 0;
	vimi->vecmap[v_idx].rxq_map = 0;

	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
			   (u8 *)vimi, len);
	kfree(vimi);
	adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
}
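
/* Illustrative sketch (not part of this file): the resulting map for a VF
 * with two queue vectors and NONQ_VECS == 1 misc vector. Values shown are
 * assumptions for illustration only.
 *
 *	vecmap[0]: vector_id 1, txq_map/rxq_map = rings of q_vector[0]
 *	vecmap[1]: vector_id 2, txq_map/rxq_map = rings of q_vector[1]
 *	vecmap[2]: vector_id 0, txq_map/rxq_map = 0 (AdminQ causes only)
 */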

/**
 * i40evf_add_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF add one or more addresses to our filters. The MAC
 * filters to add are taken from the adapter's mac_filter_list.
 **/
void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_ether_addr_list *veal;
	int len, i = 0, count = 0;
	struct i40evf_mac_filter *f;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;

	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
	      (count * sizeof(struct i40e_virtchnl_ether_addr));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
			__func__);
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct i40e_virtchnl_ether_addr_list)) /
			sizeof(struct i40e_virtchnl_ether_addr);
		len = I40EVF_MAX_AQ_BUF_SIZE;
	}

	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
			__func__);
		return;
	}
	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add) {
			memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
			i++;
			f->add = false;
		}
	}
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
			   (u8 *)veal, len);
	kfree(veal);
	adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
}

/**
 * i40evf_del_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more addresses from our filters. The MAC
 * filters to remove are taken from the adapter's mac_filter_list.
 **/
void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_ether_addr_list *veal;
	struct i40evf_mac_filter *f, *ftmp;
	int len, i = 0, count = 0;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;

	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
	      (count * sizeof(struct i40e_virtchnl_ether_addr));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
			__func__);
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct i40e_virtchnl_ether_addr_list)) /
			sizeof(struct i40e_virtchnl_ether_addr);
		len = I40EVF_MAX_AQ_BUF_SIZE;
	}
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
			__func__);
		return;
	}
	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (f->remove) {
			memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
			i++;
			list_del(&f->list);
			kfree(f);
		}
	}
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
			   (u8 *)veal, len);
	kfree(veal);
	adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
}

/**
 * i40evf_add_vlans
 * @adapter: adapter structure
 *
 * Request that the PF add one or more VLAN filters to our VSI. The VLANs to
 * add are taken from the adapter's vlan_filter_list.
 **/
void i40evf_add_vlans(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_vlan_filter_list *vvfl;
	int len, i = 0, count = 0;
	struct i40evf_vlan_filter *f;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_ADD_VLAN;

	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
	      (count * sizeof(u16));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
			__func__);
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
			sizeof(u16);
		len = I40EVF_MAX_AQ_BUF_SIZE;
	}
	vvfl = kzalloc(len, GFP_ATOMIC);
	if (!vvfl) {
		dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
			__func__);
		return;
	}
	vvfl->vsi_id = adapter->vsi_res->vsi_id;
	vvfl->num_elements = count;
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add) {
			vvfl->vlan_id[i] = f->vlan;
			i++;
			f->add = false;
		}
	}
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
	kfree(vvfl);
	adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
}

/**
 * i40evf_del_vlans
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more VLAN filters from our VSI. The
 * VLANs to remove are taken from the adapter's vlan_filter_list.
 **/
void i40evf_del_vlans(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_vlan_filter_list *vvfl;
	struct i40evf_vlan_filter *f, *ftmp;
	int len, i = 0, count = 0;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_DEL_VLAN;

	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
	      (count * sizeof(u16));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
			__func__);
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
			sizeof(u16);
		len = I40EVF_MAX_AQ_BUF_SIZE;
	}
	vvfl = kzalloc(len, GFP_ATOMIC);
	if (!vvfl) {
		dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
			__func__);
		return;
	}
	vvfl->vsi_id = adapter->vsi_res->vsi_id;
	vvfl->num_elements = count;
	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
		if (f->remove) {
			vvfl->vlan_id[i] = f->vlan;
			i++;
			list_del(&f->list);
			kfree(f);
		}
	}
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
	kfree(vvfl);
	adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
}

/**
 * i40evf_set_promiscuous
 * @adapter: adapter structure
 * @flags: bitmask to control unicast/multicast promiscuous.
 *
 * Request that the PF enable promiscuous mode for our VSI.
 **/
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
{
	struct i40e_virtchnl_promisc_info vpi;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
	vpi.vsi_id = adapter->vsi_res->vsi_id;
	vpi.flags = flags;
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
			   (u8 *)&vpi, sizeof(vpi));
}
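
/* Illustrative sketch (not part of this file): a caller reacting to
 * IFF_PROMISC might pass the virtchnl promiscuous flag bits, e.g.
 *
 *	if (netdev->flags & IFF_PROMISC)
 *		i40evf_set_promiscuous(adapter,
 *				       I40E_FLAG_VF_UNICAST_PROMISC |
 *				       I40E_FLAG_VF_MULTICAST_PROMISC);
 *
 * The flag names above are assumed to be the virtchnl definitions in
 * i40e_virtchnl.h; check that header for the exact spelling.
 */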

/**
 * i40evf_request_stats
 * @adapter: adapter structure
 *
 * Request VSI statistics from PF.
 **/
void i40evf_request_stats(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_queue_select vqs;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* no error message, this isn't crucial */
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_GET_STATS;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	/* queue maps are ignored for this message - only the vsi is used */
	if (i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_STATS,
			       (u8 *)&vqs, sizeof(vqs)))
		/* if the request failed, don't lock out others */
		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}

/**
 * i40evf_request_reset
 * @adapter: adapter structure
 *
 * Request that the PF reset this VF. No response is expected.
 **/
void i40evf_request_reset(struct i40evf_adapter *adapter)
{
	/* Don't check CURRENT_OP - this is always higher priority */
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}

/**
 * i40evf_virtchnl_completion
 * @adapter: adapter structure
 * @v_opcode: opcode sent by PF
 * @v_retval: retval sent by PF
 * @msg: message sent by PF
 * @msglen: message length
 *
 * Asynchronous completion function for admin queue messages. Rather than busy
 * wait, we fire off our requests and assume that no errors will be returned.
 * This function handles the reply messages.
 **/
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
				enum i40e_virtchnl_ops v_opcode,
				i40e_status v_retval,
				u8 *msg, u16 msglen)
{
	struct net_device *netdev = adapter->netdev;

	if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
		struct i40e_virtchnl_pf_event *vpe =
			(struct i40e_virtchnl_pf_event *)msg;
		switch (vpe->event) {
		case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
			adapter->link_up =
				vpe->event_data.link_event.link_status;
			if (adapter->link_up && !netif_carrier_ok(netdev)) {
				dev_info(&adapter->pdev->dev, "NIC Link is Up\n");
				netif_carrier_on(netdev);
				netif_tx_wake_all_queues(netdev);
			} else if (!adapter->link_up) {
				dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
				netif_carrier_off(netdev);
				netif_tx_stop_all_queues(netdev);
			}
			break;
		case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
			dev_info(&adapter->pdev->dev, "PF reset warning received\n");
			if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
				adapter->flags |= I40EVF_FLAG_RESET_PENDING;
				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
				schedule_work(&adapter->reset_task);
			}
			break;
		default:
			dev_err(&adapter->pdev->dev,
				"%s: Unknown event %d from pf\n",
				__func__, vpe->event);
			break;
		}
		return;
	}
	if (v_opcode != adapter->current_op) {
		dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d.\n",
			__func__, adapter->current_op, v_opcode);
		/* We're probably completely screwed at this point, but clear
		 * the current op and try to carry on....
		 */
		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
		return;
	}
	if (v_retval) {
		dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d!\n",
			__func__, v_retval, v_opcode);
	}
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_GET_STATS: {
		struct i40e_eth_stats *stats =
			(struct i40e_eth_stats *)msg;
		adapter->net_stats.rx_packets = stats->rx_unicast +
						 stats->rx_multicast +
						 stats->rx_broadcast;
		adapter->net_stats.tx_packets = stats->tx_unicast +
						 stats->tx_multicast +
						 stats->tx_broadcast;
		adapter->net_stats.rx_bytes = stats->rx_bytes;
		adapter->net_stats.tx_bytes = stats->tx_bytes;
		adapter->net_stats.rx_errors = stats->rx_errors;
		adapter->net_stats.tx_errors = stats->tx_errors;
		adapter->net_stats.rx_dropped = stats->rx_missed;
		adapter->net_stats.tx_dropped = stats->tx_discards;
		adapter->current_stats = *stats;
		}
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_MAC_FILTER);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_MAC_FILTER);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_VLAN_FILTER);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_VLAN_FILTER);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ENABLE_QUEUES);
		/* enable transmits */
		i40evf_irq_enable(adapter, true);
		netif_tx_start_all_queues(adapter->netdev);
		netif_carrier_on(adapter->netdev);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DISABLE_QUEUES);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_CONFIGURE_QUEUES);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
		break;
	default:
		dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF.\n",
			__func__, v_opcode);
		break;
	} /* switch v_opcode */
	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}
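
/* Illustrative sketch (not part of this file): an admin queue task that
 * drains the ARQ and dispatches PF replies into i40evf_virtchnl_completion().
 * Buffer handling is an assumption modeled on i40evf_verify_api_ver() above.
 *
 *	struct i40e_arq_event_info event;
 *
 *	event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
 *	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
 *	if (!event.msg_buf)
 *		return;
 *	while (!i40evf_clean_arq_element(hw, &event, NULL)) {
 *		i40evf_virtchnl_completion(adapter,
 *			(enum i40e_virtchnl_ops)
 *				le32_to_cpu(event.desc.cookie_high),
 *			(i40e_status)le32_to_cpu(event.desc.cookie_low),
 *			event.msg_buf, event.msg_size);
 *	}
 *	kfree(event.msg_buf);
 */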