1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4#include <linux/pci.h>
5#include <linux/delay.h>
6#include <linux/sched.h>
7
8#include "ixgbe.h"
9#include "ixgbe_mbx.h"
10#include "ixgbe_phy.h"
11
12#define IXGBE_82598_MAX_TX_QUEUES 32
13#define IXGBE_82598_MAX_RX_QUEUES 64
14#define IXGBE_82598_RAR_ENTRIES 16
15#define IXGBE_82598_MC_TBL_SIZE 128
16#define IXGBE_82598_VFT_TBL_SIZE 128
17#define IXGBE_82598_RX_PB_SIZE 512
18
19static int ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
20 ixgbe_link_speed speed,
21 bool autoneg_wait_to_complete);
22static int ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
23 u8 *eeprom_data);
24
25/**
26 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
27 * @hw: pointer to the HW structure
28 *
29 * The defaults for 82598 should be in the range of 50us to 50ms,
30 * however the hardware default for these parts is 500us to 1ms which is less
31 * than the 10ms recommended by the pci-e spec. To address this we need to
32 * increase the value to either the 10ms to 250ms range for capability version 1
33 * config, or the 16ms to 55ms range for version 2.
34 **/
35static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
36{
37 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
38 u16 pcie_devctl2;
39
40 if (ixgbe_removed(hw->hw_addr))
41 return;
42
43 /* only take action if timeout value is defaulted to 0 */
44 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
45 goto out;
46
47 /*
48 * if capabilities version is type 1 we can write the
49 * timeout of 10ms to 250ms through the GCR register
50 */
51 if (!(gcr & IXGBE_GCR_CAP_VER2)) {
52 gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
53 goto out;
54 }
55
56 /*
57 * for version 2 capabilities we need to write the config space
58 * directly in order to set the completion timeout value for
59 * 16ms to 55ms
60 */
61 pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
62 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
63 ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
64out:
65 /* disable completion timeout resend */
66 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
67 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
68}
69
70static int ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
71{
72 struct ixgbe_mac_info *mac = &hw->mac;
73
74 /* Call PHY identify routine to get the phy type */
75 ixgbe_identify_phy_generic(hw);
76
77 mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
78 mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
79 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
80 mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
81 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
82 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
83 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
84
85 return 0;
86}
87
88/**
89 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
90 * @hw: pointer to hardware structure
91 *
92 * Initialize any function pointers that were not able to be
93 * set during get_invariants because the PHY/SFP type was
94 * not known. Perform the SFP init if necessary.
95 *
96 **/
97static int ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
98{
99 struct ixgbe_mac_info *mac = &hw->mac;
100 struct ixgbe_phy_info *phy = &hw->phy;
101 u16 list_offset, data_offset;
102 int ret_val;
103
104 /* Identify the PHY */
105 phy->ops.identify(hw);
106
107 /* Overwrite the link function pointers if copper PHY */
108 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
109 mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
110 mac->ops.get_link_capabilities =
111 &ixgbe_get_copper_link_capabilities_generic;
112 }
113
114 switch (hw->phy.type) {
115 case ixgbe_phy_tn:
116 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
117 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
118 break;
119 case ixgbe_phy_nl:
120 phy->ops.reset = &ixgbe_reset_phy_nl;
121
122 /* Call SFP+ identify routine to get the SFP+ module type */
123 ret_val = phy->ops.identify_sfp(hw);
124 if (ret_val)
125 return ret_val;
126 if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
127 return -EOPNOTSUPP;
128
129 /* Check to see if SFP+ module is supported */
130 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
131 &list_offset,
132 &data_offset);
133 if (ret_val)
134 return -EOPNOTSUPP;
135 break;
136 default:
137 break;
138 }
139
140 return 0;
141}
142
143/**
144 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
145 * @hw: pointer to hardware structure
146 *
147 * Starts the hardware using the generic start_hw function.
148 * Disables relaxed ordering for archs other than SPARC,
149 * then sets the PCIe completion timeout.
150 *
151 **/
152static int ixgbe_start_hw_82598(struct ixgbe_hw *hw)
153{
154 int ret_val;
155
156 ret_val = ixgbe_start_hw_generic(hw);
157 if (ret_val)
158 return ret_val;
159
160 /* set the completion timeout for interface */
161 ixgbe_set_pcie_completion_timeout(hw);
162
163 return 0;
164}
165
166/**
167 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
168 * @hw: pointer to hardware structure
169 * @speed: pointer to link speed
170 * @autoneg: boolean auto-negotiation value
171 *
172 * Determines the link capabilities by reading the AUTOC register.
173 **/
174static int ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
175 ixgbe_link_speed *speed,
176 bool *autoneg)
177{
178 u32 autoc = 0;
179
180 /*
181 * Determine link capabilities based on the stored value of AUTOC,
182 * which represents EEPROM defaults. If AUTOC value has not been
183 * stored, use the current register value.
184 */
185 if (hw->mac.orig_link_settings_stored)
186 autoc = hw->mac.orig_autoc;
187 else
188 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
189
190 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
191 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
192 *speed = IXGBE_LINK_SPEED_1GB_FULL;
193 *autoneg = false;
194 break;
195
196 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
197 *speed = IXGBE_LINK_SPEED_10GB_FULL;
198 *autoneg = false;
199 break;
200
201 case IXGBE_AUTOC_LMS_1G_AN:
202 *speed = IXGBE_LINK_SPEED_1GB_FULL;
203 *autoneg = true;
204 break;
205
206 case IXGBE_AUTOC_LMS_KX4_AN:
207 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
208 *speed = IXGBE_LINK_SPEED_UNKNOWN;
209 if (autoc & IXGBE_AUTOC_KX4_SUPP)
210 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
211 if (autoc & IXGBE_AUTOC_KX_SUPP)
212 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
213 *autoneg = true;
214 break;
215
216 default:
217 return -EIO;
218 }
219
220 return 0;
221}
222
223/**
224 * ixgbe_get_media_type_82598 - Determines media type
225 * @hw: pointer to hardware structure
226 *
227 * Returns the media type (fiber, copper, backplane)
228 **/
229static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
230{
231 /* Detect if there is a copper PHY attached. */
232 switch (hw->phy.type) {
233 case ixgbe_phy_cu_unknown:
234 case ixgbe_phy_tn:
235 return ixgbe_media_type_copper;
236
237 default:
238 break;
239 }
240
241 /* Media type for I82598 is based on device ID */
242 switch (hw->device_id) {
243 case IXGBE_DEV_ID_82598:
244 case IXGBE_DEV_ID_82598_BX:
245 /* Default device ID is mezzanine card KX/KX4 */
246 return ixgbe_media_type_backplane;
247
248 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
249 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
250 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
251 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
252 case IXGBE_DEV_ID_82598EB_XF_LR:
253 case IXGBE_DEV_ID_82598EB_SFP_LOM:
254 return ixgbe_media_type_fiber;
255
256 case IXGBE_DEV_ID_82598EB_CX4:
257 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
258 return ixgbe_media_type_cx4;
259
260 case IXGBE_DEV_ID_82598AT:
261 case IXGBE_DEV_ID_82598AT2:
262 return ixgbe_media_type_copper;
263
264 default:
265 return ixgbe_media_type_unknown;
266 }
267}
268
269/**
270 * ixgbe_fc_enable_82598 - Enable flow control
271 * @hw: pointer to hardware structure
272 *
273 * Enable flow control according to the current settings.
274 **/
275static int ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
276{
277 u32 fctrl_reg;
278 u32 rmcs_reg;
279 u32 reg;
280 u32 fcrtl, fcrth;
281 u32 link_speed = 0;
282 int i;
283 bool link_up;
284
285 /* Validate the water mark configuration */
286 if (!hw->fc.pause_time)
287 return -EINVAL;
288
289 /* Low water mark of zero causes XOFF floods */
290 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
291 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
292 hw->fc.high_water[i]) {
293 if (!hw->fc.low_water[i] ||
294 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
295 hw_dbg(hw, "Invalid water mark configuration\n");
296 return -EINVAL;
297 }
298 }
299 }
300
301 /*
302 * On 82598, having Rx flow control enabled causes resets while running
303 * at 1G, so if it is on, turn it off once we know the link speed. For
304 * more details see the 82598 Specification Update.
305 */
306 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
307 if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
308 switch (hw->fc.requested_mode) {
309 case ixgbe_fc_full:
310 hw->fc.requested_mode = ixgbe_fc_tx_pause;
311 break;
312 case ixgbe_fc_rx_pause:
313 hw->fc.requested_mode = ixgbe_fc_none;
314 break;
315 default:
316 /* no change */
317 break;
318 }
319 }
320
321 /* Negotiate the fc mode to use */
322 hw->mac.ops.fc_autoneg(hw);
323
324 /* Disable any previous flow control settings */
325 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
326 fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
327
328 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
329 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
330
331 /*
332 * The possible values of fc.current_mode are:
333 * 0: Flow control is completely disabled
334 * 1: Rx flow control is enabled (we can receive pause frames,
335 * but not send pause frames).
336 * 2: Tx flow control is enabled (we can send pause frames but
337 * we do not support receiving pause frames).
338 * 3: Both Rx and Tx flow control (symmetric) are enabled.
339 * other: Invalid.
340 */
341 switch (hw->fc.current_mode) {
342 case ixgbe_fc_none:
343 /*
344 * Flow control is disabled by software override or autoneg.
345 * The code below will actually disable it in the HW.
346 */
347 break;
348 case ixgbe_fc_rx_pause:
349 /*
350 * Rx Flow control is enabled and Tx Flow control is
351 * disabled by software override. Since there really
352 * isn't a way to advertise that we are capable of RX
353 * Pause ONLY, we will advertise that we support both
354 * symmetric and asymmetric Rx PAUSE. Later, we will
355 * disable the adapter's ability to send PAUSE frames.
356 */
357 fctrl_reg |= IXGBE_FCTRL_RFCE;
358 break;
359 case ixgbe_fc_tx_pause:
360 /*
361 * Tx Flow control is enabled, and Rx Flow control is
362 * disabled by software override.
363 */
364 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
365 break;
366 case ixgbe_fc_full:
367 /* Flow control (both Rx and Tx) is enabled by SW override. */
368 fctrl_reg |= IXGBE_FCTRL_RFCE;
369 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
370 break;
371 default:
372 hw_dbg(hw, "Flow control param set incorrectly\n");
373 return -EIO;
374 }
375
376 /* Set 802.3x based flow control settings. */
377 fctrl_reg |= IXGBE_FCTRL_DPF;
378 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
379 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
380
381 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
382 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
383 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
384 hw->fc.high_water[i]) {
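/* Scale the stored water marks (<<10) into register units and set
 * the XON enable (XONE) and flow-control enable (FCEN) bits.
 */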
385 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
386 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
387 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
388 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
389 } else {
390 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
391 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
392 }
393
394 }
395
396 /* Configure pause time (2 TCs per register) */
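/* Multiplying by 0x00010001 replicates the 16-bit pause time into both
 * halves of each 32-bit FCTTV register, covering two TCs at once.
 */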
397 reg = hw->fc.pause_time * 0x00010001;
398 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
399 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
400
401 /* Configure flow control refresh threshold value */
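/* Refreshing at half the pause time keeps XOFF frames flowing to the
 * link partner before its pause timer can expire.
 */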
402 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
403
404 return 0;
405}
406
407/**
408 * ixgbe_start_mac_link_82598 - Configures MAC link settings
409 * @hw: pointer to hardware structure
410 * @autoneg_wait_to_complete: true when waiting for completion is needed
411 *
412 * Configures link settings based on values in the ixgbe_hw struct.
413 * Restarts the link. Performs autonegotiation if needed.
414 **/
415static int ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
416 bool autoneg_wait_to_complete)
417{
418 int status = 0;
419 u32 autoc_reg;
420 u32 links_reg;
421 u32 i;
422
423 /* Restart link */
424 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
425 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
426 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
427
428 /* Only poll for autoneg to complete if specified to do so */
429 if (autoneg_wait_to_complete) {
430 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
431 IXGBE_AUTOC_LMS_KX4_AN ||
432 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
433 IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
434 links_reg = 0; /* Just in case Autoneg time = 0 */
435 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
436 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
437 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
438 break;
439 msleep(100);
440 }
441 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
442 status = -EIO;
443 hw_dbg(hw, "Autonegotiation did not complete.\n");
444 }
445 }
446 }
447
448 /* Add delay to filter out noise during initial link setup */
449 msleep(50);
450
451 return status;
452}
453
454/**
455 * ixgbe_validate_link_ready - Function looks for phy link
456 * @hw: pointer to hardware structure
457 *
458 * Function indicates success when phy link is available. If phy is not ready
459 * within 5 seconds of the MAC indicating link, the function returns an error.
460 **/
461static int ixgbe_validate_link_ready(struct ixgbe_hw *hw)
462{
463 u32 timeout;
464 u16 an_reg;
465
466 if (hw->device_id != IXGBE_DEV_ID_82598AT2)
467 return 0;
468
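/* Poll autoneg-complete and link status every 100 ms, bounded by
 * IXGBE_VALIDATE_LINK_READY_TIMEOUT (about 5 seconds in total).
 */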
469 for (timeout = 0;
470 timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
471 hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg);
472
473 if ((an_reg & MDIO_AN_STAT1_COMPLETE) &&
474 (an_reg & MDIO_STAT1_LSTATUS))
475 break;
476
477 msleep(100);
478 }
479
480 if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
481 hw_dbg(hw, "Link was indicated but link is down\n");
482 return -EIO;
483 }
484
485 return 0;
486}
487
488/**
489 * ixgbe_check_mac_link_82598 - Get link/speed status
490 * @hw: pointer to hardware structure
491 * @speed: pointer to link speed
492 * @link_up: true if link is up, false otherwise
493 * @link_up_wait_to_complete: bool used to wait for link up or not
494 *
495 * Reads the links register to determine if link is up and the current speed
496 **/
497static int ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
498 ixgbe_link_speed *speed, bool *link_up,
499 bool link_up_wait_to_complete)
500{
501 u32 links_reg;
502 u32 i;
503 u16 link_reg, adapt_comp_reg;
504
505 /*
506 * SERDES PHY requires us to read link status from register 0xC79F.
507 * Bit 0 set indicates link is up/ready; clear indicates link down.
508 * 0xC00C is read to check that the XAUI lanes are active. Bit 0
509 * clear indicates active; set indicates inactive.
510 */
511 if (hw->phy.type == ixgbe_phy_nl) {
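/* The link status register is read twice since the first read may
 * return a latched value; the second read reflects the current state.
 */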
512 hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
513 hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
514 hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD,
515 &adapt_comp_reg);
516 if (link_up_wait_to_complete) {
517 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
518 if ((link_reg & 1) &&
519 ((adapt_comp_reg & 1) == 0)) {
520 *link_up = true;
521 break;
522 } else {
523 *link_up = false;
524 }
525 msleep(100);
526 hw->phy.ops.read_reg(hw, 0xC79F,
527 MDIO_MMD_PMAPMD,
528 &link_reg);
529 hw->phy.ops.read_reg(hw, 0xC00C,
530 MDIO_MMD_PMAPMD,
531 &adapt_comp_reg);
532 }
533 } else {
534 if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
535 *link_up = true;
536 else
537 *link_up = false;
538 }
539
540 if (!*link_up)
541 return 0;
542 }
543
544 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
545 if (link_up_wait_to_complete) {
546 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
547 if (links_reg & IXGBE_LINKS_UP) {
548 *link_up = true;
549 break;
550 } else {
551 *link_up = false;
552 }
553 msleep(100);
554 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
555 }
556 } else {
557 if (links_reg & IXGBE_LINKS_UP)
558 *link_up = true;
559 else
560 *link_up = false;
561 }
562
563 if (links_reg & IXGBE_LINKS_SPEED)
564 *speed = IXGBE_LINK_SPEED_10GB_FULL;
565 else
566 *speed = IXGBE_LINK_SPEED_1GB_FULL;
567
568 if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && *link_up &&
569 (ixgbe_validate_link_ready(hw) != 0))
570 *link_up = false;
571
572 return 0;
573}
574
575/**
576 * ixgbe_setup_mac_link_82598 - Set MAC link speed
577 * @hw: pointer to hardware structure
578 * @speed: new link speed
579 * @autoneg_wait_to_complete: true when waiting for completion is needed
580 *
581 * Set the link speed in the AUTOC register and restarts link.
582 **/
583static int ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
584 ixgbe_link_speed speed,
585 bool autoneg_wait_to_complete)
586{
587 bool autoneg = false;
588 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
589 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
590 u32 autoc = curr_autoc;
591 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
592
593 /* Check to see if speed passed in is supported. */
594 ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg);
595 speed &= link_capabilities;
596
597 if (speed == IXGBE_LINK_SPEED_UNKNOWN)
598 return -EINVAL;
599
600 /* Set KX4/KX support according to speed requested */
601 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
602 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
603 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
604 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
605 autoc |= IXGBE_AUTOC_KX4_SUPP;
606 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
607 autoc |= IXGBE_AUTOC_KX_SUPP;
608 if (autoc != curr_autoc)
609 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
610 }
611
612 /* Setup and restart the link based on the new values in
613 * ixgbe_hw. This will write the AUTOC register based on the new
614 * stored values.
615 */
616 return ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
617}
618
619
620/**
621 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
622 * @hw: pointer to hardware structure
623 * @speed: new link speed
624 * @autoneg_wait_to_complete: true if waiting is needed to complete
625 *
626 * Sets the link speed in the AUTOC register in the MAC and restarts link.
627 **/
628static int ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
629 ixgbe_link_speed speed,
630 bool autoneg_wait_to_complete)
631{
632 int status;
633
634 /* Setup the PHY according to input speed */
635 status = hw->phy.ops.setup_link_speed(hw, speed,
636 autoneg_wait_to_complete);
637 /* Set up MAC */
638 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
639
640 return status;
641}
642
643/**
644 * ixgbe_reset_hw_82598 - Performs hardware reset
645 * @hw: pointer to hardware structure
646 *
647 * Resets the hardware by resetting the transmit and receive units, masks and
648 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
649 * reset.
650 **/
651static int ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
652{
653 int phy_status = 0;
654 u8 analog_val;
655 u32 gheccr;
656 int status;
657 u32 autoc;
658 u32 ctrl;
659 u32 i;
660
661 /* Call adapter stop to disable tx/rx and clear interrupts */
662 status = hw->mac.ops.stop_adapter(hw);
663 if (status)
664 return status;
665
666 /*
667 * Power up the Atlas Tx lanes if they are currently powered down.
668 * Atlas Tx lanes are powered down for MAC loopback tests, but
669 * they are not automatically restored on reset.
670 */
671 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
672 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
673 /* Enable Tx Atlas so packets can be transmitted again */
674 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
675 &analog_val);
676 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
677 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
678 analog_val);
679
680 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
681 &analog_val);
682 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
683 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
684 analog_val);
685
686 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
687 &analog_val);
688 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
689 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
690 analog_val);
691
692 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
693 &analog_val);
694 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
695 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
696 analog_val);
697 }
698
699 /* Reset PHY */
700 if (!hw->phy.reset_disable) {
701 /* PHY ops must be identified and initialized prior to reset */
702
703 /* Init PHY and function pointers, perform SFP setup */
704 phy_status = hw->phy.ops.init(hw);
705 if (phy_status == -EOPNOTSUPP)
706 return phy_status;
707 if (phy_status == -ENOENT)
708 goto mac_reset_top;
709
710 hw->phy.ops.reset(hw);
711 }
712
713mac_reset_top:
714 /*
715 * Issue global reset to the MAC. This needs to be a SW reset.
716 * If a link reset is used, it might reset the MAC while manageability (mng) is using it
717 */
718 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
719 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
720 IXGBE_WRITE_FLUSH(hw);
721 usleep_range(1000, 1200);
722
723 /* Poll for reset bit to self-clear indicating reset is complete */
724 for (i = 0; i < 10; i++) {
725 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
726 if (!(ctrl & IXGBE_CTRL_RST))
727 break;
728 udelay(1);
729 }
730 if (ctrl & IXGBE_CTRL_RST) {
731 status = -EIO;
732 hw_dbg(hw, "Reset polling failed to complete.\n");
733 }
734
735 msleep(50);
736
737 /*
738 * Double resets are required for recovery from certain error
739 * conditions. Between resets, it is necessary to stall to allow time
740 * for any pending HW events to complete.
741 */
742 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
743 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
744 goto mac_reset_top;
745 }
746
747 gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
748 gheccr &= ~(BIT(21) | BIT(18) | BIT(9) | BIT(6));
749 IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
750
751 /*
752 * Store the original AUTOC value if it has not been
753 * stored off yet. Otherwise restore the stored original
754 * AUTOC value, since the reset operation sets it back to defaults.
755 */
756 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
757 if (!hw->mac.orig_link_settings_stored) {
758 hw->mac.orig_autoc = autoc;
759 hw->mac.orig_link_settings_stored = true;
760 } else if (autoc != hw->mac.orig_autoc) {
761 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
762 }
763
764 /* Store the permanent mac address */
765 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
766
767 /*
768 * Store MAC address from RAR0, clear receive address registers, and
769 * clear the multicast table
770 */
771 hw->mac.ops.init_rx_addrs(hw);
772
773 if (phy_status)
774 status = phy_status;
775
776 return status;
777}
778
779/**
780 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
781 * @hw: pointer to hardware struct
782 * @rar: receive address register index to associate with a VMDq index
783 * @vmdq: VMDq set index
784 **/
785static int ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
786{
787 u32 rar_high;
788 u32 rar_entries = hw->mac.num_rar_entries;
789
790 /* Make sure we are using a valid rar index range */
791 if (rar >= rar_entries) {
792 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
793 return -EINVAL;
794 }
795
796 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
797 rar_high &= ~IXGBE_RAH_VIND_MASK;
798 rar_high |= FIELD_PREP(IXGBE_RAH_VIND_MASK, vmdq);
799 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
800 return 0;
801}
802
803/**
804 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
805 * @hw: pointer to hardware struct
806 * @rar: receive address register index to associate with a VMDq index
807 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
808 **/
809static int ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
810{
811 u32 rar_high;
812 u32 rar_entries = hw->mac.num_rar_entries;
813
814
815 /* Make sure we are using a valid rar index range */
816 if (rar >= rar_entries) {
817 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
818 return -EINVAL;
819 }
820
821 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
822 if (rar_high & IXGBE_RAH_VIND_MASK) {
823 rar_high &= ~IXGBE_RAH_VIND_MASK;
824 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
825 }
826
827 return 0;
828}
829
830/**
831 * ixgbe_set_vfta_82598 - Set VLAN filter table
832 * @hw: pointer to hardware structure
833 * @vlan: VLAN id to write to VLAN filter
834 * @vind: VMDq output index that maps queue to VLAN id in VFTA
835 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
836 * @vlvf_bypass: boolean flag - unused
837 *
838 * Turn on/off specified VLAN in the VLAN filter table.
839 **/
840static int ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
841 bool vlan_on, bool vlvf_bypass)
842{
843 u32 regindex;
844 u32 bitindex;
845 u32 bits;
846 u32 vftabyte;
847
848 if (vlan > 4095)
849 return -EINVAL;
850
851 /* Determine 32-bit word position in array */
852 regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
853
854 /* Determine the location of the (VMD) queue index */
855 vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
856 bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
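/* Each 32-bit VFTAVIND word holds eight 4-bit VMDq indices, one nibble
 * per VLAN ID.
 */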
857
858 /* Set the nibble for VMD queue index */
859 bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
860 bits &= (~(0x0F << bitindex));
861 bits |= (vind << bitindex);
862 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
863
864 /* Determine the location of the bit for this VLAN id */
865 bitindex = vlan & 0x1F; /* lower five bits */
866
867 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
868 if (vlan_on)
869 /* Turn on this VLAN id */
870 bits |= BIT(bitindex);
871 else
872 /* Turn off this VLAN id */
873 bits &= ~BIT(bitindex);
874 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
875
876 return 0;
877}
878
879/**
880 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
881 * @hw: pointer to hardware structure
882 *
883 * Clears the VLAN filter table, and the VMDq index associated with the filter
884 **/
885static int ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
886{
887 u32 offset;
888 u32 vlanbyte;
889
890 for (offset = 0; offset < hw->mac.vft_size; offset++)
891 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
892
893 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
894 for (offset = 0; offset < hw->mac.vft_size; offset++)
895 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
896 0);
897
898 return 0;
899}
900
901/**
902 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
903 * @hw: pointer to hardware structure
904 * @reg: analog register to read
905 * @val: read value
906 *
907 * Performs a read operation on the specified Atlas analog register.
908 **/
909static int ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
910{
911 u32 atlas_ctl;
912
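/* The register address (and command bits) are written to ATLASCTL; after a
 * short delay the read data comes back in the low byte of the same register.
 */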
913 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
914 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
915 IXGBE_WRITE_FLUSH(hw);
916 udelay(10);
917 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
918 *val = (u8)atlas_ctl;
919
920 return 0;
921}
922
923/**
924 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
925 * @hw: pointer to hardware structure
926 * @reg: atlas register to write
927 * @val: value to write
928 *
929 * Performs a write operation on the specified Atlas analog register.
930 **/
931static int ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
932{
933 u32 atlas_ctl;
934
935 atlas_ctl = (reg << 8) | val;
936 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
937 IXGBE_WRITE_FLUSH(hw);
938 udelay(10);
939
940 return 0;
941}
942
943/**
944 * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
945 * @hw: pointer to hardware structure
946 * @dev_addr: address to read from
947 * @byte_offset: byte offset to read from dev_addr
948 * @eeprom_data: value read
949 *
950 * Performs an 8 bit read operation from the SFP module's data over the I2C interface.
951 **/
952static int ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
953 u8 byte_offset, u8 *eeprom_data)
954{
955 u16 sfp_addr = 0;
956 u16 sfp_data = 0;
957 u16 sfp_stat = 0;
958 int status = 0;
959 u16 gssr;
960 u32 i;
961
962 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
963 gssr = IXGBE_GSSR_PHY1_SM;
964 else
965 gssr = IXGBE_GSSR_PHY0_SM;
966
967 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
968 return -EBUSY;
969
970 if (hw->phy.type == ixgbe_phy_nl) {
971 /*
972 * phy SDA/SCL registers are at addresses 0xC30A to
973 * 0xC30D. These registers are used to talk to the SFP+
974 * module's EEPROM through the SDA/SCL (I2C) interface.
975 */
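/* The SFP device address and byte offset are packed into one 16-bit
 * word, with the read mask flagging an EEPROM read request.
 */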
976 sfp_addr = (dev_addr << 8) + byte_offset;
977 sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
978 hw->phy.ops.write_reg_mdi(hw,
979 IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
980 MDIO_MMD_PMAPMD,
981 sfp_addr);
982
983 /* Poll status */
984 for (i = 0; i < 100; i++) {
985 hw->phy.ops.read_reg_mdi(hw,
986 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
987 MDIO_MMD_PMAPMD,
988 &sfp_stat);
989 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
990 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
991 break;
992 usleep_range(10000, 20000);
993 }
994
995 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
996 hw_dbg(hw, "EEPROM read did not pass.\n");
997 status = -ENOENT;
998 goto out;
999 }
1000
1001 /* Read data */
1002 hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1003 MDIO_MMD_PMAPMD, &sfp_data);
1004
1005 *eeprom_data = (u8)(sfp_data >> 8);
1006 } else {
1007 status = -EIO;
1008 }
1009
1010out:
1011 hw->mac.ops.release_swfw_sync(hw, gssr);
1012 return status;
1013}
1014
1015/**
1016 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1017 * @hw: pointer to hardware structure
1018 * @byte_offset: EEPROM byte offset to read
1019 * @eeprom_data: value read
1020 *
1021 * Performs an 8 bit read operation from the SFP module's EEPROM over the I2C interface.
1022 **/
1023static int ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1024 u8 *eeprom_data)
1025{
1026 return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
1027 byte_offset, eeprom_data);
1028}
1029
1030/**
1031 * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
1032 * @hw: pointer to hardware structure
1033 * @byte_offset: byte offset at address 0xA2
1034 * @sff8472_data: value read
1035 *
1036 * Performs an 8 bit read operation from the SFP module's SFF-8472 data over I2C
1037 **/
1038static int ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
1039 u8 *sff8472_data)
1040{
1041 return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
1042 byte_offset, sff8472_data);
1043}
1044
1045/**
1046 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1047 * port devices.
1048 * @hw: pointer to the HW structure
1049 *
1050 * Calls the common function and corrects an issue with some single port devices
1051 * that enable LAN1 but not LAN0.
1052 **/
1053static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1054{
1055 struct ixgbe_bus_info *bus = &hw->bus;
1056 u16 pci_gen = 0;
1057 u16 pci_ctrl2 = 0;
1058
1059 ixgbe_set_lan_id_multi_port_pcie(hw);
1060
1061 /* check if LAN0 is disabled */
1062 hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1063 if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1064
1065 hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1066
1067 /* if LAN0 is completely disabled force function to 0 */
1068 if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1069 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1070 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1071
1072 bus->func = 0;
1073 }
1074 }
1075}
1076
1077/**
1078 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
1079 * @hw: pointer to hardware structure
1080 * @num_pb: number of packet buffers to allocate
1081 * @headroom: reserve n KB of headroom
1082 * @strategy: packet buffer allocation strategy
1083 **/
1084static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1085 u32 headroom, int strategy)
1086{
1087 u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1088 u8 i = 0;
1089
1090 if (!num_pb)
1091 return;
1092
1093 /* Setup Rx packet buffer sizes */
1094 switch (strategy) {
1095 case PBA_STRATEGY_WEIGHTED:
1096 /* Setup the first four at 80KB */
1097 rxpktsize = IXGBE_RXPBSIZE_80KB;
1098 for (; i < 4; i++)
1099 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1100 /* Setup the last four at 48KB...don't re-init i */
1101 rxpktsize = IXGBE_RXPBSIZE_48KB;
1102 fallthrough;
1103 case PBA_STRATEGY_EQUAL:
1104 default:
1105 /* Divide the remaining Rx packet buffer evenly among the TCs */
1106 for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1107 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1108 break;
1109 }
1110
1111 /* Setup Tx packet buffer sizes */
1112 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1113 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
1114}
1115
1116static const struct ixgbe_mac_operations mac_ops_82598 = {
1117 .init_hw = &ixgbe_init_hw_generic,
1118 .reset_hw = &ixgbe_reset_hw_82598,
1119 .start_hw = &ixgbe_start_hw_82598,
1120 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
1121 .get_media_type = &ixgbe_get_media_type_82598,
1122 .enable_rx_dma = &ixgbe_enable_rx_dma_generic,
1123 .get_mac_addr = &ixgbe_get_mac_addr_generic,
1124 .stop_adapter = &ixgbe_stop_adapter_generic,
1125 .get_bus_info = &ixgbe_get_bus_info_generic,
1126 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598,
1127 .read_analog_reg8 = &ixgbe_read_analog_reg8_82598,
1128 .write_analog_reg8 = &ixgbe_write_analog_reg8_82598,
1129 .setup_link = &ixgbe_setup_mac_link_82598,
1130 .set_rxpba = &ixgbe_set_rxpba_82598,
1131 .check_link = &ixgbe_check_mac_link_82598,
1132 .get_link_capabilities = &ixgbe_get_link_capabilities_82598,
1133 .led_on = &ixgbe_led_on_generic,
1134 .led_off = &ixgbe_led_off_generic,
1135 .init_led_link_act = ixgbe_init_led_link_act_generic,
1136 .blink_led_start = &ixgbe_blink_led_start_generic,
1137 .blink_led_stop = &ixgbe_blink_led_stop_generic,
1138 .set_rar = &ixgbe_set_rar_generic,
1139 .clear_rar = &ixgbe_clear_rar_generic,
1140 .set_vmdq = &ixgbe_set_vmdq_82598,
1141 .clear_vmdq = &ixgbe_clear_vmdq_82598,
1142 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
1143 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
1144 .enable_mc = &ixgbe_enable_mc_generic,
1145 .disable_mc = &ixgbe_disable_mc_generic,
1146 .clear_vfta = &ixgbe_clear_vfta_82598,
1147 .set_vfta = &ixgbe_set_vfta_82598,
1148 .fc_enable = &ixgbe_fc_enable_82598,
1149 .setup_fc = ixgbe_setup_fc_generic,
1150 .fc_autoneg = ixgbe_fc_autoneg,
1151 .set_fw_drv_ver = NULL,
1152 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
1153 .release_swfw_sync = &ixgbe_release_swfw_sync,
1154 .init_swfw_sync = NULL,
1155 .get_thermal_sensor_data = NULL,
1156 .init_thermal_sensor_thresh = NULL,
1157 .prot_autoc_read = &prot_autoc_read_generic,
1158 .prot_autoc_write = &prot_autoc_write_generic,
1159 .enable_rx = &ixgbe_enable_rx_generic,
1160 .disable_rx = &ixgbe_disable_rx_generic,
1161};
1162
1163static const struct ixgbe_eeprom_operations eeprom_ops_82598 = {
1164 .init_params = &ixgbe_init_eeprom_params_generic,
1165 .read = &ixgbe_read_eerd_generic,
1166 .write = &ixgbe_write_eeprom_generic,
1167 .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic,
1168 .read_buffer = &ixgbe_read_eerd_buffer_generic,
1169 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
1170 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
1171 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
1172};
1173
1174static const struct ixgbe_phy_operations phy_ops_82598 = {
1175 .identify = &ixgbe_identify_phy_generic,
1176 .identify_sfp = &ixgbe_identify_module_generic,
1177 .init = &ixgbe_init_phy_ops_82598,
1178 .reset = &ixgbe_reset_phy_generic,
1179 .read_reg = &ixgbe_read_phy_reg_generic,
1180 .write_reg = &ixgbe_write_phy_reg_generic,
1181 .read_reg_mdi = &ixgbe_read_phy_reg_mdi,
1182 .write_reg_mdi = &ixgbe_write_phy_reg_mdi,
1183 .setup_link = &ixgbe_setup_phy_link_generic,
1184 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
1185 .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598,
1186 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598,
1187 .check_overtemp = &ixgbe_tn_check_overtemp,
1188};
1189
1190const struct ixgbe_info ixgbe_82598_info = {
1191 .mac = ixgbe_mac_82598EB,
1192 .get_invariants = &ixgbe_get_invariants_82598,
1193 .mac_ops = &mac_ops_82598,
1194 .eeprom_ops = &eeprom_ops_82598,
1195 .phy_ops = &phy_ops_82598,
1196 .mvals = ixgbe_mvals_8259X,
1197};
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2012 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include <linux/pci.h>
29#include <linux/delay.h>
30#include <linux/sched.h>
31
32#include "ixgbe.h"
33#include "ixgbe_phy.h"
34
35#define IXGBE_82598_MAX_TX_QUEUES 32
36#define IXGBE_82598_MAX_RX_QUEUES 64
37#define IXGBE_82598_RAR_ENTRIES 16
38#define IXGBE_82598_MC_TBL_SIZE 128
39#define IXGBE_82598_VFT_TBL_SIZE 128
40#define IXGBE_82598_RX_PB_SIZE 512
41
42static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
43 ixgbe_link_speed speed,
44 bool autoneg,
45 bool autoneg_wait_to_complete);
46static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
47 u8 *eeprom_data);
48
49/**
50 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
51 * @hw: pointer to the HW structure
52 *
53 * The defaults for 82598 should be in the range of 50us to 50ms,
54 * however the hardware default for these parts is 500us to 1ms which is less
55 * than the 10ms recommended by the pci-e spec. To address this we need to
56 * increase the value to either 10ms to 250ms for capability version 1 config,
57 * or 16ms to 55ms for version 2.
58 **/
59static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
60{
61 struct ixgbe_adapter *adapter = hw->back;
62 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
63 u16 pcie_devctl2;
64
65 /* only take action if timeout value is defaulted to 0 */
66 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
67 goto out;
68
69 /*
70 * if capababilities version is type 1 we can write the
71 * timeout of 10ms to 250ms through the GCR register
72 */
73 if (!(gcr & IXGBE_GCR_CAP_VER2)) {
74 gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
75 goto out;
76 }
77
78 /*
79 * for version 2 capabilities we need to write the config space
80 * directly in order to set the completion timeout value for
81 * 16ms to 55ms
82 */
83 pci_read_config_word(adapter->pdev,
84 IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2);
85 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
86 pci_write_config_word(adapter->pdev,
87 IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
88out:
89 /* disable completion timeout resend */
90 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
91 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
92}
93
94static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
95{
96 struct ixgbe_mac_info *mac = &hw->mac;
97
98 /* Call PHY identify routine to get the phy type */
99 ixgbe_identify_phy_generic(hw);
100
101 mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
102 mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
103 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
104 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
105 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
106 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
107
108 return 0;
109}
110
111/**
112 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
113 * @hw: pointer to hardware structure
114 *
115 * Initialize any function pointers that were not able to be
116 * set during get_invariants because the PHY/SFP type was
117 * not known. Perform the SFP init if necessary.
118 *
119 **/
120static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
121{
122 struct ixgbe_mac_info *mac = &hw->mac;
123 struct ixgbe_phy_info *phy = &hw->phy;
124 s32 ret_val = 0;
125 u16 list_offset, data_offset;
126
127 /* Identify the PHY */
128 phy->ops.identify(hw);
129
130 /* Overwrite the link function pointers if copper PHY */
131 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
132 mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
133 mac->ops.get_link_capabilities =
134 &ixgbe_get_copper_link_capabilities_generic;
135 }
136
137 switch (hw->phy.type) {
138 case ixgbe_phy_tn:
139 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
140 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
141 phy->ops.get_firmware_version =
142 &ixgbe_get_phy_firmware_version_tnx;
143 break;
144 case ixgbe_phy_nl:
145 phy->ops.reset = &ixgbe_reset_phy_nl;
146
147 /* Call SFP+ identify routine to get the SFP+ module type */
148 ret_val = phy->ops.identify_sfp(hw);
149 if (ret_val != 0)
150 goto out;
151 else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
152 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
153 goto out;
154 }
155
156 /* Check to see if SFP+ module is supported */
157 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
158 &list_offset,
159 &data_offset);
160 if (ret_val != 0) {
161 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
162 goto out;
163 }
164 break;
165 default:
166 break;
167 }
168
169out:
170 return ret_val;
171}
172
173/**
174 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
175 * @hw: pointer to hardware structure
176 *
177 * Starts the hardware using the generic start_hw function.
178 * Disables relaxed ordering Then set pcie completion timeout
179 *
180 **/
181static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
182{
183 u32 regval;
184 u32 i;
185 s32 ret_val = 0;
186
187 ret_val = ixgbe_start_hw_generic(hw);
188
189 /* Disable relaxed ordering */
190 for (i = 0; ((i < hw->mac.max_tx_queues) &&
191 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
192 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
193 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
194 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
195 }
196
197 for (i = 0; ((i < hw->mac.max_rx_queues) &&
198 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
199 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
200 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
201 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
202 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
203 }
204
205 hw->mac.rx_pb_size = IXGBE_82598_RX_PB_SIZE;
206
207 /* set the completion timeout for interface */
208 if (ret_val == 0)
209 ixgbe_set_pcie_completion_timeout(hw);
210
211 return ret_val;
212}
213
214/**
215 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
216 * @hw: pointer to hardware structure
217 * @speed: pointer to link speed
218 * @autoneg: boolean auto-negotiation value
219 *
220 * Determines the link capabilities by reading the AUTOC register.
221 **/
222static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
223 ixgbe_link_speed *speed,
224 bool *autoneg)
225{
226 s32 status = 0;
227 u32 autoc = 0;
228
229 /*
230 * Determine link capabilities based on the stored value of AUTOC,
231 * which represents EEPROM defaults. If AUTOC value has not been
232 * stored, use the current register value.
233 */
234 if (hw->mac.orig_link_settings_stored)
235 autoc = hw->mac.orig_autoc;
236 else
237 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
238
239 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
240 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
241 *speed = IXGBE_LINK_SPEED_1GB_FULL;
242 *autoneg = false;
243 break;
244
245 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
246 *speed = IXGBE_LINK_SPEED_10GB_FULL;
247 *autoneg = false;
248 break;
249
250 case IXGBE_AUTOC_LMS_1G_AN:
251 *speed = IXGBE_LINK_SPEED_1GB_FULL;
252 *autoneg = true;
253 break;
254
255 case IXGBE_AUTOC_LMS_KX4_AN:
256 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
257 *speed = IXGBE_LINK_SPEED_UNKNOWN;
258 if (autoc & IXGBE_AUTOC_KX4_SUPP)
259 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
260 if (autoc & IXGBE_AUTOC_KX_SUPP)
261 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
262 *autoneg = true;
263 break;
264
265 default:
266 status = IXGBE_ERR_LINK_SETUP;
267 break;
268 }
269
270 return status;
271}
272
273/**
274 * ixgbe_get_media_type_82598 - Determines media type
275 * @hw: pointer to hardware structure
276 *
277 * Returns the media type (fiber, copper, backplane)
278 **/
279static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
280{
281 enum ixgbe_media_type media_type;
282
283 /* Detect if there is a copper PHY attached. */
284 switch (hw->phy.type) {
285 case ixgbe_phy_cu_unknown:
286 case ixgbe_phy_tn:
287 media_type = ixgbe_media_type_copper;
288 goto out;
289 default:
290 break;
291 }
292
293 /* Media type for I82598 is based on device ID */
294 switch (hw->device_id) {
295 case IXGBE_DEV_ID_82598:
296 case IXGBE_DEV_ID_82598_BX:
297 /* Default device ID is mezzanine card KX/KX4 */
298 media_type = ixgbe_media_type_backplane;
299 break;
300 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
301 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
302 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
303 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
304 case IXGBE_DEV_ID_82598EB_XF_LR:
305 case IXGBE_DEV_ID_82598EB_SFP_LOM:
306 media_type = ixgbe_media_type_fiber;
307 break;
308 case IXGBE_DEV_ID_82598EB_CX4:
309 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
310 media_type = ixgbe_media_type_cx4;
311 break;
312 case IXGBE_DEV_ID_82598AT:
313 case IXGBE_DEV_ID_82598AT2:
314 media_type = ixgbe_media_type_copper;
315 break;
316 default:
317 media_type = ixgbe_media_type_unknown;
318 break;
319 }
320out:
321 return media_type;
322}
323
324/**
325 * ixgbe_fc_enable_82598 - Enable flow control
326 * @hw: pointer to hardware structure
327 *
328 * Enable flow control according to the current settings.
329 **/
330static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
331{
332 s32 ret_val = 0;
333 u32 fctrl_reg;
334 u32 rmcs_reg;
335 u32 reg;
336 u32 fcrtl, fcrth;
337 u32 link_speed = 0;
338 int i;
339 bool link_up;
340
341 /*
342 * Validate the water mark configuration for packet buffer 0. Zero
343 * water marks indicate that the packet buffer was not configured
344 * and the watermarks for packet buffer 0 should always be configured.
345 */
346 if (!hw->fc.low_water ||
347 !hw->fc.high_water[0] ||
348 !hw->fc.pause_time) {
349 hw_dbg(hw, "Invalid water mark configuration\n");
350 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
351 goto out;
352 }
353
354 /*
355 * On 82598 having Rx FC on causes resets while doing 1G
356 * so if it's on turn it off once we know link_speed. For
357 * more details see 82598 Specification update.
358 */
359 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
360 if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
361 switch (hw->fc.requested_mode) {
362 case ixgbe_fc_full:
363 hw->fc.requested_mode = ixgbe_fc_tx_pause;
364 break;
365 case ixgbe_fc_rx_pause:
366 hw->fc.requested_mode = ixgbe_fc_none;
367 break;
368 default:
369 /* no change */
370 break;
371 }
372 }
373
374 /* Negotiate the fc mode to use */
375 ixgbe_fc_autoneg(hw);
376
377 /* Disable any previous flow control settings */
378 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
379 fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
380
381 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
382 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
383
384 /*
385 * The possible values of fc.current_mode are:
386 * 0: Flow control is completely disabled
387 * 1: Rx flow control is enabled (we can receive pause frames,
388 * but not send pause frames).
389 * 2: Tx flow control is enabled (we can send pause frames but
390 * we do not support receiving pause frames).
391 * 3: Both Rx and Tx flow control (symmetric) are enabled.
392 * other: Invalid.
393 */
394 switch (hw->fc.current_mode) {
395 case ixgbe_fc_none:
396 /*
397 * Flow control is disabled by software override or autoneg.
398 * The code below will actually disable it in the HW.
399 */
400 break;
401 case ixgbe_fc_rx_pause:
402 /*
403 * Rx Flow control is enabled and Tx Flow control is
404 * disabled by software override. Since there really
405 * isn't a way to advertise that we are capable of RX
406 * Pause ONLY, we will advertise that we support both
407 * symmetric and asymmetric Rx PAUSE. Later, we will
408 * disable the adapter's ability to send PAUSE frames.
409 */
410 fctrl_reg |= IXGBE_FCTRL_RFCE;
411 break;
412 case ixgbe_fc_tx_pause:
413 /*
414 * Tx Flow control is enabled, and Rx Flow control is
415 * disabled by software override.
416 */
417 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
418 break;
419 case ixgbe_fc_full:
420 /* Flow control (both Rx and Tx) is enabled by SW override. */
421 fctrl_reg |= IXGBE_FCTRL_RFCE;
422 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
423 break;
424 default:
425 hw_dbg(hw, "Flow control param set incorrectly\n");
426 ret_val = IXGBE_ERR_CONFIG;
427 goto out;
428 break;
429 }
430
431 /* Set 802.3x based flow control settings. */
432 fctrl_reg |= IXGBE_FCTRL_DPF;
433 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
434 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
435
436 fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
437
438 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
439 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
440 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
441 hw->fc.high_water[i]) {
442 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
443 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
444 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
445 } else {
446 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
447 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
448 }
449
450 }
451
452 /* Configure pause time (2 TCs per register) */
453 reg = hw->fc.pause_time * 0x00010001;
454 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
455 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
456
457 /* Configure flow control refresh threshold value */
458 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
459
460out:
461 return ret_val;
462}
463
464/**
465 * ixgbe_start_mac_link_82598 - Configures MAC link settings
466 * @hw: pointer to hardware structure
467 *
468 * Configures link settings based on values in the ixgbe_hw struct.
469 * Restarts the link. Performs autonegotiation if needed.
470 **/
471static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
472 bool autoneg_wait_to_complete)
473{
474 u32 autoc_reg;
475 u32 links_reg;
476 u32 i;
477 s32 status = 0;
478
479 /* Restart link */
480 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
481 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
482 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
483
484 /* Only poll for autoneg to complete if specified to do so */
485 if (autoneg_wait_to_complete) {
486 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
487 IXGBE_AUTOC_LMS_KX4_AN ||
488 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
489 IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
490 links_reg = 0; /* Just in case Autoneg time = 0 */
491 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
492 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
493 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
494 break;
495 msleep(100);
496 }
497 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
498 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
499 hw_dbg(hw, "Autonegotiation did not complete.\n");
500 }
501 }
502 }
503
504 /* Add delay to filter out noises during initial link setup */
505 msleep(50);
506
507 return status;
508}
509
510/**
511 * ixgbe_validate_link_ready - Function looks for phy link
512 * @hw: pointer to hardware structure
513 *
514 * Function indicates success when phy link is available. If phy is not ready
515 * within 5 seconds of MAC indicating link, the function returns error.
516 **/
517static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
518{
519 u32 timeout;
520 u16 an_reg;
521
522 if (hw->device_id != IXGBE_DEV_ID_82598AT2)
523 return 0;
524
525 for (timeout = 0;
526 timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
527 hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg);
528
529 if ((an_reg & MDIO_AN_STAT1_COMPLETE) &&
530 (an_reg & MDIO_STAT1_LSTATUS))
531 break;
532
533 msleep(100);
534 }
535
536 if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
537 hw_dbg(hw, "Link was indicated but link is down\n");
538 return IXGBE_ERR_LINK_SETUP;
539 }
540
541 return 0;
542}
543
544/**
545 * ixgbe_check_mac_link_82598 - Get link/speed status
546 * @hw: pointer to hardware structure
547 * @speed: pointer to link speed
548 * @link_up: true is link is up, false otherwise
549 * @link_up_wait_to_complete: bool used to wait for link up or not
550 *
551 * Reads the links register to determine if link is up and the current speed
552 **/
553static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
554 ixgbe_link_speed *speed, bool *link_up,
555 bool link_up_wait_to_complete)
556{
557 u32 links_reg;
558 u32 i;
559 u16 link_reg, adapt_comp_reg;
560
561 /*
562 * SERDES PHY requires us to read link status from register 0xC79F.
563 * Bit 0 set indicates link is up/ready; clear indicates link down.
564 * 0xC00C is read to check that the XAUI lanes are active. Bit 0
565 * clear indicates active; set indicates inactive.
566 */
567 if (hw->phy.type == ixgbe_phy_nl) {
568 hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
569 hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
570 hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD,
571 &adapt_comp_reg);
572 if (link_up_wait_to_complete) {
573 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
574 if ((link_reg & 1) &&
575 ((adapt_comp_reg & 1) == 0)) {
576 *link_up = true;
577 break;
578 } else {
579 *link_up = false;
580 }
581 msleep(100);
582 hw->phy.ops.read_reg(hw, 0xC79F,
583 MDIO_MMD_PMAPMD,
584 &link_reg);
585 hw->phy.ops.read_reg(hw, 0xC00C,
586 MDIO_MMD_PMAPMD,
587 &adapt_comp_reg);
588 }
589 } else {
590 if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
591 *link_up = true;
592 else
593 *link_up = false;
594 }
595
596 if (!*link_up)
597 goto out;
598 }
599
600 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
601 if (link_up_wait_to_complete) {
602 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
603 if (links_reg & IXGBE_LINKS_UP) {
604 *link_up = true;
605 break;
606 } else {
607 *link_up = false;
608 }
609 msleep(100);
610 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
611 }
612 } else {
613 if (links_reg & IXGBE_LINKS_UP)
614 *link_up = true;
615 else
616 *link_up = false;
617 }
618
619 if (links_reg & IXGBE_LINKS_SPEED)
620 *speed = IXGBE_LINK_SPEED_10GB_FULL;
621 else
622 *speed = IXGBE_LINK_SPEED_1GB_FULL;
623
624 if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && *link_up &&
625 (ixgbe_validate_link_ready(hw) != 0))
626 *link_up = false;
627
628out:
629 return 0;
630}
631
632/**
633 * ixgbe_setup_mac_link_82598 - Set MAC link speed
634 * @hw: pointer to hardware structure
635 * @speed: new link speed
636 * @autoneg: true if auto-negotiation enabled
637 * @autoneg_wait_to_complete: true when waiting for completion is needed
638 *
639 * Set the link speed in the AUTOC register and restarts link.
640 **/
641static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
642 ixgbe_link_speed speed, bool autoneg,
643 bool autoneg_wait_to_complete)
644{
645 s32 status = 0;
646 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
647 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
648 u32 autoc = curr_autoc;
649 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
650
651 /* Check to see if speed passed in is supported. */
652 ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg);
653 speed &= link_capabilities;
654
655 if (speed == IXGBE_LINK_SPEED_UNKNOWN)
656 status = IXGBE_ERR_LINK_SETUP;
657
658 /* Set KX4/KX support according to speed requested */
659 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
660 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
661 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
662 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
663 autoc |= IXGBE_AUTOC_KX4_SUPP;
664 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
665 autoc |= IXGBE_AUTOC_KX_SUPP;
666 if (autoc != curr_autoc)
667 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
668 }
669
670 if (status == 0) {
671 /*
672 * Setup and restart the link based on the new values in
673 * ixgbe_hw. This will write the AUTOC register based on the new
674 * stored values.
675 */
676 status = ixgbe_start_mac_link_82598(hw,
677 autoneg_wait_to_complete);
678 }
679
680 return status;
681}
682
683
684/**
685 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
686 * @hw: pointer to hardware structure
687 * @speed: new link speed
688 * @autoneg_wait_to_complete: true if waiting is needed to complete
689 *
690 * Sets up the PHY according to the requested speed and restarts the MAC link.
691 **/
692static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
693 ixgbe_link_speed speed,
694 bool autoneg_wait_to_complete)
695{
696 s32 status;
697
698 /* Setup the PHY according to input speed */
699 status = hw->phy.ops.setup_link_speed(hw, speed,
700 autoneg_wait_to_complete);
703 /* Set up MAC */
704 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
705
706 return status;
707}
708
709/**
710 * ixgbe_reset_hw_82598 - Performs hardware reset
711 * @hw: pointer to hardware structure
712 *
713 * Resets the hardware by resetting the transmit and receive units, masks and
714 * clears all interrupts, performs a PHY reset, and performs a link (MAC)
715 * reset.
716 **/
717static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
718{
719 s32 status = 0;
720 s32 phy_status = 0;
721 u32 ctrl;
722 u32 gheccr;
723 u32 i;
724 u32 autoc;
725 u8 analog_val;
726
727 /* Call adapter stop to disable tx/rx and clear interrupts */
728 status = hw->mac.ops.stop_adapter(hw);
729 if (status != 0)
730 goto reset_hw_out;
731
732 /*
733 * Power up the Atlas Tx lanes if they are currently powered down.
734 * Atlas Tx lanes are powered down for MAC loopback tests, but
735 * they are not automatically restored on reset.
736 */
737 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
738 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
739 /* Enable Tx Atlas so packets can be transmitted again */
740 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
741 &analog_val);
742 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
743 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
744 analog_val);
745
746 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
747 &analog_val);
748 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
749 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
750 analog_val);
751
752 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
753 &analog_val);
754 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
755 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
756 analog_val);
757
758 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
759 &analog_val);
760 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
761 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
762 analog_val);
763 }
764
765 /* Reset PHY */
766 if (!hw->phy.reset_disable) {
767 /* PHY ops must be identified and initialized prior to reset */
768
769 /* Init PHY and function pointers, perform SFP setup */
770 phy_status = hw->phy.ops.init(hw);
771 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
772 goto reset_hw_out;
773 if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
774 goto mac_reset_top;
775
776 hw->phy.ops.reset(hw);
777 }
778
779mac_reset_top:
780 /*
781 * Issue global reset to the MAC. This needs to be a SW reset.
782 * If link reset is used, it might reset the MAC when mng is using it
783 */
784 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
785 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
786 IXGBE_WRITE_FLUSH(hw);
787
788 /* Poll for reset bit to self-clear indicating reset is complete */
789 for (i = 0; i < 10; i++) {
790 udelay(1);
791 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
792 if (!(ctrl & IXGBE_CTRL_RST))
793 break;
794 }
795 if (ctrl & IXGBE_CTRL_RST) {
796 status = IXGBE_ERR_RESET_FAILED;
797 hw_dbg(hw, "Reset polling failed to complete.\n");
798 }
799
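/* brief pause to let the device settle after the reset before continuing */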
800 msleep(50);
801
802 /*
803 * Double resets are required for recovery from certain error
804 * conditions. Between resets, it is necessary to stall to allow time
805 * for any pending HW events to complete.
806 */
807 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
808 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
809 goto mac_reset_top;
810 }
811
812 gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
813 gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
814 IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
815
816 /*
817 * Store the original AUTOC value if it has not been
818 * stored off yet. Otherwise restore the stored original
819 * AUTOC value since the reset operation sets it back to defaults.
820 */
821 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
822 if (!hw->mac.orig_link_settings_stored) {
823 hw->mac.orig_autoc = autoc;
824 hw->mac.orig_link_settings_stored = true;
825 } else if (autoc != hw->mac.orig_autoc) {
826 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
827 }
828
829 /* Store the permanent mac address */
830 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
831
832 /*
833 * Store MAC address from RAR0, clear receive address registers, and
834 * clear the multicast table
835 */
836 hw->mac.ops.init_rx_addrs(hw);
837
838reset_hw_out:
839 if (phy_status)
840 status = phy_status;
841
842 return status;
843}
844
845/**
846 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with an rx address
847 * @hw: pointer to hardware struct
848 * @rar: receive address register index to associate with a VMDq index
849 * @vmdq: VMDq set index
850 **/
851static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
852{
853 u32 rar_high;
854 u32 rar_entries = hw->mac.num_rar_entries;
855
856 /* Make sure we are using a valid rar index range */
857 if (rar >= rar_entries) {
858 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
859 return IXGBE_ERR_INVALID_ARGUMENT;
860 }
861
862 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
863 rar_high &= ~IXGBE_RAH_VIND_MASK;
864 rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
865 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
866 return 0;
867}
868
869/**
870 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
871 * @hw: pointer to hardware struct
872 * @rar: receive address register index to associate with a VMDq index
873 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
874 **/
875static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
876{
877 u32 rar_high;
878 u32 rar_entries = hw->mac.num_rar_entries;
879
880
881 /* Make sure we are using a valid rar index range */
882 if (rar >= rar_entries) {
883 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
884 return IXGBE_ERR_INVALID_ARGUMENT;
885 }
886
887 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
888 if (rar_high & IXGBE_RAH_VIND_MASK) {
889 rar_high &= ~IXGBE_RAH_VIND_MASK;
890 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
891 }
892
893 return 0;
894}
895
896/**
897 * ixgbe_set_vfta_82598 - Set VLAN filter table
898 * @hw: pointer to hardware structure
899 * @vlan: VLAN id to write to VLAN filter
900 * @vind: VMDq output index that maps queue to VLAN id in VFTA
901 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
902 *
903 * Turn on/off specified VLAN in the VLAN filter table.
904 **/
905static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
906 bool vlan_on)
907{
908 u32 regindex;
909 u32 bitindex;
910 u32 bits;
911 u32 vftabyte;
912
913 if (vlan > 4095)
914 return IXGBE_ERR_PARAM;
915
916 /* Determine 32-bit word position in array */
917 regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
918
919 /* Determine the location of the (VMD) queue index */
920 vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
921 bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
922
923 /* Set the nibble for VMD queue index */
924 bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
925 bits &= (~(0x0F << bitindex));
926 bits |= (vind << bitindex);
927 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
928
929 /* Determine the location of the bit for this VLAN id */
930 bitindex = vlan & 0x1F; /* lower five bits */
931
932 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
933 if (vlan_on)
934 /* Turn on this VLAN id */
935 bits |= (1 << bitindex);
936 else
937 /* Turn off this VLAN id */
938 bits &= ~(1 << bitindex);
939 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
940
941 return 0;
942}
943
944/**
945 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
946 * @hw: pointer to hardware structure
947 *
948 * Clears the VLAN filter table, and the VMDq index associated with the filter
949 **/
950static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
951{
952 u32 offset;
953 u32 vlanbyte;
954
955 for (offset = 0; offset < hw->mac.vft_size; offset++)
956 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
957
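/* also clear the per-VLAN VMDq pool index nibbles kept in the VFTAVIND registers */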
958 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
959 for (offset = 0; offset < hw->mac.vft_size; offset++)
960 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
961 0);
962
963 return 0;
964}
965
966/**
967 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
968 * @hw: pointer to hardware structure
969 * @reg: analog register to read
970 * @val: read value
971 *
972 * Performs a read operation from the specified Atlas analog register.
973 **/
974static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
975{
976 u32 atlas_ctl;
977
978 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
979 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
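/* the write above triggers the access; the register value shows up in the low byte of ATLASCTL after a short delay */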
980 IXGBE_WRITE_FLUSH(hw);
981 udelay(10);
982 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
983 *val = (u8)atlas_ctl;
984
985 return 0;
986}
987
988/**
989 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
990 * @hw: pointer to hardware structure
991 * @reg: atlas register to write
992 * @val: value to write
993 *
994 * Performs a write operation to the specified Atlas analog register.
995 **/
996static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
997{
998 u32 atlas_ctl;
999
1000 atlas_ctl = (reg << 8) | val;
1001 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1002 IXGBE_WRITE_FLUSH(hw);
1003 udelay(10);
1004
1005 return 0;
1006}
1007
1008/**
1009 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1010 * @hw: pointer to hardware structure
1011 * @byte_offset: EEPROM byte offset to read
1012 * @eeprom_data: value read
1013 *
1014 * Performs an 8 bit read operation from the SFP module's EEPROM over the I2C interface.
1015 **/
1016static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1017 u8 *eeprom_data)
1018{
1019 s32 status = 0;
1020 u16 sfp_addr = 0;
1021 u16 sfp_data = 0;
1022 u16 sfp_stat = 0;
1023 u32 i;
1024
1025 if (hw->phy.type == ixgbe_phy_nl) {
1026 /*
1027 * phy SDA/SCL registers are at addresses 0xC30A to
1028 * 0xC30D. These registers are used to talk to the SFP+
1029 * module's EEPROM through the SDA/SCL (I2C) interface.
1030 */
1031 sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
1032 sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
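/* writing the address with the read bit set tells the NL PHY to start the I2C read */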
1033 hw->phy.ops.write_reg(hw,
1034 IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1035 MDIO_MMD_PMAPMD,
1036 sfp_addr);
1037
1038 /* Poll status */
1039 for (i = 0; i < 100; i++) {
1040 hw->phy.ops.read_reg(hw,
1041 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1042 MDIO_MMD_PMAPMD,
1043 &sfp_stat);
1044 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1045 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1046 break;
1047 usleep_range(10000, 20000);
1048 }
1049
1050 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1051 hw_dbg(hw, "EEPROM read did not pass.\n");
1052 status = IXGBE_ERR_SFP_NOT_PRESENT;
1053 goto out;
1054 }
1055
1056 /* Read data */
1057 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1058 MDIO_MMD_PMAPMD, &sfp_data);
1059
1060 *eeprom_data = (u8)(sfp_data >> 8);
1061 } else {
1062 status = IXGBE_ERR_PHY;
1063 goto out;
1064 }
1065
1066out:
1067 return status;
1068}
1069
1070/**
1071 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1072 * @hw: pointer to hardware structure
1073 *
1074 * Determines physical layer capabilities of the current configuration.
1075 **/
1076static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1077{
1078 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1079 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1080 u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1081 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1082 u16 ext_ability = 0;
1083
1084 hw->phy.ops.identify(hw);
1085
1086 /* Copper PHY must be checked before AUTOC LMS to determine correct
1087 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1088 switch (hw->phy.type) {
1089 case ixgbe_phy_tn:
1090 case ixgbe_phy_cu_unknown:
1091 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE,
1092 MDIO_MMD_PMAPMD, &ext_ability);
1093 if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
1094 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1095 if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
1096 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1097 if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
1098 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1099 goto out;
1100 default:
1101 break;
1102 }
1103
1104 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1105 case IXGBE_AUTOC_LMS_1G_AN:
1106 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1107 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
1108 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1109 else
1110 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1111 break;
1112 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1113 if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
1114 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1115 else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
1116 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1117 else /* XAUI */
1118 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1119 break;
1120 case IXGBE_AUTOC_LMS_KX4_AN:
1121 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
1122 if (autoc & IXGBE_AUTOC_KX_SUPP)
1123 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1124 if (autoc & IXGBE_AUTOC_KX4_SUPP)
1125 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1126 break;
1127 default:
1128 break;
1129 }
1130
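/* SFP modules (NL PHY) and known fixed-media device IDs override the AUTOC-derived value */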
1131 if (hw->phy.type == ixgbe_phy_nl) {
1132 hw->phy.ops.identify_sfp(hw);
1133
1134 switch (hw->phy.sfp_type) {
1135 case ixgbe_sfp_type_da_cu:
1136 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1137 break;
1138 case ixgbe_sfp_type_sr:
1139 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1140 break;
1141 case ixgbe_sfp_type_lr:
1142 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1143 break;
1144 default:
1145 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1146 break;
1147 }
1148 }
1149
1150 switch (hw->device_id) {
1151 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1152 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1153 break;
1154 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
1155 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1156 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
1157 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1158 break;
1159 case IXGBE_DEV_ID_82598EB_XF_LR:
1160 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1161 break;
1162 default:
1163 break;
1164 }
1165
1166out:
1167 return physical_layer;
1168}
1169
1170/**
1171 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1172 * port devices.
1173 * @hw: pointer to the HW structure
1174 *
1175 * Calls common function and corrects issue with some single port devices
1176 * that enable LAN1 but not LAN0.
1177 **/
1178static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1179{
1180 struct ixgbe_bus_info *bus = &hw->bus;
1181 u16 pci_gen = 0;
1182 u16 pci_ctrl2 = 0;
1183
1184 ixgbe_set_lan_id_multi_port_pcie(hw);
1185
1186 /* check if LAN0 is disabled */
1187 hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1188 if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1189
1190 hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1191
1192 /* if LAN0 is completely disabled force function to 0 */
1193 if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1194 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1195 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1196
1197 bus->func = 0;
1198 }
1199 }
1200}
1201
1202/**
1203 * ixgbe_set_rxpba_82598 - Configure packet buffers
1204 * @hw: pointer to hardware structure
1205 * @num_pb: number of packet buffers to allocate
1206 * @headroom: reserve n KB of headroom (not used on 82598)
1207 * @strategy: packet buffer allocation strategy
1208 **/
1209static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom,
1210 int strategy)
1211{
1212 u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1213 u8 i = 0;
1214
1215 if (!num_pb)
1216 return;
1217
1218 /* Setup Rx packet buffer sizes */
1219 switch (strategy) {
1220 case PBA_STRATEGY_WEIGHTED:
1221 /* Setup the first four at 80KB */
1222 rxpktsize = IXGBE_RXPBSIZE_80KB;
1223 for (; i < 4; i++)
1224 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1225 /* Setup the last four at 48KB...don't re-init i */
1226 rxpktsize = IXGBE_RXPBSIZE_48KB;
1227 /* Fall Through */
1228 case PBA_STRATEGY_EQUAL:
1229 default:
1230 /* Divide the remaining Rx packet buffer evenly among the TCs */
1231 for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1232 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1233 break;
1234 }
1235
1236 /* Setup Tx packet buffer sizes */
1237 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1238 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
1239
1240 return;
1241}
1242
1243static struct ixgbe_mac_operations mac_ops_82598 = {
1244 .init_hw = &ixgbe_init_hw_generic,
1245 .reset_hw = &ixgbe_reset_hw_82598,
1246 .start_hw = &ixgbe_start_hw_82598,
1247 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
1248 .get_media_type = &ixgbe_get_media_type_82598,
1249 .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
1250 .enable_rx_dma = &ixgbe_enable_rx_dma_generic,
1251 .get_mac_addr = &ixgbe_get_mac_addr_generic,
1252 .stop_adapter = &ixgbe_stop_adapter_generic,
1253 .get_bus_info = &ixgbe_get_bus_info_generic,
1254 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598,
1255 .read_analog_reg8 = &ixgbe_read_analog_reg8_82598,
1256 .write_analog_reg8 = &ixgbe_write_analog_reg8_82598,
1257 .setup_link = &ixgbe_setup_mac_link_82598,
1258 .set_rxpba = &ixgbe_set_rxpba_82598,
1259 .check_link = &ixgbe_check_mac_link_82598,
1260 .get_link_capabilities = &ixgbe_get_link_capabilities_82598,
1261 .led_on = &ixgbe_led_on_generic,
1262 .led_off = &ixgbe_led_off_generic,
1263 .blink_led_start = &ixgbe_blink_led_start_generic,
1264 .blink_led_stop = &ixgbe_blink_led_stop_generic,
1265 .set_rar = &ixgbe_set_rar_generic,
1266 .clear_rar = &ixgbe_clear_rar_generic,
1267 .set_vmdq = &ixgbe_set_vmdq_82598,
1268 .clear_vmdq = &ixgbe_clear_vmdq_82598,
1269 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
1270 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
1271 .enable_mc = &ixgbe_enable_mc_generic,
1272 .disable_mc = &ixgbe_disable_mc_generic,
1273 .clear_vfta = &ixgbe_clear_vfta_82598,
1274 .set_vfta = &ixgbe_set_vfta_82598,
1275 .fc_enable = &ixgbe_fc_enable_82598,
1276 .set_fw_drv_ver = NULL,
1277 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
1278 .release_swfw_sync = &ixgbe_release_swfw_sync,
1279 .get_thermal_sensor_data = NULL,
1280 .init_thermal_sensor_thresh = NULL,
1281};
1282
1283static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
1284 .init_params = &ixgbe_init_eeprom_params_generic,
1285 .read = &ixgbe_read_eerd_generic,
1286 .write = &ixgbe_write_eeprom_generic,
1287 .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic,
1288 .read_buffer = &ixgbe_read_eerd_buffer_generic,
1289 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
1290 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
1291 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
1292};
1293
1294static struct ixgbe_phy_operations phy_ops_82598 = {
1295 .identify = &ixgbe_identify_phy_generic,
1296 .identify_sfp = &ixgbe_identify_sfp_module_generic,
1297 .init = &ixgbe_init_phy_ops_82598,
1298 .reset = &ixgbe_reset_phy_generic,
1299 .read_reg = &ixgbe_read_phy_reg_generic,
1300 .write_reg = &ixgbe_write_phy_reg_generic,
1301 .setup_link = &ixgbe_setup_phy_link_generic,
1302 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
1303 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598,
1304 .check_overtemp = &ixgbe_tn_check_overtemp,
1305};
1306
1307struct ixgbe_info ixgbe_82598_info = {
1308 .mac = ixgbe_mac_82598EB,
1309 .get_invariants = &ixgbe_get_invariants_82598,
1310 .mac_ops = &mac_ops_82598,
1311 .eeprom_ops = &eeprom_ops_82598,
1312 .phy_ops = &phy_ops_82598,
1313};
1314