// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"

#define IXGBE_82598_MAX_TX_QUEUES 32
#define IXGBE_82598_MAX_RX_QUEUES 64
#define IXGBE_82598_RAR_ENTRIES 16
#define IXGBE_82598_MC_TBL_SIZE 128
#define IXGBE_82598_VFT_TBL_SIZE 128
#define IXGBE_82598_RX_PB_SIZE 512

static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete);
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
				       u8 *eeprom_data);

/**
 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
 * @hw: pointer to the HW structure
 *
 * The defaults for 82598 should be in the range of 50us to 50ms,
 * however the hardware default for these parts is 500us to 1ms which is less
 * than the 10ms recommended by the pci-e spec. To address this we need to
 * increase the value to either the 10ms to 250ms range for capability
 * version 1 configurations, or the 16ms to 55ms range for version 2.
 **/
static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
{
	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
	u16 pcie_devctl2;

	if (ixgbe_removed(hw->hw_addr))
		return;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
		goto out;

	/*
	 * if capabilities version is type 1 we can write the
	 * timeout of 10ms to 250ms through the GCR register
	 */
	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/*
	 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
	ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}

static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	/* Call PHY identify routine to get the phy type */
	ixgbe_identify_phy_generic(hw);

	mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
	mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
	mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
	mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
	mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
	mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	return 0;
}

/**
 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during get_invariants because the PHY/SFP type was
 * not known. Perform the SFP init if necessary.
 *
 **/
static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 list_offset, data_offset;

	/* Identify the PHY */
	phy->ops.identify(hw);

	/* Overwrite the link function pointers if copper PHY */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
		mac->ops.get_link_capabilities =
			&ixgbe_get_copper_link_capabilities_generic;
	}

	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		break;
	case ixgbe_phy_nl:
		phy->ops.reset = &ixgbe_reset_phy_nl;

		/* Call SFP+ identify routine to get the SFP+ module type */
		ret_val = phy->ops.identify_sfp(hw);
		if (ret_val)
			return ret_val;
		if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
			return -EOPNOTSUPP;

		/* Check to see if SFP+ module is supported */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
							      &list_offset,
							      &data_offset);
		if (ret_val)
			return -EOPNOTSUPP;
		break;
	default:
		break;
	}

	return 0;
}

/**
 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware using the generic start_hw function
 * and then sets the PCIe completion timeout.
 **/
static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
	s32 ret_val;

	ret_val = ixgbe_start_hw_generic(hw);
	if (ret_val)
		return ret_val;

	/* set the completion timeout for interface */
	ixgbe_set_pcie_completion_timeout(hw);

	return 0;
}

/**
 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @autoneg: boolean auto-negotiation value
 *
 * Determines the link capabilities by reading the AUTOC register.
 **/
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
					     ixgbe_link_speed *speed,
					     bool *autoneg)
{
	u32 autoc = 0;

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not been
	 * stored, use the current register value.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = false;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = false;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = true;
		break;

	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = true;
		break;

	default:
		return -EIO;
	}

	return 0;
}

/**
 * ixgbe_get_media_type_82598 - Determines media type
 * @hw: pointer to hardware structure
 *
 * Returns the media type (fiber, copper, backplane)
 **/
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
{
	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
		return ixgbe_media_type_copper;

	default:
		break;
	}

	/* Media type for I82598 is based on device ID */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82598_BX:
		/* Default device ID is mezzanine card KX/KX4 */
		return ixgbe_media_type_backplane;

	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
	case IXGBE_DEV_ID_82598EB_XF_LR:
	case IXGBE_DEV_ID_82598EB_SFP_LOM:
		return ixgbe_media_type_fiber;

	case IXGBE_DEV_ID_82598EB_CX4:
	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
		return ixgbe_media_type_cx4;

	case IXGBE_DEV_ID_82598AT:
	case IXGBE_DEV_ID_82598AT2:
		return ixgbe_media_type_copper;

	default:
		return ixgbe_media_type_unknown;
	}
}

/**
 * ixgbe_fc_enable_82598 - Enable flow control
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to the current settings.
 **/
static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
	u32 fctrl_reg;
	u32 rmcs_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	u32 link_speed = 0;
	int i;
	bool link_up;

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time)
		return -EINVAL;

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				hw_dbg(hw, "Invalid water mark configuration\n");
				return -EINVAL;
			}
		}
	}

	/*
	 * On 82598 having Rx FC on causes resets while doing 1G
	 * so if it's on turn it off once we know link_speed. For
	 * more details see 82598 Specification update.
	 */
	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
		switch (hw->fc.requested_mode) {
		case ixgbe_fc_full:
			hw->fc.requested_mode = ixgbe_fc_tx_pause;
			break;
		case ixgbe_fc_rx_pause:
			hw->fc.requested_mode = ixgbe_fc_none;
			break;
		default:
			/* no change */
			break;
		}
	}

	/* Negotiate the fc mode to use */
	hw->mac.ops.fc_autoneg(hw);

	/* Disable any previous flow control settings */
	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);

	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	default:
		hw_dbg(hw, "Flow control param set incorrectly\n");
		return -EIO;
	}

	/* Set 802.3x based flow control settings. */
	fctrl_reg |= IXGBE_FCTRL_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
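			/*
			 * The watermarks are tracked in kilobyte units;
			 * shifting left by 10 scales them to the byte-based
			 * threshold fields before OR'ing in the XON/enable
			 * bits.
			 */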
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
		}
	}

	/* Configure pause time (2 TCs per register) */
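	/*
	 * Multiplying the 16-bit pause time by 0x00010001 replicates it into
	 * both halves of each 32-bit FCTTV register, so every write below
	 * programs two traffic classes at once.
	 */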
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

	return 0;
}

/**
 * ixgbe_start_mac_link_82598 - Configures MAC link settings
 * @hw: pointer to hardware structure
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Configures link settings based on values in the ixgbe_hw struct.
 * Restarts the link.  Performs autonegotiation if needed.
 **/
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
				      bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = 0;

	/* Restart link */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msleep(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = -EIO;
				hw_dbg(hw, "Autonegotiation did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msleep(50);

	return status;
}

/**
 * ixgbe_validate_link_ready - Function looks for phy link
 * @hw: pointer to hardware structure
 *
 * Function indicates success when phy link is available. If phy is not ready
 * within 5 seconds of MAC indicating link, the function returns error.
 **/
static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
{
	u32 timeout;
	u16 an_reg;

	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
		return 0;

	for (timeout = 0;
	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
		hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg);

		if ((an_reg & MDIO_AN_STAT1_COMPLETE) &&
		    (an_reg & MDIO_STAT1_LSTATUS))
			break;

		msleep(100);
	}

	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
		hw_dbg(hw, "Link was indicated but link is down\n");
		return -EIO;
	}

	return 0;
}

/**
 * ixgbe_check_mac_link_82598 - Get link/speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true if link is up, false otherwise
 * @link_up_wait_to_complete: bool used to wait for link up or not
 *
 * Reads the links register to determine if link is up and the current speed
 **/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete)
{
	u32 links_reg;
	u32 i;
	u16 link_reg, adapt_comp_reg;

	/*
	 * SERDES PHY requires us to read link status from register 0xC79F.
	 * Bit 0 set indicates link is up/ready; clear indicates link down.
	 * 0xC00C is read to check that the XAUI lanes are active.  Bit 0
	 * clear indicates active; set indicates inactive.
	 */
	if (hw->phy.type == ixgbe_phy_nl) {
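		/*
		 * 0xC79F is read twice; like the standard MDIO status
		 * registers, the link bit behaves as latch-low, so the first
		 * read flushes any stale latched value and the second returns
		 * the current state.
		 */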
		hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD,
				     &adapt_comp_reg);
		if (link_up_wait_to_complete) {
			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
				if ((link_reg & 1) &&
				    ((adapt_comp_reg & 1) == 0)) {
					*link_up = true;
					break;
				} else {
					*link_up = false;
				}
				msleep(100);
				hw->phy.ops.read_reg(hw, 0xC79F,
						     MDIO_MMD_PMAPMD,
						     &link_reg);
				hw->phy.ops.read_reg(hw, 0xC00C,
						     MDIO_MMD_PMAPMD,
						     &adapt_comp_reg);
			}
		} else {
			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
				*link_up = true;
			else
				*link_up = false;
		}

		if (!*link_up)
			return 0;
	}

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = true;
				break;
			} else {
				*link_up = false;
			}
			msleep(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = true;
		else
			*link_up = false;
	}

	if (links_reg & IXGBE_LINKS_SPEED)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_1GB_FULL;

	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && *link_up &&
	    (ixgbe_validate_link_ready(hw) != 0))
		*link_up = false;

	return 0;
}

/**
 * ixgbe_setup_mac_link_82598 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 **/
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed,
				      bool autoneg_wait_to_complete)
{
	bool autoneg = false;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc = curr_autoc;
	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;

	/* Check to see if speed passed in is supported. */
	ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg);
	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
		return -EINVAL;

	/* Set KX4/KX support according to speed requested */
	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
			autoc |= IXGBE_AUTOC_KX4_SUPP;
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
		if (autoc != curr_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	}

	/* Setup and restart the link based on the new values in
	 * ixgbe_hw.  This will write the AUTOC register based on the new
	 * stored values.
	 */
	return ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
}

/**
 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: true if waiting is needed to complete
 *
 * Sets the link speed in the AUTOC register in the MAC and restarts link.
 **/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);

	return status;
}

/**
 * ixgbe_reset_hw_82598 - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks and
 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
 * reset.
 **/
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	s32 status;
	s32 phy_status = 0;
	u32 ctrl;
	u32 gheccr;
	u32 i;
	u32 autoc;
	u8 analog_val;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status)
		return status;

	/*
	 * Power up the Atlas Tx lanes if they are currently powered down.
	 * Atlas Tx lanes are powered down for MAC loopback tests, but
	 * they are not automatically restored on reset.
	 */
	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* Enable Tx Atlas so packets can be transmitted again */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					      analog_val);
	}

	/* Reset PHY */
	if (hw->phy.reset_disable == false) {
		/* PHY ops must be identified and initialized prior to reset */

		/* Init PHY and function pointers, perform SFP setup */
		phy_status = hw->phy.ops.init(hw);
		if (phy_status == -EOPNOTSUPP)
			return phy_status;
		if (phy_status == -ENOENT)
			goto mac_reset_top;

		hw->phy.ops.reset(hw);
	}

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(1000, 1200);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
		udelay(1);
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = -EIO;
		hw_dbg(hw, "Reset polling failed to complete.\n");
	}

	msleep(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~(BIT(21) | BIT(18) | BIT(9) | BIT(6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value if it has not been
	 * stored off yet.  Otherwise restore the stored original
	 * AUTOC value since the reset operation sets it back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == false) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = true;
	} else if (autoc != hw->mac.orig_autoc) {
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table
	 */
	hw->mac.ops.init_rx_addrs(hw);

	if (phy_status)
		status = phy_status;

	return status;
}

/**
 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @vmdq: VMDq set index
 **/
static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
		return -EINVAL;
	}

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	rar_high &= ~IXGBE_RAH_VIND_MASK;
	rar_high |= FIELD_PREP(IXGBE_RAH_VIND_MASK, vmdq);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	return 0;
}

/**
 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
 **/
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
		return -EINVAL;
	}

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	if (rar_high & IXGBE_RAH_VIND_MASK) {
		rar_high &= ~IXGBE_RAH_VIND_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	}

	return 0;
}

/**
 * ixgbe_set_vfta_82598 - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFTA
 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
 * @vlvf_bypass: boolean flag - unused
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 **/
static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
				bool vlan_on, bool vlvf_bypass)
{
	u32 regindex;
	u32 bitindex;
	u32 bits;
	u32 vftabyte;

	if (vlan > 4095)
		return -EINVAL;

	/* Determine 32-bit word position in array */
	regindex = (vlan >> 5) & 0x7F; /* upper seven bits */

	/* Determine the location of the (VMD) queue index */
	vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
	bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
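	/*
	 * Example: for VLAN 100, regindex = 100 >> 5 = 3,
	 * vftabyte = (100 >> 3) & 0x3 = 0 and bitindex = (100 & 0x7) << 2 = 16,
	 * so the VMDq index lands in bits 19:16 of VFTAVIND(0, 3) and the
	 * enable bit written below is bit 100 & 0x1F = 4 of VFTA(3).
	 */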

	/* Set the nibble for VMD queue index */
	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
	bits &= (~(0x0F << bitindex));
	bits |= (vind << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);

	/* Determine the location of the bit for this VLAN id */
	bitindex = vlan & 0x1F; /* lower five bits */

	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
	if (vlan_on)
		/* Turn on this VLAN id */
		bits |= BIT(bitindex);
	else
		/* Turn off this VLAN id */
		bits &= ~BIT(bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);

	return 0;
}

/**
 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
 * @hw: pointer to hardware structure
 *
 * Clears the VLAN filter table, and the VMDq index associated with the filter
 **/
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
	u32 offset;
	u32 vlanbyte;

	for (offset = 0; offset < hw->mac.vft_size; offset++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);

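	/*
	 * The four VFTAVIND byte-arrays hold the per-VLAN VMDq index nibbles
	 * programmed by ixgbe_set_vfta_82598(), so clear them along with the
	 * VFTA enable bits.
	 */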
	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
		for (offset = 0; offset < hw->mac.vft_size; offset++)
			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
					0);

	return 0;
}

/**
 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Performs read operation to Atlas analog register specified.
 **/
static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32 atlas_ctl;

	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	udelay(10);
	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
	*val = (u8)atlas_ctl;

	return 0;
}

/**
 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
 * @hw: pointer to hardware structure
 * @reg: atlas register to write
 * @val: value to write
 *
 * Performs write operation to Atlas analog register specified.
 **/
static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32 atlas_ctl;

	atlas_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
	IXGBE_WRITE_FLUSH(hw);
	udelay(10);

	return 0;
}

/**
 * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
 * @hw: pointer to hardware structure
 * @dev_addr: address to read from
 * @byte_offset: byte offset to read from dev_addr
 * @eeprom_data: value read
 *
 * Performs an 8 bit read of the SFP module's data over the I2C interface.
 **/
static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
				    u8 byte_offset, u8 *eeprom_data)
{
	s32 status = 0;
	u16 sfp_addr = 0;
	u16 sfp_data = 0;
	u16 sfp_stat = 0;
	u16 gssr;
	u32 i;

	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
		gssr = IXGBE_GSSR_PHY1_SM;
	else
		gssr = IXGBE_GSSR_PHY0_SM;

	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
		return -EBUSY;

	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * phy SDA/SCL registers are at addresses 0xC30A to
		 * 0xC30D.  These registers are used to talk to the SFP+
		 * module's EEPROM through the SDA/SCL (I2C) interface.
		 */
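		/*
		 * Pack the I2C device address into the upper byte and the
		 * EEPROM byte offset into the lower byte, then flag the
		 * operation as a read.
		 */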
		sfp_addr = (dev_addr << 8) + byte_offset;
		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
		hw->phy.ops.write_reg_mdi(hw,
					  IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
					  MDIO_MMD_PMAPMD,
					  sfp_addr);

		/* Poll status */
		for (i = 0; i < 100; i++) {
			hw->phy.ops.read_reg_mdi(hw,
						 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
						 MDIO_MMD_PMAPMD,
						 &sfp_stat);
			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
				break;
			usleep_range(10000, 20000);
		}

		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
			hw_dbg(hw, "EEPROM read did not pass.\n");
			status = -ENOENT;
			goto out;
		}

		/* Read data */
		hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
					 MDIO_MMD_PMAPMD, &sfp_data);

		*eeprom_data = (u8)(sfp_data >> 8);
	} else {
		status = -EIO;
	}

out:
	hw->mac.ops.release_swfw_sync(hw, gssr);
	return status;
}

/**
 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
 * @hw: pointer to hardware structure
 * @byte_offset: EEPROM byte offset to read
 * @eeprom_data: value read
 *
 * Performs an 8 bit read of the SFP module's EEPROM over the I2C interface.
 **/
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
				       u8 *eeprom_data)
{
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
					byte_offset, eeprom_data);
}

/**
 * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset at address 0xA2
 * @sff8472_data: value read
 *
 * Performs an 8 bit read of the SFP module's SFF-8472 data over I2C.
 **/
static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
					u8 *sff8472_data)
{
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
					byte_offset, sff8472_data);
}

/**
 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple port devices
 * @hw: pointer to the HW structure
 *
 * Calls common function and corrects issue with some single port devices
 * that enable LAN1 but not LAN0.
 **/
static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u16 pci_gen = 0;
	u16 pci_ctrl2 = 0;

	ixgbe_set_lan_id_multi_port_pcie(hw);

	/* check if LAN0 is disabled */
	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {

		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);

		/* if LAN0 is completely disabled force function to 0 */
		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {

			bus->func = 0;
		}
	}
}

/**
 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom
 * @strategy: packet buffer allocation strategy
 **/
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
				  u32 headroom, int strategy)
{
	u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
	u8 i = 0;

	if (!num_pb)
		return;

	/* Setup Rx packet buffer sizes */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
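		/*
		 * Weighted split: 4 x 80KB plus 4 x 48KB accounts for the
		 * full 512KB 82598 Rx packet buffer.
		 */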
		/* Setup the first four at 80KB */
		rxpktsize = IXGBE_RXPBSIZE_80KB;
		for (; i < 4; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* Setup the last four at 48KB...don't re-init i */
		rxpktsize = IXGBE_RXPBSIZE_48KB;
		fallthrough;
	case PBA_STRATEGY_EQUAL:
	default:
		/* Divide the remaining Rx packet buffer evenly among the TCs */
		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	}

	/* Setup Tx packet buffer sizes */
	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
}

static const struct ixgbe_mac_operations mac_ops_82598 = {
	.init_hw = &ixgbe_init_hw_generic,
	.reset_hw = &ixgbe_reset_hw_82598,
	.start_hw = &ixgbe_start_hw_82598,
	.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
	.get_media_type = &ixgbe_get_media_type_82598,
	.enable_rx_dma = &ixgbe_enable_rx_dma_generic,
	.get_mac_addr = &ixgbe_get_mac_addr_generic,
	.stop_adapter = &ixgbe_stop_adapter_generic,
	.get_bus_info = &ixgbe_get_bus_info_generic,
	.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598,
	.read_analog_reg8 = &ixgbe_read_analog_reg8_82598,
	.write_analog_reg8 = &ixgbe_write_analog_reg8_82598,
	.setup_link = &ixgbe_setup_mac_link_82598,
	.set_rxpba = &ixgbe_set_rxpba_82598,
	.check_link = &ixgbe_check_mac_link_82598,
	.get_link_capabilities = &ixgbe_get_link_capabilities_82598,
	.led_on = &ixgbe_led_on_generic,
	.led_off = &ixgbe_led_off_generic,
	.init_led_link_act = ixgbe_init_led_link_act_generic,
	.blink_led_start = &ixgbe_blink_led_start_generic,
	.blink_led_stop = &ixgbe_blink_led_stop_generic,
	.set_rar = &ixgbe_set_rar_generic,
	.clear_rar = &ixgbe_clear_rar_generic,
	.set_vmdq = &ixgbe_set_vmdq_82598,
	.clear_vmdq = &ixgbe_clear_vmdq_82598,
	.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
	.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
	.enable_mc = &ixgbe_enable_mc_generic,
	.disable_mc = &ixgbe_disable_mc_generic,
	.clear_vfta = &ixgbe_clear_vfta_82598,
	.set_vfta = &ixgbe_set_vfta_82598,
	.fc_enable = &ixgbe_fc_enable_82598,
	.setup_fc = ixgbe_setup_fc_generic,
	.fc_autoneg = ixgbe_fc_autoneg,
	.set_fw_drv_ver = NULL,
	.acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
	.release_swfw_sync = &ixgbe_release_swfw_sync,
	.init_swfw_sync = NULL,
	.get_thermal_sensor_data = NULL,
	.init_thermal_sensor_thresh = NULL,
	.prot_autoc_read = &prot_autoc_read_generic,
	.prot_autoc_write = &prot_autoc_write_generic,
	.enable_rx = &ixgbe_enable_rx_generic,
	.disable_rx = &ixgbe_disable_rx_generic,
};

static const struct ixgbe_eeprom_operations eeprom_ops_82598 = {
	.init_params = &ixgbe_init_eeprom_params_generic,
	.read = &ixgbe_read_eerd_generic,
	.write = &ixgbe_write_eeprom_generic,
	.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic,
	.read_buffer = &ixgbe_read_eerd_buffer_generic,
	.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
	.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
	.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};

static const struct ixgbe_phy_operations phy_ops_82598 = {
	.identify = &ixgbe_identify_phy_generic,
	.identify_sfp = &ixgbe_identify_module_generic,
	.init = &ixgbe_init_phy_ops_82598,
	.reset = &ixgbe_reset_phy_generic,
	.read_reg = &ixgbe_read_phy_reg_generic,
	.write_reg = &ixgbe_write_phy_reg_generic,
	.read_reg_mdi = &ixgbe_read_phy_reg_mdi,
	.write_reg_mdi = &ixgbe_write_phy_reg_mdi,
	.setup_link = &ixgbe_setup_phy_link_generic,
	.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
	.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598,
	.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598,
	.check_overtemp = &ixgbe_tn_check_overtemp,
};

const struct ixgbe_info ixgbe_82598_info = {
	.mac = ixgbe_mac_82598EB,
	.get_invariants = &ixgbe_get_invariants_82598,
	.mac_ops = &mac_ops_82598,
	.eeprom_ops = &eeprom_ops_82598,
	.phy_ops = &phy_ops_82598,
	.mvals = ixgbe_mvals_8259X,
};
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2015 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include <linux/pci.h>
30#include <linux/delay.h>
31#include <linux/sched.h>
32
33#include "ixgbe.h"
34#include "ixgbe_phy.h"
35
36#define IXGBE_82598_MAX_TX_QUEUES 32
37#define IXGBE_82598_MAX_RX_QUEUES 64
38#define IXGBE_82598_RAR_ENTRIES 16
39#define IXGBE_82598_MC_TBL_SIZE 128
40#define IXGBE_82598_VFT_TBL_SIZE 128
41#define IXGBE_82598_RX_PB_SIZE 512
42
43static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
44 ixgbe_link_speed speed,
45 bool autoneg_wait_to_complete);
46static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
47 u8 *eeprom_data);
48
49/**
50 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
51 * @hw: pointer to the HW structure
52 *
53 * The defaults for 82598 should be in the range of 50us to 50ms,
54 * however the hardware default for these parts is 500us to 1ms which is less
55 * than the 10ms recommended by the pci-e spec. To address this we need to
56 * increase the value to either 10ms to 250ms for capability version 1 config,
57 * or 16ms to 55ms for version 2.
58 **/
59static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
60{
61 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
62 u16 pcie_devctl2;
63
64 if (ixgbe_removed(hw->hw_addr))
65 return;
66
67 /* only take action if timeout value is defaulted to 0 */
68 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
69 goto out;
70
71 /*
72 * if capababilities version is type 1 we can write the
73 * timeout of 10ms to 250ms through the GCR register
74 */
75 if (!(gcr & IXGBE_GCR_CAP_VER2)) {
76 gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
77 goto out;
78 }
79
80 /*
81 * for version 2 capabilities we need to write the config space
82 * directly in order to set the completion timeout value for
83 * 16ms to 55ms
84 */
85 pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
86 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
87 ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
88out:
89 /* disable completion timeout resend */
90 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
91 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
92}
93
94static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
95{
96 struct ixgbe_mac_info *mac = &hw->mac;
97
98 /* Call PHY identify routine to get the phy type */
99 ixgbe_identify_phy_generic(hw);
100
101 mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
102 mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
103 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
104 mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
105 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
106 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
107 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
108
109 return 0;
110}
111
112/**
113 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
114 * @hw: pointer to hardware structure
115 *
116 * Initialize any function pointers that were not able to be
117 * set during get_invariants because the PHY/SFP type was
118 * not known. Perform the SFP init if necessary.
119 *
120 **/
121static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
122{
123 struct ixgbe_mac_info *mac = &hw->mac;
124 struct ixgbe_phy_info *phy = &hw->phy;
125 s32 ret_val;
126 u16 list_offset, data_offset;
127
128 /* Identify the PHY */
129 phy->ops.identify(hw);
130
131 /* Overwrite the link function pointers if copper PHY */
132 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
133 mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
134 mac->ops.get_link_capabilities =
135 &ixgbe_get_copper_link_capabilities_generic;
136 }
137
138 switch (hw->phy.type) {
139 case ixgbe_phy_tn:
140 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
141 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
142 phy->ops.get_firmware_version =
143 &ixgbe_get_phy_firmware_version_tnx;
144 break;
145 case ixgbe_phy_nl:
146 phy->ops.reset = &ixgbe_reset_phy_nl;
147
148 /* Call SFP+ identify routine to get the SFP+ module type */
149 ret_val = phy->ops.identify_sfp(hw);
150 if (ret_val)
151 return ret_val;
152 if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
153 return IXGBE_ERR_SFP_NOT_SUPPORTED;
154
155 /* Check to see if SFP+ module is supported */
156 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
157 &list_offset,
158 &data_offset);
159 if (ret_val)
160 return IXGBE_ERR_SFP_NOT_SUPPORTED;
161 break;
162 default:
163 break;
164 }
165
166 return 0;
167}
168
169/**
170 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
171 * @hw: pointer to hardware structure
172 *
173 * Starts the hardware using the generic start_hw function.
174 * Disables relaxed ordering for archs other than SPARC
175 * Then set pcie completion timeout
176 *
177 **/
178static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
179{
180#ifndef CONFIG_SPARC
181 u32 regval;
182 u32 i;
183#endif
184 s32 ret_val;
185
186 ret_val = ixgbe_start_hw_generic(hw);
187
188#ifndef CONFIG_SPARC
189 /* Disable relaxed ordering */
190 for (i = 0; ((i < hw->mac.max_tx_queues) &&
191 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
192 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
193 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
194 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
195 }
196
197 for (i = 0; ((i < hw->mac.max_rx_queues) &&
198 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
199 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
200 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
201 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
202 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
203 }
204#endif
205 if (ret_val)
206 return ret_val;
207
208 /* set the completion timeout for interface */
209 ixgbe_set_pcie_completion_timeout(hw);
210
211 return 0;
212}
213
214/**
215 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
216 * @hw: pointer to hardware structure
217 * @speed: pointer to link speed
218 * @autoneg: boolean auto-negotiation value
219 *
220 * Determines the link capabilities by reading the AUTOC register.
221 **/
222static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
223 ixgbe_link_speed *speed,
224 bool *autoneg)
225{
226 u32 autoc = 0;
227
228 /*
229 * Determine link capabilities based on the stored value of AUTOC,
230 * which represents EEPROM defaults. If AUTOC value has not been
231 * stored, use the current register value.
232 */
233 if (hw->mac.orig_link_settings_stored)
234 autoc = hw->mac.orig_autoc;
235 else
236 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
237
238 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
239 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
240 *speed = IXGBE_LINK_SPEED_1GB_FULL;
241 *autoneg = false;
242 break;
243
244 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
245 *speed = IXGBE_LINK_SPEED_10GB_FULL;
246 *autoneg = false;
247 break;
248
249 case IXGBE_AUTOC_LMS_1G_AN:
250 *speed = IXGBE_LINK_SPEED_1GB_FULL;
251 *autoneg = true;
252 break;
253
254 case IXGBE_AUTOC_LMS_KX4_AN:
255 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
256 *speed = IXGBE_LINK_SPEED_UNKNOWN;
257 if (autoc & IXGBE_AUTOC_KX4_SUPP)
258 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
259 if (autoc & IXGBE_AUTOC_KX_SUPP)
260 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
261 *autoneg = true;
262 break;
263
264 default:
265 return IXGBE_ERR_LINK_SETUP;
266 }
267
268 return 0;
269}
270
271/**
272 * ixgbe_get_media_type_82598 - Determines media type
273 * @hw: pointer to hardware structure
274 *
275 * Returns the media type (fiber, copper, backplane)
276 **/
277static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
278{
279 /* Detect if there is a copper PHY attached. */
280 switch (hw->phy.type) {
281 case ixgbe_phy_cu_unknown:
282 case ixgbe_phy_tn:
283 return ixgbe_media_type_copper;
284
285 default:
286 break;
287 }
288
289 /* Media type for I82598 is based on device ID */
290 switch (hw->device_id) {
291 case IXGBE_DEV_ID_82598:
292 case IXGBE_DEV_ID_82598_BX:
293 /* Default device ID is mezzanine card KX/KX4 */
294 return ixgbe_media_type_backplane;
295
296 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
297 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
298 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
299 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
300 case IXGBE_DEV_ID_82598EB_XF_LR:
301 case IXGBE_DEV_ID_82598EB_SFP_LOM:
302 return ixgbe_media_type_fiber;
303
304 case IXGBE_DEV_ID_82598EB_CX4:
305 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
306 return ixgbe_media_type_cx4;
307
308 case IXGBE_DEV_ID_82598AT:
309 case IXGBE_DEV_ID_82598AT2:
310 return ixgbe_media_type_copper;
311
312 default:
313 return ixgbe_media_type_unknown;
314 }
315}
316
317/**
318 * ixgbe_fc_enable_82598 - Enable flow control
319 * @hw: pointer to hardware structure
320 *
321 * Enable flow control according to the current settings.
322 **/
323static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
324{
325 u32 fctrl_reg;
326 u32 rmcs_reg;
327 u32 reg;
328 u32 fcrtl, fcrth;
329 u32 link_speed = 0;
330 int i;
331 bool link_up;
332
333 /* Validate the water mark configuration */
334 if (!hw->fc.pause_time)
335 return IXGBE_ERR_INVALID_LINK_SETTINGS;
336
337 /* Low water mark of zero causes XOFF floods */
338 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
339 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
340 hw->fc.high_water[i]) {
341 if (!hw->fc.low_water[i] ||
342 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
343 hw_dbg(hw, "Invalid water mark configuration\n");
344 return IXGBE_ERR_INVALID_LINK_SETTINGS;
345 }
346 }
347 }
348
349 /*
350 * On 82598 having Rx FC on causes resets while doing 1G
351 * so if it's on turn it off once we know link_speed. For
352 * more details see 82598 Specification update.
353 */
354 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
355 if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
356 switch (hw->fc.requested_mode) {
357 case ixgbe_fc_full:
358 hw->fc.requested_mode = ixgbe_fc_tx_pause;
359 break;
360 case ixgbe_fc_rx_pause:
361 hw->fc.requested_mode = ixgbe_fc_none;
362 break;
363 default:
364 /* no change */
365 break;
366 }
367 }
368
369 /* Negotiate the fc mode to use */
370 ixgbe_fc_autoneg(hw);
371
372 /* Disable any previous flow control settings */
373 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
374 fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
375
376 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
377 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
378
379 /*
380 * The possible values of fc.current_mode are:
381 * 0: Flow control is completely disabled
382 * 1: Rx flow control is enabled (we can receive pause frames,
383 * but not send pause frames).
384 * 2: Tx flow control is enabled (we can send pause frames but
385 * we do not support receiving pause frames).
386 * 3: Both Rx and Tx flow control (symmetric) are enabled.
387 * other: Invalid.
388 */
389 switch (hw->fc.current_mode) {
390 case ixgbe_fc_none:
391 /*
392 * Flow control is disabled by software override or autoneg.
393 * The code below will actually disable it in the HW.
394 */
395 break;
396 case ixgbe_fc_rx_pause:
397 /*
398 * Rx Flow control is enabled and Tx Flow control is
399 * disabled by software override. Since there really
400 * isn't a way to advertise that we are capable of RX
401 * Pause ONLY, we will advertise that we support both
402 * symmetric and asymmetric Rx PAUSE. Later, we will
403 * disable the adapter's ability to send PAUSE frames.
404 */
405 fctrl_reg |= IXGBE_FCTRL_RFCE;
406 break;
407 case ixgbe_fc_tx_pause:
408 /*
409 * Tx Flow control is enabled, and Rx Flow control is
410 * disabled by software override.
411 */
412 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
413 break;
414 case ixgbe_fc_full:
415 /* Flow control (both Rx and Tx) is enabled by SW override. */
416 fctrl_reg |= IXGBE_FCTRL_RFCE;
417 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
418 break;
419 default:
420 hw_dbg(hw, "Flow control param set incorrectly\n");
421 return IXGBE_ERR_CONFIG;
422 }
423
424 /* Set 802.3x based flow control settings. */
425 fctrl_reg |= IXGBE_FCTRL_DPF;
426 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
427 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
428
429 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
430 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
431 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
432 hw->fc.high_water[i]) {
433 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
434 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
435 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
436 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
437 } else {
438 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
439 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
440 }
441
442 }
443
444 /* Configure pause time (2 TCs per register) */
445 reg = hw->fc.pause_time * 0x00010001;
446 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
447 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
448
449 /* Configure flow control refresh threshold value */
450 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
451
452 return 0;
453}
454
455/**
456 * ixgbe_start_mac_link_82598 - Configures MAC link settings
457 * @hw: pointer to hardware structure
458 *
459 * Configures link settings based on values in the ixgbe_hw struct.
460 * Restarts the link. Performs autonegotiation if needed.
461 **/
462static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
463 bool autoneg_wait_to_complete)
464{
465 u32 autoc_reg;
466 u32 links_reg;
467 u32 i;
468 s32 status = 0;
469
470 /* Restart link */
471 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
472 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
473 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
474
475 /* Only poll for autoneg to complete if specified to do so */
476 if (autoneg_wait_to_complete) {
477 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
478 IXGBE_AUTOC_LMS_KX4_AN ||
479 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
480 IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
481 links_reg = 0; /* Just in case Autoneg time = 0 */
482 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
483 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
484 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
485 break;
486 msleep(100);
487 }
488 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
489 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
490 hw_dbg(hw, "Autonegotiation did not complete.\n");
491 }
492 }
493 }
494
495 /* Add delay to filter out noises during initial link setup */
496 msleep(50);
497
498 return status;
499}
500
501/**
502 * ixgbe_validate_link_ready - Function looks for phy link
503 * @hw: pointer to hardware structure
504 *
505 * Function indicates success when phy link is available. If phy is not ready
506 * within 5 seconds of MAC indicating link, the function returns error.
507 **/
508static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
509{
510 u32 timeout;
511 u16 an_reg;
512
513 if (hw->device_id != IXGBE_DEV_ID_82598AT2)
514 return 0;
515
516 for (timeout = 0;
517 timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
518 hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg);
519
520 if ((an_reg & MDIO_AN_STAT1_COMPLETE) &&
521 (an_reg & MDIO_STAT1_LSTATUS))
522 break;
523
524 msleep(100);
525 }
526
527 if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
528 hw_dbg(hw, "Link was indicated but link is down\n");
529 return IXGBE_ERR_LINK_SETUP;
530 }
531
532 return 0;
533}
534
535/**
536 * ixgbe_check_mac_link_82598 - Get link/speed status
537 * @hw: pointer to hardware structure
538 * @speed: pointer to link speed
539 * @link_up: true is link is up, false otherwise
540 * @link_up_wait_to_complete: bool used to wait for link up or not
541 *
542 * Reads the links register to determine if link is up and the current speed
543 **/
544static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
545 ixgbe_link_speed *speed, bool *link_up,
546 bool link_up_wait_to_complete)
547{
548 u32 links_reg;
549 u32 i;
550 u16 link_reg, adapt_comp_reg;
551
552 /*
553 * SERDES PHY requires us to read link status from register 0xC79F.
554 * Bit 0 set indicates link is up/ready; clear indicates link down.
555 * 0xC00C is read to check that the XAUI lanes are active. Bit 0
556 * clear indicates active; set indicates inactive.
557 */
558 if (hw->phy.type == ixgbe_phy_nl) {
559 hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
560 hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
561 hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD,
562 &adapt_comp_reg);
563 if (link_up_wait_to_complete) {
564 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
565 if ((link_reg & 1) &&
566 ((adapt_comp_reg & 1) == 0)) {
567 *link_up = true;
568 break;
569 } else {
570 *link_up = false;
571 }
572 msleep(100);
573 hw->phy.ops.read_reg(hw, 0xC79F,
574 MDIO_MMD_PMAPMD,
575 &link_reg);
576 hw->phy.ops.read_reg(hw, 0xC00C,
577 MDIO_MMD_PMAPMD,
578 &adapt_comp_reg);
579 }
580 } else {
581 if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
582 *link_up = true;
583 else
584 *link_up = false;
585 }
586
587 if (!*link_up)
588 return 0;
589 }
590
591 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
592 if (link_up_wait_to_complete) {
593 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
594 if (links_reg & IXGBE_LINKS_UP) {
595 *link_up = true;
596 break;
597 } else {
598 *link_up = false;
599 }
600 msleep(100);
601 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
602 }
603 } else {
604 if (links_reg & IXGBE_LINKS_UP)
605 *link_up = true;
606 else
607 *link_up = false;
608 }
609
610 if (links_reg & IXGBE_LINKS_SPEED)
611 *speed = IXGBE_LINK_SPEED_10GB_FULL;
612 else
613 *speed = IXGBE_LINK_SPEED_1GB_FULL;
614
615 if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && *link_up &&
616 (ixgbe_validate_link_ready(hw) != 0))
617 *link_up = false;
618
619 return 0;
620}
621
622/**
623 * ixgbe_setup_mac_link_82598 - Set MAC link speed
624 * @hw: pointer to hardware structure
625 * @speed: new link speed
626 * @autoneg_wait_to_complete: true when waiting for completion is needed
627 *
628 * Set the link speed in the AUTOC register and restarts link.
629 **/
630static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
631 ixgbe_link_speed speed,
632 bool autoneg_wait_to_complete)
633{
634 bool autoneg = false;
635 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
636 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
637 u32 autoc = curr_autoc;
638 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
639
640 /* Check to see if speed passed in is supported. */
641 ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg);
642 speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
		return IXGBE_ERR_LINK_SETUP;

	/* Set KX4/KX support according to speed requested */
	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
			autoc |= IXGBE_AUTOC_KX4_SUPP;
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
		if (autoc != curr_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	}

	/* Setup and restart the link based on the new values in
	 * ixgbe_hw. This will write the AUTOC register based on the new
	 * stored values
	 */
	return ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
}

/**
 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: true if waiting is needed to complete
 *
 * Sets the link speed on the PHY and restarts the MAC link.
 **/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);

	return status;
}

/**
 * ixgbe_reset_hw_82598 - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masking
 * and clearing all interrupts, performing a PHY reset, and performing a
 * link (MAC) reset.
 **/
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	s32 status;
	s32 phy_status = 0;
	u32 ctrl;
	u32 gheccr;
	u32 i;
	u32 autoc;
	u8 analog_val;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status)
		return status;

	/*
	 * Power up the Atlas Tx lanes if they are currently powered down.
	 * Atlas Tx lanes are powered down for MAC loopback tests, but
	 * they are not automatically restored on reset.
	 */
	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* Enable Tx Atlas so packets can be transmitted again */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					      analog_val);
	}

	/* Reset PHY */
	if (hw->phy.reset_disable == false) {
		/* PHY ops must be identified and initialized prior to reset */

		/* Init PHY and function pointers, perform SFP setup */
		phy_status = hw->phy.ops.init(hw);
		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
			return phy_status;
		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
			goto mac_reset_top;

		hw->phy.ops.reset(hw);
	}

mac_reset_top:
	/*
	 * Issue global reset to the MAC. This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(1000, 1200);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
		udelay(1);
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		hw_dbg(hw, "Reset polling failed to complete.\n");
	}

	msleep(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions. Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value if it has not been
	 * stored off yet. Otherwise restore the stored original
	 * AUTOC value since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == false) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = true;
	} else if (autoc != hw->mac.orig_autoc) {
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table
	 */
	hw->mac.ops.init_rx_addrs(hw);

	if (phy_status)
		status = phy_status;

	return status;
}

/**
 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @vmdq: VMDq set index
 **/
static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

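	/* The VMDq pool index for this RAR entry lives in the VIND field of RAH */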
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	rar_high &= ~IXGBE_RAH_VIND_MASK;
	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	return 0;
}

/**
 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to disassociate from a VMDq index
 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
 **/
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	if (rar_high & IXGBE_RAH_VIND_MASK) {
		rar_high &= ~IXGBE_RAH_VIND_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	}

	return 0;
}

/**
 * ixgbe_set_vfta_82598 - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFTA
 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
 * @vlvf_bypass: boolean flag - unused
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 **/
static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
				bool vlan_on, bool vlvf_bypass)
{
	u32 regindex;
	u32 bitindex;
	u32 bits;
	u32 vftabyte;

	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* Determine 32-bit word position in array */
	regindex = (vlan >> 5) & 0x7F; /* upper seven bits */

	/* Determine the location of the (VMD) queue index */
	vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
	bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
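	/*
	 * Worked example (illustrative): VLAN 100 yields regindex = 3,
	 * vftabyte = 0 and a nibble offset of 16 in the VIND array, and
	 * bit 4 of VFTA[3] is the enable bit programmed below.
	 */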

	/* Set the nibble for VMD queue index */
	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
	bits &= (~(0x0F << bitindex));
	bits |= (vind << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);

	/* Determine the location of the bit for this VLAN id */
	bitindex = vlan & 0x1F; /* lower five bits */

	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
	if (vlan_on)
		/* Turn on this VLAN id */
		bits |= (1 << bitindex);
	else
		/* Turn off this VLAN id */
		bits &= ~(1 << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);

	return 0;
}

/**
 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
 * @hw: pointer to hardware structure
 *
 * Clears the VLAN filter table, and the VMDq index associated with the filter
 **/
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
	u32 offset;
	u32 vlanbyte;

	for (offset = 0; offset < hw->mac.vft_size; offset++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);

	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
		for (offset = 0; offset < hw->mac.vft_size; offset++)
			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
					0);

	return 0;
}

/**
 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Performs read operation to Atlas analog register specified.
 **/
static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32 atlas_ctl;

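	/*
	 * Atlas analog registers are reached indirectly through ATLASCTL:
	 * the register address sits in bits 15:8 and the data byte comes
	 * back in the low byte of the readback.
	 */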
	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	udelay(10);
	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
	*val = (u8)atlas_ctl;

	return 0;
}

/**
 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
 * @hw: pointer to hardware structure
 * @reg: atlas register to write
 * @val: value to write
 *
 * Performs write operation to Atlas analog register specified.
 **/
static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32 atlas_ctl;

	atlas_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
	IXGBE_WRITE_FLUSH(hw);
	udelay(10);

	return 0;
}

/**
 * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
 * @hw: pointer to hardware structure
 * @dev_addr: address to read from
 * @byte_offset: byte offset to read from dev_addr
 * @eeprom_data: value read
 *
 * Performs an 8 bit read of the SFP module's data over the I2C interface.
 **/
static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
				    u8 byte_offset, u8 *eeprom_data)
{
	s32 status = 0;
	u16 sfp_addr = 0;
	u16 sfp_data = 0;
	u16 sfp_stat = 0;
	u16 gssr;
	u32 i;

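	/* Pick the SW/FW semaphore that belongs to this LAN port's PHY */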
	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
		gssr = IXGBE_GSSR_PHY1_SM;
	else
		gssr = IXGBE_GSSR_PHY0_SM;

	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
		return IXGBE_ERR_SWFW_SYNC;

	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * phy SDA/SCL registers are at addresses 0xC30A to
		 * 0xC30D. These registers are used to talk to the SFP+
		 * module's EEPROM through the SDA/SCL (I2C) interface.
		 */
		sfp_addr = (dev_addr << 8) + byte_offset;
		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
		hw->phy.ops.write_reg_mdi(hw,
					  IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
					  MDIO_MMD_PMAPMD,
					  sfp_addr);

		/* Poll status */
		for (i = 0; i < 100; i++) {
			hw->phy.ops.read_reg_mdi(hw,
						 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
						 MDIO_MMD_PMAPMD,
						 &sfp_stat);
			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
				break;
			usleep_range(10000, 20000);
		}

		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
			hw_dbg(hw, "EEPROM read did not pass.\n");
			status = IXGBE_ERR_SFP_NOT_PRESENT;
			goto out;
		}

		/* Read data */
		hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
					 MDIO_MMD_PMAPMD, &sfp_data);

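		/* The requested byte comes back in the upper half of the data register */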
		*eeprom_data = (u8)(sfp_data >> 8);
	} else {
		status = IXGBE_ERR_PHY;
	}

out:
	hw->mac.ops.release_swfw_sync(hw, gssr);
	return status;
}

/**
 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
 * @hw: pointer to hardware structure
 * @byte_offset: EEPROM byte offset to read
 * @eeprom_data: value read
 *
 * Performs an 8 bit read of the SFP module's EEPROM over the I2C interface.
 **/
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
				       u8 *eeprom_data)
{
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
					byte_offset, eeprom_data);
}

/**
 * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset at address 0xA2
 * @sff8472_data: value read
 *
 * Performs an 8 bit read of the SFP module's SFF-8472 data over I2C
 **/
static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
					u8 *sff8472_data)
{
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
					byte_offset, sff8472_data);
}

/**
 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple port devices
 * @hw: pointer to the HW structure
 *
 * Calls common function and corrects issue with some single port devices
 * that enable LAN1 but not LAN0.
 **/
static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u16 pci_gen = 0;
	u16 pci_ctrl2 = 0;

	ixgbe_set_lan_id_multi_port_pcie(hw);

	/* check if LAN0 is disabled */
	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
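	/* A pointer of 0 or 0xFFFF means the PCIe config block is absent in the EEPROM */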
	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {

		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);

		/* if LAN0 is completely disabled force function to 0 */
		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {

			bus->func = 0;
		}
	}
}

/**
 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom
 * @strategy: packet buffer allocation strategy
 **/
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
				  u32 headroom, int strategy)
{
	u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
	u8 i = 0;

	if (!num_pb)
		return;

	/* Setup Rx packet buffer sizes */
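	/*
	 * Weighted: the first four buffers get 80KB and the remaining ones
	 * get 48KB; equal: every buffer gets 64KB (assuming the 82598 has
	 * eight packet buffers, i.e. IXGBE_MAX_PACKET_BUFFERS == 8).
	 */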
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* Setup the first four at 80KB */
		rxpktsize = IXGBE_RXPBSIZE_80KB;
		for (; i < 4; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* Setup the last four at 48KB...don't re-init i */
		rxpktsize = IXGBE_RXPBSIZE_48KB;
		/* Fall Through */
	case PBA_STRATEGY_EQUAL:
	default:
		/* Divide the remaining Rx packet buffer evenly among the TCs */
		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	}

	/* Setup Tx packet buffer sizes */
	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
}

static struct ixgbe_mac_operations mac_ops_82598 = {
	.init_hw = &ixgbe_init_hw_generic,
	.reset_hw = &ixgbe_reset_hw_82598,
	.start_hw = &ixgbe_start_hw_82598,
	.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
	.get_media_type = &ixgbe_get_media_type_82598,
	.enable_rx_dma = &ixgbe_enable_rx_dma_generic,
	.get_mac_addr = &ixgbe_get_mac_addr_generic,
	.stop_adapter = &ixgbe_stop_adapter_generic,
	.get_bus_info = &ixgbe_get_bus_info_generic,
	.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598,
	.read_analog_reg8 = &ixgbe_read_analog_reg8_82598,
	.write_analog_reg8 = &ixgbe_write_analog_reg8_82598,
	.setup_link = &ixgbe_setup_mac_link_82598,
	.set_rxpba = &ixgbe_set_rxpba_82598,
	.check_link = &ixgbe_check_mac_link_82598,
	.get_link_capabilities = &ixgbe_get_link_capabilities_82598,
	.led_on = &ixgbe_led_on_generic,
	.led_off = &ixgbe_led_off_generic,
	.blink_led_start = &ixgbe_blink_led_start_generic,
	.blink_led_stop = &ixgbe_blink_led_stop_generic,
	.set_rar = &ixgbe_set_rar_generic,
	.clear_rar = &ixgbe_clear_rar_generic,
	.set_vmdq = &ixgbe_set_vmdq_82598,
	.clear_vmdq = &ixgbe_clear_vmdq_82598,
	.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
	.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
	.enable_mc = &ixgbe_enable_mc_generic,
	.disable_mc = &ixgbe_disable_mc_generic,
	.clear_vfta = &ixgbe_clear_vfta_82598,
	.set_vfta = &ixgbe_set_vfta_82598,
	.fc_enable = &ixgbe_fc_enable_82598,
	.set_fw_drv_ver = NULL,
	.acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
	.release_swfw_sync = &ixgbe_release_swfw_sync,
	.get_thermal_sensor_data = NULL,
	.init_thermal_sensor_thresh = NULL,
	.prot_autoc_read = &prot_autoc_read_generic,
	.prot_autoc_write = &prot_autoc_write_generic,
	.enable_rx = &ixgbe_enable_rx_generic,
	.disable_rx = &ixgbe_disable_rx_generic,
};

static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
	.init_params = &ixgbe_init_eeprom_params_generic,
	.read = &ixgbe_read_eerd_generic,
	.write = &ixgbe_write_eeprom_generic,
	.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic,
	.read_buffer = &ixgbe_read_eerd_buffer_generic,
	.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
	.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
	.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};

static struct ixgbe_phy_operations phy_ops_82598 = {
	.identify = &ixgbe_identify_phy_generic,
	.identify_sfp = &ixgbe_identify_module_generic,
	.init = &ixgbe_init_phy_ops_82598,
	.reset = &ixgbe_reset_phy_generic,
	.read_reg = &ixgbe_read_phy_reg_generic,
	.write_reg = &ixgbe_write_phy_reg_generic,
	.read_reg_mdi = &ixgbe_read_phy_reg_mdi,
	.write_reg_mdi = &ixgbe_write_phy_reg_mdi,
	.setup_link = &ixgbe_setup_phy_link_generic,
	.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
	.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598,
	.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598,
	.check_overtemp = &ixgbe_tn_check_overtemp,
};

struct ixgbe_info ixgbe_82598_info = {
	.mac = ixgbe_mac_82598EB,
	.get_invariants = &ixgbe_get_invariants_82598,
	.mac_ops = &mac_ops_82598,
	.eeprom_ops = &eeprom_ops_82598,
	.phy_ops = &phy_ops_82598,
	.mvals = ixgbe_mvals_8259X,
};