v6.8
   1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
   2/*
   3 * Copyright(c) 2015, 2016 Intel Corporation.
   4 */
   5
   6#include <linux/firmware.h>
   7
   8#include "hfi.h"
   9#include "efivar.h"
  10#include "eprom.h"
  11
  12#define DEFAULT_PLATFORM_CONFIG_NAME "hfi1_platform.dat"
  13
  14static int validate_scratch_checksum(struct hfi1_devdata *dd)
  15{
  16	u64 checksum = 0, temp_scratch = 0;
  17	int i, j, version;
  18
  19	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
  20	version = (temp_scratch & BITMAP_VERSION_SMASK) >> BITMAP_VERSION_SHIFT;
  21
  22	/* Prevent power on default of all zeroes from passing checksum */
  23	if (!version) {
  24		dd_dev_err(dd, "%s: Config bitmap uninitialized\n", __func__);
  25		dd_dev_err(dd,
  26			   "%s: Please update your BIOS to support active channels\n",
  27			   __func__);
  28		return 0;
  29	}
  30
  31	/*
  32	 * ASIC scratch 0 only contains the checksum and bitmap version as
  33	 * fields of interest, both of which are handled separately from the
  34	 * loop below, so skip it
  35	 */
  36	checksum += version;
  37	for (i = 1; i < ASIC_NUM_SCRATCH; i++) {
  38		temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH + (8 * i));
  39		for (j = sizeof(u64); j != 0; j -= 2) {
  40			checksum += (temp_scratch & 0xFFFF);
  41			temp_scratch >>= 16;
  42		}
  43	}
  44
  45	while (checksum >> 16)
  46		checksum = (checksum & CHECKSUM_MASK) + (checksum >> 16);
  47
  48	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
  49	temp_scratch &= CHECKSUM_SMASK;
  50	temp_scratch >>= CHECKSUM_SHIFT;
  51
  52	if (checksum + temp_scratch == 0xFFFF)
  53		return 1;
  54
  55	dd_dev_err(dd, "%s: Configuration bitmap corrupted\n", __func__);
  56	return 0;
  57}
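/*
 * Illustrative sketch, not part of the driver: the loop above sums the
 * scratch registers as 16-bit words, and the while loop performs an
 * end-around-carry fold so the 64-bit sum fits in 16 bits; the stored
 * CHECKSUM field is expected to be the one's complement of that fold,
 * hence the test against 0xFFFF. Assuming CHECKSUM_MASK is the low 16
 * bits, the fold in isolation looks like this:
 */
static inline u16 example_fold_to_16(u64 sum)
{
	/* e.g. example_fold_to_16(0x1FFFE) folds to 0xFFFF */
	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);
	return (u16)sum;
}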
  58
  59static void save_platform_config_fields(struct hfi1_devdata *dd)
  60{
  61	struct hfi1_pportdata *ppd = dd->pport;
  62	u64 temp_scratch = 0, temp_dest = 0;
  63
  64	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH_1);
  65
  66	temp_dest = temp_scratch &
  67		    (dd->hfi1_id ? PORT1_PORT_TYPE_SMASK :
  68		     PORT0_PORT_TYPE_SMASK);
  69	ppd->port_type = temp_dest >>
  70			 (dd->hfi1_id ? PORT1_PORT_TYPE_SHIFT :
  71			  PORT0_PORT_TYPE_SHIFT);
  72
  73	temp_dest = temp_scratch &
  74		    (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SMASK :
  75		     PORT0_LOCAL_ATTEN_SMASK);
  76	ppd->local_atten = temp_dest >>
  77			   (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SHIFT :
  78			    PORT0_LOCAL_ATTEN_SHIFT);
  79
  80	temp_dest = temp_scratch &
  81		    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SMASK :
  82		     PORT0_REMOTE_ATTEN_SMASK);
  83	ppd->remote_atten = temp_dest >>
  84			    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SHIFT :
  85			     PORT0_REMOTE_ATTEN_SHIFT);
  86
  87	temp_dest = temp_scratch &
  88		    (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SMASK :
  89		     PORT0_DEFAULT_ATTEN_SMASK);
  90	ppd->default_atten = temp_dest >>
  91			     (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SHIFT :
  92			      PORT0_DEFAULT_ATTEN_SHIFT);
  93
  94	temp_scratch = read_csr(dd, dd->hfi1_id ? ASIC_CFG_SCRATCH_3 :
  95				ASIC_CFG_SCRATCH_2);
  96
  97	ppd->tx_preset_eq = (temp_scratch & TX_EQ_SMASK) >> TX_EQ_SHIFT;
  98	ppd->tx_preset_noeq = (temp_scratch & TX_NO_EQ_SMASK) >> TX_NO_EQ_SHIFT;
  99	ppd->rx_preset = (temp_scratch & RX_SMASK) >> RX_SHIFT;
 100
 101	ppd->max_power_class = (temp_scratch & QSFP_MAX_POWER_SMASK) >>
 102				QSFP_MAX_POWER_SHIFT;
 103
 104	ppd->config_from_scratch = true;
 105}
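/*
 * Illustrative sketch, not part of the driver: every field above is
 * extracted with the same mask-then-shift pattern, picking the PORT0_*
 * or PORT1_* constants based on dd->hfi1_id. Assuming each SMASK/SHIFT
 * pair describes one contiguous bit field, the pattern generalizes to:
 */
static inline u64 example_scratch_field(u64 scratch, u64 smask, u32 shift)
{
	/* e.g. example_scratch_field(s, PORT0_PORT_TYPE_SMASK, PORT0_PORT_TYPE_SHIFT) */
	return (scratch & smask) >> shift;
}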
 106
 107void get_platform_config(struct hfi1_devdata *dd)
 108{
 109	int ret = 0;
 110	u8 *temp_platform_config = NULL;
 111	u32 esize;
 112	const struct firmware *platform_config_file = NULL;
 113
 114	if (is_integrated(dd)) {
 115		if (validate_scratch_checksum(dd)) {
 116			save_platform_config_fields(dd);
 117			return;
 118		}
 119	} else {
 120		ret = eprom_read_platform_config(dd,
 121						 (void **)&temp_platform_config,
 122						 &esize);
 123		if (!ret) {
 124			/* success */
 125			dd->platform_config.data = temp_platform_config;
 126			dd->platform_config.size = esize;
 127			return;
 128		}
 129	}
 130	dd_dev_err(dd,
 131		   "%s: Failed to get platform config, falling back to sub-optimal default file\n",
 132		   __func__);
 133
 134	ret = request_firmware(&platform_config_file,
 135			       DEFAULT_PLATFORM_CONFIG_NAME,
 136			       &dd->pcidev->dev);
 137	if (ret) {
 138		dd_dev_err(dd,
 139			   "%s: No default platform config file found\n",
 140			   __func__);
 141		return;
 142	}
 143
 144	/*
 145	 * Allocate separate memory block to store data and free firmware
 146	 * structure. This allows free_platform_config to treat EPROM and
 147	 * fallback configs in the same manner.
 148	 */
 149	dd->platform_config.data = kmemdup(platform_config_file->data,
 150					   platform_config_file->size,
 151					   GFP_KERNEL);
 152	dd->platform_config.size = platform_config_file->size;
 153	release_firmware(platform_config_file);
 154}
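/*
 * Illustrative sketch, not part of the driver: the fallback path above is
 * the usual request_firmware()/kmemdup()/release_firmware() idiom, so the
 * config blob outlives the struct firmware and can later be kfree()'d by
 * free_platform_config() just like an EPROM read. With "example.dat" as a
 * placeholder name, the bare idiom is:
 *
 *	const struct firmware *fw;
 *	u8 *blob = NULL;
 *	size_t size = 0;
 *
 *	if (!request_firmware(&fw, "example.dat", &pdev->dev)) {
 *		blob = kmemdup(fw->data, fw->size, GFP_KERNEL);
 *		size = blob ? fw->size : 0;
 *		release_firmware(fw);
 *	}
 */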
 155
 156void free_platform_config(struct hfi1_devdata *dd)
 157{
 158	/* Release memory allocated for eprom or fallback file read. */
 159	kfree(dd->platform_config.data);
 160	dd->platform_config.data = NULL;
 161}
 162
 163void get_port_type(struct hfi1_pportdata *ppd)
 164{
 165	int ret;
 166	u32 temp;
 167
 168	ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
 169					PORT_TABLE_PORT_TYPE, &temp,
 170					4);
 171	if (ret) {
 172		ppd->port_type = PORT_TYPE_UNKNOWN;
 173		return;
 174	}
 175	ppd->port_type = temp;
 176}
 177
 178int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
 179{
 180	u8 tx_ctrl_byte = on ? 0x0 : 0xF;
 181	int ret = 0;
 182
 183	ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS,
 184			 &tx_ctrl_byte, 1);
 185	/* we expected 1, so consider 0 an error */
 186	if (ret == 0)
 187		ret = -EIO;
 188	else if (ret == 1)
 189		ret = 0;
 190	return ret;
 191}
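/*
 * Usage note: tx_ctrl_byte is written to the module's TX disable register,
 * where (per the QSFP spec, as assumed here) a set bit disables the
 * corresponding TX lane. So set_qsfp_tx(ppd, 0) writes 0xF to quiesce all
 * four lanes and set_qsfp_tx(ppd, 1) writes 0x0 to re-enable them;
 * tune_active_qsfp() below brackets its reprogramming of the module with
 * exactly this pair of calls.
 */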
 192
 193static int qual_power(struct hfi1_pportdata *ppd)
 194{
 195	u32 cable_power_class = 0, power_class_max = 0;
 196	u8 *cache = ppd->qsfp_info.cache;
 197	int ret = 0;
 198
 199	ret = get_platform_config_field(
 200		ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
 201		SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, &power_class_max, 4);
 202	if (ret)
 203		return ret;
 204
 205	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
 206
 207	if (cable_power_class > power_class_max)
 208		ppd->offline_disabled_reason =
 209			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);
 210
 211	if (ppd->offline_disabled_reason ==
 212			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) {
 213		dd_dev_err(
 214			ppd->dd,
 215			"%s: Port disabled due to system power restrictions\n",
 216			__func__);
 217		ret = -EPERM;
 218	}
 219	return ret;
 220}
 221
 222static int qual_bitrate(struct hfi1_pportdata *ppd)
 223{
 224	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
 225	u8 *cache = ppd->qsfp_info.cache;
 226
 227	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G) &&
 228	    cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64)
 229		ppd->offline_disabled_reason =
 230			   HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);
 231
 232	if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G) &&
 233	    cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D)
 234		ppd->offline_disabled_reason =
 235			   HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);
 236
 237	if (ppd->offline_disabled_reason ==
 238			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) {
 239		dd_dev_err(
 240			ppd->dd,
 241			"%s: Cable failed bitrate check, disabling port\n",
 242			__func__);
 243		return -EPERM;
 244	}
 245	return 0;
 246}
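/*
 * A note on the magic numbers: the nominal bit rate bytes are read
 * straight out of the QSFP memory map, where (per SFF-8636, as assumed
 * here) QSFP_NOM_BIT_RATE_250_OFFS is in units of 250 Mbps and
 * QSFP_NOM_BIT_RATE_100_OFFS is in units of 100 Mbps, so:
 *
 *	0x64 = 100  =>  100 * 250 Mbps = 25 Gbps    (25G check)
 *	0x7D = 125  =>  125 * 100 Mbps = 12.5 Gbps  (12.5G check)
 *
 * A cable advertising less than the enabled link speed trips the
 * LINKSPEED_POLICY linkdown reason above.
 */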
 247
 248static int set_qsfp_high_power(struct hfi1_pportdata *ppd)
 249{
 250	u8 cable_power_class = 0, power_ctrl_byte = 0;
 251	u8 *cache = ppd->qsfp_info.cache;
 252	int ret;
 253
 254	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
 255
 256	if (cable_power_class > QSFP_POWER_CLASS_1) {
 257		power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS];
 258
 259		power_ctrl_byte |= 1;
 260		power_ctrl_byte &= ~(0x2);
 261
 262		ret = qsfp_write(ppd, ppd->dd->hfi1_id,
 263				 QSFP_PWR_CTRL_BYTE_OFFS,
 264				 &power_ctrl_byte, 1);
 265		if (ret != 1)
 266			return -EIO;
 267
 268		if (cable_power_class > QSFP_POWER_CLASS_4) {
 269			power_ctrl_byte |= (1 << 2);
 270			ret = qsfp_write(ppd, ppd->dd->hfi1_id,
 271					 QSFP_PWR_CTRL_BYTE_OFFS,
 272					 &power_ctrl_byte, 1);
 273			if (ret != 1)
 274				return -EIO;
 275		}
 276
 277		/* SFF 8679 rev 1.7 LPMode Deassert time */
 278		msleep(300);
 279	}
 280	return 0;
 281}
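/*
 * Bit meanings assumed from the SFF-8636 power control byte: the first
 * write above sets bit 0 (power override) and clears bit 1 (the low-power
 * request) to take the module out of low power; for power classes 5-7 a
 * second write additionally sets bit 2 (high power class enable), and the
 * 300 ms sleep covers the spec'd LPMode deassert time before the module
 * is touched again.
 */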
 282
 283static void apply_rx_cdr(struct hfi1_pportdata *ppd,
 284			 u32 rx_preset_index,
 285			 u8 *cdr_ctrl_byte)
 286{
 287	u32 rx_preset;
 288	u8 *cache = ppd->qsfp_info.cache;
 289	int cable_power_class;
 290
 291	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) &&
 292	      (cache[QSFP_CDR_INFO_OFFS] & 0x40)))
 293		return;
 294
 295	/* RX CDR present, bypass supported */
 296	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
 297
 298	if (cable_power_class <= QSFP_POWER_CLASS_3) {
 299		/* Power class <= 3, ignore config & turn RX CDR on */
 300		*cdr_ctrl_byte |= 0xF;
 301		return;
 302	}
 303
 304	get_platform_config_field(
 305		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
 306		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
 307		&rx_preset, 4);
 308
 309	if (!rx_preset) {
 310		dd_dev_info(
 311			ppd->dd,
 312			"%s: RX_CDR_APPLY is set to disabled\n",
 313			__func__);
 314		return;
 315	}
 316	get_platform_config_field(
 317		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
 318		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR,
 319		&rx_preset, 4);
 320
 321	/* Expand cdr setting to all 4 lanes */
 322	rx_preset = (rx_preset | (rx_preset << 1) |
 323			(rx_preset << 2) | (rx_preset << 3));
 324
 325	if (rx_preset) {
 326		*cdr_ctrl_byte |= rx_preset;
 327	} else {
 328		*cdr_ctrl_byte &= rx_preset;
 329		/* Preserve current TX CDR status */
 330		*cdr_ctrl_byte |= (cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0);
 331	}
 332}
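/*
 * Illustrative example: RX_PRESET_TABLE_QSFP_RX_CDR is effectively a
 * single enable bit, and the shift-or expression above fans it out to one
 * bit per lane in the low (RX) nibble of the CDR control byte, leaving
 * the high nibble for the TX lanes:
 *
 *	rx_preset = 1:  1 | 1 << 1 | 1 << 2 | 1 << 3 = 0xF (CDR on, all lanes)
 *	rx_preset = 0:  the AND clears the byte and the cached TX nibble
 *	                (cache & 0xF0) is OR'd back in unchanged
 */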
 333
 334static void apply_tx_cdr(struct hfi1_pportdata *ppd,
 335			 u32 tx_preset_index,
 336			 u8 *cdr_ctrl_byte)
 337{
 338	u32 tx_preset;
 339	u8 *cache = ppd->qsfp_info.cache;
 340	int cable_power_class;
 341
 342	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) &&
 343	      (cache[QSFP_CDR_INFO_OFFS] & 0x80)))
 344		return;
 345
 346	/* TX CDR present, bypass supported */
 347	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
 348
 349	if (cable_power_class <= QSFP_POWER_CLASS_3) {
 350		/* Power class <= 3, ignore config & turn TX CDR on */
 351		*cdr_ctrl_byte |= 0xF0;
 352		return;
 353	}
 354
 355	get_platform_config_field(
 356		ppd->dd,
 357		PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
 358		TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, &tx_preset, 4);
 359
 360	if (!tx_preset) {
 361		dd_dev_info(
 362			ppd->dd,
 363			"%s: TX_CDR_APPLY is set to disabled\n",
 364			__func__);
 365		return;
 366	}
 367	get_platform_config_field(
 368		ppd->dd,
 369		PLATFORM_CONFIG_TX_PRESET_TABLE,
 370		tx_preset_index,
 371		TX_PRESET_TABLE_QSFP_TX_CDR, &tx_preset, 4);
 372
 373	/* Expand cdr setting to all 4 lanes */
 374	tx_preset = (tx_preset | (tx_preset << 1) |
 375			(tx_preset << 2) | (tx_preset << 3));
 376
 377	if (tx_preset)
 378		*cdr_ctrl_byte |= (tx_preset << 4);
 379	else
 380		/* Preserve current/determined RX CDR status */
 381		*cdr_ctrl_byte &= ((tx_preset << 4) | 0xF);
 382}
 383
 384static void apply_cdr_settings(
 385		struct hfi1_pportdata *ppd, u32 rx_preset_index,
 386		u32 tx_preset_index)
 387{
 388	u8 *cache = ppd->qsfp_info.cache;
 389	u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
 390
 391	apply_rx_cdr(ppd, rx_preset_index, &cdr_ctrl_byte);
 392
 393	apply_tx_cdr(ppd, tx_preset_index, &cdr_ctrl_byte);
 394
 395	qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
 396		   &cdr_ctrl_byte, 1);
 397}
 398
 399static void apply_tx_eq_auto(struct hfi1_pportdata *ppd)
 400{
 401	u8 *cache = ppd->qsfp_info.cache;
 402	u8 tx_eq;
 403
 404	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8))
 405		return;
 406	/* Disable adaptive TX EQ if present */
 407	tx_eq = cache[(128 * 3) + 241];
 408	tx_eq &= 0xF0;
 409	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1);
 410}
 411
 412static void apply_tx_eq_prog(struct hfi1_pportdata *ppd, u32 tx_preset_index)
 413{
 414	u8 *cache = ppd->qsfp_info.cache;
 415	u32 tx_preset;
 416	u8 tx_eq;
 417
 418	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4))
 419		return;
 420
 421	get_platform_config_field(
 422		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
 423		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
 424		&tx_preset, 4);
 425	if (!tx_preset) {
 426		dd_dev_info(
 427			ppd->dd,
 428			"%s: TX_EQ_APPLY is set to disabled\n",
 429			__func__);
 430		return;
 431	}
 432	get_platform_config_field(
 433			ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
 434			tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ,
 435			&tx_preset, 4);
 436
 437	if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) {
 438		dd_dev_info(
 439			ppd->dd,
 440			"%s: TX EQ %x unsupported\n",
 441			__func__, tx_preset);
 442
 443		dd_dev_info(
 444			ppd->dd,
 445			"%s: Applying EQ %x\n",
 446			__func__, cache[608] & 0xF0);
 447
 448		tx_preset = (cache[608] & 0xF0) >> 4;
 449	}
 450
 451	tx_eq = tx_preset | (tx_preset << 4);
 452	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1);
 453	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1);
 454}
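/*
 * Note: cache[(128 * 3) + 224] and cache[608] above are the same byte
 * (384 + 224 == 608); the two spellings reflect the driver's QSFP cache
 * layout, which (as assumed here) stores each upper page at a 128-byte
 * stride, whereas qsfp_write() addresses use a 256-byte page stride,
 * hence the (256 * 3) + 234/235 targets for the per-lane EQ writes.
 */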
 455
 456static void apply_rx_eq_emp(struct hfi1_pportdata *ppd, u32 rx_preset_index)
 457{
 458	u32 rx_preset;
 459	u8 rx_eq, *cache = ppd->qsfp_info.cache;
 460
 461	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2))
 462		return;
 463	get_platform_config_field(
 464			ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
 465			rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
 466			&rx_preset, 4);
 467
 468	if (!rx_preset) {
 469		dd_dev_info(
 470			ppd->dd,
 471			"%s: RX_EMP_APPLY is set to disabled\n",
 472			__func__);
 473		return;
 474	}
 475	get_platform_config_field(
 476		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
 477		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP,
 478		&rx_preset, 4);
 479
 480	if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) {
 481		dd_dev_info(
 482			ppd->dd,
 483			"%s: Requested RX EMP %x\n",
 484			__func__, rx_preset);
 485
 486		dd_dev_info(
 487			ppd->dd,
 488			"%s: Applying supported EMP %x\n",
 489			__func__, cache[608] & 0xF);
 490
 491		rx_preset = cache[608] & 0xF;
 492	}
 493
 494	rx_eq = rx_preset | (rx_preset << 4);
 495
 496	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1);
 497	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1);
 498}
 499
 500static void apply_eq_settings(struct hfi1_pportdata *ppd,
 501			      u32 rx_preset_index, u32 tx_preset_index)
 502{
 503	u8 *cache = ppd->qsfp_info.cache;
 504
 505	/* no point going on w/o a page 3 */
 506	if (cache[2] & 4) {
 507		dd_dev_info(ppd->dd,
 508			    "%s: Upper page 03 not present\n",
 509			    __func__);
 510		return;
 511	}
 512
 513	apply_tx_eq_auto(ppd);
 514
 515	apply_tx_eq_prog(ppd, tx_preset_index);
 516
 517	apply_rx_eq_emp(ppd, rx_preset_index);
 518}
 519
 520static void apply_rx_amplitude_settings(
 521		struct hfi1_pportdata *ppd, u32 rx_preset_index,
 522		u32 tx_preset_index)
 523{
 524	u32 rx_preset;
 525	u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache;
 526
 527	/* no point going on w/o a page 3 */
 528	if (cache[2] & 4) {
 529		dd_dev_info(ppd->dd,
 530			    "%s: Upper page 03 not present\n",
 531			    __func__);
 532		return;
 533	}
 534	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) {
 535		dd_dev_info(ppd->dd,
 536			    "%s: RX_AMP_APPLY is set to disabled\n",
 537			    __func__);
 538		return;
 539	}
 540
 541	get_platform_config_field(ppd->dd,
 542				  PLATFORM_CONFIG_RX_PRESET_TABLE,
 543				  rx_preset_index,
 544				  RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
 545				  &rx_preset, 4);
 546
 547	if (!rx_preset) {
 548		dd_dev_info(ppd->dd,
 549			    "%s: RX_AMP_APPLY is set to disabled\n",
 550			    __func__);
 551		return;
 552	}
 553	get_platform_config_field(ppd->dd,
 554				  PLATFORM_CONFIG_RX_PRESET_TABLE,
 555				  rx_preset_index,
 556				  RX_PRESET_TABLE_QSFP_RX_AMP,
 557				  &rx_preset, 4);
 558
 559	dd_dev_info(ppd->dd,
 560		    "%s: Requested RX AMP %x\n",
 561		    __func__,
 562		    rx_preset);
 563
 564	for (i = 0; i < 4; i++) {
 565		if (cache[(128 * 3) + 225] & (1 << i)) {
 566			preferred = i;
 567			if (preferred == rx_preset)
 568				break;
 569		}
 570	}
 571
 572	/*
 573	 * Verify that preferred RX amplitude is not just a
 574	 * fall through of the default
 575	 */
 576	if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) {
 577		dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n");
 578		return;
 579	}
 580
 581	dd_dev_info(ppd->dd,
 582		    "%s: Applying RX AMP %x\n", __func__, preferred);
 583
 584	rx_amp = preferred | (preferred << 4);
 585	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1);
 586	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1);
 587}
 588
 589#define OPA_INVALID_INDEX 0xFFF
 590
 591static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
 592			   u32 config_data, const char *message)
 593{
 594	u8 i;
 595	int ret;
 596
 597	for (i = 0; i < 4; i++) {
 598		ret = load_8051_config(ppd->dd, field_id, i, config_data);
 599		if (ret != HCMD_SUCCESS) {
 600			dd_dev_err(
 601				ppd->dd,
 602				"%s: %s for lane %u failed\n",
 603				message, __func__, i);
 604		}
 605	}
 606}
 607
 608/*
 609 * Return a special SerDes setting for low power AOC cables.  The power class
 610 * threshold and setting being used were all found by empirical testing.
 611 *
 612 * Summary of the logic:
 613 *
 614 * if (QSFP and QSFP_TYPE == AOC and QSFP_POWER_CLASS < 4)
 615 *     return 0xe
 616 * return 0; // leave at default
 617 */
 618static u8 aoc_low_power_setting(struct hfi1_pportdata *ppd)
 619{
 620	u8 *cache = ppd->qsfp_info.cache;
 621	int power_class;
 622
 623	/* QSFP only */
 624	if (ppd->port_type != PORT_TYPE_QSFP)
 625		return 0; /* leave at default */
 626
 627	/* active optical cables only */
 628	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
 629	case 0x0 ... 0x9: fallthrough;
 630	case 0xC: fallthrough;
 631	case 0xE:
 632		/* active AOC */
 633		power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
 634		if (power_class < QSFP_POWER_CLASS_4)
 635			return 0xe;
 636	}
 637	return 0; /* leave at default */
 638}
 639
 640static void apply_tunings(
 641		struct hfi1_pportdata *ppd, u32 tx_preset_index,
 642		u8 tuning_method, u32 total_atten, u8 limiting_active)
 643{
 644	int ret = 0;
 645	u32 config_data = 0, tx_preset = 0;
 646	u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
 647	u8 *cache = ppd->qsfp_info.cache;
 648
 649	/* Pass tuning method to 8051 */
 650	read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
 651			 &config_data);
 652	config_data &= ~(0xff << TUNING_METHOD_SHIFT);
 653	config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
 654	ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
 655			       config_data);
 656	if (ret != HCMD_SUCCESS)
 657		dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n",
 658			   __func__);
 659
 660	/* Set same channel loss for both TX and RX */
 661	config_data = 0 | (total_atten << 16) | (total_atten << 24);
 662	apply_tx_lanes(ppd, CHANNEL_LOSS_SETTINGS, config_data,
 663		       "Setting channel loss");
 664
 665	/* Inform 8051 of cable capabilities */
 666	if (ppd->qsfp_info.cache_valid) {
 667		external_device_config =
 668			((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) |
 669			((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) |
 670			((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) |
 671			(cache[QSFP_EQ_INFO_OFFS] & 0x4);
 672		ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
 673				       GENERAL_CONFIG, &config_data);
 674		/* Clear, then set the external device config field */
 675		config_data &= ~(u32)0xFF;
 676		config_data |= external_device_config;
 677		ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
 678				       GENERAL_CONFIG, config_data);
 679		if (ret != HCMD_SUCCESS)
 680			dd_dev_err(ppd->dd,
 681				   "%s: Failed set ext device config params\n",
 682				   __func__);
 683	}
 684
 685	if (tx_preset_index == OPA_INVALID_INDEX) {
 686		if (ppd->port_type == PORT_TYPE_QSFP && limiting_active)
 687			dd_dev_err(ppd->dd, "%s: Invalid Tx preset index\n",
 688				   __func__);
 689		return;
 690	}
 691
 692	/* Following for limiting active channels only */
 693	get_platform_config_field(
 694		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
 695		TX_PRESET_TABLE_PRECUR, &tx_preset, 4);
 696	precur = tx_preset;
 697
 698	get_platform_config_field(
 699		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
 700		tx_preset_index, TX_PRESET_TABLE_ATTN, &tx_preset, 4);
 701	attn = tx_preset;
 702
 703	get_platform_config_field(
 704		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
 705		tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
 706	postcur = tx_preset;
 707
 708	/*
 709	 * NOTES:
 710	 * o The aoc_low_power_setting is applied to all lanes even
 711	 *   though only lane 0's value is examined by the firmware.
 712	 * o A lingering low power setting after a cable swap does
 713	 *   not occur.  On cable unplug the 8051 is reset and
 714	 *   restarted on cable insert.  This resets all settings to
 715	 *   their default, erasing any previous low power setting.
 716	 */
 717	config_data = precur | (attn << 8) | (postcur << 16) |
 718			(aoc_low_power_setting(ppd) << 24);
 719
 720	apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
 721		       "Applying TX settings");
 722}
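/*
 * Illustrative example: the final config_data word packs one byte per TX
 * parameter before apply_tx_lanes() fans it out to all four lanes:
 *
 *	bits  7:0   precursor
 *	bits 15:8   attenuation
 *	bits 23:16  postcursor
 *	bits 31:24  aoc_low_power_setting() result (0 or 0xe)
 *
 * e.g. precur = 2, attn = 5, postcur = 3 on a low-power AOC packs to
 * 0x0e030502.
 */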
 723
 724/* Must be holding the QSFP i2c resource */
 725static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
 726			    u32 *ptr_rx_preset, u32 *ptr_total_atten)
 727{
 728	int ret;
 729	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
 730	u8 *cache = ppd->qsfp_info.cache;
 731
 732	ppd->qsfp_info.limiting_active = 1;
 733
 734	ret = set_qsfp_tx(ppd, 0);
 735	if (ret)
 736		return ret;
 737
 738	ret = qual_power(ppd);
 739	if (ret)
 740		return ret;
 741
 742	ret = qual_bitrate(ppd);
 743	if (ret)
 744		return ret;
 745
 746	/*
 747	 * We'll change the QSFP memory contents from here on out, thus we set a
 748	 * flag here to remind ourselves to reset the QSFP module. This prevents
 749	 * reuse of stale settings established in our previous pass through.
 750	 */
 751	if (ppd->qsfp_info.reset_needed) {
 752		ret = reset_qsfp(ppd);
 753		if (ret)
 754			return ret;
 755		refresh_qsfp_cache(ppd, &ppd->qsfp_info);
 756	} else {
 757		ppd->qsfp_info.reset_needed = 1;
 758	}
 759
 760	ret = set_qsfp_high_power(ppd);
 761	if (ret)
 762		return ret;
 763
 764	if (cache[QSFP_EQ_INFO_OFFS] & 0x4) {
 765		ret = get_platform_config_field(
 766			ppd->dd,
 767			PLATFORM_CONFIG_PORT_TABLE, 0,
 768			PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
 769			ptr_tx_preset, 4);
 770		if (ret) {
 771			*ptr_tx_preset = OPA_INVALID_INDEX;
 772			return ret;
 773		}
 774	} else {
 775		ret = get_platform_config_field(
 776			ppd->dd,
 777			PLATFORM_CONFIG_PORT_TABLE, 0,
 778			PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
 779			ptr_tx_preset, 4);
 780		if (ret) {
 781			*ptr_tx_preset = OPA_INVALID_INDEX;
 782			return ret;
 783		}
 784	}
 785
 786	ret = get_platform_config_field(
 787		ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
 788		PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4);
 789	if (ret) {
 790		*ptr_rx_preset = OPA_INVALID_INDEX;
 791		return ret;
 792	}
 793
 794	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
 795		get_platform_config_field(
 796			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
 797			PORT_TABLE_LOCAL_ATTEN_25G, ptr_total_atten, 4);
 798	else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G))
 799		get_platform_config_field(
 800			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
 801			PORT_TABLE_LOCAL_ATTEN_12G, ptr_total_atten, 4);
 802
 803	apply_cdr_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
 804
 805	apply_eq_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
 806
 807	apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
 808
 809	ret = set_qsfp_tx(ppd, 1);
 810
 811	return ret;
 812}
 813
 814static int tune_qsfp(struct hfi1_pportdata *ppd,
 815		     u32 *ptr_tx_preset, u32 *ptr_rx_preset,
 816		     u8 *ptr_tuning_method, u32 *ptr_total_atten)
 817{
 818	u32 cable_atten = 0, remote_atten = 0, platform_atten = 0;
 819	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
 820	int ret = 0;
 821	u8 *cache = ppd->qsfp_info.cache;
 822
 823	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
 824	case 0xA ... 0xB:
 825		ret = get_platform_config_field(
 826			ppd->dd,
 827			PLATFORM_CONFIG_PORT_TABLE, 0,
 828			PORT_TABLE_LOCAL_ATTEN_25G,
 829			&platform_atten, 4);
 830		if (ret)
 831			return ret;
 832
 833		if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
 834			cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS];
 835		else if ((lss & OPA_LINK_SPEED_12_5G) &&
 836			 (lse & OPA_LINK_SPEED_12_5G))
 837			cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS];
 838
 839		/* Fallback to configured attenuation if cable memory is bad */
 840		if (cable_atten == 0 || cable_atten > 36) {
 841			ret = get_platform_config_field(
 842				ppd->dd,
 843				PLATFORM_CONFIG_SYSTEM_TABLE, 0,
 844				SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
 845				&cable_atten, 4);
 846			if (ret)
 847				return ret;
 848		}
 849
 850		ret = get_platform_config_field(
 851			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
 852			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
 853		if (ret)
 854			return ret;
 855
 856		*ptr_total_atten = platform_atten + cable_atten + remote_atten;
 857
 858		*ptr_tuning_method = OPA_PASSIVE_TUNING;
 859		break;
 860	case 0x0 ... 0x9: fallthrough;
 861	case 0xC: fallthrough;
 862	case 0xE:
 863		ret = tune_active_qsfp(ppd, ptr_tx_preset, ptr_rx_preset,
 864				       ptr_total_atten);
 865		if (ret)
 866			return ret;
 867
 868		*ptr_tuning_method = OPA_ACTIVE_TUNING;
 869		break;
 870	case 0xD: fallthrough;
 871	case 0xF:
 872	default:
 873		dd_dev_warn(ppd->dd, "%s: Unknown/unsupported cable\n",
 874			    __func__);
 875		break;
 876	}
 877	return ret;
 878}
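/*
 * Nibble values assumed from the SFF-8636 device technology field: the
 * switch above keys on the upper nibble of QSFP_MOD_TECH_OFFS. 0xA-0xB
 * (passive copper) take the attenuation-based passive tuning path;
 * 0x0-0x9, 0xC and 0xE (optical or limiting active) go through
 * tune_active_qsfp(); and 0xD/0xF fall through to the unsupported-cable
 * warning.
 */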
 879
 880/*
 881 * This function communicates its success or failure via ppd->driver_link_ready
 882 * Thus, it depends on its association with start_link(...) which checks
 883 * driver_link_ready before proceeding with the link negotiation and
 884 * initialization process.
 885 */
 886void tune_serdes(struct hfi1_pportdata *ppd)
 887{
 888	int ret = 0;
 889	u32 total_atten = 0;
 890	u32 remote_atten = 0, platform_atten = 0;
 891	u32 rx_preset_index, tx_preset_index;
 892	u8 tuning_method = 0, limiting_active = 0;
 893	struct hfi1_devdata *dd = ppd->dd;
 894
 895	rx_preset_index = OPA_INVALID_INDEX;
 896	tx_preset_index = OPA_INVALID_INDEX;
 897
 898	/* the link defaults to enabled */
 899	ppd->link_enabled = 1;
 900	/* the driver link ready state defaults to not ready */
 901	ppd->driver_link_ready = 0;
 902	ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
 903
 904	/* Skip the tuning for testing (loopback != none) and simulations */
 905	if (loopback != LOOPBACK_NONE ||
 906	    ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
 907		ppd->driver_link_ready = 1;
 908
 909		if (qsfp_mod_present(ppd)) {
 910			ret = acquire_chip_resource(ppd->dd,
 911						    qsfp_resource(ppd->dd),
 912						    QSFP_WAIT);
 913			if (ret) {
 914				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
 915					   __func__, (int)ppd->dd->hfi1_id);
 916				goto bail;
 917			}
 918
 919			refresh_qsfp_cache(ppd, &ppd->qsfp_info);
 920			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
 921		}
 922
 923		return;
 924	}
 925
 926	switch (ppd->port_type) {
 927	case PORT_TYPE_DISCONNECTED:
 928		ppd->offline_disabled_reason =
 929			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED);
 930		dd_dev_warn(dd, "%s: Port disconnected, disabling port\n",
 931			    __func__);
 932		goto bail;
 933	case PORT_TYPE_FIXED:
 934		/* platform_atten, remote_atten pre-zeroed to catch error */
 935		get_platform_config_field(
 936			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
 937			PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4);
 938
 939		get_platform_config_field(
 940			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
 941			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
 942
 943		total_atten = platform_atten + remote_atten;
 944
 945		tuning_method = OPA_PASSIVE_TUNING;
 946		break;
 947	case PORT_TYPE_VARIABLE:
 948		if (qsfp_mod_present(ppd)) {
 949			/*
 950			 * platform_atten, remote_atten pre-zeroed to
 951			 * catch error
 952			 */
 953			get_platform_config_field(
 954				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
 955				PORT_TABLE_LOCAL_ATTEN_25G,
 956				&platform_atten, 4);
 957
 958			get_platform_config_field(
 959				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
 960				PORT_TABLE_REMOTE_ATTEN_25G,
 961				&remote_atten, 4);
 962
 963			total_atten = platform_atten + remote_atten;
 964
 965			tuning_method = OPA_PASSIVE_TUNING;
 966		} else {
 967			ppd->offline_disabled_reason =
 968			     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG);
 969			goto bail;
 970		}
 971		break;
 972	case PORT_TYPE_QSFP:
 973		if (qsfp_mod_present(ppd)) {
 974			ret = acquire_chip_resource(ppd->dd,
 975						    qsfp_resource(ppd->dd),
 976						    QSFP_WAIT);
 977			if (ret) {
 978				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
 979					   __func__, (int)ppd->dd->hfi1_id);
 980				goto bail;
 981			}
 982			refresh_qsfp_cache(ppd, &ppd->qsfp_info);
 983
 984			if (ppd->qsfp_info.cache_valid) {
 985				ret = tune_qsfp(ppd,
 986						&tx_preset_index,
 987						&rx_preset_index,
 988						&tuning_method,
 989						&total_atten);
 990
 991				/*
 992				 * We may have modified the QSFP memory, so
 993				 * update the cache to reflect the changes
 994				 */
 995				refresh_qsfp_cache(ppd, &ppd->qsfp_info);
 996				limiting_active =
 997						ppd->qsfp_info.limiting_active;
 998			} else {
 999				dd_dev_err(dd,
1000					   "%s: Reading QSFP memory failed\n",
1001					   __func__);
1002				ret = -EINVAL; /* a fail indication */
1003			}
1004			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
1005			if (ret)
1006				goto bail;
1007		} else {
1008			ppd->offline_disabled_reason =
1009			   HFI1_ODR_MASK(
1010				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
1011			goto bail;
1012		}
1013		break;
1014	default:
1015		dd_dev_warn(ppd->dd, "%s: Unknown port type\n", __func__);
1016		ppd->port_type = PORT_TYPE_UNKNOWN;
1017		tuning_method = OPA_UNKNOWN_TUNING;
1018		total_atten = 0;
1019		limiting_active = 0;
1020		tx_preset_index = OPA_INVALID_INDEX;
1021		break;
1022	}
1023
1024	if (ppd->offline_disabled_reason ==
1025			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
1026		apply_tunings(ppd, tx_preset_index, tuning_method,
1027			      total_atten, limiting_active);
1028
1029	if (!ret)
1030		ppd->driver_link_ready = 1;
1031
1032	return;
1033bail:
1034	ppd->driver_link_ready = 0;
1035}
v4.10.11
 
   1/*
   2 * Copyright(c) 2015, 2016 Intel Corporation.
   3 *
   4 * This file is provided under a dual BSD/GPLv2 license.  When using or
   5 * redistributing this file, you may do so under either license.
   6 *
   7 * GPL LICENSE SUMMARY
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of version 2 of the GNU General Public License as
  11 * published by the Free Software Foundation.
  12 *
  13 * This program is distributed in the hope that it will be useful, but
  14 * WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  16 * General Public License for more details.
  17 *
  18 * BSD LICENSE
  19 *
  20 * Redistribution and use in source and binary forms, with or without
  21 * modification, are permitted provided that the following conditions
  22 * are met:
  23 *
  24 *  - Redistributions of source code must retain the above copyright
  25 *    notice, this list of conditions and the following disclaimer.
  26 *  - Redistributions in binary form must reproduce the above copyright
  27 *    notice, this list of conditions and the following disclaimer in
  28 *    the documentation and/or other materials provided with the
  29 *    distribution.
  30 *  - Neither the name of Intel Corporation nor the names of its
  31 *    contributors may be used to endorse or promote products derived
  32 *    from this software without specific prior written permission.
  33 *
  34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  45 *
  46 */
  47
  48#include "hfi.h"
  49#include "efivar.h"
  50#include "eprom.h"
  51
  52static int validate_scratch_checksum(struct hfi1_devdata *dd)
  53{
  54	u64 checksum = 0, temp_scratch = 0;
  55	int i, j, version;
  56
  57	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
  58	version = (temp_scratch & BITMAP_VERSION_SMASK) >> BITMAP_VERSION_SHIFT;
  59
  60	/* Prevent power on default of all zeroes from passing checksum */
  61	if (!version)
  62		return 0;
  63
  64	/*
  65	 * ASIC scratch 0 only contains the checksum and bitmap version as
  66	 * fields of interest, both of which are handled separately from the
  67	 * loop below, so skip it
  68	 */
  69	checksum += version;
  70	for (i = 1; i < ASIC_NUM_SCRATCH; i++) {
  71		temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH + (8 * i));
  72		for (j = sizeof(u64); j != 0; j -= 2) {
  73			checksum += (temp_scratch & 0xFFFF);
  74			temp_scratch >>= 16;
  75		}
  76	}
  77
  78	while (checksum >> 16)
  79		checksum = (checksum & CHECKSUM_MASK) + (checksum >> 16);
  80
  81	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
  82	temp_scratch &= CHECKSUM_SMASK;
  83	temp_scratch >>= CHECKSUM_SHIFT;
  84
  85	if (checksum + temp_scratch == 0xFFFF)
  86		return 1;
  87	return 0;
  88}
  89
  90static void save_platform_config_fields(struct hfi1_devdata *dd)
  91{
  92	struct hfi1_pportdata *ppd = dd->pport;
  93	u64 temp_scratch = 0, temp_dest = 0;
  94
  95	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH_1);
  96
  97	temp_dest = temp_scratch &
  98		    (dd->hfi1_id ? PORT1_PORT_TYPE_SMASK :
  99		     PORT0_PORT_TYPE_SMASK);
 100	ppd->port_type = temp_dest >>
 101			 (dd->hfi1_id ? PORT1_PORT_TYPE_SHIFT :
 102			  PORT0_PORT_TYPE_SHIFT);
 103
 104	temp_dest = temp_scratch &
 105		    (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SMASK :
 106		     PORT0_LOCAL_ATTEN_SMASK);
 107	ppd->local_atten = temp_dest >>
 108			   (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SHIFT :
 109			    PORT0_LOCAL_ATTEN_SHIFT);
 110
 111	temp_dest = temp_scratch &
 112		    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SMASK :
 113		     PORT0_REMOTE_ATTEN_SMASK);
 114	ppd->remote_atten = temp_dest >>
 115			    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SHIFT :
 116			     PORT0_REMOTE_ATTEN_SHIFT);
 117
 118	temp_dest = temp_scratch &
 119		    (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SMASK :
 120		     PORT0_DEFAULT_ATTEN_SMASK);
 121	ppd->default_atten = temp_dest >>
 122			     (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SHIFT :
 123			      PORT0_DEFAULT_ATTEN_SHIFT);
 124
 125	temp_scratch = read_csr(dd, dd->hfi1_id ? ASIC_CFG_SCRATCH_3 :
 126				ASIC_CFG_SCRATCH_2);
 127
 128	ppd->tx_preset_eq = (temp_scratch & TX_EQ_SMASK) >> TX_EQ_SHIFT;
 129	ppd->tx_preset_noeq = (temp_scratch & TX_NO_EQ_SMASK) >> TX_NO_EQ_SHIFT;
 130	ppd->rx_preset = (temp_scratch & RX_SMASK) >> RX_SHIFT;
 131
 132	ppd->max_power_class = (temp_scratch & QSFP_MAX_POWER_SMASK) >>
 133				QSFP_MAX_POWER_SHIFT;
 134}
 135
 136void get_platform_config(struct hfi1_devdata *dd)
 137{
 138	int ret = 0;
 139	unsigned long size = 0;
 140	u8 *temp_platform_config = NULL;
 141	u32 esize;
 142
 143	if (is_integrated(dd)) {
 144		if (validate_scratch_checksum(dd)) {
 145			save_platform_config_fields(dd);
 146			return;
 147		}
 148		dd_dev_err(dd, "%s: Config bitmap corrupted/uninitialized\n",
 149			   __func__);
 150		dd_dev_err(dd,
 151			   "%s: Please update your BIOS to support active channels\n",
 152			   __func__);
 153	} else {
 154		ret = eprom_read_platform_config(dd,
 155						 (void **)&temp_platform_config,
 156						 &esize);
 157		if (!ret) {
 158			/* success */
 159			dd->platform_config.data = temp_platform_config;
 160			dd->platform_config.size = esize;
 161			return;
 162		}
 163		/* fail, try EFI variable */
 164
 165		ret = read_hfi1_efi_var(dd, "configuration", &size,
 166					(void **)&temp_platform_config);
 167		if (!ret) {
 168			dd->platform_config.data = temp_platform_config;
 169			dd->platform_config.size = size;
 170			return;
 171		}
 172	}
 173	dd_dev_err(dd,
 174		   "%s: Failed to get platform config, falling back to sub-optimal default file\n",
 175		   __func__);
 176	/* fall back to request firmware */
 177	platform_config_load = 1;
 178}
 179
 180void free_platform_config(struct hfi1_devdata *dd)
 181{
 182	if (!platform_config_load) {
 183		/*
 184		 * was loaded from EFI or the EPROM, release memory
 185		 * allocated by read_efi_var/eprom_read_platform_config
 186		 */
 187		kfree(dd->platform_config.data);
 188	}
 189	/*
 190	 * else do nothing, dispose_firmware will release
 191	 * struct firmware platform_config on driver exit
 192	 */
 193}
 194
 195void get_port_type(struct hfi1_pportdata *ppd)
 196{
 197	int ret;
 198	u32 temp;
 199
 200	ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
 201					PORT_TABLE_PORT_TYPE, &temp,
 202					4);
 203	if (ret) {
 204		ppd->port_type = PORT_TYPE_UNKNOWN;
 205		return;
 206	}
 207	ppd->port_type = temp;
 208}
 209
 210int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
 211{
 212	u8 tx_ctrl_byte = on ? 0x0 : 0xF;
 213	int ret = 0;
 214
 215	ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS,
 216			 &tx_ctrl_byte, 1);
 217	/* we expected 1, so consider 0 an error */
 218	if (ret == 0)
 219		ret = -EIO;
 220	else if (ret == 1)
 221		ret = 0;
 222	return ret;
 223}
 224
 225static int qual_power(struct hfi1_pportdata *ppd)
 226{
 227	u32 cable_power_class = 0, power_class_max = 0;
 228	u8 *cache = ppd->qsfp_info.cache;
 229	int ret = 0;
 230
 231	ret = get_platform_config_field(
 232		ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
 233		SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, &power_class_max, 4);
 234	if (ret)
 235		return ret;
 236
 237	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
 238
 239	if (cable_power_class > power_class_max)
 240		ppd->offline_disabled_reason =
 241			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);
 242
 243	if (ppd->offline_disabled_reason ==
 244			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) {
 245		dd_dev_info(
 246			ppd->dd,
 247			"%s: Port disabled due to system power restrictions\n",
 248			__func__);
 249		ret = -EPERM;
 250	}
 251	return ret;
 252}
 253
 254static int qual_bitrate(struct hfi1_pportdata *ppd)
 255{
 256	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
 257	u8 *cache = ppd->qsfp_info.cache;
 258
 259	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G) &&
 260	    cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64)
 261		ppd->offline_disabled_reason =
 262			   HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);
 263
 264	if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G) &&
 265	    cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D)
 266		ppd->offline_disabled_reason =
 267			   HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);
 268
 269	if (ppd->offline_disabled_reason ==
 270			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) {
 271		dd_dev_info(
 272			ppd->dd,
 273			"%s: Cable failed bitrate check, disabling port\n",
 274			__func__);
 275		return -EPERM;
 276	}
 277	return 0;
 278}
 279
 280static int set_qsfp_high_power(struct hfi1_pportdata *ppd)
 281{
 282	u8 cable_power_class = 0, power_ctrl_byte = 0;
 283	u8 *cache = ppd->qsfp_info.cache;
 284	int ret;
 285
 286	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
 287
 288	if (cable_power_class > QSFP_POWER_CLASS_1) {
 289		power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS];
 290
 291		power_ctrl_byte |= 1;
 292		power_ctrl_byte &= ~(0x2);
 293
 294		ret = qsfp_write(ppd, ppd->dd->hfi1_id,
 295				 QSFP_PWR_CTRL_BYTE_OFFS,
 296				 &power_ctrl_byte, 1);
 297		if (ret != 1)
 298			return -EIO;
 299
 300		if (cable_power_class > QSFP_POWER_CLASS_4) {
 301			power_ctrl_byte |= (1 << 2);
 302			ret = qsfp_write(ppd, ppd->dd->hfi1_id,
 303					 QSFP_PWR_CTRL_BYTE_OFFS,
 304					 &power_ctrl_byte, 1);
 305			if (ret != 1)
 306				return -EIO;
 307		}
 308
 309		/* SFF 8679 rev 1.7 LPMode Deassert time */
 310		msleep(300);
 311	}
 312	return 0;
 313}
 314
 315static void apply_rx_cdr(struct hfi1_pportdata *ppd,
 316			 u32 rx_preset_index,
 317			 u8 *cdr_ctrl_byte)
 318{
 319	u32 rx_preset;
 320	u8 *cache = ppd->qsfp_info.cache;
 321	int cable_power_class;
 322
 323	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) &&
 324	      (cache[QSFP_CDR_INFO_OFFS] & 0x40)))
 325		return;
 326
 327	/* RX CDR present, bypass supported */
 328	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
 329
 330	if (cable_power_class <= QSFP_POWER_CLASS_3) {
 331		/* Power class <= 3, ignore config & turn RX CDR on */
 332		*cdr_ctrl_byte |= 0xF;
 333		return;
 334	}
 335
 336	get_platform_config_field(
 337		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
 338		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
 339		&rx_preset, 4);
 340
 341	if (!rx_preset) {
 342		dd_dev_info(
 343			ppd->dd,
 344			"%s: RX_CDR_APPLY is set to disabled\n",
 345			__func__);
 346		return;
 347	}
 348	get_platform_config_field(
 349		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
 350		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR,
 351		&rx_preset, 4);
 352
 353	/* Expand cdr setting to all 4 lanes */
 354	rx_preset = (rx_preset | (rx_preset << 1) |
 355			(rx_preset << 2) | (rx_preset << 3));
 356
 357	if (rx_preset) {
 358		*cdr_ctrl_byte |= rx_preset;
 359	} else {
 360		*cdr_ctrl_byte &= rx_preset;
 361		/* Preserve current TX CDR status */
 362		*cdr_ctrl_byte |= (cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0);
 363	}
 364}
 365
 366static void apply_tx_cdr(struct hfi1_pportdata *ppd,
 367			 u32 tx_preset_index,
 368			 u8 *cdr_ctrl_byte)
 369{
 370	u32 tx_preset;
 371	u8 *cache = ppd->qsfp_info.cache;
 372	int cable_power_class;
 373
 374	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) &&
 375	      (cache[QSFP_CDR_INFO_OFFS] & 0x80)))
 376		return;
 377
 378	/* TX CDR present, bypass supported */
 379	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
 380
 381	if (cable_power_class <= QSFP_POWER_CLASS_3) {
 382		/* Power class <= 3, ignore config & turn TX CDR on */
 383		*cdr_ctrl_byte |= 0xF0;
 384		return;
 385	}
 386
 387	get_platform_config_field(
 388		ppd->dd,
 389		PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
 390		TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, &tx_preset, 4);
 391
 392	if (!tx_preset) {
 393		dd_dev_info(
 394			ppd->dd,
 395			"%s: TX_CDR_APPLY is set to disabled\n",
 396			__func__);
 397		return;
 398	}
 399	get_platform_config_field(
 400		ppd->dd,
 401		PLATFORM_CONFIG_TX_PRESET_TABLE,
 402		tx_preset_index,
 403		TX_PRESET_TABLE_QSFP_TX_CDR, &tx_preset, 4);
 404
 405	/* Expand cdr setting to all 4 lanes */
 406	tx_preset = (tx_preset | (tx_preset << 1) |
 407			(tx_preset << 2) | (tx_preset << 3));
 408
 409	if (tx_preset)
 410		*cdr_ctrl_byte |= (tx_preset << 4);
 411	else
 412		/* Preserve current/determined RX CDR status */
 413		*cdr_ctrl_byte &= ((tx_preset << 4) | 0xF);
 414}
 415
 416static void apply_cdr_settings(
 417		struct hfi1_pportdata *ppd, u32 rx_preset_index,
 418		u32 tx_preset_index)
 419{
 420	u8 *cache = ppd->qsfp_info.cache;
 421	u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
 422
 423	apply_rx_cdr(ppd, rx_preset_index, &cdr_ctrl_byte);
 424
 425	apply_tx_cdr(ppd, tx_preset_index, &cdr_ctrl_byte);
 426
 427	qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
 428		   &cdr_ctrl_byte, 1);
 429}
 430
 431static void apply_tx_eq_auto(struct hfi1_pportdata *ppd)
 432{
 433	u8 *cache = ppd->qsfp_info.cache;
 434	u8 tx_eq;
 435
 436	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8))
 437		return;
 438	/* Disable adaptive TX EQ if present */
 439	tx_eq = cache[(128 * 3) + 241];
 440	tx_eq &= 0xF0;
 441	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1);
 442}
 443
 444static void apply_tx_eq_prog(struct hfi1_pportdata *ppd, u32 tx_preset_index)
 445{
 446	u8 *cache = ppd->qsfp_info.cache;
 447	u32 tx_preset;
 448	u8 tx_eq;
 449
 450	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4))
 451		return;
 452
 453	get_platform_config_field(
 454		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
 455		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
 456		&tx_preset, 4);
 457	if (!tx_preset) {
 458		dd_dev_info(
 459			ppd->dd,
 460			"%s: TX_EQ_APPLY is set to disabled\n",
 461			__func__);
 462		return;
 463	}
 464	get_platform_config_field(
 465			ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
 466			tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ,
 467			&tx_preset, 4);
 468
 469	if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) {
 470		dd_dev_info(
 471			ppd->dd,
 472			"%s: TX EQ %x unsupported\n",
 473			__func__, tx_preset);
 474
 475		dd_dev_info(
 476			ppd->dd,
 477			"%s: Applying EQ %x\n",
 478			__func__, cache[608] & 0xF0);
 479
 480		tx_preset = (cache[608] & 0xF0) >> 4;
 481	}
 482
 483	tx_eq = tx_preset | (tx_preset << 4);
 484	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1);
 485	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1);
 486}
 487
 488static void apply_rx_eq_emp(struct hfi1_pportdata *ppd, u32 rx_preset_index)
 489{
 490	u32 rx_preset;
 491	u8 rx_eq, *cache = ppd->qsfp_info.cache;
 492
 493	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2))
 494		return;
 495	get_platform_config_field(
 496			ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
 497			rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
 498			&rx_preset, 4);
 499
 500	if (!rx_preset) {
 501		dd_dev_info(
 502			ppd->dd,
 503			"%s: RX_EMP_APPLY is set to disabled\n",
 504			__func__);
 505		return;
 506	}
 507	get_platform_config_field(
 508		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
 509		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP,
 510		&rx_preset, 4);
 511
 512	if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) {
 513		dd_dev_info(
 514			ppd->dd,
 515			"%s: Requested RX EMP %x\n",
 516			__func__, rx_preset);
 517
 518		dd_dev_info(
 519			ppd->dd,
 520			"%s: Applying supported EMP %x\n",
 521			__func__, cache[608] & 0xF);
 522
 523		rx_preset = cache[608] & 0xF;
 524	}
 525
 526	rx_eq = rx_preset | (rx_preset << 4);
 527
 528	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1);
 529	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1);
 530}
 531
 532static void apply_eq_settings(struct hfi1_pportdata *ppd,
 533			      u32 rx_preset_index, u32 tx_preset_index)
 534{
 535	u8 *cache = ppd->qsfp_info.cache;
 536
 537	/* no point going on w/o a page 3 */
 538	if (cache[2] & 4) {
 539		dd_dev_info(ppd->dd,
 540			    "%s: Upper page 03 not present\n",
 541			    __func__);
 542		return;
 543	}
 544
 545	apply_tx_eq_auto(ppd);
 546
 547	apply_tx_eq_prog(ppd, tx_preset_index);
 548
 549	apply_rx_eq_emp(ppd, rx_preset_index);
 550}
 551
 552static void apply_rx_amplitude_settings(
 553		struct hfi1_pportdata *ppd, u32 rx_preset_index,
 554		u32 tx_preset_index)
 555{
 556	u32 rx_preset;
 557	u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache;
 558
 559	/* no point going on w/o a page 3 */
 560	if (cache[2] & 4) {
 561		dd_dev_info(ppd->dd,
 562			    "%s: Upper page 03 not present\n",
 563			    __func__);
 564		return;
 565	}
 566	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) {
 567		dd_dev_info(ppd->dd,
 568			    "%s: RX_AMP_APPLY is set to disabled\n",
 569			    __func__);
 570		return;
 571	}
 572
 573	get_platform_config_field(ppd->dd,
 574				  PLATFORM_CONFIG_RX_PRESET_TABLE,
 575				  rx_preset_index,
 576				  RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
 577				  &rx_preset, 4);
 578
 579	if (!rx_preset) {
 580		dd_dev_info(ppd->dd,
 581			    "%s: RX_AMP_APPLY is set to disabled\n",
 582			    __func__);
 583		return;
 584	}
 585	get_platform_config_field(ppd->dd,
 586				  PLATFORM_CONFIG_RX_PRESET_TABLE,
 587				  rx_preset_index,
 588				  RX_PRESET_TABLE_QSFP_RX_AMP,
 589				  &rx_preset, 4);
 590
 591	dd_dev_info(ppd->dd,
 592		    "%s: Requested RX AMP %x\n",
 593		    __func__,
 594		    rx_preset);
 595
 596	for (i = 0; i < 4; i++) {
 597		if (cache[(128 * 3) + 225] & (1 << i)) {
 598			preferred = i;
 599			if (preferred == rx_preset)
 600				break;
 601		}
 602	}
 603
 604	/*
 605	 * Verify that preferred RX amplitude is not just a
 606	 * fall through of the default
 607	 */
 608	if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) {
 609		dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n");
 610		return;
 611	}
 612
 613	dd_dev_info(ppd->dd,
 614		    "%s: Applying RX AMP %x\n", __func__, preferred);
 615
 616	rx_amp = preferred | (preferred << 4);
 617	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1);
 618	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1);
 619}
 620
 621#define OPA_INVALID_INDEX 0xFFF
 622
 623static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
 624			   u32 config_data, const char *message)
 625{
 626	u8 i;
 627	int ret = HCMD_SUCCESS;
 628
 629	for (i = 0; i < 4; i++) {
 630		ret = load_8051_config(ppd->dd, field_id, i, config_data);
 631		if (ret != HCMD_SUCCESS) {
 632			dd_dev_err(
 633				ppd->dd,
 634				"%s: %s for lane %u failed\n",
 635				message, __func__, i);
 636		}
 637	}
 638}
 639
 640/*
 641 * Return a special SerDes setting for low power AOC cables.  The power class
 642 * threshold and setting being used were all found by empirical testing.
 643 *
 644 * Summary of the logic:
 645 *
 646 * if (QSFP and QSFP_TYPE == AOC and QSFP_POWER_CLASS < 4)
 647 *     return 0xe
 648 * return 0; // leave at default
 649 */
 650static u8 aoc_low_power_setting(struct hfi1_pportdata *ppd)
 651{
 652	u8 *cache = ppd->qsfp_info.cache;
 653	int power_class;
 654
 655	/* QSFP only */
 656	if (ppd->port_type != PORT_TYPE_QSFP)
 657		return 0; /* leave at default */
 658
 659	/* active optical cables only */
 660	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
 661	case 0x0 ... 0x9: /* fallthrough */
 662	case 0xC: /* fallthrough */
 663	case 0xE:
 664		/* active AOC */
 665		power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
 666		if (power_class < QSFP_POWER_CLASS_4)
 667			return 0xe;
 668	}
 669	return 0; /* leave at default */
 670}
 671
 672static void apply_tunings(
 673		struct hfi1_pportdata *ppd, u32 tx_preset_index,
 674		u8 tuning_method, u32 total_atten, u8 limiting_active)
 675{
 676	int ret = 0;
 677	u32 config_data = 0, tx_preset = 0;
 678	u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
 679	u8 *cache = ppd->qsfp_info.cache;
 680
 681	/* Pass tuning method to 8051 */
 682	read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
 683			 &config_data);
 684	config_data &= ~(0xff << TUNING_METHOD_SHIFT);
 685	config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
 686	ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
 687			       config_data);
 688	if (ret != HCMD_SUCCESS)
 689		dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n",
 690			   __func__);
 691
 692	/* Set same channel loss for both TX and RX */
 693	config_data = 0 | (total_atten << 16) | (total_atten << 24);
 694	apply_tx_lanes(ppd, CHANNEL_LOSS_SETTINGS, config_data,
 695		       "Setting channel loss");
 696
 697	/* Inform 8051 of cable capabilities */
 698	if (ppd->qsfp_info.cache_valid) {
 699		external_device_config =
 700			((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) |
 701			((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) |
 702			((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) |
 703			(cache[QSFP_EQ_INFO_OFFS] & 0x4);
 704		ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
 705				       GENERAL_CONFIG, &config_data);
 706		/* Clear, then set the external device config field */
 707		config_data &= ~(u32)0xFF;
 708		config_data |= external_device_config;
 709		ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
 710				       GENERAL_CONFIG, config_data);
 711		if (ret != HCMD_SUCCESS)
 712			dd_dev_info(ppd->dd,
 713				    "%s: Failed set ext device config params\n",
 714				    __func__);
 715	}
 716
 717	if (tx_preset_index == OPA_INVALID_INDEX) {
 718		if (ppd->port_type == PORT_TYPE_QSFP && limiting_active)
 719			dd_dev_info(ppd->dd, "%s: Invalid Tx preset index\n",
 720				    __func__);
 721		return;
 722	}
 723
 724	/* Following for limiting active channels only */
 725	get_platform_config_field(
 726		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
 727		TX_PRESET_TABLE_PRECUR, &tx_preset, 4);
 728	precur = tx_preset;
 729
 730	get_platform_config_field(
 731		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
 732		tx_preset_index, TX_PRESET_TABLE_ATTN, &tx_preset, 4);
 733	attn = tx_preset;
 734
 735	get_platform_config_field(
 736		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
 737		tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
 738	postcur = tx_preset;
 739
 740	/*
 741	 * NOTES:
 742	 * o The aoc_low_power_setting is applied to all lanes even
 743	 *   though only lane 0's value is examined by the firmware.
 744	 * o A lingering low power setting after a cable swap does
 745	 *   not occur.  On cable unplug the 8051 is reset and
 746	 *   restarted on cable insert.  This resets all settings to
 747	 *   their default, erasing any previous low power setting.
 748	 */
 749	config_data = precur | (attn << 8) | (postcur << 16) |
 750			(aoc_low_power_setting(ppd) << 24);
 751
 752	apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
 753		       "Applying TX settings");
 754}
 755
 756/* Must be holding the QSFP i2c resource */
 757static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
 758			    u32 *ptr_rx_preset, u32 *ptr_total_atten)
 759{
 760	int ret;
 761	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
 762	u8 *cache = ppd->qsfp_info.cache;
 763
 764	ppd->qsfp_info.limiting_active = 1;
 765
 766	ret = set_qsfp_tx(ppd, 0);
 767	if (ret)
 768		return ret;
 769
 770	ret = qual_power(ppd);
 771	if (ret)
 772		return ret;
 773
 774	ret = qual_bitrate(ppd);
 775	if (ret)
 776		return ret;
 777
 778	/*
 779	 * We'll change the QSFP memory contents from here on out, thus we set a
 780	 * flag here to remind ourselves to reset the QSFP module. This prevents
 781	 * reuse of stale settings established in our previous pass through.
 782	 */
 783	if (ppd->qsfp_info.reset_needed) {
 784		reset_qsfp(ppd);
 785		refresh_qsfp_cache(ppd, &ppd->qsfp_info);
 786	} else {
 787		ppd->qsfp_info.reset_needed = 1;
 788	}
 789
 790	ret = set_qsfp_high_power(ppd);
 791	if (ret)
 792		return ret;
 793
 794	if (cache[QSFP_EQ_INFO_OFFS] & 0x4) {
 795		ret = get_platform_config_field(
 796			ppd->dd,
 797			PLATFORM_CONFIG_PORT_TABLE, 0,
 798			PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
 799			ptr_tx_preset, 4);
 800		if (ret) {
 801			*ptr_tx_preset = OPA_INVALID_INDEX;
 802			return ret;
 803		}
 804	} else {
 805		ret = get_platform_config_field(
 806			ppd->dd,
 807			PLATFORM_CONFIG_PORT_TABLE, 0,
 808			PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
 809			ptr_tx_preset, 4);
 810		if (ret) {
 811			*ptr_tx_preset = OPA_INVALID_INDEX;
 812			return ret;
 813		}
 814	}
 815
 816	ret = get_platform_config_field(
 817		ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
 818		PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4);
 819	if (ret) {
 820		*ptr_rx_preset = OPA_INVALID_INDEX;
 821		return ret;
 822	}
 823
 824	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
 825		get_platform_config_field(
 826			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
 827			PORT_TABLE_LOCAL_ATTEN_25G, ptr_total_atten, 4);
 828	else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G))
 829		get_platform_config_field(
 830			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
 831			PORT_TABLE_LOCAL_ATTEN_12G, ptr_total_atten, 4);
 832
 833	apply_cdr_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
 834
 835	apply_eq_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
 836
 837	apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
 838
 839	ret = set_qsfp_tx(ppd, 1);
 840
 841	return ret;
 842}
 843
 844static int tune_qsfp(struct hfi1_pportdata *ppd,
 845		     u32 *ptr_tx_preset, u32 *ptr_rx_preset,
 846		     u8 *ptr_tuning_method, u32 *ptr_total_atten)
 847{
 848	u32 cable_atten = 0, remote_atten = 0, platform_atten = 0;
 849	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
 850	int ret = 0;
 851	u8 *cache = ppd->qsfp_info.cache;
 852
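    	/*
    	 * Dispatch on the upper nibble of the module technology byte:
    	 * 0xA-0xB are treated as passive copper (attenuation-based tuning),
    	 * 0x0-0x9, 0xC and 0xE as active/optical modules, and 0xD/0xF are
    	 * reported as unknown/unsupported.
    	 */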
 853	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
 854	case 0xA ... 0xB:
 855		ret = get_platform_config_field(
 856			ppd->dd,
 857			PLATFORM_CONFIG_PORT_TABLE, 0,
 858			PORT_TABLE_LOCAL_ATTEN_25G,
 859			&platform_atten, 4);
 860		if (ret)
 861			return ret;
 862
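    		/*
    		 * Use the cable's advertised attenuation for the fastest speed
    		 * that is both supported and enabled (25G preferred over 12.5G).
    		 */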
 863		if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
 864			cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS];
 865		else if ((lss & OPA_LINK_SPEED_12_5G) &&
 866			 (lse & OPA_LINK_SPEED_12_5G))
 867			cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS];
 868
 869		/* Fallback to configured attenuation if cable memory is bad */
 870		if (cable_atten == 0 || cable_atten > 36) {
 871			ret = get_platform_config_field(
 872				ppd->dd,
 873				PLATFORM_CONFIG_SYSTEM_TABLE, 0,
 874				SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
 875				&cable_atten, 4);
 876			if (ret)
 877				return ret;
 878		}
 879
 880		ret = get_platform_config_field(
 881			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
 882			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
 883		if (ret)
 884			return ret;
 885
 886		*ptr_total_atten = platform_atten + cable_atten + remote_atten;
 887
 888		*ptr_tuning_method = OPA_PASSIVE_TUNING;
 889		break;
 890	case 0x0 ... 0x9: /* fallthrough */
 891	case 0xC: /* fallthrough */
 892	case 0xE:
 893		ret = tune_active_qsfp(ppd, ptr_tx_preset, ptr_rx_preset,
 894				       ptr_total_atten);
 895		if (ret)
 896			return ret;
 897
 898		*ptr_tuning_method = OPA_ACTIVE_TUNING;
 899		break;
 900	case 0xD: /* fallthrough */
 901	case 0xF:
 902	default:
 903		dd_dev_info(ppd->dd, "%s: Unknown/unsupported cable\n",
 904			    __func__);
 905		break;
 906	}
 907	return ret;
 908}
 909
 910/*
 911 * This function communicates its success or failure via ppd->driver_link_ready.
 912 * Thus, it depends on its association with start_link(...), which checks
 913 * driver_link_ready before proceeding with the link negotiation and
 914 * initialization process.
 915 */
 916void tune_serdes(struct hfi1_pportdata *ppd)
 917{
 918	int ret = 0;
 919	u32 total_atten = 0;
 920	u32 remote_atten = 0, platform_atten = 0;
 921	u32 rx_preset_index, tx_preset_index;
 922	u8 tuning_method = 0, limiting_active = 0;
 923	struct hfi1_devdata *dd = ppd->dd;
 924
 925	rx_preset_index = OPA_INVALID_INDEX;
 926	tx_preset_index = OPA_INVALID_INDEX;
 927
 928	/* the link defaults to enabled */
 929	ppd->link_enabled = 1;
 930	/* the driver link ready state defaults to not ready */
 931	ppd->driver_link_ready = 0;
 932	ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
 933
 934	/* Skip the tuning for testing (loopback != none) and simulations */
 935	if (loopback != LOOPBACK_NONE ||
 936	    ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
 937		ppd->driver_link_ready = 1;
 938		return;
 939	}
 940
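    	/*
    	 * Per-port-type handling: fixed and variable ports take their total
    	 * attenuation directly from the platform configuration; QSFP ports
    	 * additionally require locking the i2c chain, refreshing the module
    	 * cache and, if it is valid, running tune_qsfp() above.
    	 */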
 941	switch (ppd->port_type) {
 942	case PORT_TYPE_DISCONNECTED:
 943		ppd->offline_disabled_reason =
 944			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED);
 945		dd_dev_info(dd, "%s: Port disconnected, disabling port\n",
 946			    __func__);
 947		goto bail;
 948	case PORT_TYPE_FIXED:
 949		/* platform_atten, remote_atten pre-zeroed to catch error */
 950		get_platform_config_field(
 951			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
 952			PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4);
 953
 954		get_platform_config_field(
 955			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
 956			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
 957
 958		total_atten = platform_atten + remote_atten;
 959
 960		tuning_method = OPA_PASSIVE_TUNING;
 961		break;
 962	case PORT_TYPE_VARIABLE:
 963		if (qsfp_mod_present(ppd)) {
 964			/*
 965			 * platform_atten, remote_atten pre-zeroed to
 966			 * catch error
 967			 */
 968			get_platform_config_field(
 969				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
 970				PORT_TABLE_LOCAL_ATTEN_25G,
 971				&platform_atten, 4);
 972
 973			get_platform_config_field(
 974				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
 975				PORT_TABLE_REMOTE_ATTEN_25G,
 976				&remote_atten, 4);
 977
 978			total_atten = platform_atten + remote_atten;
 979
 980			tuning_method = OPA_PASSIVE_TUNING;
 981		} else {
 982			ppd->offline_disabled_reason =
 983			     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG);
 984			goto bail;
 985		}
 986		break;
 987	case PORT_TYPE_QSFP:
 988		if (qsfp_mod_present(ppd)) {
 989			ret = acquire_chip_resource(ppd->dd,
 990						    qsfp_resource(ppd->dd),
 991						    QSFP_WAIT);
 992			if (ret) {
 993				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
 994					   __func__, (int)ppd->dd->hfi1_id);
 995				goto bail;
 996			}
 997			refresh_qsfp_cache(ppd, &ppd->qsfp_info);
 998
 999			if (ppd->qsfp_info.cache_valid) {
1000				ret = tune_qsfp(ppd,
1001						&tx_preset_index,
1002						&rx_preset_index,
1003						&tuning_method,
1004						&total_atten);
1005
1006				/*
1007				 * We may have modified the QSFP memory, so
1008				 * update the cache to reflect the changes
1009				 */
1010				refresh_qsfp_cache(ppd, &ppd->qsfp_info);
1011				limiting_active =
1012						ppd->qsfp_info.limiting_active;
1013			} else {
1014				dd_dev_err(dd,
1015					   "%s: Reading QSFP memory failed\n",
1016					   __func__);
1017				ret = -EINVAL; /* a fail indication */
1018			}
1019			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
1020			if (ret)
1021				goto bail;
1022		} else {
1023			ppd->offline_disabled_reason =
1024			   HFI1_ODR_MASK(
1025				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
1026			goto bail;
1027		}
1028		break;
1029	default:
1030		dd_dev_info(ppd->dd, "%s: Unknown port type\n", __func__);
1031		ppd->port_type = PORT_TYPE_UNKNOWN;
1032		tuning_method = OPA_UNKNOWN_TUNING;
1033		total_atten = 0;
1034		limiting_active = 0;
1035		tx_preset_index = OPA_INVALID_INDEX;
1036		break;
1037	}
1038
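    	/* Only apply the tunings if nothing above recorded a linkdown reason */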
1039	if (ppd->offline_disabled_reason ==
1040			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
1041		apply_tunings(ppd, tx_preset_index, tuning_method,
1042			      total_atten, limiting_active);
1043
1044	if (!ret)
1045		ppd->driver_link_ready = 1;
1046
1047	return;
1048bail:
1049	ppd->driver_link_ready = 0;
1050}