   1/*
   2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
   3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
   4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
   5 *
   6 * This software is available to you under a choice of one of two
   7 * licenses.  You may choose to be licensed under the terms of the GNU
   8 * General Public License (GPL) Version 2, available from the file
   9 * COPYING in the main directory of this source tree, or the
  10 * OpenIB.org BSD license below:
  11 *
  12 *     Redistribution and use in source and binary forms, with or
  13 *     without modification, are permitted provided that the following
  14 *     conditions are met:
  15 *
  16 *      - Redistributions of source code must retain the above
  17 *        copyright notice, this list of conditions and the following
  18 *        disclaimer.
  19 *
  20 *      - Redistributions in binary form must reproduce the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer in the documentation and/or other materials
  23 *        provided with the distribution.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32 * SOFTWARE.
  33 */
  34
  35#include <linux/etherdevice.h>
  36#include <linux/mlx4/cmd.h>
  37#include <linux/module.h>
  38#include <linux/cache.h>
  39
  40#include "fw.h"
  41#include "icm.h"
  42
  43enum {
  44	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
  45	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
  46	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
  47};
  48
  49extern void __buggy_use_of_MLX4_GET(void);
  50extern void __buggy_use_of_MLX4_PUT(void);
  51
  52static bool enable_qos;
  53module_param(enable_qos, bool, 0444);
  54MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
  55
  56#define MLX4_GET(dest, source, offset)				      \
  57	do {							      \
  58		void *__p = (char *) (source) + (offset);	      \
  59		switch (sizeof (dest)) {			      \
  60		case 1: (dest) = *(u8 *) __p;	    break;	      \
  61		case 2: (dest) = be16_to_cpup(__p); break;	      \
  62		case 4: (dest) = be32_to_cpup(__p); break;	      \
  63		case 8: (dest) = be64_to_cpup(__p); break;	      \
  64		default: __buggy_use_of_MLX4_GET();		      \
  65		}						      \
  66	} while (0)
  67
  68#define MLX4_PUT(dest, source, offset)				      \
  69	do {							      \
  70		void *__d = ((char *) (dest) + (offset));	      \
  71		switch (sizeof(source)) {			      \
  72		case 1: *(u8 *) __d = (source);		       break; \
  73		case 2:	*(__be16 *) __d = cpu_to_be16(source); break; \
  74		case 4:	*(__be32 *) __d = cpu_to_be32(source); break; \
  75		case 8:	*(__be64 *) __d = cpu_to_be64(source); break; \
  76		default: __buggy_use_of_MLX4_PUT();		      \
  77		}						      \
  78	} while (0)
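
/*
 * Illustrative usage sketch (not from the original flow): MLX4_GET and
 * MLX4_PUT pick the byte-swap helper from sizeof(dest)/sizeof(source),
 * so the local variable must match the width of the field at 'offset'.
 * Reading a one-byte field and writing a four-byte quota looks like:
 *
 *	u8  field;
 *	u32 size;
 *
 *	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
 *	size = dev->caps.num_qps;
 *	MLX4_PUT(outbox, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
 *
 * A variable whose size is not 1, 2, 4 or 8 bytes hits the default case
 * and causes a link failure via the __buggy_use_of_MLX4_* externs above.
 */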
  79
  80static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
  81{
  82	static const char *fname[] = {
  83		[ 0] = "RC transport",
  84		[ 1] = "UC transport",
  85		[ 2] = "UD transport",
  86		[ 3] = "XRC transport",
  87		[ 4] = "reliable multicast",
  88		[ 5] = "FCoIB support",
  89		[ 6] = "SRQ support",
  90		[ 7] = "IPoIB checksum offload",
  91		[ 8] = "P_Key violation counter",
  92		[ 9] = "Q_Key violation counter",
  93		[10] = "VMM",
  94		[12] = "DPDP",
  95		[15] = "Big LSO headers",
  96		[16] = "MW support",
  97		[17] = "APM support",
  98		[18] = "Atomic ops support",
  99		[19] = "Raw multicast support",
 100		[20] = "Address vector port checking support",
 101		[21] = "UD multicast support",
 102		[24] = "Demand paging support",
 103		[25] = "Router support",
 104		[30] = "IBoE support",
 105		[32] = "Unicast loopback support",
 106		[34] = "FCS header control",
 107		[38] = "Wake On LAN support",
 108		[40] = "UDP RSS support",
 109		[41] = "Unicast VEP steering support",
 110		[42] = "Multicast VEP steering support",
 111		[48] = "Counters support",
 112	};
 113	int i;
 114
 115	mlx4_dbg(dev, "DEV_CAP flags:\n");
 116	for (i = 0; i < ARRAY_SIZE(fname); ++i)
 117		if (fname[i] && (flags & (1LL << i)))
 118			mlx4_dbg(dev, "    %s\n", fname[i]);
 119}
 120
 121static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
 122{
 123	static const char * const fname[] = {
 124		[0] = "RSS support",
 125		[1] = "RSS Toeplitz Hash Function support",
 126		[2] = "RSS XOR Hash Function support"
 127	};
 128	int i;
 129
 130	for (i = 0; i < ARRAY_SIZE(fname); ++i)
 131		if (fname[i] && (flags & (1LL << i)))
 132			mlx4_dbg(dev, "    %s\n", fname[i]);
 133}
 134
 135int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
 136{
 137	struct mlx4_cmd_mailbox *mailbox;
 138	u32 *inbox;
 139	int err = 0;
 140
 141#define MOD_STAT_CFG_IN_SIZE		0x100
 142
 143#define MOD_STAT_CFG_PG_SZ_M_OFFSET	0x002
 144#define MOD_STAT_CFG_PG_SZ_OFFSET	0x003
 145
 146	mailbox = mlx4_alloc_cmd_mailbox(dev);
 147	if (IS_ERR(mailbox))
 148		return PTR_ERR(mailbox);
 149	inbox = mailbox->buf;
 150
 151	memset(inbox, 0, MOD_STAT_CFG_IN_SIZE);
 152
 153	MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
 154	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
 155
 156	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
 157			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 158
 159	mlx4_free_cmd_mailbox(dev, mailbox);
 160	return err;
 161}
 162
 163int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 164				struct mlx4_vhcr *vhcr,
 165				struct mlx4_cmd_mailbox *inbox,
 166				struct mlx4_cmd_mailbox *outbox,
 167				struct mlx4_cmd_info *cmd)
 168{
 169	u8	field;
 170	u32	size;
 171	int	err = 0;
 172
 173#define QUERY_FUNC_CAP_FLAGS_OFFSET		0x0
 174#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET		0x1
 175#define QUERY_FUNC_CAP_PF_BHVR_OFFSET		0x4
 176#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x10
 177#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x14
 178#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET		0x18
 179#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET		0x20
 180#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x24
 181#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x28
 182#define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
  183#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30
 184
 185#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
 186#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET		0xc
 187
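	/*
	 * op_modifier 0 returns the general function capabilities filled in
	 * below; op_modifier 1 returns the per-port fields (physical port
	 * number and eth properties) for the port passed in
	 * vhcr->in_modifier, which is how mlx4_QUERY_FUNC_CAP() below
	 * iterates over the ports.
	 */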
 188	if (vhcr->op_modifier == 1) {
 189		field = vhcr->in_modifier;
 190		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
 191
  192		field = 0; /* ensure the force-VLAN (fvl) bit is not set */
 193		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
 194	} else if (vhcr->op_modifier == 0) {
 195		field = 1 << 7; /* enable only ethernet interface */
 196		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
 197
 198		field = dev->caps.num_ports;
 199		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
 200
  201		size = 0; /* no PF behaviour is set for now */
 202		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
 203
 204		size = dev->caps.num_qps;
 205		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
 206
 207		size = dev->caps.num_srqs;
 208		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
 209
 210		size = dev->caps.num_cqs;
 211		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
 212
 213		size = dev->caps.num_eqs;
 214		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
 215
 216		size = dev->caps.reserved_eqs;
 217		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
 218
 219		size = dev->caps.num_mpts;
 220		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
 221
 222		size = dev->caps.num_mtts;
 223		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
 224
 225		size = dev->caps.num_mgms + dev->caps.num_amgms;
 226		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
 227
 228	} else
 229		err = -EINVAL;
 230
 231	return err;
 232}
 233
 234int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
 235{
 236	struct mlx4_cmd_mailbox *mailbox;
 237	u32			*outbox;
 238	u8			field;
 239	u32			size;
 240	int			i;
 241	int			err = 0;
 242
 243
 244	mailbox = mlx4_alloc_cmd_mailbox(dev);
 245	if (IS_ERR(mailbox))
 246		return PTR_ERR(mailbox);
 247
 248	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FUNC_CAP,
 249			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 250	if (err)
 251		goto out;
 252
 253	outbox = mailbox->buf;
 254
 255	MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
 256	if (!(field & (1 << 7))) {
  257		mlx4_err(dev, "The host doesn't support the eth interface\n");
 258		err = -EPROTONOSUPPORT;
 259		goto out;
 260	}
 261
 262	MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
 263	func_cap->num_ports = field;
 264
 265	MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
 266	func_cap->pf_context_behaviour = size;
 267
 268	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
 269	func_cap->qp_quota = size & 0xFFFFFF;
 270
 271	MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
 272	func_cap->srq_quota = size & 0xFFFFFF;
 273
 274	MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
 275	func_cap->cq_quota = size & 0xFFFFFF;
 276
 277	MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
 278	func_cap->max_eq = size & 0xFFFFFF;
 279
 280	MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
 281	func_cap->reserved_eq = size & 0xFFFFFF;
 282
 283	MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
 284	func_cap->mpt_quota = size & 0xFFFFFF;
 285
 286	MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
 287	func_cap->mtt_quota = size & 0xFFFFFF;
 288
 289	MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
 290	func_cap->mcg_quota = size & 0xFFFFFF;
 291
 292	for (i = 1; i <= func_cap->num_ports; ++i) {
 293		err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 1,
 294				   MLX4_CMD_QUERY_FUNC_CAP,
 295				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 296		if (err)
 297			goto out;
 298
 299		MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
 300		if (field & (1 << 7)) {
 301			mlx4_err(dev, "VLAN is enforced on this port\n");
 302			err = -EPROTONOSUPPORT;
 303			goto out;
 304		}
 305
 306		if (field & (1 << 6)) {
  307			mlx4_err(dev, "Force MAC is enabled on this port\n");
 308			err = -EPROTONOSUPPORT;
 309			goto out;
 310		}
 311
 312		MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
 313		func_cap->physical_port[i] = field;
 314	}
 315
 316	/* All other resources are allocated by the master, but we still report
 317	 * 'num' and 'reserved' capabilities as follows:
 318	 * - num remains the maximum resource index
 319	 * - 'num - reserved' is the total available objects of a resource, but
 320	 *   resource indices may be less than 'reserved'
 321	 * TODO: set per-resource quotas */
 322
 323out:
 324	mlx4_free_cmd_mailbox(dev, mailbox);
 325
 326	return err;
 327}
 328
 329int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 330{
 331	struct mlx4_cmd_mailbox *mailbox;
 332	u32 *outbox;
 333	u8 field;
 334	u32 field32, flags, ext_flags;
 335	u16 size;
 336	u16 stat_rate;
 337	int err;
 338	int i;
 339
 340#define QUERY_DEV_CAP_OUT_SIZE		       0x100
 341#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET		0x10
 342#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET		0x11
 343#define QUERY_DEV_CAP_RSVD_QP_OFFSET		0x12
 344#define QUERY_DEV_CAP_MAX_QP_OFFSET		0x13
 345#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET		0x14
 346#define QUERY_DEV_CAP_MAX_SRQ_OFFSET		0x15
 347#define QUERY_DEV_CAP_RSVD_EEC_OFFSET		0x16
 348#define QUERY_DEV_CAP_MAX_EEC_OFFSET		0x17
 349#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET		0x19
 350#define QUERY_DEV_CAP_RSVD_CQ_OFFSET		0x1a
 351#define QUERY_DEV_CAP_MAX_CQ_OFFSET		0x1b
 352#define QUERY_DEV_CAP_MAX_MPT_OFFSET		0x1d
 353#define QUERY_DEV_CAP_RSVD_EQ_OFFSET		0x1e
 354#define QUERY_DEV_CAP_MAX_EQ_OFFSET		0x1f
 355#define QUERY_DEV_CAP_RSVD_MTT_OFFSET		0x20
 356#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET		0x21
 357#define QUERY_DEV_CAP_RSVD_MRW_OFFSET		0x22
 358#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET	0x23
 359#define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
 360#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
 361#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
 362#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
 363#define QUERY_DEV_CAP_RSS_OFFSET		0x2e
 364#define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
 365#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
 366#define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
 367#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET		0x36
 368#define QUERY_DEV_CAP_VL_PORT_OFFSET		0x37
 369#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET		0x38
 370#define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
 371#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
 372#define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
 373#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET		0x40
 374#define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
 375#define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
 376#define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
 377#define QUERY_DEV_CAP_PAGE_SZ_OFFSET		0x4b
 378#define QUERY_DEV_CAP_BF_OFFSET			0x4c
 379#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET	0x4d
 380#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET	0x4e
 381#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET	0x4f
 382#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET		0x51
 383#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET	0x52
 384#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET		0x55
 385#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET	0x56
 386#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET		0x61
 387#define QUERY_DEV_CAP_RSVD_MCG_OFFSET		0x62
 388#define QUERY_DEV_CAP_MAX_MCG_OFFSET		0x63
 389#define QUERY_DEV_CAP_RSVD_PD_OFFSET		0x64
 390#define QUERY_DEV_CAP_MAX_PD_OFFSET		0x65
 391#define QUERY_DEV_CAP_RSVD_XRC_OFFSET		0x66
 392#define QUERY_DEV_CAP_MAX_XRC_OFFSET		0x67
 393#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET	0x68
 394#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
 395#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
 396#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
 397#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET	0x86
 398#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET	0x88
 399#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET	0x8a
 400#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET	0x8c
 401#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET	0x8e
 402#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
 403#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
 404#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
 405#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
 406#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
 407
 408	dev_cap->flags2 = 0;
 409	mailbox = mlx4_alloc_cmd_mailbox(dev);
 410	if (IS_ERR(mailbox))
 411		return PTR_ERR(mailbox);
 412	outbox = mailbox->buf;
 413
 414	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
 415			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 416	if (err)
 417		goto out;
 418
 419	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
 420	dev_cap->reserved_qps = 1 << (field & 0xf);
 421	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
 422	dev_cap->max_qps = 1 << (field & 0x1f);
 423	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
 424	dev_cap->reserved_srqs = 1 << (field >> 4);
 425	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
 426	dev_cap->max_srqs = 1 << (field & 0x1f);
 427	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
 428	dev_cap->max_cq_sz = 1 << field;
 429	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
 430	dev_cap->reserved_cqs = 1 << (field & 0xf);
 431	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
 432	dev_cap->max_cqs = 1 << (field & 0x1f);
 433	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
 434	dev_cap->max_mpts = 1 << (field & 0x3f);
 435	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
 436	dev_cap->reserved_eqs = field & 0xf;
 437	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
 438	dev_cap->max_eqs = 1 << (field & 0xf);
 439	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
 440	dev_cap->reserved_mtts = 1 << (field >> 4);
 441	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
 442	dev_cap->max_mrw_sz = 1 << field;
 443	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
 444	dev_cap->reserved_mrws = 1 << (field & 0xf);
 445	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
 446	dev_cap->max_mtt_seg = 1 << (field & 0x3f);
 447	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
 448	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
 449	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
 450	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
 451	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
 452	field &= 0x1f;
 453	if (!field)
 454		dev_cap->max_gso_sz = 0;
 455	else
 456		dev_cap->max_gso_sz = 1 << field;
 457
 458	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
 459	if (field & 0x20)
 460		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
 461	if (field & 0x10)
 462		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
 463	field &= 0xf;
 464	if (field) {
 465		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
 466		dev_cap->max_rss_tbl_sz = 1 << field;
 467	} else
 468		dev_cap->max_rss_tbl_sz = 0;
 469	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
 470	dev_cap->max_rdma_global = 1 << (field & 0x3f);
 471	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
 472	dev_cap->local_ca_ack_delay = field & 0x1f;
 473	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
 474	dev_cap->num_ports = field & 0xf;
 475	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
 476	dev_cap->max_msg_sz = 1 << (field & 0x1f);
 477	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
 478	dev_cap->stat_rate_support = stat_rate;
 479	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
 480	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
 481	dev_cap->flags = flags | (u64)ext_flags << 32;
 482	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
 483	dev_cap->reserved_uars = field >> 4;
 484	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
 485	dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
 486	MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
 487	dev_cap->min_page_sz = 1 << field;
 488
 489	MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
 490	if (field & 0x80) {
 491		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
 492		dev_cap->bf_reg_size = 1 << (field & 0x1f);
 493		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
 494		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
 495			field = 3;
 496		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
 497		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
 498			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
 499	} else {
 500		dev_cap->bf_reg_size = 0;
 501		mlx4_dbg(dev, "BlueFlame not available\n");
 502	}
 503
 504	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
 505	dev_cap->max_sq_sg = field;
 506	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
 507	dev_cap->max_sq_desc_sz = size;
 508
 509	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
 510	dev_cap->max_qp_per_mcg = 1 << field;
 511	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
 512	dev_cap->reserved_mgms = field & 0xf;
 513	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
 514	dev_cap->max_mcgs = 1 << field;
 515	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
 516	dev_cap->reserved_pds = field >> 4;
 517	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
 518	dev_cap->max_pds = 1 << (field & 0x3f);
 519	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
 520	dev_cap->reserved_xrcds = field >> 4;
  521	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
 522	dev_cap->max_xrcds = 1 << (field & 0x1f);
 523
 524	MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
 525	dev_cap->rdmarc_entry_sz = size;
 526	MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
 527	dev_cap->qpc_entry_sz = size;
 528	MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
 529	dev_cap->aux_entry_sz = size;
 530	MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
 531	dev_cap->altc_entry_sz = size;
 532	MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
 533	dev_cap->eqc_entry_sz = size;
 534	MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
 535	dev_cap->cqc_entry_sz = size;
 536	MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
 537	dev_cap->srq_entry_sz = size;
 538	MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
 539	dev_cap->cmpt_entry_sz = size;
 540	MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
 541	dev_cap->mtt_entry_sz = size;
 542	MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
 543	dev_cap->dmpt_entry_sz = size;
 544
 545	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
 546	dev_cap->max_srq_sz = 1 << field;
 547	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
 548	dev_cap->max_qp_sz = 1 << field;
 549	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
 550	dev_cap->resize_srq = field & 1;
 551	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
 552	dev_cap->max_rq_sg = field;
 553	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
 554	dev_cap->max_rq_desc_sz = size;
 555
 556	MLX4_GET(dev_cap->bmme_flags, outbox,
 557		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
 558	MLX4_GET(dev_cap->reserved_lkey, outbox,
 559		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
 560	MLX4_GET(dev_cap->max_icm_sz, outbox,
 561		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
 562	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
 563		MLX4_GET(dev_cap->max_counters, outbox,
 564			 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);
 565
 566	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
 567		for (i = 1; i <= dev_cap->num_ports; ++i) {
 568			MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
 569			dev_cap->max_vl[i]	   = field >> 4;
 570			MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
 571			dev_cap->ib_mtu[i]	   = field >> 4;
 572			dev_cap->max_port_width[i] = field & 0xf;
 573			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
 574			dev_cap->max_gids[i]	   = 1 << (field & 0xf);
 575			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
 576			dev_cap->max_pkeys[i]	   = 1 << (field & 0xf);
 577		}
 578	} else {
 579#define QUERY_PORT_SUPPORTED_TYPE_OFFSET	0x00
 580#define QUERY_PORT_MTU_OFFSET			0x01
 581#define QUERY_PORT_ETH_MTU_OFFSET		0x02
 582#define QUERY_PORT_WIDTH_OFFSET			0x06
 583#define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
 584#define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
 585#define QUERY_PORT_MAX_VL_OFFSET		0x0b
 586#define QUERY_PORT_MAC_OFFSET			0x10
 587#define QUERY_PORT_TRANS_VENDOR_OFFSET		0x18
 588#define QUERY_PORT_WAVELENGTH_OFFSET		0x1c
 589#define QUERY_PORT_TRANS_CODE_OFFSET		0x20
 590
 591		for (i = 1; i <= dev_cap->num_ports; ++i) {
 592			err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
 593					   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 594			if (err)
 595				goto out;
 596
 597			MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
 598			dev_cap->supported_port_types[i] = field & 3;
 599			dev_cap->suggested_type[i] = (field >> 3) & 1;
 600			dev_cap->default_sense[i] = (field >> 4) & 1;
 601			MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
 602			dev_cap->ib_mtu[i]	   = field & 0xf;
 603			MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
 604			dev_cap->max_port_width[i] = field & 0xf;
 605			MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
 606			dev_cap->max_gids[i]	   = 1 << (field >> 4);
 607			dev_cap->max_pkeys[i]	   = 1 << (field & 0xf);
 608			MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
 609			dev_cap->max_vl[i]	   = field & 0xf;
 610			MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
 611			dev_cap->log_max_macs[i]  = field & 0xf;
 612			dev_cap->log_max_vlans[i] = field >> 4;
 613			MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
 614			MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
 615			MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
 616			dev_cap->trans_type[i] = field32 >> 24;
 617			dev_cap->vendor_oui[i] = field32 & 0xffffff;
 618			MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
 619			MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
 620		}
 621	}
 622
 623	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
 624		 dev_cap->bmme_flags, dev_cap->reserved_lkey);
 625
 626	/*
 627	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
 628	 * we can't use any EQs whose doorbell falls on that page,
 629	 * even if the EQ itself isn't reserved.
 630	 */
 631	dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
 632				    dev_cap->reserved_eqs);
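	/*
	 * For example, with 8 reserved UARs the doorbells of EQs 0-31 fall
	 * on reserved pages, so reserved_eqs is raised to at least 32 here.
	 */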
 633
 634	mlx4_dbg(dev, "Max ICM size %lld MB\n",
 635		 (unsigned long long) dev_cap->max_icm_sz >> 20);
 636	mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
 637		 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
 638	mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
 639		 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
 640	mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
 641		 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
 642	mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
 643		 dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
 644	mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
 645		 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
 646	mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
 647		 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
 648	mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
  649		 dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
 650	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
 651		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
 652	mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
 653		 dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
 654		 dev_cap->max_port_width[1]);
 655	mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
 656		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
 657	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
 658		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
 659	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
 660	mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
 661	mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);
 662
 663	dump_dev_cap_flags(dev, dev_cap->flags);
 664	dump_dev_cap_flags2(dev, dev_cap->flags2);
 665
 666out:
 667	mlx4_free_cmd_mailbox(dev, mailbox);
 668	return err;
 669}
 670
 671int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
 672			       struct mlx4_vhcr *vhcr,
 673			       struct mlx4_cmd_mailbox *inbox,
 674			       struct mlx4_cmd_mailbox *outbox,
 675			       struct mlx4_cmd_info *cmd)
 676{
 677	int	err = 0;
 678	u8	field;
 679
 680	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
 681			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 682	if (err)
 683		return err;
 684
  685	/* For guests, report BlueFlame disabled */
 686	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
 687	field &= 0x7f;
 688	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
 689
 690	return 0;
 691}
 692
 693int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
 694			    struct mlx4_vhcr *vhcr,
 695			    struct mlx4_cmd_mailbox *inbox,
 696			    struct mlx4_cmd_mailbox *outbox,
 697			    struct mlx4_cmd_info *cmd)
 698{
 699	u64 def_mac;
 700	u8 port_type;
 701	int err;
 702
 703#define MLX4_PORT_SUPPORT_IB		(1 << 0)
 704#define MLX4_PORT_SUGGEST_TYPE		(1 << 3)
 705#define MLX4_PORT_DEFAULT_SENSE		(1 << 4)
 706#define MLX4_VF_PORT_ETH_ONLY_MASK	(0xff & ~MLX4_PORT_SUPPORT_IB & \
 707					 ~MLX4_PORT_SUGGEST_TYPE & \
 708					 ~MLX4_PORT_DEFAULT_SENSE)
 709
 710	err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
 711			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
 712			   MLX4_CMD_NATIVE);
 713
 714	if (!err && dev->caps.function != slave) {
 715		/* set slave default_mac address */
 716		MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
 717		def_mac += slave << 8;
 718		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
 719
 720		/* get port type - currently only eth is enabled */
 721		MLX4_GET(port_type, outbox->buf,
 722			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);
 723
 724		/* Allow only Eth port, no link sensing allowed */
 725		port_type &= MLX4_VF_PORT_ETH_ONLY_MASK;
 726
 727		/* check eth is enabled for this port */
 728		if (!(port_type & 2))
  729			mlx4_dbg(dev, "QUERY PORT: eth not supported by host\n");
 730
 731		MLX4_PUT(outbox->buf, port_type,
 732			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);
 733	}
 734
 735	return err;
 736}
 737
 738int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
 739{
 740	struct mlx4_cmd_mailbox *mailbox;
 741	struct mlx4_icm_iter iter;
 742	__be64 *pages;
 743	int lg;
 744	int nent = 0;
 745	int i;
 746	int err = 0;
 747	int ts = 0, tc = 0;
 748
 749	mailbox = mlx4_alloc_cmd_mailbox(dev);
 750	if (IS_ERR(mailbox))
 751		return PTR_ERR(mailbox);
 752	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
 753	pages = mailbox->buf;
 754
 755	for (mlx4_icm_first(icm, &iter);
 756	     !mlx4_icm_last(&iter);
 757	     mlx4_icm_next(&iter)) {
 758		/*
 759		 * We have to pass pages that are aligned to their
 760		 * size, so find the least significant 1 in the
 761		 * address or size and use that as our log2 size.
 762		 */
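		/*
		 * Worked example (illustrative): a 256 KB chunk at DMA
		 * address 0xa0000 gives ffs(0xa0000 | 0x40000) - 1 = 17,
		 * i.e. two 128 KB pages, while the same chunk at 0xc0000
		 * gives lg = 18 and a single 256 KB page.
		 */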
 763		lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
 764		if (lg < MLX4_ICM_PAGE_SHIFT) {
 765			mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
 766				   MLX4_ICM_PAGE_SIZE,
 767				   (unsigned long long) mlx4_icm_addr(&iter),
 768				   mlx4_icm_size(&iter));
 769			err = -EINVAL;
 770			goto out;
 771		}
 772
 773		for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
 774			if (virt != -1) {
 775				pages[nent * 2] = cpu_to_be64(virt);
 776				virt += 1 << lg;
 777			}
 778
 779			pages[nent * 2 + 1] =
 780				cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
 781					    (lg - MLX4_ICM_PAGE_SHIFT));
 782			ts += 1 << (lg - 10);
 783			++tc;
 784
 785			if (++nent == MLX4_MAILBOX_SIZE / 16) {
 786				err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
 787						MLX4_CMD_TIME_CLASS_B,
 788						MLX4_CMD_NATIVE);
 789				if (err)
 790					goto out;
 791				nent = 0;
 792			}
 793		}
 794	}
 795
 796	if (nent)
 797		err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
 798			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 799	if (err)
 800		goto out;
 801
 802	switch (op) {
 803	case MLX4_CMD_MAP_FA:
 804		mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
 805		break;
 806	case MLX4_CMD_MAP_ICM_AUX:
 807		mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
 808		break;
 809	case MLX4_CMD_MAP_ICM:
 810		mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
 811			  tc, ts, (unsigned long long) virt - (ts << 10));
 812		break;
 813	}
 814
 815out:
 816	mlx4_free_cmd_mailbox(dev, mailbox);
 817	return err;
 818}
 819
 820int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
 821{
 822	return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
 823}
 824
 825int mlx4_UNMAP_FA(struct mlx4_dev *dev)
 826{
 827	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
 828			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 829}
 830
 831
 832int mlx4_RUN_FW(struct mlx4_dev *dev)
 833{
 834	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
 835			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 836}
 837
 838int mlx4_QUERY_FW(struct mlx4_dev *dev)
 839{
 840	struct mlx4_fw  *fw  = &mlx4_priv(dev)->fw;
 841	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
 842	struct mlx4_cmd_mailbox *mailbox;
 843	u32 *outbox;
 844	int err = 0;
 845	u64 fw_ver;
 846	u16 cmd_if_rev;
 847	u8 lg;
 848
 849#define QUERY_FW_OUT_SIZE             0x100
 850#define QUERY_FW_VER_OFFSET            0x00
 851#define QUERY_FW_PPF_ID		       0x09
 852#define QUERY_FW_CMD_IF_REV_OFFSET     0x0a
 853#define QUERY_FW_MAX_CMD_OFFSET        0x0f
 854#define QUERY_FW_ERR_START_OFFSET      0x30
 855#define QUERY_FW_ERR_SIZE_OFFSET       0x38
 856#define QUERY_FW_ERR_BAR_OFFSET        0x3c
 857
 858#define QUERY_FW_SIZE_OFFSET           0x00
 859#define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
 860#define QUERY_FW_CLR_INT_BAR_OFFSET    0x28
 861
 862#define QUERY_FW_COMM_BASE_OFFSET      0x40
 863#define QUERY_FW_COMM_BAR_OFFSET       0x48
 864
 865	mailbox = mlx4_alloc_cmd_mailbox(dev);
 866	if (IS_ERR(mailbox))
 867		return PTR_ERR(mailbox);
 868	outbox = mailbox->buf;
 869
 870	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
 871			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 872	if (err)
 873		goto out;
 874
 875	MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
 876	/*
 877	 * FW subminor version is at more significant bits than minor
 878	 * version, so swap here.
 879	 */
 880	dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
 881		((fw_ver & 0xffff0000ull) >> 16) |
 882		((fw_ver & 0x0000ffffull) << 16);
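	/*
	 * For example, a raw value of 0x0002000a0003 (major 2, subminor 10,
	 * minor 3) becomes 0x00020003000a, which the messages below print
	 * as "2.3.010".
	 */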
 883
 884	if (mlx4_is_slave(dev))
 885		goto out;
 886
 887	MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
 888	dev->caps.function = lg;
 889
 890	MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
 891	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
 892	    cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
 893		mlx4_err(dev, "Installed FW has unsupported "
 894			 "command interface revision %d.\n",
 895			 cmd_if_rev);
 896		mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
 897			 (int) (dev->caps.fw_ver >> 32),
 898			 (int) (dev->caps.fw_ver >> 16) & 0xffff,
 899			 (int) dev->caps.fw_ver & 0xffff);
 900		mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
 901			 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
 902		err = -ENODEV;
 903		goto out;
 904	}
 905
 906	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
 907		dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;
 908
 909	MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
 910	cmd->max_cmds = 1 << lg;
 911
 912	mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
 913		 (int) (dev->caps.fw_ver >> 32),
 914		 (int) (dev->caps.fw_ver >> 16) & 0xffff,
 915		 (int) dev->caps.fw_ver & 0xffff,
 916		 cmd_if_rev, cmd->max_cmds);
 917
 918	MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
 919	MLX4_GET(fw->catas_size,   outbox, QUERY_FW_ERR_SIZE_OFFSET);
 920	MLX4_GET(fw->catas_bar,    outbox, QUERY_FW_ERR_BAR_OFFSET);
 921	fw->catas_bar = (fw->catas_bar >> 6) * 2;
 922
 923	mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
 924		 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);
 925
 926	MLX4_GET(fw->fw_pages,     outbox, QUERY_FW_SIZE_OFFSET);
 927	MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
 928	MLX4_GET(fw->clr_int_bar,  outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
 929	fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
 930
 931	MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
 932	MLX4_GET(fw->comm_bar,  outbox, QUERY_FW_COMM_BAR_OFFSET);
 933	fw->comm_bar = (fw->comm_bar >> 6) * 2;
 934	mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
 935		 fw->comm_bar, fw->comm_base);
 936	mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
 937
 938	/*
 939	 * Round up number of system pages needed in case
 940	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
 941	 */
 942	fw->fw_pages =
 943		ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
 944		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
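	/*
	 * For example, with 4 KB ICM pages (MLX4_ICM_PAGE_SHIFT == 12) on a
	 * system using 16 KB pages, 9 ICM pages become ALIGN(9, 4) >> 2 = 3
	 * system pages; on 4 KB-page systems the expression is a no-op.
	 */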
 945
 946	mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
 947		 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);
 948
 949out:
 950	mlx4_free_cmd_mailbox(dev, mailbox);
 951	return err;
 952}
 953
 954int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
 955			  struct mlx4_vhcr *vhcr,
 956			  struct mlx4_cmd_mailbox *inbox,
 957			  struct mlx4_cmd_mailbox *outbox,
 958			  struct mlx4_cmd_info *cmd)
 959{
 960	u8 *outbuf;
 961	int err;
 962
 963	outbuf = outbox->buf;
 964	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
 965			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 966	if (err)
 967		return err;
 968
 969	/* for slaves, zero out everything except FW version */
 970	outbuf[0] = outbuf[1] = 0;
 971	memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
 972	return 0;
 973}
 974
 975static void get_board_id(void *vsd, char *board_id)
 976{
 977	int i;
 978
 979#define VSD_OFFSET_SIG1		0x00
 980#define VSD_OFFSET_SIG2		0xde
 981#define VSD_OFFSET_MLX_BOARD_ID	0xd0
 982#define VSD_OFFSET_TS_BOARD_ID	0x20
 983
 984#define VSD_SIGNATURE_TOPSPIN	0x5ad
 985
 986	memset(board_id, 0, MLX4_BOARD_ID_LEN);
 987
 988	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
 989	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
 990		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
 991	} else {
 992		/*
 993		 * The board ID is a string but the firmware byte
 994		 * swaps each 4-byte word before passing it back to
 995		 * us.  Therefore we need to swab it before printing.
 996		 */
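		/*
		 * For instance, if the stored string begins with "MT_0",
		 * the firmware hands the bytes back as "0_TM"; swab32() on
		 * each 32-bit word restores the original order.
		 */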
 997		for (i = 0; i < 4; ++i)
 998			((u32 *) board_id)[i] =
 999				swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
1000	}
1001}
1002
1003int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
1004{
1005	struct mlx4_cmd_mailbox *mailbox;
1006	u32 *outbox;
1007	int err;
1008
1009#define QUERY_ADAPTER_OUT_SIZE             0x100
1010#define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
1011#define QUERY_ADAPTER_VSD_OFFSET           0x20
1012
1013	mailbox = mlx4_alloc_cmd_mailbox(dev);
1014	if (IS_ERR(mailbox))
1015		return PTR_ERR(mailbox);
1016	outbox = mailbox->buf;
1017
1018	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
1019			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1020	if (err)
1021		goto out;
1022
1023	MLX4_GET(adapter->inta_pin, outbox,    QUERY_ADAPTER_INTA_PIN_OFFSET);
1024
1025	get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
1026		     adapter->board_id);
1027
1028out:
1029	mlx4_free_cmd_mailbox(dev, mailbox);
1030	return err;
1031}
1032
1033int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1034{
1035	struct mlx4_cmd_mailbox *mailbox;
1036	__be32 *inbox;
1037	int err;
1038
1039#define INIT_HCA_IN_SIZE		 0x200
1040#define INIT_HCA_VERSION_OFFSET		 0x000
1041#define	 INIT_HCA_VERSION		 2
1042#define INIT_HCA_CACHELINE_SZ_OFFSET	 0x0e
1043#define INIT_HCA_FLAGS_OFFSET		 0x014
1044#define INIT_HCA_QPC_OFFSET		 0x020
1045#define	 INIT_HCA_QPC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x10)
1046#define	 INIT_HCA_LOG_QP_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x17)
1047#define	 INIT_HCA_SRQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x28)
1048#define	 INIT_HCA_LOG_SRQ_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x2f)
1049#define	 INIT_HCA_CQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x30)
1050#define	 INIT_HCA_LOG_CQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x37)
1051#define	 INIT_HCA_EQE_CQE_OFFSETS	 (INIT_HCA_QPC_OFFSET + 0x38)
1052#define	 INIT_HCA_ALTC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x40)
1053#define	 INIT_HCA_AUXC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x50)
1054#define	 INIT_HCA_EQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x60)
1055#define	 INIT_HCA_LOG_EQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x67)
1056#define	 INIT_HCA_RDMARC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x70)
1057#define	 INIT_HCA_LOG_RD_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x77)
1058#define INIT_HCA_MCAST_OFFSET		 0x0c0
1059#define	 INIT_HCA_MC_BASE_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x00)
1060#define	 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
1061#define	 INIT_HCA_LOG_MC_HASH_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x16)
1062#define  INIT_HCA_UC_STEERING_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x18)
1063#define	 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
1064#define INIT_HCA_TPT_OFFSET		 0x0f0
1065#define	 INIT_HCA_DMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x00)
1066#define	 INIT_HCA_LOG_MPT_SZ_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x0b)
1067#define	 INIT_HCA_MTT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x10)
1068#define	 INIT_HCA_CMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x18)
1069#define INIT_HCA_UAR_OFFSET		 0x120
1070#define	 INIT_HCA_LOG_UAR_SZ_OFFSET	 (INIT_HCA_UAR_OFFSET + 0x0a)
1071#define  INIT_HCA_UAR_PAGE_SZ_OFFSET     (INIT_HCA_UAR_OFFSET + 0x0b)
1072
1073	mailbox = mlx4_alloc_cmd_mailbox(dev);
1074	if (IS_ERR(mailbox))
1075		return PTR_ERR(mailbox);
1076	inbox = mailbox->buf;
1077
1078	memset(inbox, 0, INIT_HCA_IN_SIZE);
1079
1080	*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
1081
1082	*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
1083		(ilog2(cache_line_size()) - 4) << 5;
1084
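	/*
	 * Bit 1 of the INIT_HCA flags word is cleared on little-endian
	 * hosts and set on big-endian ones, telling the firmware the host
	 * byte order.
	 */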
1085#if defined(__LITTLE_ENDIAN)
1086	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
1087#elif defined(__BIG_ENDIAN)
1088	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
1089#else
1090#error Host endianness not defined
1091#endif
1092	/* Check port for UD address vector: */
1093	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
1094
1095	/* Enable IPoIB checksumming if we can: */
1096	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
1097		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
1098
1099	/* Enable QoS support if module parameter set */
1100	if (enable_qos)
1101		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
1102
1103	/* enable counters */
1104	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
1105		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
1106
1107	/* QPC/EEC/CQC/EQC/RDMARC attributes */
1108
1109	MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
1110	MLX4_PUT(inbox, param->log_num_qps,   INIT_HCA_LOG_QP_OFFSET);
1111	MLX4_PUT(inbox, param->srqc_base,     INIT_HCA_SRQC_BASE_OFFSET);
1112	MLX4_PUT(inbox, param->log_num_srqs,  INIT_HCA_LOG_SRQ_OFFSET);
1113	MLX4_PUT(inbox, param->cqc_base,      INIT_HCA_CQC_BASE_OFFSET);
1114	MLX4_PUT(inbox, param->log_num_cqs,   INIT_HCA_LOG_CQ_OFFSET);
1115	MLX4_PUT(inbox, param->altc_base,     INIT_HCA_ALTC_BASE_OFFSET);
1116	MLX4_PUT(inbox, param->auxc_base,     INIT_HCA_AUXC_BASE_OFFSET);
1117	MLX4_PUT(inbox, param->eqc_base,      INIT_HCA_EQC_BASE_OFFSET);
1118	MLX4_PUT(inbox, param->log_num_eqs,   INIT_HCA_LOG_EQ_OFFSET);
1119	MLX4_PUT(inbox, param->rdmarc_base,   INIT_HCA_RDMARC_BASE_OFFSET);
1120	MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
1121
1122	/* multicast attributes */
1123
1124	MLX4_PUT(inbox, param->mc_base,		INIT_HCA_MC_BASE_OFFSET);
1125	MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
1126	MLX4_PUT(inbox, param->log_mc_hash_sz,  INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
1127	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
1128		MLX4_PUT(inbox, (u8) (1 << 3),	INIT_HCA_UC_STEERING_OFFSET);
1129	MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
1130
1131	/* TPT attributes */
1132
1133	MLX4_PUT(inbox, param->dmpt_base,  INIT_HCA_DMPT_BASE_OFFSET);
1134	MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
1135	MLX4_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
1136	MLX4_PUT(inbox, param->cmpt_base,  INIT_HCA_CMPT_BASE_OFFSET);
1137
1138	/* UAR attributes */
1139
1140	MLX4_PUT(inbox, param->uar_page_sz,	INIT_HCA_UAR_PAGE_SZ_OFFSET);
1141	MLX4_PUT(inbox, param->log_uar_sz,      INIT_HCA_LOG_UAR_SZ_OFFSET);
1142
1143	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
1144		       MLX4_CMD_NATIVE);
1145
1146	if (err)
1147		mlx4_err(dev, "INIT_HCA returns %d\n", err);
1148
1149	mlx4_free_cmd_mailbox(dev, mailbox);
1150	return err;
1151}
1152
1153int mlx4_QUERY_HCA(struct mlx4_dev *dev,
1154		   struct mlx4_init_hca_param *param)
1155{
1156	struct mlx4_cmd_mailbox *mailbox;
1157	__be32 *outbox;
1158	int err;
1159
1160#define QUERY_HCA_GLOBAL_CAPS_OFFSET	0x04
1161
1162	mailbox = mlx4_alloc_cmd_mailbox(dev);
1163	if (IS_ERR(mailbox))
1164		return PTR_ERR(mailbox);
1165	outbox = mailbox->buf;
1166
1167	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
1168			   MLX4_CMD_QUERY_HCA,
1169			   MLX4_CMD_TIME_CLASS_B,
1170			   !mlx4_is_slave(dev));
1171	if (err)
1172		goto out;
1173
1174	MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
1175
1176	/* QPC/EEC/CQC/EQC/RDMARC attributes */
1177
1178	MLX4_GET(param->qpc_base,      outbox, INIT_HCA_QPC_BASE_OFFSET);
1179	MLX4_GET(param->log_num_qps,   outbox, INIT_HCA_LOG_QP_OFFSET);
1180	MLX4_GET(param->srqc_base,     outbox, INIT_HCA_SRQC_BASE_OFFSET);
1181	MLX4_GET(param->log_num_srqs,  outbox, INIT_HCA_LOG_SRQ_OFFSET);
1182	MLX4_GET(param->cqc_base,      outbox, INIT_HCA_CQC_BASE_OFFSET);
1183	MLX4_GET(param->log_num_cqs,   outbox, INIT_HCA_LOG_CQ_OFFSET);
1184	MLX4_GET(param->altc_base,     outbox, INIT_HCA_ALTC_BASE_OFFSET);
1185	MLX4_GET(param->auxc_base,     outbox, INIT_HCA_AUXC_BASE_OFFSET);
1186	MLX4_GET(param->eqc_base,      outbox, INIT_HCA_EQC_BASE_OFFSET);
1187	MLX4_GET(param->log_num_eqs,   outbox, INIT_HCA_LOG_EQ_OFFSET);
1188	MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
1189	MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
1190
1191	/* multicast attributes */
1192
1193	MLX4_GET(param->mc_base,         outbox, INIT_HCA_MC_BASE_OFFSET);
1194	MLX4_GET(param->log_mc_entry_sz, outbox,
1195		 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
1196	MLX4_GET(param->log_mc_hash_sz,  outbox,
1197		 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
1198	MLX4_GET(param->log_mc_table_sz, outbox,
1199		 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
1200
1201	/* TPT attributes */
1202
1203	MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
1204	MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
1205	MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
1206	MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);
1207
1208	/* UAR attributes */
1209
1210	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
1211	MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
1212
1213out:
1214	mlx4_free_cmd_mailbox(dev, mailbox);
1215
1216	return err;
1217}
1218
1219int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
1220			   struct mlx4_vhcr *vhcr,
1221			   struct mlx4_cmd_mailbox *inbox,
1222			   struct mlx4_cmd_mailbox *outbox,
1223			   struct mlx4_cmd_info *cmd)
1224{
1225	struct mlx4_priv *priv = mlx4_priv(dev);
1226	int port = vhcr->in_modifier;
1227	int err;
1228
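	/*
	 * INIT_PORT is reference-counted across functions: init_port_mask
	 * records which slaves have opened the port and init_port_ref how
	 * many, so the real INIT_PORT command is issued only for the first
	 * opener and mlx4_CLOSE_PORT_wrapper() only closes the port when
	 * the last reference is dropped.
	 */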
1229	if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
1230		return 0;
1231
1232	if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
1233		return -ENODEV;
1234
1235	/* Enable port only if it was previously disabled */
1236	if (!priv->mfunc.master.init_port_ref[port]) {
1237		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
1238			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1239		if (err)
1240			return err;
1241	}
1242	priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
1243	++priv->mfunc.master.init_port_ref[port];
1244	return 0;
1245}
1246
1247int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
1248{
1249	struct mlx4_cmd_mailbox *mailbox;
1250	u32 *inbox;
1251	int err;
1252	u32 flags;
1253	u16 field;
1254
1255	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
1256#define INIT_PORT_IN_SIZE          256
1257#define INIT_PORT_FLAGS_OFFSET     0x00
1258#define INIT_PORT_FLAG_SIG         (1 << 18)
1259#define INIT_PORT_FLAG_NG          (1 << 17)
1260#define INIT_PORT_FLAG_G0          (1 << 16)
1261#define INIT_PORT_VL_SHIFT         4
1262#define INIT_PORT_PORT_WIDTH_SHIFT 8
1263#define INIT_PORT_MTU_OFFSET       0x04
1264#define INIT_PORT_MAX_GID_OFFSET   0x06
1265#define INIT_PORT_MAX_PKEY_OFFSET  0x0a
1266#define INIT_PORT_GUID0_OFFSET     0x10
1267#define INIT_PORT_NODE_GUID_OFFSET 0x18
1268#define INIT_PORT_SI_GUID_OFFSET   0x20
1269
1270		mailbox = mlx4_alloc_cmd_mailbox(dev);
1271		if (IS_ERR(mailbox))
1272			return PTR_ERR(mailbox);
1273		inbox = mailbox->buf;
1274
1275		memset(inbox, 0, INIT_PORT_IN_SIZE);
1276
1277		flags = 0;
1278		flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
1279		flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
1280		MLX4_PUT(inbox, flags,		  INIT_PORT_FLAGS_OFFSET);
1281
1282		field = 128 << dev->caps.ib_mtu_cap[port];
1283		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
1284		field = dev->caps.gid_table_len[port];
1285		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
1286		field = dev->caps.pkey_table_len[port];
1287		MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
1288
1289		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
1290			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1291
1292		mlx4_free_cmd_mailbox(dev, mailbox);
1293	} else
1294		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
1295			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
1296
1297	return err;
1298}
1299EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
1300
1301int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
1302			    struct mlx4_vhcr *vhcr,
1303			    struct mlx4_cmd_mailbox *inbox,
1304			    struct mlx4_cmd_mailbox *outbox,
1305			    struct mlx4_cmd_info *cmd)
1306{
1307	struct mlx4_priv *priv = mlx4_priv(dev);
1308	int port = vhcr->in_modifier;
1309	int err;
1310
1311	if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
1312	    (1 << port)))
1313		return 0;
1314
1315	if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
1316		return -ENODEV;
1317	if (priv->mfunc.master.init_port_ref[port] == 1) {
1318		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
1319			       MLX4_CMD_NATIVE);
1320		if (err)
1321			return err;
1322	}
1323	priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
1324	--priv->mfunc.master.init_port_ref[port];
1325	return 0;
1326}
1327
1328int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
1329{
1330	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
1331			MLX4_CMD_WRAPPED);
1332}
1333EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
1334
1335int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
1336{
1337	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
1338			MLX4_CMD_NATIVE);
1339}
1340
1341int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
1342{
1343	int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
1344			       MLX4_CMD_SET_ICM_SIZE,
1345			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1346	if (ret)
1347		return ret;
1348
1349	/*
1350	 * Round up number of system pages needed in case
1351	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
1352	 */
1353	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
1354		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
1355
1356	return 0;
1357}
1358
1359int mlx4_NOP(struct mlx4_dev *dev)
1360{
1361	/* Input modifier of 0x1f means "finish as soon as possible." */
1362	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
1363}
1364
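/*
 * Wake-on-LAN configuration is accessed through MOD_STAT_CFG: the input
 * modifier carries setup mode 5 in its top bits and the port number
 * shifted left by 8, with op_modifier 0x3 for a read (immediate result)
 * and 0x1 for a write.
 */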
1365#define MLX4_WOL_SETUP_MODE (5 << 28)
1366int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
1367{
1368	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
1369
1370	return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
1371			    MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
1372			    MLX4_CMD_NATIVE);
1373}
1374EXPORT_SYMBOL_GPL(mlx4_wol_read);
1375
1376int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
1377{
1378	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
1379
1380	return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
1381			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1382}
1383EXPORT_SYMBOL_GPL(mlx4_wol_write);