v6.8 (drivers/infiniband/hw/mthca/mthca_provider.c)
   1/*
   2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
   3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   4 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
   5 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
   6 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
   7 *
   8 * This software is available to you under a choice of one of two
   9 * licenses.  You may choose to be licensed under the terms of the GNU
  10 * General Public License (GPL) Version 2, available from the file
  11 * COPYING in the main directory of this source tree, or the
  12 * OpenIB.org BSD license below:
  13 *
  14 *     Redistribution and use in source and binary forms, with or
  15 *     without modification, are permitted provided that the following
  16 *     conditions are met:
  17 *
  18 *      - Redistributions of source code must retain the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer.
  21 *
  22 *      - Redistributions in binary form must reproduce the above
  23 *        copyright notice, this list of conditions and the following
  24 *        disclaimer in the documentation and/or other materials
  25 *        provided with the distribution.
  26 *
  27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  34 * SOFTWARE.
  35 */
  36
  37#include <rdma/ib_smi.h>
  38#include <rdma/ib_umem.h>
  39#include <rdma/ib_user_verbs.h>
  40#include <rdma/uverbs_ioctl.h>
  41
  42#include <linux/sched.h>
  43#include <linux/slab.h>
  44#include <linux/stat.h>
  45#include <linux/mm.h>
  46#include <linux/export.h>
  47
  48#include "mthca_dev.h"
  49#include "mthca_cmd.h"
  50#include <rdma/mthca-abi.h>
  51#include "mthca_memfree.h"
  52
  53static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
  54			      struct ib_udata *uhw)
  55{
  56	struct ib_smp *in_mad;
  57	struct ib_smp *out_mad;
  58	int err = -ENOMEM;
  59	struct mthca_dev *mdev = to_mdev(ibdev);
  60
  61	if (uhw->inlen || uhw->outlen)
  62		return -EINVAL;
  63
  64	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
  65	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
  66	if (!in_mad || !out_mad)
  67		goto out;
  68
  69	memset(props, 0, sizeof *props);
  70
  71	props->fw_ver              = mdev->fw_ver;
  72
  73	ib_init_query_mad(in_mad);
  74	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
  75
  76	err = mthca_MAD_IFC(mdev, 1, 1,
  77			    1, NULL, NULL, in_mad, out_mad);
  78	if (err)
  79		goto out;
  80
  81	props->device_cap_flags    = mdev->device_cap_flags;
  82	props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
  83		0xffffff;
  84	props->vendor_part_id      = be16_to_cpup((__be16 *) (out_mad->data + 30));
  85	props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
  86	memcpy(&props->sys_image_guid, out_mad->data +  4, 8);
  87
  88	props->max_mr_size         = ~0ull;
  89	props->page_size_cap       = mdev->limits.page_size_cap;
  90	props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
  91	props->max_qp_wr           = mdev->limits.max_wqes;
  92	props->max_send_sge        = mdev->limits.max_sg;
  93	props->max_recv_sge        = mdev->limits.max_sg;
  94	props->max_sge_rd          = mdev->limits.max_sg;
  95	props->max_cq              = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
  96	props->max_cqe             = mdev->limits.max_cqes;
  97	props->max_mr              = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
  98	props->max_pd              = mdev->limits.num_pds - mdev->limits.reserved_pds;
  99	props->max_qp_rd_atom      = 1 << mdev->qp_table.rdb_shift;
 100	props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
 101	props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
 102	props->max_srq             = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
 103	props->max_srq_wr          = mdev->limits.max_srq_wqes;
 104	props->max_srq_sge         = mdev->limits.max_srq_sge;
 105	props->local_ca_ack_delay  = mdev->limits.local_ca_ack_delay;
 106	props->atomic_cap          = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
 107					IB_ATOMIC_HCA : IB_ATOMIC_NONE;
 108	props->max_pkeys           = mdev->limits.pkey_table_len;
 109	props->max_mcast_grp       = mdev->limits.num_mgms + mdev->limits.num_amgms;
 110	props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
 111	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
 112					   props->max_mcast_grp;
 113
 114	err = 0;
 115 out:
 116	kfree(in_mad);
 117	kfree(out_mad);
 118	return err;
 119}
 120
 121static int mthca_query_port(struct ib_device *ibdev,
 122			    u32 port, struct ib_port_attr *props)
 123{
 124	struct ib_smp *in_mad;
 125	struct ib_smp *out_mad;
 126	int err = -ENOMEM;
 127
 128	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
 129	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 130	if (!in_mad || !out_mad)
 131		goto out;
 132
 133	/* props being zeroed by the caller, avoid zeroing it here */
 134
 135	ib_init_query_mad(in_mad);
 136	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
 137	in_mad->attr_mod = cpu_to_be32(port);
 138
 139	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
 140			    port, NULL, NULL, in_mad, out_mad);
 141	if (err)
 142		goto out;
 143
 144	props->lid               = be16_to_cpup((__be16 *) (out_mad->data + 16));
 145	props->lmc               = out_mad->data[34] & 0x7;
 146	props->sm_lid            = be16_to_cpup((__be16 *) (out_mad->data + 18));
 147	props->sm_sl             = out_mad->data[36] & 0xf;
 148	props->state             = out_mad->data[32] & 0xf;
 149	props->phys_state        = out_mad->data[33] >> 4;
 150	props->port_cap_flags    = be32_to_cpup((__be32 *) (out_mad->data + 20));
 151	props->gid_tbl_len       = to_mdev(ibdev)->limits.gid_table_len;
 152	props->max_msg_sz        = 0x80000000;
 153	props->pkey_tbl_len      = to_mdev(ibdev)->limits.pkey_table_len;
 154	props->bad_pkey_cntr     = be16_to_cpup((__be16 *) (out_mad->data + 46));
 155	props->qkey_viol_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 48));
 156	props->active_width      = out_mad->data[31] & 0xf;
 157	props->active_speed      = out_mad->data[35] >> 4;
 158	props->max_mtu           = out_mad->data[41] & 0xf;
 159	props->active_mtu        = out_mad->data[36] >> 4;
 160	props->subnet_timeout    = out_mad->data[51] & 0x1f;
 161	props->max_vl_num        = out_mad->data[37] >> 4;
 162	props->init_type_reply   = out_mad->data[41] >> 4;
 163
 164 out:
 165	kfree(in_mad);
 166	kfree(out_mad);
 167	return err;
 168}
 169
 170static int mthca_modify_device(struct ib_device *ibdev,
 171			       int mask,
 172			       struct ib_device_modify *props)
 173{
 174	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
 175		return -EOPNOTSUPP;
 176
 177	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
 178		if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
 179			return -ERESTARTSYS;
 180		memcpy(ibdev->node_desc, props->node_desc,
 181		       IB_DEVICE_NODE_DESC_MAX);
 182		mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
 183	}
 184
 185	return 0;
 186}
 187
 188static int mthca_modify_port(struct ib_device *ibdev,
 189			     u32 port, int port_modify_mask,
 190			     struct ib_port_modify *props)
 191{
 192	struct mthca_set_ib_param set_ib;
 193	struct ib_port_attr attr;
 194	int err;
 195
 196	if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
 197		return -ERESTARTSYS;
 198
 199	err = ib_query_port(ibdev, port, &attr);
 200	if (err)
 201		goto out;
 202
 203	set_ib.set_si_guid     = 0;
 204	set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);
 205
 206	set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
 207		~props->clr_port_cap_mask;
 208
 209	err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port);
 210	if (err)
 211		goto out;
 212out:
 213	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
 214	return err;
 215}
 216
 217static int mthca_query_pkey(struct ib_device *ibdev,
 218			    u32 port, u16 index, u16 *pkey)
 219{
 220	struct ib_smp *in_mad;
 221	struct ib_smp *out_mad;
 222	int err = -ENOMEM;
 223
 224	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
 225	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 226	if (!in_mad || !out_mad)
 227		goto out;
 228
 229	ib_init_query_mad(in_mad);
 230	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
 231	in_mad->attr_mod = cpu_to_be32(index / 32);
 232
 233	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
 234			    port, NULL, NULL, in_mad, out_mad);
 235	if (err)
 236		goto out;
 237
 238	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
 239
 240 out:
 241	kfree(in_mad);
 242	kfree(out_mad);
 243	return err;
 244}
 245
 246static int mthca_query_gid(struct ib_device *ibdev, u32 port,
 247			   int index, union ib_gid *gid)
 248{
 249	struct ib_smp *in_mad;
 250	struct ib_smp *out_mad;
 251	int err = -ENOMEM;
 252
 253	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
 254	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 255	if (!in_mad || !out_mad)
 256		goto out;
 257
 258	ib_init_query_mad(in_mad);
 259	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
 260	in_mad->attr_mod = cpu_to_be32(port);
 261
 262	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
 263			    port, NULL, NULL, in_mad, out_mad);
 264	if (err)
 265		goto out;
 266
 267	memcpy(gid->raw, out_mad->data + 8, 8);
 268
 269	ib_init_query_mad(in_mad);
 270	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
 271	in_mad->attr_mod = cpu_to_be32(index / 8);
 272
 273	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
 274			    port, NULL, NULL, in_mad, out_mad);
 275	if (err)
 276		goto out;
 277
 278	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
 279
 280 out:
 281	kfree(in_mad);
 282	kfree(out_mad);
 283	return err;
 284}
 285
 286static int mthca_alloc_ucontext(struct ib_ucontext *uctx,
 287				struct ib_udata *udata)
 288{
 289	struct ib_device *ibdev = uctx->device;
 290	struct mthca_alloc_ucontext_resp uresp = {};
 291	struct mthca_ucontext *context = to_mucontext(uctx);
 292	int                              err;
 293
 294	if (!(to_mdev(ibdev)->active))
 295		return -EAGAIN;
 296
 297	uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
 298	if (mthca_is_memfree(to_mdev(ibdev)))
 299		uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
 300	else
 301		uresp.uarc_size = 0;
 302
 303	err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
 304	if (err)
 305		return err;
 306
 307	context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
 308	if (IS_ERR(context->db_tab)) {
 309		err = PTR_ERR(context->db_tab);
 310		mthca_uar_free(to_mdev(ibdev), &context->uar);
 311		return err;
 312	}
 313
 314	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
 315		mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
 316		mthca_uar_free(to_mdev(ibdev), &context->uar);
 317		return -EFAULT;
 318	}
 319
 320	context->reg_mr_warned = 0;
 321
 322	return 0;
 323}
 324
 325static void mthca_dealloc_ucontext(struct ib_ucontext *context)
 326{
 327	mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
 328				  to_mucontext(context)->db_tab);
 329	mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
 330}
 331
 332static int mthca_mmap_uar(struct ib_ucontext *context,
 333			  struct vm_area_struct *vma)
 334{
 335	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
 336		return -EINVAL;
 337
 338	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 339
 340	if (io_remap_pfn_range(vma, vma->vm_start,
 341			       to_mucontext(context)->uar.pfn,
 342			       PAGE_SIZE, vma->vm_page_prot))
 343		return -EAGAIN;
 344
 345	return 0;
 346}
 347
 348static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 349{
 350	struct ib_device *ibdev = ibpd->device;
 351	struct mthca_pd *pd = to_mpd(ibpd);
 352	int err;
 353
 354	err = mthca_pd_alloc(to_mdev(ibdev), !udata, pd);
 355	if (err)
 356		return err;
 357
 358	if (udata) {
 359		if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
 360			mthca_pd_free(to_mdev(ibdev), pd);
 361			return -EFAULT;
 362		}
 363	}
 364
 365	return 0;
 366}
 367
 368static int mthca_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 369{
 370	mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
 371	return 0;
 372}
 373
 374static int mthca_ah_create(struct ib_ah *ibah,
 375			   struct rdma_ah_init_attr *init_attr,
 376			   struct ib_udata *udata)
 377
 378{
 379	struct mthca_ah *ah = to_mah(ibah);
 380
 381	return mthca_create_ah(to_mdev(ibah->device), to_mpd(ibah->pd),
 382			       init_attr->ah_attr, ah);
 383}
 384
 385static int mthca_ah_destroy(struct ib_ah *ah, u32 flags)
 386{
 387	mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
 388	return 0;
 389}
 390
 391static int mthca_create_srq(struct ib_srq *ibsrq,
 392			    struct ib_srq_init_attr *init_attr,
 393			    struct ib_udata *udata)
 394{
 395	struct mthca_create_srq ucmd;
 396	struct mthca_ucontext *context = rdma_udata_to_drv_context(
 397		udata, struct mthca_ucontext, ibucontext);
 398	struct mthca_srq *srq = to_msrq(ibsrq);
 399	int err;
 400
 401	if (init_attr->srq_type != IB_SRQT_BASIC)
 402		return -EOPNOTSUPP;
 403
 404	if (udata) {
 405		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
 406			return -EFAULT;
 407
 408		err = mthca_map_user_db(to_mdev(ibsrq->device), &context->uar,
 409					context->db_tab, ucmd.db_index,
 410					ucmd.db_page);
 411
 412		if (err)
 413			return err;
 414
 415		srq->mr.ibmr.lkey = ucmd.lkey;
 416		srq->db_index     = ucmd.db_index;
 417	}
 418
 419	err = mthca_alloc_srq(to_mdev(ibsrq->device), to_mpd(ibsrq->pd),
 420			      &init_attr->attr, srq, udata);
 421
 422	if (err && udata)
 423		mthca_unmap_user_db(to_mdev(ibsrq->device), &context->uar,
 424				    context->db_tab, ucmd.db_index);
 425
 426	if (err)
 427		return err;
 428
 429	if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) {
 430		mthca_free_srq(to_mdev(ibsrq->device), srq);
 431		return -EFAULT;
 432	}
 433
 434	return 0;
 435}
 436
 437static int mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
 438{
 439	if (udata) {
 440		struct mthca_ucontext *context =
 441			rdma_udata_to_drv_context(
 442				udata,
 443				struct mthca_ucontext,
 444				ibucontext);
 445
 446		mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
 447				    context->db_tab, to_msrq(srq)->db_index);
 448	}
 449
 450	mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
 451	return 0;
 452}
 453
 454static int mthca_create_qp(struct ib_qp *ibqp,
 455			   struct ib_qp_init_attr *init_attr,
 456			   struct ib_udata *udata)
 457{
 458	struct mthca_ucontext *context = rdma_udata_to_drv_context(
 459		udata, struct mthca_ucontext, ibucontext);
 460	struct mthca_create_qp ucmd;
 461	struct mthca_qp *qp = to_mqp(ibqp);
 462	struct mthca_dev *dev = to_mdev(ibqp->device);
 463	int err;
 464
 465	if (init_attr->create_flags)
 466		return -EOPNOTSUPP;
 467
 468	switch (init_attr->qp_type) {
 469	case IB_QPT_RC:
 470	case IB_QPT_UC:
 471	case IB_QPT_UD:
 472	{
 473		if (udata) {
 474			if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
 475				return -EFAULT;
 476
 477			err = mthca_map_user_db(dev, &context->uar,
 478						context->db_tab,
 479						ucmd.sq_db_index,
 480						ucmd.sq_db_page);
 481			if (err)
 482				return err;
 483
 484			err = mthca_map_user_db(dev, &context->uar,
 485						context->db_tab,
 486						ucmd.rq_db_index,
 487						ucmd.rq_db_page);
 488			if (err) {
 489				mthca_unmap_user_db(dev, &context->uar,
 490						    context->db_tab,
 491						    ucmd.sq_db_index);
 492				return err;
 493			}
 494
 495			qp->mr.ibmr.lkey = ucmd.lkey;
 496			qp->sq.db_index  = ucmd.sq_db_index;
 497			qp->rq.db_index  = ucmd.rq_db_index;
 498		}
 499
 500		err = mthca_alloc_qp(dev, to_mpd(ibqp->pd),
 501				     to_mcq(init_attr->send_cq),
 502				     to_mcq(init_attr->recv_cq),
 503				     init_attr->qp_type, init_attr->sq_sig_type,
 504				     &init_attr->cap, qp, udata);
 505
 506		if (err && udata) {
 507			mthca_unmap_user_db(dev, &context->uar, context->db_tab,
 508					    ucmd.sq_db_index);
 509			mthca_unmap_user_db(dev, &context->uar, context->db_tab,
 510					    ucmd.rq_db_index);
 511		}
 512
 513		qp->ibqp.qp_num = qp->qpn;
 514		break;
 515	}
 516	case IB_QPT_SMI:
 517	case IB_QPT_GSI:
 518	{
 519		qp->sqp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
 520		if (!qp->sqp)
 521			return -ENOMEM;
 522
 523		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
 524
 525		err = mthca_alloc_sqp(dev, to_mpd(ibqp->pd),
 526				      to_mcq(init_attr->send_cq),
 527				      to_mcq(init_attr->recv_cq),
 528				      init_attr->sq_sig_type, &init_attr->cap,
 529				      qp->ibqp.qp_num, init_attr->port_num, qp,
 530				      udata);
 531		break;
 532	}
 533	default:
 534		/* Don't support raw QPs */
 535		return -EOPNOTSUPP;
 536	}
 537
 538	if (err) {
 539		kfree(qp->sqp);
 540		return err;
 541	}
 542
 543	init_attr->cap.max_send_wr     = qp->sq.max;
 544	init_attr->cap.max_recv_wr     = qp->rq.max;
 545	init_attr->cap.max_send_sge    = qp->sq.max_gs;
 546	init_attr->cap.max_recv_sge    = qp->rq.max_gs;
 547	init_attr->cap.max_inline_data = qp->max_inline_data;
 548
 549	return 0;
 550}
 551
 552static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
 553{
 554	if (udata) {
 555		struct mthca_ucontext *context =
 556			rdma_udata_to_drv_context(
 557				udata,
 558				struct mthca_ucontext,
 559				ibucontext);
 560
 561		mthca_unmap_user_db(to_mdev(qp->device),
 562				    &context->uar,
 563				    context->db_tab,
 564				    to_mqp(qp)->sq.db_index);
 565		mthca_unmap_user_db(to_mdev(qp->device),
 566				    &context->uar,
 567				    context->db_tab,
 568				    to_mqp(qp)->rq.db_index);
 569	}
 570	mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
 571	kfree(to_mqp(qp)->sqp);
 572	return 0;
 573}
 574
 575static int mthca_create_cq(struct ib_cq *ibcq,
 576			   const struct ib_cq_init_attr *attr,
 577			   struct ib_udata *udata)
 578{
 579	struct ib_device *ibdev = ibcq->device;
 580	int entries = attr->cqe;
 581	struct mthca_create_cq ucmd;
 582	struct mthca_cq *cq;
 583	int nent;
 584	int err;
 585	struct mthca_ucontext *context = rdma_udata_to_drv_context(
 586		udata, struct mthca_ucontext, ibucontext);
 587
 588	if (attr->flags)
 589		return -EOPNOTSUPP;
 590
 591	if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
 592		return -EINVAL;
 593
 594	if (udata) {
 595		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
 596			return -EFAULT;
 597
 598		err = mthca_map_user_db(to_mdev(ibdev), &context->uar,
 599					context->db_tab, ucmd.set_db_index,
 600					ucmd.set_db_page);
 601		if (err)
 602			return err;
 603
 604		err = mthca_map_user_db(to_mdev(ibdev), &context->uar,
 605					context->db_tab, ucmd.arm_db_index,
 606					ucmd.arm_db_page);
 607		if (err)
 608			goto err_unmap_set;
 609	}
 610
 611	cq = to_mcq(ibcq);
 612
 613	if (udata) {
 614		cq->buf.mr.ibmr.lkey = ucmd.lkey;
 615		cq->set_ci_db_index  = ucmd.set_db_index;
 616		cq->arm_db_index     = ucmd.arm_db_index;
 617	}
 618
 619	for (nent = 1; nent <= entries; nent <<= 1)
 620		; /* nothing */
 621
 622	err = mthca_init_cq(to_mdev(ibdev), nent, context,
 623			    udata ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
 624			    cq);
 625	if (err)
 626		goto err_unmap_arm;
 627
 628	if (udata && ib_copy_to_udata(udata, &cq->cqn, sizeof(__u32))) {
 629		mthca_free_cq(to_mdev(ibdev), cq);
 630		err = -EFAULT;
 631		goto err_unmap_arm;
 632	}
 633
 634	cq->resize_buf = NULL;
 635
 636	return 0;
 637
 638err_unmap_arm:
 639	if (udata)
 640		mthca_unmap_user_db(to_mdev(ibdev), &context->uar,
 641				    context->db_tab, ucmd.arm_db_index);
 642
 643err_unmap_set:
 644	if (udata)
 645		mthca_unmap_user_db(to_mdev(ibdev), &context->uar,
 646				    context->db_tab, ucmd.set_db_index);
 647
 648	return err;
 649}
 650
 651static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
 652				  int entries)
 653{
 654	int ret;
 655
 656	spin_lock_irq(&cq->lock);
 657	if (cq->resize_buf) {
 658		ret = -EBUSY;
 659		goto unlock;
 660	}
 661
 662	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
 663	if (!cq->resize_buf) {
 664		ret = -ENOMEM;
 665		goto unlock;
 666	}
 667
 668	cq->resize_buf->state = CQ_RESIZE_ALLOC;
 669
 670	ret = 0;
 671
 672unlock:
 673	spin_unlock_irq(&cq->lock);
 674
 675	if (ret)
 676		return ret;
 677
 678	ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
 679	if (ret) {
 680		spin_lock_irq(&cq->lock);
 681		kfree(cq->resize_buf);
 682		cq->resize_buf = NULL;
 683		spin_unlock_irq(&cq->lock);
 684		return ret;
 685	}
 686
 687	cq->resize_buf->cqe = entries - 1;
 688
 689	spin_lock_irq(&cq->lock);
 690	cq->resize_buf->state = CQ_RESIZE_READY;
 691	spin_unlock_irq(&cq->lock);
 692
 693	return 0;
 694}
 695
 696static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 697{
 698	struct mthca_dev *dev = to_mdev(ibcq->device);
 699	struct mthca_cq *cq = to_mcq(ibcq);
 700	struct mthca_resize_cq ucmd;
 701	u32 lkey;
 702	int ret;
 703
 704	if (entries < 1 || entries > dev->limits.max_cqes)
 705		return -EINVAL;
 706
 707	mutex_lock(&cq->mutex);
 708
 709	entries = roundup_pow_of_two(entries + 1);
 710	if (entries == ibcq->cqe + 1) {
 711		ret = 0;
 712		goto out;
 713	}
 714
 715	if (cq->is_kernel) {
 716		ret = mthca_alloc_resize_buf(dev, cq, entries);
 717		if (ret)
 718			goto out;
 719		lkey = cq->resize_buf->buf.mr.ibmr.lkey;
 720	} else {
 721		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
 722			ret = -EFAULT;
 723			goto out;
 724		}
 725		lkey = ucmd.lkey;
 726	}
 727
 728	ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries));
 729
 730	if (ret) {
 731		if (cq->resize_buf) {
 732			mthca_free_cq_buf(dev, &cq->resize_buf->buf,
 733					  cq->resize_buf->cqe);
 734			kfree(cq->resize_buf);
 735			spin_lock_irq(&cq->lock);
 736			cq->resize_buf = NULL;
 737			spin_unlock_irq(&cq->lock);
 738		}
 739		goto out;
 740	}
 741
 742	if (cq->is_kernel) {
 743		struct mthca_cq_buf tbuf;
 744		int tcqe;
 745
 746		spin_lock_irq(&cq->lock);
 747		if (cq->resize_buf->state == CQ_RESIZE_READY) {
 748			mthca_cq_resize_copy_cqes(cq);
 749			tbuf         = cq->buf;
 750			tcqe         = cq->ibcq.cqe;
 751			cq->buf      = cq->resize_buf->buf;
 752			cq->ibcq.cqe = cq->resize_buf->cqe;
 753		} else {
 754			tbuf = cq->resize_buf->buf;
 755			tcqe = cq->resize_buf->cqe;
 756		}
 757
 758		kfree(cq->resize_buf);
 759		cq->resize_buf = NULL;
 760		spin_unlock_irq(&cq->lock);
 761
 762		mthca_free_cq_buf(dev, &tbuf, tcqe);
 763	} else
 764		ibcq->cqe = entries - 1;
 765
 766out:
 767	mutex_unlock(&cq->mutex);
 768
 769	return ret;
 770}
 771
 772static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 773{
 774	if (udata) {
 775		struct mthca_ucontext *context =
 776			rdma_udata_to_drv_context(
 777				udata,
 778				struct mthca_ucontext,
 779				ibucontext);
 780
 781		mthca_unmap_user_db(to_mdev(cq->device),
 782				    &context->uar,
 783				    context->db_tab,
 784				    to_mcq(cq)->arm_db_index);
 785		mthca_unmap_user_db(to_mdev(cq->device),
 786				    &context->uar,
 787				    context->db_tab,
 788				    to_mcq(cq)->set_ci_db_index);
 789	}
 790	mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
 791	return 0;
 792}
 793
 794static inline u32 convert_access(int acc)
 795{
 796	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
 797	       (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
 798	       (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
 799	       (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
 800	       MTHCA_MPT_FLAG_LOCAL_READ;
 801}
 802
 803static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
 804{
 805	struct mthca_mr *mr;
 806	int err;
 807
 808	mr = kmalloc(sizeof *mr, GFP_KERNEL);
 809	if (!mr)
 810		return ERR_PTR(-ENOMEM);
 811
 812	err = mthca_mr_alloc_notrans(to_mdev(pd->device),
 813				     to_mpd(pd)->pd_num,
 814				     convert_access(acc), mr);
 815
 816	if (err) {
 817		kfree(mr);
 818		return ERR_PTR(err);
 819	}
 820
 821	mr->umem = NULL;
 822
 823	return &mr->ibmr;
 824}
 825
 826static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 827				       u64 virt, int acc, struct ib_udata *udata)
 828{
 829	struct mthca_dev *dev = to_mdev(pd->device);
 830	struct ib_block_iter biter;
 831	struct mthca_ucontext *context = rdma_udata_to_drv_context(
 832		udata, struct mthca_ucontext, ibucontext);
 833	struct mthca_mr *mr;
 834	struct mthca_reg_mr ucmd;
 835	u64 *pages;
 836	int n, i;
 837	int err = 0;
 838	int write_mtt_size;
 839
 840	if (udata->inlen < sizeof ucmd) {
 841		if (!context->reg_mr_warned) {
 842			mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
 843				   current->comm);
 844			mthca_warn(dev, "  Update libmthca to fix this.\n");
 845		}
 846		++context->reg_mr_warned;
 847		ucmd.mr_attrs = 0;
 848	} else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
 849		return ERR_PTR(-EFAULT);
 850
 851	mr = kmalloc(sizeof *mr, GFP_KERNEL);
 852	if (!mr)
 853		return ERR_PTR(-ENOMEM);
 854
 855	mr->umem = ib_umem_get(pd->device, start, length, acc);
 856	if (IS_ERR(mr->umem)) {
 857		err = PTR_ERR(mr->umem);
 858		goto err;
 859	}
 860
 861	n = ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE);
 862
 863	mr->mtt = mthca_alloc_mtt(dev, n);
 864	if (IS_ERR(mr->mtt)) {
 865		err = PTR_ERR(mr->mtt);
 866		goto err_umem;
 867	}
 868
 869	pages = (u64 *) __get_free_page(GFP_KERNEL);
 870	if (!pages) {
 871		err = -ENOMEM;
 872		goto err_mtt;
 873	}
 874
 875	i = n = 0;
 876
 877	write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));
 878
 879	rdma_umem_for_each_dma_block(mr->umem, &biter, PAGE_SIZE) {
 880		pages[i++] = rdma_block_iter_dma_address(&biter);
 881
 882		/*
 883		 * Be friendly to write_mtt and pass it chunks
 884		 * of appropriate size.
 885		 */
 886		if (i == write_mtt_size) {
 887			err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
 888			if (err)
 889				goto mtt_done;
 890			n += i;
 891			i = 0;
 892		}
 893	}
 894
 895	if (i)
 896		err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
 897mtt_done:
 898	free_page((unsigned long) pages);
 899	if (err)
 900		goto err_mtt;
 901
 902	err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, PAGE_SHIFT, virt, length,
 903			     convert_access(acc), mr);
 904
 905	if (err)
 906		goto err_mtt;
 907
 908	return &mr->ibmr;
 909
 910err_mtt:
 911	mthca_free_mtt(dev, mr->mtt);
 912
 913err_umem:
 914	ib_umem_release(mr->umem);
 915
 916err:
 917	kfree(mr);
 918	return ERR_PTR(err);
 919}
 920
 921static int mthca_dereg_mr(struct ib_mr *mr, struct ib_udata *udata)
 922{
 923	struct mthca_mr *mmr = to_mmr(mr);
 924
 925	mthca_free_mr(to_mdev(mr->device), mmr);
 926	ib_umem_release(mmr->umem);
 927	kfree(mmr);
 928
 929	return 0;
 930}
 931
 932static ssize_t hw_rev_show(struct device *device,
 933			   struct device_attribute *attr, char *buf)
 934{
 935	struct mthca_dev *dev =
 936		rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);
 937
 938	return sysfs_emit(buf, "%x\n", dev->rev_id);
 939}
 940static DEVICE_ATTR_RO(hw_rev);
 941
 942static const char *hca_type_string(int hca_type)
 943{
 944	switch (hca_type) {
 945	case PCI_DEVICE_ID_MELLANOX_TAVOR:
 946		return "MT23108";
 947	case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
 948		return "MT25208 (MT23108 compat mode)";
 949	case PCI_DEVICE_ID_MELLANOX_ARBEL:
 950		return "MT25208";
 951	case PCI_DEVICE_ID_MELLANOX_SINAI:
 952	case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
 953		return "MT25204";
 954	}
 955
 956	return "unknown";
 957}
 958
 959static ssize_t hca_type_show(struct device *device,
 960			     struct device_attribute *attr, char *buf)
 961{
 962	struct mthca_dev *dev =
 963		rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);
 964
 965	return sysfs_emit(buf, "%s\n", hca_type_string(dev->pdev->device));
 966}
 967static DEVICE_ATTR_RO(hca_type);
 968
 969static ssize_t board_id_show(struct device *device,
 970			     struct device_attribute *attr, char *buf)
 971{
 972	struct mthca_dev *dev =
 973		rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);
 974
 975	return sysfs_emit(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
 976}
 977static DEVICE_ATTR_RO(board_id);
 978
 979static struct attribute *mthca_dev_attributes[] = {
 980	&dev_attr_hw_rev.attr,
 981	&dev_attr_hca_type.attr,
 982	&dev_attr_board_id.attr,
 983	NULL
 984};
 985
 986static const struct attribute_group mthca_attr_group = {
 987	.attrs = mthca_dev_attributes,
 988};
 989
 990static int mthca_init_node_data(struct mthca_dev *dev)
 991{
 992	struct ib_smp *in_mad;
 993	struct ib_smp *out_mad;
 994	int err = -ENOMEM;
 995
 996	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
 997	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 998	if (!in_mad || !out_mad)
 999		goto out;
1000
1001	ib_init_query_mad(in_mad);
1002	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
1003
1004	err = mthca_MAD_IFC(dev, 1, 1,
1005			    1, NULL, NULL, in_mad, out_mad);
1006	if (err)
1007		goto out;
1008
1009	memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
1010
1011	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
1012
1013	err = mthca_MAD_IFC(dev, 1, 1,
1014			    1, NULL, NULL, in_mad, out_mad);
1015	if (err)
1016		goto out;
1017
1018	if (mthca_is_memfree(dev))
1019		dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
1020	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
1021
1022out:
1023	kfree(in_mad);
1024	kfree(out_mad);
1025	return err;
1026}
1027
1028static int mthca_port_immutable(struct ib_device *ibdev, u32 port_num,
1029			        struct ib_port_immutable *immutable)
1030{
1031	struct ib_port_attr attr;
1032	int err;
1033
1034	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
1035
1036	err = ib_query_port(ibdev, port_num, &attr);
1037	if (err)
1038		return err;
1039
1040	immutable->pkey_tbl_len = attr.pkey_tbl_len;
1041	immutable->gid_tbl_len = attr.gid_tbl_len;
1042	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
1043
1044	return 0;
1045}
1046
1047static void get_dev_fw_str(struct ib_device *device, char *str)
1048{
1049	struct mthca_dev *dev =
1050		container_of(device, struct mthca_dev, ib_dev);
1051	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
1052		 (int) (dev->fw_ver >> 32),
1053		 (int) (dev->fw_ver >> 16) & 0xffff,
1054		 (int) dev->fw_ver & 0xffff);
1055}
1056
1057static const struct ib_device_ops mthca_dev_ops = {
1058	.owner = THIS_MODULE,
1059	.driver_id = RDMA_DRIVER_MTHCA,
1060	.uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION,
1061	.uverbs_no_driver_id_binding = 1,
1062
1063	.alloc_pd = mthca_alloc_pd,
1064	.alloc_ucontext = mthca_alloc_ucontext,
1065	.attach_mcast = mthca_multicast_attach,
1066	.create_ah = mthca_ah_create,
1067	.create_cq = mthca_create_cq,
1068	.create_qp = mthca_create_qp,
1069	.dealloc_pd = mthca_dealloc_pd,
1070	.dealloc_ucontext = mthca_dealloc_ucontext,
1071	.dereg_mr = mthca_dereg_mr,
1072	.destroy_ah = mthca_ah_destroy,
1073	.destroy_cq = mthca_destroy_cq,
1074	.destroy_qp = mthca_destroy_qp,
1075	.detach_mcast = mthca_multicast_detach,
1076	.device_group = &mthca_attr_group,
1077	.get_dev_fw_str = get_dev_fw_str,
1078	.get_dma_mr = mthca_get_dma_mr,
1079	.get_port_immutable = mthca_port_immutable,
1080	.mmap = mthca_mmap_uar,
1081	.modify_device = mthca_modify_device,
1082	.modify_port = mthca_modify_port,
1083	.modify_qp = mthca_modify_qp,
1084	.poll_cq = mthca_poll_cq,
1085	.process_mad = mthca_process_mad,
1086	.query_ah = mthca_ah_query,
1087	.query_device = mthca_query_device,
1088	.query_gid = mthca_query_gid,
1089	.query_pkey = mthca_query_pkey,
1090	.query_port = mthca_query_port,
1091	.query_qp = mthca_query_qp,
1092	.reg_user_mr = mthca_reg_user_mr,
1093	.resize_cq = mthca_resize_cq,
1094
1095	INIT_RDMA_OBJ_SIZE(ib_ah, mthca_ah, ibah),
1096	INIT_RDMA_OBJ_SIZE(ib_cq, mthca_cq, ibcq),
1097	INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd),
1098	INIT_RDMA_OBJ_SIZE(ib_qp, mthca_qp, ibqp),
1099	INIT_RDMA_OBJ_SIZE(ib_ucontext, mthca_ucontext, ibucontext),
1100};
1101
1102static const struct ib_device_ops mthca_dev_arbel_srq_ops = {
1103	.create_srq = mthca_create_srq,
1104	.destroy_srq = mthca_destroy_srq,
1105	.modify_srq = mthca_modify_srq,
1106	.post_srq_recv = mthca_arbel_post_srq_recv,
1107	.query_srq = mthca_query_srq,
1108
1109	INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
1110};
1111
1112static const struct ib_device_ops mthca_dev_tavor_srq_ops = {
1113	.create_srq = mthca_create_srq,
1114	.destroy_srq = mthca_destroy_srq,
1115	.modify_srq = mthca_modify_srq,
1116	.post_srq_recv = mthca_tavor_post_srq_recv,
1117	.query_srq = mthca_query_srq,
1118
1119	INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
1120};
1121
1122static const struct ib_device_ops mthca_dev_arbel_ops = {
1123	.post_recv = mthca_arbel_post_receive,
1124	.post_send = mthca_arbel_post_send,
1125	.req_notify_cq = mthca_arbel_arm_cq,
1126};
1127
1128static const struct ib_device_ops mthca_dev_tavor_ops = {
1129	.post_recv = mthca_tavor_post_receive,
1130	.post_send = mthca_tavor_post_send,
1131	.req_notify_cq = mthca_tavor_arm_cq,
1132};
1133
1134int mthca_register_device(struct mthca_dev *dev)
1135{
1136	int ret;
1137
1138	ret = mthca_init_node_data(dev);
1139	if (ret)
1140		return ret;
1141
1142	dev->ib_dev.node_type            = RDMA_NODE_IB_CA;
1143	dev->ib_dev.phys_port_cnt        = dev->limits.num_ports;
1144	dev->ib_dev.num_comp_vectors     = 1;
1145	dev->ib_dev.dev.parent           = &dev->pdev->dev;
1146
1147	if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
1148		if (mthca_is_memfree(dev))
1149			ib_set_device_ops(&dev->ib_dev,
1150					  &mthca_dev_arbel_srq_ops);
1151		else
1152			ib_set_device_ops(&dev->ib_dev,
1153					  &mthca_dev_tavor_srq_ops);
1154	}
1155
1156	ib_set_device_ops(&dev->ib_dev, &mthca_dev_ops);
1157
1158	if (mthca_is_memfree(dev))
1159		ib_set_device_ops(&dev->ib_dev, &mthca_dev_arbel_ops);
1160	else
1161		ib_set_device_ops(&dev->ib_dev, &mthca_dev_tavor_ops);
1162
1163	mutex_init(&dev->cap_mask_mutex);
1164
1165	ret = ib_register_device(&dev->ib_dev, "mthca%d", &dev->pdev->dev);
1166	if (ret)
1167		return ret;
1168
1169	mthca_start_catas_poll(dev);
1170
1171	return 0;
1172}
1173
1174void mthca_unregister_device(struct mthca_dev *dev)
1175{
1176	mthca_stop_catas_poll(dev);
1177	ib_unregister_device(&dev->ib_dev);
1178}
v4.10.11 (drivers/infiniband/hw/mthca/mthca_provider.c)
   1/*
   2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
   3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   4 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
   5 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
   6 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
   7 *
   8 * This software is available to you under a choice of one of two
   9 * licenses.  You may choose to be licensed under the terms of the GNU
  10 * General Public License (GPL) Version 2, available from the file
  11 * COPYING in the main directory of this source tree, or the
  12 * OpenIB.org BSD license below:
  13 *
  14 *     Redistribution and use in source and binary forms, with or
  15 *     without modification, are permitted provided that the following
  16 *     conditions are met:
  17 *
  18 *      - Redistributions of source code must retain the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer.
  21 *
  22 *      - Redistributions in binary form must reproduce the above
  23 *        copyright notice, this list of conditions and the following
  24 *        disclaimer in the documentation and/or other materials
  25 *        provided with the distribution.
  26 *
  27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  34 * SOFTWARE.
  35 */
  36
  37#include <rdma/ib_smi.h>
  38#include <rdma/ib_umem.h>
  39#include <rdma/ib_user_verbs.h>
  40
  41#include <linux/sched.h>
  42#include <linux/slab.h>
  43#include <linux/stat.h>
  44#include <linux/mm.h>
  45#include <linux/export.h>
  46
  47#include "mthca_dev.h"
  48#include "mthca_cmd.h"
  49#include <rdma/mthca-abi.h>
  50#include "mthca_memfree.h"
  51
  52static void init_query_mad(struct ib_smp *mad)
  53{
  54	mad->base_version  = 1;
  55	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
  56	mad->class_version = 1;
  57	mad->method    	   = IB_MGMT_METHOD_GET;
  58}
  59
  60static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
  61			      struct ib_udata *uhw)
  62{
  63	struct ib_smp *in_mad  = NULL;
  64	struct ib_smp *out_mad = NULL;
  65	int err = -ENOMEM;
  66	struct mthca_dev *mdev = to_mdev(ibdev);
  67
  68	if (uhw->inlen || uhw->outlen)
  69		return -EINVAL;
  70
  71	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
  72	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
  73	if (!in_mad || !out_mad)
  74		goto out;
  75
  76	memset(props, 0, sizeof *props);
  77
  78	props->fw_ver              = mdev->fw_ver;
  79
  80	init_query_mad(in_mad);
  81	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
  82
  83	err = mthca_MAD_IFC(mdev, 1, 1,
  84			    1, NULL, NULL, in_mad, out_mad);
  85	if (err)
  86		goto out;
  87
  88	props->device_cap_flags    = mdev->device_cap_flags;
  89	props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
  90		0xffffff;
  91	props->vendor_part_id      = be16_to_cpup((__be16 *) (out_mad->data + 30));
  92	props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
  93	memcpy(&props->sys_image_guid, out_mad->data +  4, 8);
  94
  95	props->max_mr_size         = ~0ull;
  96	props->page_size_cap       = mdev->limits.page_size_cap;
  97	props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
  98	props->max_qp_wr           = mdev->limits.max_wqes;
  99	props->max_sge             = mdev->limits.max_sg;
 100	props->max_sge_rd          = props->max_sge;
 101	props->max_cq              = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
 102	props->max_cqe             = mdev->limits.max_cqes;
 103	props->max_mr              = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
 104	props->max_pd              = mdev->limits.num_pds - mdev->limits.reserved_pds;
 105	props->max_qp_rd_atom      = 1 << mdev->qp_table.rdb_shift;
 106	props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
 107	props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
 108	props->max_srq             = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
 109	props->max_srq_wr          = mdev->limits.max_srq_wqes;
 110	props->max_srq_sge         = mdev->limits.max_srq_sge;
 111	props->local_ca_ack_delay  = mdev->limits.local_ca_ack_delay;
 112	props->atomic_cap          = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
 113					IB_ATOMIC_HCA : IB_ATOMIC_NONE;
 114	props->max_pkeys           = mdev->limits.pkey_table_len;
 115	props->max_mcast_grp       = mdev->limits.num_mgms + mdev->limits.num_amgms;
 116	props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
 117	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
 118					   props->max_mcast_grp;
 119	/*
 120	 * If Sinai memory key optimization is being used, then only
 121	 * the 8-bit key portion will change.  For other HCAs, the
 122	 * unused index bits will also be used for FMR remapping.
 123	 */
 124	if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
 125		props->max_map_per_fmr = 255;
 126	else
 127		props->max_map_per_fmr =
 128			(1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;
 129
 130	err = 0;
 131 out:
 132	kfree(in_mad);
 133	kfree(out_mad);
 134	return err;
 135}
 136
 137static int mthca_query_port(struct ib_device *ibdev,
 138			    u8 port, struct ib_port_attr *props)
 139{
 140	struct ib_smp *in_mad  = NULL;
 141	struct ib_smp *out_mad = NULL;
 142	int err = -ENOMEM;
 143
 144	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
 145	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 146	if (!in_mad || !out_mad)
 147		goto out;
 148
 149	memset(props, 0, sizeof *props);
 150
 151	init_query_mad(in_mad);
 152	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
 153	in_mad->attr_mod = cpu_to_be32(port);
 154
 155	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
 156			    port, NULL, NULL, in_mad, out_mad);
 157	if (err)
 158		goto out;
 159
 160	props->lid               = be16_to_cpup((__be16 *) (out_mad->data + 16));
 161	props->lmc               = out_mad->data[34] & 0x7;
 162	props->sm_lid            = be16_to_cpup((__be16 *) (out_mad->data + 18));
 163	props->sm_sl             = out_mad->data[36] & 0xf;
 164	props->state             = out_mad->data[32] & 0xf;
 165	props->phys_state        = out_mad->data[33] >> 4;
 166	props->port_cap_flags    = be32_to_cpup((__be32 *) (out_mad->data + 20));
 167	props->gid_tbl_len       = to_mdev(ibdev)->limits.gid_table_len;
 168	props->max_msg_sz        = 0x80000000;
 169	props->pkey_tbl_len      = to_mdev(ibdev)->limits.pkey_table_len;
 170	props->bad_pkey_cntr     = be16_to_cpup((__be16 *) (out_mad->data + 46));
 171	props->qkey_viol_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 48));
 172	props->active_width      = out_mad->data[31] & 0xf;
 173	props->active_speed      = out_mad->data[35] >> 4;
 174	props->max_mtu           = out_mad->data[41] & 0xf;
 175	props->active_mtu        = out_mad->data[36] >> 4;
 176	props->subnet_timeout    = out_mad->data[51] & 0x1f;
 177	props->max_vl_num        = out_mad->data[37] >> 4;
 178	props->init_type_reply   = out_mad->data[41] >> 4;
 179
 180 out:
 181	kfree(in_mad);
 182	kfree(out_mad);
 183	return err;
 184}
 185
 186static int mthca_modify_device(struct ib_device *ibdev,
 187			       int mask,
 188			       struct ib_device_modify *props)
 189{
 190	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
 191		return -EOPNOTSUPP;
 192
 193	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
 194		if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
 195			return -ERESTARTSYS;
 196		memcpy(ibdev->node_desc, props->node_desc,
 197		       IB_DEVICE_NODE_DESC_MAX);
 198		mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
 199	}
 200
 201	return 0;
 202}
 203
 204static int mthca_modify_port(struct ib_device *ibdev,
 205			     u8 port, int port_modify_mask,
 206			     struct ib_port_modify *props)
 207{
 208	struct mthca_set_ib_param set_ib;
 209	struct ib_port_attr attr;
 210	int err;
 211
 212	if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
 213		return -ERESTARTSYS;
 214
 215	err = mthca_query_port(ibdev, port, &attr);
 216	if (err)
 217		goto out;
 218
 219	set_ib.set_si_guid     = 0;
 220	set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);
 221
 222	set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
 223		~props->clr_port_cap_mask;
 224
 225	err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port);
 226	if (err)
 227		goto out;
 228out:
 229	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
 230	return err;
 231}
 232
 233static int mthca_query_pkey(struct ib_device *ibdev,
 234			    u8 port, u16 index, u16 *pkey)
 235{
 236	struct ib_smp *in_mad  = NULL;
 237	struct ib_smp *out_mad = NULL;
 238	int err = -ENOMEM;
 239
 240	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
 241	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 242	if (!in_mad || !out_mad)
 243		goto out;
 244
 245	init_query_mad(in_mad);
 246	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
 247	in_mad->attr_mod = cpu_to_be32(index / 32);
 248
 249	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
 250			    port, NULL, NULL, in_mad, out_mad);
 251	if (err)
 252		goto out;
 253
 254	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
 255
 256 out:
 257	kfree(in_mad);
 258	kfree(out_mad);
 259	return err;
 260}
 261
 262static int mthca_query_gid(struct ib_device *ibdev, u8 port,
 263			   int index, union ib_gid *gid)
 264{
 265	struct ib_smp *in_mad  = NULL;
 266	struct ib_smp *out_mad = NULL;
 267	int err = -ENOMEM;
 268
 269	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
 270	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 271	if (!in_mad || !out_mad)
 272		goto out;
 273
 274	init_query_mad(in_mad);
 275	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
 276	in_mad->attr_mod = cpu_to_be32(port);
 277
 278	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
 279			    port, NULL, NULL, in_mad, out_mad);
 280	if (err)
 281		goto out;
 282
 283	memcpy(gid->raw, out_mad->data + 8, 8);
 284
 285	init_query_mad(in_mad);
 286	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
 287	in_mad->attr_mod = cpu_to_be32(index / 8);
 288
 289	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
 290			    port, NULL, NULL, in_mad, out_mad);
 291	if (err)
 292		goto out;
 293
 294	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
 295
 296 out:
 297	kfree(in_mad);
 298	kfree(out_mad);
 299	return err;
 300}
 301
 302static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
 303						struct ib_udata *udata)
 304{
 305	struct mthca_alloc_ucontext_resp uresp;
 306	struct mthca_ucontext           *context;
 307	int                              err;
 308
 309	if (!(to_mdev(ibdev)->active))
 310		return ERR_PTR(-EAGAIN);
 311
 312	memset(&uresp, 0, sizeof uresp);
 313
 314	uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
 315	if (mthca_is_memfree(to_mdev(ibdev)))
 316		uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
 317	else
 318		uresp.uarc_size = 0;
 319
 320	context = kmalloc(sizeof *context, GFP_KERNEL);
 321	if (!context)
 322		return ERR_PTR(-ENOMEM);
 323
 324	err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
 325	if (err) {
 326		kfree(context);
 327		return ERR_PTR(err);
 328	}
 329
 330	context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
 331	if (IS_ERR(context->db_tab)) {
 332		err = PTR_ERR(context->db_tab);
 333		mthca_uar_free(to_mdev(ibdev), &context->uar);
 334		kfree(context);
 335		return ERR_PTR(err);
 336	}
 337
 338	if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
 339		mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
 340		mthca_uar_free(to_mdev(ibdev), &context->uar);
 341		kfree(context);
 342		return ERR_PTR(-EFAULT);
 343	}
 344
 345	context->reg_mr_warned = 0;
 346
 347	return &context->ibucontext;
 348}
 349
 350static int mthca_dealloc_ucontext(struct ib_ucontext *context)
 351{
 352	mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
 353				  to_mucontext(context)->db_tab);
 354	mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
 355	kfree(to_mucontext(context));
 356
 357	return 0;
 358}
 359
 360static int mthca_mmap_uar(struct ib_ucontext *context,
 361			  struct vm_area_struct *vma)
 362{
 363	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
 364		return -EINVAL;
 365
 366	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 367
 368	if (io_remap_pfn_range(vma, vma->vm_start,
 369			       to_mucontext(context)->uar.pfn,
 370			       PAGE_SIZE, vma->vm_page_prot))
 371		return -EAGAIN;
 372
 373	return 0;
 374}
 375
 376static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
 377				    struct ib_ucontext *context,
 378				    struct ib_udata *udata)
 379{
 380	struct mthca_pd *pd;
 381	int err;
 382
 383	pd = kmalloc(sizeof *pd, GFP_KERNEL);
 384	if (!pd)
 385		return ERR_PTR(-ENOMEM);
 386
 387	err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
 388	if (err) {
 389		kfree(pd);
 390		return ERR_PTR(err);
 391	}
 392
 393	if (context) {
 394		if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
 395			mthca_pd_free(to_mdev(ibdev), pd);
 396			kfree(pd);
 397			return ERR_PTR(-EFAULT);
 398		}
 399	}
 400
 401	return &pd->ibpd;
 402}
 403
 404static int mthca_dealloc_pd(struct ib_pd *pd)
 405{
 406	mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
 407	kfree(pd);
 408
 409	return 0;
 410}
 411
 412static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
 413				     struct ib_ah_attr *ah_attr,
 414				     struct ib_udata *udata)
 415
 416{
 417	int err;
 418	struct mthca_ah *ah;
 419
 420	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
 421	if (!ah)
 422		return ERR_PTR(-ENOMEM);
 423
 424	err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
 425	if (err) {
 426		kfree(ah);
 427		return ERR_PTR(err);
 428	}
 429
 430	return &ah->ibah;
 431}
 432
 433static int mthca_ah_destroy(struct ib_ah *ah)
 434{
 435	mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
 436	kfree(ah);
 437
 438	return 0;
 439}
 440
 441static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
 442				       struct ib_srq_init_attr *init_attr,
 443				       struct ib_udata *udata)
 444{
 445	struct mthca_create_srq ucmd;
 446	struct mthca_ucontext *context = NULL;
 447	struct mthca_srq *srq;
 448	int err;
 449
 450	if (init_attr->srq_type != IB_SRQT_BASIC)
 451		return ERR_PTR(-ENOSYS);
 452
 453	srq = kmalloc(sizeof *srq, GFP_KERNEL);
 454	if (!srq)
 455		return ERR_PTR(-ENOMEM);
 456
 457	if (pd->uobject) {
 458		context = to_mucontext(pd->uobject->context);
 459
 460		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
 461			err = -EFAULT;
 462			goto err_free;
 463		}
 464
 465		err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
 466					context->db_tab, ucmd.db_index,
 467					ucmd.db_page);
 468
 469		if (err)
 470			goto err_free;
 471
 472		srq->mr.ibmr.lkey = ucmd.lkey;
 473		srq->db_index     = ucmd.db_index;
 474	}
 475
 476	err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
 477			      &init_attr->attr, srq);
 478
 479	if (err && pd->uobject)
 480		mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
 481				    context->db_tab, ucmd.db_index);
 482
 483	if (err)
 484		goto err_free;
 485
 486	if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
 487		mthca_free_srq(to_mdev(pd->device), srq);
 488		err = -EFAULT;
 489		goto err_free;
 490	}
 491
 492	return &srq->ibsrq;
 493
 494err_free:
 495	kfree(srq);
 496
 497	return ERR_PTR(err);
 498}
 499
 500static int mthca_destroy_srq(struct ib_srq *srq)
 501{
 502	struct mthca_ucontext *context;
 503
 504	if (srq->uobject) {
 505		context = to_mucontext(srq->uobject->context);
 506
 507		mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
 508				    context->db_tab, to_msrq(srq)->db_index);
 509	}
 510
 511	mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
 512	kfree(srq);
 513
 514	return 0;
 515}
 516
 517static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
 518				     struct ib_qp_init_attr *init_attr,
 519				     struct ib_udata *udata)
 520{
 521	struct mthca_create_qp ucmd;
 522	struct mthca_qp *qp;
 523	int err;
 524
 525	if (init_attr->create_flags)
 526		return ERR_PTR(-EINVAL);
 527
 528	switch (init_attr->qp_type) {
 529	case IB_QPT_RC:
 530	case IB_QPT_UC:
 531	case IB_QPT_UD:
 532	{
 533		struct mthca_ucontext *context;
 534
 535		qp = kmalloc(sizeof *qp, GFP_KERNEL);
 536		if (!qp)
 537			return ERR_PTR(-ENOMEM);
 538
 539		if (pd->uobject) {
 540			context = to_mucontext(pd->uobject->context);
 541
 542			if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
 543				kfree(qp);
 544				return ERR_PTR(-EFAULT);
 545			}
 546
 547			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
 548						context->db_tab,
 549						ucmd.sq_db_index, ucmd.sq_db_page);
 550			if (err) {
 551				kfree(qp);
 552				return ERR_PTR(err);
 553			}
 554
 555			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
 556						context->db_tab,
 557						ucmd.rq_db_index, ucmd.rq_db_page);
 558			if (err) {
 559				mthca_unmap_user_db(to_mdev(pd->device),
 560						    &context->uar,
 561						    context->db_tab,
 562						    ucmd.sq_db_index);
 563				kfree(qp);
 564				return ERR_PTR(err);
 565			}
 566
 567			qp->mr.ibmr.lkey = ucmd.lkey;
 568			qp->sq.db_index  = ucmd.sq_db_index;
 569			qp->rq.db_index  = ucmd.rq_db_index;
 570		}
 571
 572		err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
 573				     to_mcq(init_attr->send_cq),
 574				     to_mcq(init_attr->recv_cq),
 575				     init_attr->qp_type, init_attr->sq_sig_type,
 576				     &init_attr->cap, qp);
 577
 578		if (err && pd->uobject) {
 579			context = to_mucontext(pd->uobject->context);
 580
 581			mthca_unmap_user_db(to_mdev(pd->device),
 582					    &context->uar,
 583					    context->db_tab,
 584					    ucmd.sq_db_index);
 585			mthca_unmap_user_db(to_mdev(pd->device),
 586					    &context->uar,
 587					    context->db_tab,
 588					    ucmd.rq_db_index);
 589		}
 590
 591		qp->ibqp.qp_num = qp->qpn;
 592		break;
 593	}
 594	case IB_QPT_SMI:
 595	case IB_QPT_GSI:
 596	{
 597		/* Don't allow userspace to create special QPs */
 598		if (pd->uobject)
 599			return ERR_PTR(-EINVAL);
 600
 601		qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
 602		if (!qp)
 603			return ERR_PTR(-ENOMEM);
 604
 605		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
 606
 607		err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
 608				      to_mcq(init_attr->send_cq),
 609				      to_mcq(init_attr->recv_cq),
 610				      init_attr->sq_sig_type, &init_attr->cap,
 611				      qp->ibqp.qp_num, init_attr->port_num,
 612				      to_msqp(qp));
 613		break;
 614	}
 615	default:
 616		/* Don't support raw QPs */
 617		return ERR_PTR(-ENOSYS);
 618	}
 619
 620	if (err) {
 621		kfree(qp);
 622		return ERR_PTR(err);
 623	}
 624
 625	init_attr->cap.max_send_wr     = qp->sq.max;
 626	init_attr->cap.max_recv_wr     = qp->rq.max;
 627	init_attr->cap.max_send_sge    = qp->sq.max_gs;
 628	init_attr->cap.max_recv_sge    = qp->rq.max_gs;
 629	init_attr->cap.max_inline_data = qp->max_inline_data;
 630
 631	return &qp->ibqp;
 632}
 633
 634static int mthca_destroy_qp(struct ib_qp *qp)
 635{
 636	if (qp->uobject) {
 637		mthca_unmap_user_db(to_mdev(qp->device),
 638				    &to_mucontext(qp->uobject->context)->uar,
 639				    to_mucontext(qp->uobject->context)->db_tab,
 640				    to_mqp(qp)->sq.db_index);
 641		mthca_unmap_user_db(to_mdev(qp->device),
 642				    &to_mucontext(qp->uobject->context)->uar,
 643				    to_mucontext(qp->uobject->context)->db_tab,
 644				    to_mqp(qp)->rq.db_index);
 645	}
 646	mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
 647	kfree(qp);
 648	return 0;
 649}
 650
 651static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
 652				     const struct ib_cq_init_attr *attr,
 653				     struct ib_ucontext *context,
 654				     struct ib_udata *udata)
 655{
 656	int entries = attr->cqe;
 657	struct mthca_create_cq ucmd;
 658	struct mthca_cq *cq;
 659	int nent;
 660	int err;
 661
 662	if (attr->flags)
 663		return ERR_PTR(-EINVAL);
 664
 665	if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
 666		return ERR_PTR(-EINVAL);
 667
 668	if (context) {
 669		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
 670			return ERR_PTR(-EFAULT);
 671
 672		err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
 673					to_mucontext(context)->db_tab,
 674					ucmd.set_db_index, ucmd.set_db_page);
 675		if (err)
 676			return ERR_PTR(err);
 677
 678		err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
 679					to_mucontext(context)->db_tab,
 680					ucmd.arm_db_index, ucmd.arm_db_page);
 681		if (err)
 682			goto err_unmap_set;
 683	}
 684
 685	cq = kmalloc(sizeof *cq, GFP_KERNEL);
 686	if (!cq) {
 687		err = -ENOMEM;
 688		goto err_unmap_arm;
 689	}
 690
 691	if (context) {
 692		cq->buf.mr.ibmr.lkey = ucmd.lkey;
 693		cq->set_ci_db_index  = ucmd.set_db_index;
 694		cq->arm_db_index     = ucmd.arm_db_index;
 695	}
 696
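	/*
	 * Round the CQ size up to the next power of two greater than the
	 * requested number of entries; the CQ size is programmed to the
	 * hardware as a log2 value.
	 */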
 697	for (nent = 1; nent <= entries; nent <<= 1)
 698		; /* nothing */
 699
 700	err = mthca_init_cq(to_mdev(ibdev), nent,
 701			    context ? to_mucontext(context) : NULL,
 702			    context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
 703			    cq);
 704	if (err)
 705		goto err_free;
 706
 707	if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
 708		mthca_free_cq(to_mdev(ibdev), cq);
 709		err = -EFAULT;
 710		goto err_free;
 711	}
 712
 713	cq->resize_buf = NULL;
 714
 715	return &cq->ibcq;
 716
 717err_free:
 718	kfree(cq);
 719
 720err_unmap_arm:
 721	if (context)
 722		mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
 723				    to_mucontext(context)->db_tab, ucmd.arm_db_index);
 724
 725err_unmap_set:
 726	if (context)
 727		mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
 728				    to_mucontext(context)->db_tab, ucmd.set_db_index);
 729
 730	return ERR_PTR(err);
 731}
 732
 733static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
 734				  int entries)
 735{
 736	int ret;
 737
 738	spin_lock_irq(&cq->lock);
 739	if (cq->resize_buf) {
 740		ret = -EBUSY;
 741		goto unlock;
 742	}
 743
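	/*
	 * cq->lock is held with interrupts disabled here, so the small
	 * resize_buf tracking structure is allocated with GFP_ATOMIC;
	 * the actual CQE buffer is allocated below after dropping the lock.
	 */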
 744	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
 745	if (!cq->resize_buf) {
 746		ret = -ENOMEM;
 747		goto unlock;
 748	}
 749
 750	cq->resize_buf->state = CQ_RESIZE_ALLOC;
 751
 752	ret = 0;
 753
 754unlock:
 755	spin_unlock_irq(&cq->lock);
 756
 757	if (ret)
 758		return ret;
 759
 760	ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
 761	if (ret) {
 762		spin_lock_irq(&cq->lock);
 763		kfree(cq->resize_buf);
 764		cq->resize_buf = NULL;
 765		spin_unlock_irq(&cq->lock);
 766		return ret;
 767	}
 768
 769	cq->resize_buf->cqe = entries - 1;
 770
 771	spin_lock_irq(&cq->lock);
 772	cq->resize_buf->state = CQ_RESIZE_READY;
 773	spin_unlock_irq(&cq->lock);
 774
 775	return 0;
 776}
 777
 778static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 779{
 780	struct mthca_dev *dev = to_mdev(ibcq->device);
 781	struct mthca_cq *cq = to_mcq(ibcq);
 782	struct mthca_resize_cq ucmd;
 783	u32 lkey;
 784	int ret;
 785
 786	if (entries < 1 || entries > dev->limits.max_cqes)
 787		return -EINVAL;
 788
 789	mutex_lock(&cq->mutex);
 790
 791	entries = roundup_pow_of_two(entries + 1);
 792	if (entries == ibcq->cqe + 1) {
 793		ret = 0;
 794		goto out;
 795	}
 796
 797	if (cq->is_kernel) {
 798		ret = mthca_alloc_resize_buf(dev, cq, entries);
 799		if (ret)
 800			goto out;
 801		lkey = cq->resize_buf->buf.mr.ibmr.lkey;
 802	} else {
 803		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
 804			ret = -EFAULT;
 805			goto out;
 806		}
 807		lkey = ucmd.lkey;
 808	}
 809
 810	ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries));
 811
 812	if (ret) {
 813		if (cq->resize_buf) {
 814			mthca_free_cq_buf(dev, &cq->resize_buf->buf,
 815					  cq->resize_buf->cqe);
 816			kfree(cq->resize_buf);
 817			spin_lock_irq(&cq->lock);
 818			cq->resize_buf = NULL;
 819			spin_unlock_irq(&cq->lock);
 820		}
 821		goto out;
 822	}
 823
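	/*
	 * For a kernel CQ, the switch to the new buffer may already have
	 * been done elsewhere under cq->lock (in which case resize_buf now
	 * holds the retired buffer); if the state is still CQ_RESIZE_READY,
	 * copy the unpolled CQEs and switch buffers here.  Either way, free
	 * the leftover buffer once the lock is dropped.
	 */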
 824	if (cq->is_kernel) {
 825		struct mthca_cq_buf tbuf;
 826		int tcqe;
 827
 828		spin_lock_irq(&cq->lock);
 829		if (cq->resize_buf->state == CQ_RESIZE_READY) {
 830			mthca_cq_resize_copy_cqes(cq);
 831			tbuf         = cq->buf;
 832			tcqe         = cq->ibcq.cqe;
 833			cq->buf      = cq->resize_buf->buf;
 834			cq->ibcq.cqe = cq->resize_buf->cqe;
 835		} else {
 836			tbuf = cq->resize_buf->buf;
 837			tcqe = cq->resize_buf->cqe;
 838		}
 839
 840		kfree(cq->resize_buf);
 841		cq->resize_buf = NULL;
 842		spin_unlock_irq(&cq->lock);
 843
 844		mthca_free_cq_buf(dev, &tbuf, tcqe);
 845	} else
 846		ibcq->cqe = entries - 1;
 847
 848out:
 849	mutex_unlock(&cq->mutex);
 850
 851	return ret;
 852}
 853
 854static int mthca_destroy_cq(struct ib_cq *cq)
 855{
 856	if (cq->uobject) {
 857		mthca_unmap_user_db(to_mdev(cq->device),
 858				    &to_mucontext(cq->uobject->context)->uar,
 859				    to_mucontext(cq->uobject->context)->db_tab,
 860				    to_mcq(cq)->arm_db_index);
 861		mthca_unmap_user_db(to_mdev(cq->device),
 862				    &to_mucontext(cq->uobject->context)->uar,
 863				    to_mucontext(cq->uobject->context)->db_tab,
 864				    to_mcq(cq)->set_ci_db_index);
 865	}
 866	mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
 867	kfree(cq);
 868
 869	return 0;
 870}
 871
 872static inline u32 convert_access(int acc)
 873{
 874	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
 875	       (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
 876	       (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
 877	       (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
 878	       MTHCA_MPT_FLAG_LOCAL_READ;
 879}
 880
 881static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
 882{
 883	struct mthca_mr *mr;
 884	int err;
 885
 886	mr = kmalloc(sizeof *mr, GFP_KERNEL);
 887	if (!mr)
 888		return ERR_PTR(-ENOMEM);
 889
 890	err = mthca_mr_alloc_notrans(to_mdev(pd->device),
 891				     to_mpd(pd)->pd_num,
 892				     convert_access(acc), mr);
 893
 894	if (err) {
 895		kfree(mr);
 896		return ERR_PTR(err);
 897	}
 898
 899	mr->umem = NULL;
 900
 901	return &mr->ibmr;
 902}
 903
 904static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 905				       u64 virt, int acc, struct ib_udata *udata)
 906{
 907	struct mthca_dev *dev = to_mdev(pd->device);
 908	struct scatterlist *sg;
 909	struct mthca_mr *mr;
 910	struct mthca_reg_mr ucmd;
 911	u64 *pages;
 912	int shift, n, len;
 913	int i, k, entry;
 914	int err = 0;
 915	int write_mtt_size;
 916
 917	if (udata->inlen - sizeof (struct ib_uverbs_cmd_hdr) < sizeof ucmd) {
 918		if (!to_mucontext(pd->uobject->context)->reg_mr_warned) {
 919			mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
 920				   current->comm);
 921			mthca_warn(dev, "  Update libmthca to fix this.\n");
 922		}
 923		++to_mucontext(pd->uobject->context)->reg_mr_warned;
 924		ucmd.mr_attrs = 0;
 925	} else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
 926		return ERR_PTR(-EFAULT);
 927
 928	mr = kmalloc(sizeof *mr, GFP_KERNEL);
 929	if (!mr)
 930		return ERR_PTR(-ENOMEM);
 931
 932	mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
 933			       ucmd.mr_attrs & MTHCA_MR_DMASYNC);
 934
 935	if (IS_ERR(mr->umem)) {
 936		err = PTR_ERR(mr->umem);
 937		goto err;
 938	}
 939
 940	shift = ffs(mr->umem->page_size) - 1;
 941	n = mr->umem->nmap;
 942
 943	mr->mtt = mthca_alloc_mtt(dev, n);
 944	if (IS_ERR(mr->mtt)) {
 945		err = PTR_ERR(mr->mtt);
 946		goto err_umem;
 947	}
 948
 949	pages = (u64 *) __get_free_page(GFP_KERNEL);
 950	if (!pages) {
 951		err = -ENOMEM;
 952		goto err_mtt;
 953	}
 954
 955	i = n = 0;
 956
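	/*
	 * Write the MTT in chunks no larger than mthca_write_mtt() reports
	 * it can handle at once, and no larger than what fits in the
	 * scratch page allocated above.
	 */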
 957	write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));
 958
 959	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
 960		len = sg_dma_len(sg) >> shift;
 961		for (k = 0; k < len; ++k) {
 962			pages[i++] = sg_dma_address(sg) +
 963				mr->umem->page_size * k;
 964			/*
 965			 * Be friendly to write_mtt and pass it chunks
 966			 * of appropriate size.
 967			 */
 968			if (i == write_mtt_size) {
 969				err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
 970				if (err)
 971					goto mtt_done;
 972				n += i;
 973				i = 0;
 974			}
 975		}
 976	}
 977
 978	if (i)
 979		err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
 980mtt_done:
 981	free_page((unsigned long) pages);
 982	if (err)
 983		goto err_mtt;
 984
 985	err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length,
 986			     convert_access(acc), mr);
 987
 988	if (err)
 989		goto err_mtt;
 990
 991	return &mr->ibmr;
 992
 993err_mtt:
 994	mthca_free_mtt(dev, mr->mtt);
 995
 996err_umem:
 997	ib_umem_release(mr->umem);
 998
 999err:
1000	kfree(mr);
1001	return ERR_PTR(err);
1002}
1003
1004static int mthca_dereg_mr(struct ib_mr *mr)
1005{
1006	struct mthca_mr *mmr = to_mmr(mr);
1007
1008	mthca_free_mr(to_mdev(mr->device), mmr);
1009	if (mmr->umem)
1010		ib_umem_release(mmr->umem);
1011	kfree(mmr);
1012
1013	return 0;
1014}
1015
1016static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
1017				      struct ib_fmr_attr *fmr_attr)
1018{
1019	struct mthca_fmr *fmr;
1020	int err;
1021
1022	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
1023	if (!fmr)
1024		return ERR_PTR(-ENOMEM);
1025
1026	memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
1027	err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
1028			     convert_access(mr_access_flags), fmr);
1029
1030	if (err) {
1031		kfree(fmr);
1032		return ERR_PTR(err);
1033	}
1034
1035	return &fmr->ibmr;
1036}
1037
1038static int mthca_dealloc_fmr(struct ib_fmr *fmr)
1039{
1040	struct mthca_fmr *mfmr = to_mfmr(fmr);
1041	int err;
1042
1043	err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
1044	if (err)
1045		return err;
1046
1047	kfree(mfmr);
1048	return 0;
1049}
1050
1051static int mthca_unmap_fmr(struct list_head *fmr_list)
1052{
1053	struct ib_fmr *fmr;
1054	int err;
1055	struct mthca_dev *mdev = NULL;
1056
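	/*
	 * All FMRs on the list must belong to the same device, since the
	 * SYNC_TPT command below is issued against a single HCA.
	 */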
1057	list_for_each_entry(fmr, fmr_list, list) {
1058		if (mdev && to_mdev(fmr->device) != mdev)
1059			return -EINVAL;
1060		mdev = to_mdev(fmr->device);
1061	}
1062
1063	if (!mdev)
1064		return 0;
1065
1066	if (mthca_is_memfree(mdev)) {
1067		list_for_each_entry(fmr, fmr_list, list)
1068			mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));
1069
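		/*
		 * Make sure the FMR unmap writes above are ordered before
		 * the SYNC_TPT command issued below.
		 */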
1070		wmb();
1071	} else
1072		list_for_each_entry(fmr, fmr_list, list)
1073			mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));
1074
1075	err = mthca_SYNC_TPT(mdev);
1076	return err;
1077}
1078
1079static ssize_t show_rev(struct device *device, struct device_attribute *attr,
1080			char *buf)
1081{
1082	struct mthca_dev *dev =
1083		container_of(device, struct mthca_dev, ib_dev.dev);
1084	return sprintf(buf, "%x\n", dev->rev_id);
1085}
1086
1087static ssize_t show_hca(struct device *device, struct device_attribute *attr,
1088			char *buf)
1089{
1090	struct mthca_dev *dev =
1091		container_of(device, struct mthca_dev, ib_dev.dev);
1092	switch (dev->pdev->device) {
1093	case PCI_DEVICE_ID_MELLANOX_TAVOR:
1094		return sprintf(buf, "MT23108\n");
1095	case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
1096		return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
1097	case PCI_DEVICE_ID_MELLANOX_ARBEL:
1098		return sprintf(buf, "MT25208\n");
1099	case PCI_DEVICE_ID_MELLANOX_SINAI:
1100	case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
1101		return sprintf(buf, "MT25204\n");
1102	default:
1103		return sprintf(buf, "unknown\n");
1104	}
1105}
1106
1107static ssize_t show_board(struct device *device, struct device_attribute *attr,
1108			  char *buf)
1109{
1110	struct mthca_dev *dev =
1111		container_of(device, struct mthca_dev, ib_dev.dev);
1112	return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
1113}
1114
1115static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
1116static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
1117static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
1118
1119static struct device_attribute *mthca_dev_attributes[] = {
1120	&dev_attr_hw_rev,
1121	&dev_attr_hca_type,
1122	&dev_attr_board_id
1123};
1124
1125static int mthca_init_node_data(struct mthca_dev *dev)
1126{
1127	struct ib_smp *in_mad  = NULL;
1128	struct ib_smp *out_mad = NULL;
1129	int err = -ENOMEM;
1130
1131	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
1132	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
1133	if (!in_mad || !out_mad)
1134		goto out;
1135
 1136	ib_init_query_mad(in_mad);
1137	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
1138
1139	err = mthca_MAD_IFC(dev, 1, 1,
1140			    1, NULL, NULL, in_mad, out_mad);
1141	if (err)
1142		goto out;
1143
1144	memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
1145
1146	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
1147
1148	err = mthca_MAD_IFC(dev, 1, 1,
1149			    1, NULL, NULL, in_mad, out_mad);
1150	if (err)
1151		goto out;
1152
1153	if (mthca_is_memfree(dev))
1154		dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
1155	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
1156
1157out:
1158	kfree(in_mad);
1159	kfree(out_mad);
1160	return err;
1161}
1162
1163static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num,
1164			        struct ib_port_immutable *immutable)
1165{
1166	struct ib_port_attr attr;
1167	int err;
1168
1169	err = mthca_query_port(ibdev, port_num, &attr);
1170	if (err)
1171		return err;
1172
1173	immutable->pkey_tbl_len = attr.pkey_tbl_len;
1174	immutable->gid_tbl_len = attr.gid_tbl_len;
1175	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
1176	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
1177
1178	return 0;
1179}
1180
1181static void get_dev_fw_str(struct ib_device *device, char *str,
1182			   size_t str_len)
1183{
1184	struct mthca_dev *dev =
1185		container_of(device, struct mthca_dev, ib_dev);
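	/* fw_ver packs major:minor:subminor into 32:16:16 bits. */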
1186	snprintf(str, str_len, "%d.%d.%d",
1187		 (int) (dev->fw_ver >> 32),
1188		 (int) (dev->fw_ver >> 16) & 0xffff,
1189		 (int) dev->fw_ver & 0xffff);
1190}
1191
1192int mthca_register_device(struct mthca_dev *dev)
1193{
1194	int ret;
1195	int i;
1196
1197	ret = mthca_init_node_data(dev);
1198	if (ret)
1199		return ret;
1200
1201	strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
1202	dev->ib_dev.owner                = THIS_MODULE;
1203
1204	dev->ib_dev.uverbs_abi_ver	 = MTHCA_UVERBS_ABI_VERSION;
1205	dev->ib_dev.uverbs_cmd_mask	 =
1206		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
1207		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
1208		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
1209		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
1210		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
1211		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
1212		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
1213		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
1214		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
1215		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
1216		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
1217		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
1218		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
1219		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
1220		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
1221		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
1222		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
1223	dev->ib_dev.node_type            = RDMA_NODE_IB_CA;
1224	dev->ib_dev.phys_port_cnt        = dev->limits.num_ports;
1225	dev->ib_dev.num_comp_vectors     = 1;
1226	dev->ib_dev.dma_device           = &dev->pdev->dev;
1227	dev->ib_dev.query_device         = mthca_query_device;
1228	dev->ib_dev.query_port           = mthca_query_port;
1229	dev->ib_dev.modify_device        = mthca_modify_device;
1230	dev->ib_dev.modify_port          = mthca_modify_port;
1231	dev->ib_dev.query_pkey           = mthca_query_pkey;
1232	dev->ib_dev.query_gid            = mthca_query_gid;
1233	dev->ib_dev.alloc_ucontext       = mthca_alloc_ucontext;
1234	dev->ib_dev.dealloc_ucontext     = mthca_dealloc_ucontext;
1235	dev->ib_dev.mmap                 = mthca_mmap_uar;
1236	dev->ib_dev.alloc_pd             = mthca_alloc_pd;
1237	dev->ib_dev.dealloc_pd           = mthca_dealloc_pd;
1238	dev->ib_dev.create_ah            = mthca_ah_create;
1239	dev->ib_dev.query_ah             = mthca_ah_query;
1240	dev->ib_dev.destroy_ah           = mthca_ah_destroy;
1241
1242	if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
1243		dev->ib_dev.create_srq           = mthca_create_srq;
1244		dev->ib_dev.modify_srq           = mthca_modify_srq;
1245		dev->ib_dev.query_srq            = mthca_query_srq;
1246		dev->ib_dev.destroy_srq          = mthca_destroy_srq;
1247		dev->ib_dev.uverbs_cmd_mask	|=
1248			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
1249			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
1250			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
1251			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
1252
1253		if (mthca_is_memfree(dev))
1254			dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
1255		else
1256			dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
1257	}
1258
1259	dev->ib_dev.create_qp            = mthca_create_qp;
1260	dev->ib_dev.modify_qp            = mthca_modify_qp;
1261	dev->ib_dev.query_qp             = mthca_query_qp;
1262	dev->ib_dev.destroy_qp           = mthca_destroy_qp;
1263	dev->ib_dev.create_cq            = mthca_create_cq;
1264	dev->ib_dev.resize_cq            = mthca_resize_cq;
1265	dev->ib_dev.destroy_cq           = mthca_destroy_cq;
1266	dev->ib_dev.poll_cq              = mthca_poll_cq;
1267	dev->ib_dev.get_dma_mr           = mthca_get_dma_mr;
1268	dev->ib_dev.reg_user_mr          = mthca_reg_user_mr;
1269	dev->ib_dev.dereg_mr             = mthca_dereg_mr;
1270	dev->ib_dev.get_port_immutable   = mthca_port_immutable;
1271	dev->ib_dev.get_dev_fw_str       = get_dev_fw_str;
1272
1273	if (dev->mthca_flags & MTHCA_FLAG_FMR) {
1274		dev->ib_dev.alloc_fmr            = mthca_alloc_fmr;
1275		dev->ib_dev.unmap_fmr            = mthca_unmap_fmr;
1276		dev->ib_dev.dealloc_fmr          = mthca_dealloc_fmr;
1277		if (mthca_is_memfree(dev))
1278			dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
1279		else
1280			dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
1281	}
1282
1283	dev->ib_dev.attach_mcast         = mthca_multicast_attach;
1284	dev->ib_dev.detach_mcast         = mthca_multicast_detach;
1285	dev->ib_dev.process_mad          = mthca_process_mad;
1286
1287	if (mthca_is_memfree(dev)) {
1288		dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
1289		dev->ib_dev.post_send     = mthca_arbel_post_send;
1290		dev->ib_dev.post_recv     = mthca_arbel_post_receive;
1291	} else {
1292		dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
1293		dev->ib_dev.post_send     = mthca_tavor_post_send;
1294		dev->ib_dev.post_recv     = mthca_tavor_post_receive;
1295	}
1296
1297	mutex_init(&dev->cap_mask_mutex);
1298
1299	ret = ib_register_device(&dev->ib_dev, NULL);
1300	if (ret)
1301		return ret;
1302
1303	for (i = 0; i < ARRAY_SIZE(mthca_dev_attributes); ++i) {
1304		ret = device_create_file(&dev->ib_dev.dev,
1305					 mthca_dev_attributes[i]);
1306		if (ret) {
1307			ib_unregister_device(&dev->ib_dev);
1308			return ret;
1309		}
1310	}
1311
1312	mthca_start_catas_poll(dev);
1313
1314	return 0;
1315}
1316
1317void mthca_unregister_device(struct mthca_dev *dev)
1318{
1319	mthca_stop_catas_poll(dev);
1320	ib_unregister_device(&dev->ib_dev);
1321}