Note: File does not exist in v6.8.
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include "i40iw.h"

/**
 * i40iw_arp_table - manage arp table
 * @iwdev: iwarp device
 * @ip_addr: ip address for device
 * @ipv4: flag indicating IPv4 when true
 * @mac_addr: mac address ptr
 * @action: modify, delete or add
 */
int i40iw_arp_table(struct i40iw_device *iwdev,
		    u32 *ip_addr,
		    bool ipv4,
		    u8 *mac_addr,
		    u32 action)
{
	int arp_index;
	int err;
	u32 ip[4];

	if (ipv4) {
		memset(ip, 0, sizeof(ip));
		ip[0] = *ip_addr;
	} else {
		memcpy(ip, ip_addr, sizeof(ip));
	}

	for (arp_index = 0; (u32)arp_index < iwdev->arp_table_size; arp_index++)
		if (memcmp(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip)) == 0)
			break;
	switch (action) {
	case I40IW_ARP_ADD:
		if (arp_index != iwdev->arp_table_size)
			return -1;

		arp_index = 0;
		err = i40iw_alloc_resource(iwdev, iwdev->allocated_arps,
					   iwdev->arp_table_size,
					   (u32 *)&arp_index,
					   &iwdev->next_arp_index);

		if (err)
			return err;

		memcpy(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip));
		ether_addr_copy(iwdev->arp_table[arp_index].mac_addr, mac_addr);
		break;
	case I40IW_ARP_RESOLVE:
		if (arp_index == iwdev->arp_table_size)
			return -1;
		break;
	case I40IW_ARP_DELETE:
		if (arp_index == iwdev->arp_table_size)
			return -1;
		memset(iwdev->arp_table[arp_index].ip_addr, 0,
		       sizeof(iwdev->arp_table[arp_index].ip_addr));
		eth_zero_addr(iwdev->arp_table[arp_index].mac_addr);
		i40iw_free_resource(iwdev, iwdev->allocated_arps, arp_index);
		break;
	default:
		return -1;
	}
	return arp_index;
}
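
/*
 * Editor's usage sketch (not part of the original file): a caller adds an
 * entry once and later resolves it to an index into iwdev->arp_table. The
 * iwdev, ip and mac variables are assumed to exist at the call site; note
 * that I40IW_ARP_ADD returns -1 if the entry is already cached, and that
 * mac_addr is ignored for I40IW_ARP_RESOLVE.
 *
 *	int idx;
 *
 *	idx = i40iw_arp_table(iwdev, &ip, true, mac, I40IW_ARP_ADD);
 *	if (idx < 0)
 *		return idx;
 *	// ... later, look up the cached entry without modifying it
 *	idx = i40iw_arp_table(iwdev, &ip, true, NULL, I40IW_ARP_RESOLVE);
 */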

/**
 * i40iw_wr32 - write 32 bits to hw register
 * @hw: hardware information including registers
 * @reg: register offset
 * @value: value to write to register
 */
inline void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value)
{
	writel(value, hw->hw_addr + reg);
}

/**
 * i40iw_rd32 - read a 32 bit hw register
 * @hw: hardware information including registers
 * @reg: register offset
 *
 * Return value of register content
 */
inline u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg)
{
	return readl(hw->hw_addr + reg);
}
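
/*
 * Editor's sketch (not in the original file): the two accessors compose into
 * the usual read-modify-write pattern on a memory-mapped register. The
 * EXAMPLE_* names below are placeholders, not real i40iw definitions.
 *
 *	u32 val;
 *
 *	val = i40iw_rd32(hw, EXAMPLE_REG_OFFSET);
 *	val |= EXAMPLE_ENABLE_BIT;
 *	i40iw_wr32(hw, EXAMPLE_REG_OFFSET, val);
 */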

/**
 * i40iw_inetaddr_event - system notifier for ipv4 addr events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: interface address
 */
int i40iw_inetaddr_event(struct notifier_block *notifier,
			 unsigned long event,
			 void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *event_netdev = ifa->ifa_dev->dev;
	struct net_device *netdev;
	struct net_device *upper_dev;
	struct i40iw_device *iwdev;
	struct i40iw_handler *hdl;
	u32 local_ipaddr;
	u32 action = I40IW_ARP_ADD;

	hdl = i40iw_find_netdev(event_netdev);
	if (!hdl)
		return NOTIFY_DONE;

	iwdev = &hdl->device;
	if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
		return NOTIFY_DONE;

	netdev = iwdev->ldev->netdev;
	upper_dev = netdev_master_upper_dev_get(netdev);
	if (netdev != event_netdev)
		return NOTIFY_DONE;

	if (upper_dev) {
		struct in_device *in;

		rcu_read_lock();
		in = __in_dev_get_rcu(upper_dev);

		local_ipaddr = 0;
		if (in) {
			struct in_ifaddr *ifa;

			ifa = rcu_dereference(in->ifa_list);
			if (ifa)
				local_ipaddr = ntohl(ifa->ifa_address);
		}

		rcu_read_unlock();
	} else {
		local_ipaddr = ntohl(ifa->ifa_address);
	}
	switch (event) {
	case NETDEV_DOWN:
		action = I40IW_ARP_DELETE;
		/* Fall through */
	case NETDEV_UP:
		/* Fall through */
	case NETDEV_CHANGEADDR:

		/* Just skip if no need to handle ARP cache */
		if (!local_ipaddr)
			break;

		i40iw_manage_arp_cache(iwdev,
				       netdev->dev_addr,
				       &local_ipaddr,
				       true,
				       action);
		i40iw_if_notify(iwdev, netdev, &local_ipaddr, true,
				(action == I40IW_ARP_ADD) ? true : false);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

/**
 * i40iw_inet6addr_event - system notifier for ipv6 addr events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: interface address
 */
int i40iw_inet6addr_event(struct notifier_block *notifier,
			  unsigned long event,
			  void *ptr)
{
	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
	struct net_device *event_netdev = ifa->idev->dev;
	struct net_device *netdev;
	struct i40iw_device *iwdev;
	struct i40iw_handler *hdl;
	u32 local_ipaddr6[4];
	u32 action = I40IW_ARP_ADD;

	hdl = i40iw_find_netdev(event_netdev);
	if (!hdl)
		return NOTIFY_DONE;

	iwdev = &hdl->device;
	if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
		return NOTIFY_DONE;

	netdev = iwdev->ldev->netdev;
	if (netdev != event_netdev)
		return NOTIFY_DONE;

	i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
	switch (event) {
	case NETDEV_DOWN:
		action = I40IW_ARP_DELETE;
		/* Fall through */
	case NETDEV_UP:
		/* Fall through */
	case NETDEV_CHANGEADDR:
		i40iw_manage_arp_cache(iwdev,
				       netdev->dev_addr,
				       local_ipaddr6,
				       false,
				       action);
		i40iw_if_notify(iwdev, netdev, local_ipaddr6, false,
				(action == I40IW_ARP_ADD) ? true : false);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

/**
 * i40iw_net_event - system notifier for netevents
 * @notifier: not used
 * @event: event for notifier
 * @ptr: neighbor
 */
int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *ptr)
{
	struct neighbour *neigh = ptr;
	struct i40iw_device *iwdev;
	struct i40iw_handler *iwhdl;
	__be32 *p;
	u32 local_ipaddr[4];

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		iwhdl = i40iw_find_netdev((struct net_device *)neigh->dev);
		if (!iwhdl)
			return NOTIFY_DONE;
		iwdev = &iwhdl->device;
		if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
			return NOTIFY_DONE;
		p = (__be32 *)neigh->primary_key;
		i40iw_copy_ip_ntohl(local_ipaddr, p);
		if (neigh->nud_state & NUD_VALID) {
			i40iw_manage_arp_cache(iwdev,
					       neigh->ha,
					       local_ipaddr,
					       false,
					       I40IW_ARP_ADD);

		} else {
			i40iw_manage_arp_cache(iwdev,
					       neigh->ha,
					       local_ipaddr,
					       false,
					       I40IW_ARP_DELETE);
		}
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

/**
 * i40iw_netdevice_event - system notifier for netdev events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: netdev
 */
int i40iw_netdevice_event(struct notifier_block *notifier,
			  unsigned long event,
			  void *ptr)
{
	struct net_device *event_netdev;
	struct net_device *netdev;
	struct i40iw_device *iwdev;
	struct i40iw_handler *hdl;

	event_netdev = netdev_notifier_info_to_dev(ptr);

	hdl = i40iw_find_netdev(event_netdev);
	if (!hdl)
		return NOTIFY_DONE;

	iwdev = &hdl->device;
	if (iwdev->init_state < RDMA_DEV_REGISTERED || iwdev->closing)
		return NOTIFY_DONE;

	netdev = iwdev->ldev->netdev;
	if (netdev != event_netdev)
		return NOTIFY_DONE;

	iwdev->iw_status = 1;

	switch (event) {
	case NETDEV_DOWN:
		iwdev->iw_status = 0;
		/* Fall through */
	case NETDEV_UP:
		i40iw_port_ibevent(iwdev);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

/**
 * i40iw_get_cqp_request - get cqp struct
 * @cqp: device cqp ptr
 * @wait: cqp to be used in wait mode
 */
struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait)
{
	struct i40iw_cqp_request *cqp_request = NULL;
	unsigned long flags;

	spin_lock_irqsave(&cqp->req_lock, flags);
	if (!list_empty(&cqp->cqp_avail_reqs)) {
		cqp_request = list_entry(cqp->cqp_avail_reqs.next,
					 struct i40iw_cqp_request, list);
		list_del_init(&cqp_request->list);
	}
	spin_unlock_irqrestore(&cqp->req_lock, flags);
	if (!cqp_request) {
		cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
		if (cqp_request) {
			cqp_request->dynamic = true;
			INIT_LIST_HEAD(&cqp_request->list);
			init_waitqueue_head(&cqp_request->waitq);
		}
	}
	if (!cqp_request) {
		i40iw_pr_err("CQP Request Fail: No Memory");
		return NULL;
	}

	if (wait) {
		atomic_set(&cqp_request->refcount, 2);
		cqp_request->waiting = true;
	} else {
		atomic_set(&cqp_request->refcount, 1);
	}
	return cqp_request;
}

/**
 * i40iw_free_cqp_request - free cqp request
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)
{
	struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
	unsigned long flags;

	if (cqp_request->dynamic) {
		kfree(cqp_request);
	} else {
		cqp_request->request_done = false;
		cqp_request->callback_fcn = NULL;
		cqp_request->waiting = false;

		spin_lock_irqsave(&cqp->req_lock, flags);
		list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
		spin_unlock_irqrestore(&cqp->req_lock, flags);
	}
	wake_up(&iwdev->close_wq);
}

/**
 * i40iw_put_cqp_request - dec ref count and free if 0
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
void i40iw_put_cqp_request(struct i40iw_cqp *cqp,
			   struct i40iw_cqp_request *cqp_request)
{
	if (atomic_dec_and_test(&cqp_request->refcount))
		i40iw_free_cqp_request(cqp, cqp_request);
}
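
/*
 * Editor's sketch (not in the original file): the canonical life cycle of a
 * CQP request as used throughout this file. With wait == true the refcount
 * starts at 2 (one reference for the waiter, one for the completion path),
 * and i40iw_wait_event() drops the waiter's reference before returning, so
 * the last i40iw_put_cqp_request() recycles the request.
 *
 *	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
 *	if (!cqp_request)
 *		return I40IW_ERR_NO_MEMORY;
 *	// fill in cqp_request->info, then post and wait:
 *	status = i40iw_handle_cqp_op(iwdev, cqp_request);
 */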

/**
 * i40iw_free_pending_cqp_request - free pending cqp request objs
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
static void i40iw_free_pending_cqp_request(struct i40iw_cqp *cqp,
					   struct i40iw_cqp_request *cqp_request)
{
	struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);

	if (cqp_request->waiting) {
		cqp_request->compl_info.error = true;
		cqp_request->request_done = true;
		wake_up(&cqp_request->waitq);
	}
	i40iw_put_cqp_request(cqp, cqp_request);
	wait_event_timeout(iwdev->close_wq,
			   !atomic_read(&cqp_request->refcount),
			   1000);
}

/**
 * i40iw_cleanup_pending_cqp_op - clean-up cqp with no completions
 * @iwdev: iwarp device
 */
void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp *cqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request = NULL;
	struct cqp_commands_info *pcmdinfo = NULL;
	u32 i, pending_work, wqe_idx;

	pending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring);
	wqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring);
	for (i = 0; i < pending_work; i++) {
		cqp_request = (struct i40iw_cqp_request *)(unsigned long)cqp->scratch_array[wqe_idx];
		if (cqp_request)
			i40iw_free_pending_cqp_request(cqp, cqp_request);
		wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring);
	}

	while (!list_empty(&dev->cqp_cmd_head)) {
		pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
		cqp_request = container_of(pcmdinfo, struct i40iw_cqp_request, info);
		if (cqp_request)
			i40iw_free_pending_cqp_request(cqp, cqp_request);
	}
}

/**
 * i40iw_free_qp - callback after destroy cqp completes
 * @cqp_request: cqp request for destroy qp
 * @num: not used
 */
static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
{
	struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param;
	struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
	struct i40iw_device *iwdev;
	u32 qp_num = iwqp->ibqp.qp_num;

	iwdev = iwqp->iwdev;

	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
	i40iw_rem_devusecount(iwdev);
}

/**
 * i40iw_wait_event - wait for completion
 * @iwdev: iwarp device
 * @cqp_request: cqp request to wait
 */
static int i40iw_wait_event(struct i40iw_device *iwdev,
			    struct i40iw_cqp_request *cqp_request)
{
	struct cqp_commands_info *info = &cqp_request->info;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_timeout cqp_timeout;
	bool cqp_error = false;
	int err_code = 0;

	memset(&cqp_timeout, 0, sizeof(cqp_timeout));
	cqp_timeout.compl_cqp_cmds = iwdev->sc_dev.cqp_cmd_stats[OP_COMPLETED_COMMANDS];
	do {
		if (wait_event_timeout(cqp_request->waitq,
				       cqp_request->request_done, CQP_COMPL_WAIT_TIME))
			break;

		i40iw_check_cqp_progress(&cqp_timeout, &iwdev->sc_dev);

		if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
			continue;

		i40iw_pr_err("error cqp command 0x%x timed out", info->cqp_cmd);
		err_code = -ETIME;
		if (!iwdev->reset) {
			iwdev->reset = true;
			i40iw_request_reset(iwdev);
		}
		goto done;
	} while (1);
	cqp_error = cqp_request->compl_info.error;
	if (cqp_error) {
		i40iw_pr_err("error cqp command 0x%x completion maj = 0x%x min=0x%x\n",
			     info->cqp_cmd, cqp_request->compl_info.maj_err_code,
			     cqp_request->compl_info.min_err_code);
		err_code = -EPROTO;
		goto done;
	}
done:
	i40iw_put_cqp_request(iwcqp, cqp_request);
	return err_code;
}

/**
 * i40iw_handle_cqp_op - process cqp command
 * @iwdev: iwarp device
 * @cqp_request: cqp request to process
 */
enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
					   struct i40iw_cqp_request
					   *cqp_request)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;
	struct cqp_commands_info *info = &cqp_request->info;
	int err_code = 0;

	if (iwdev->reset) {
		i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
		return I40IW_ERR_CQP_COMPL_ERROR;
	}

	status = i40iw_process_cqp_cmd(dev, info);
	if (status) {
		i40iw_pr_err("error cqp command 0x%x failed\n", info->cqp_cmd);
		i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
		return status;
	}
	if (cqp_request->waiting)
		err_code = i40iw_wait_event(iwdev, cqp_request);
	if (err_code)
		status = I40IW_ERR_CQP_COMPL_ERROR;
	return status;
}

/**
 * i40iw_add_devusecount - add dev refcount
 * @iwdev: dev for refcount
 */
void i40iw_add_devusecount(struct i40iw_device *iwdev)
{
	atomic64_inc(&iwdev->use_count);
}

/**
 * i40iw_rem_devusecount - decrement refcount for dev
 * @iwdev: device
 */
void i40iw_rem_devusecount(struct i40iw_device *iwdev)
{
	if (!atomic64_dec_and_test(&iwdev->use_count))
		return;
	wake_up(&iwdev->close_wq);
}

/**
 * i40iw_add_pdusecount - add pd refcount
 * @iwpd: pd for refcount
 */
void i40iw_add_pdusecount(struct i40iw_pd *iwpd)
{
	atomic_inc(&iwpd->usecount);
}

/**
 * i40iw_rem_pdusecount - decrement refcount for pd and free if 0
 * @iwpd: pd for refcount
 * @iwdev: iwarp device
 */
void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)
{
	if (!atomic_dec_and_test(&iwpd->usecount))
		return;
	i40iw_free_resource(iwdev, iwdev->allocated_pds, iwpd->sc_pd.pd_id);
}

/**
 * i40iw_add_ref - add refcount for qp
 * @ibqp: iwarp qp
 */
void i40iw_add_ref(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp;

	atomic_inc(&iwqp->refcount);
}

/**
 * i40iw_rem_ref - rem refcount for qp and free if 0
 * @ibqp: iwarp qp
 */
void i40iw_rem_ref(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp;
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev;
	u32 qp_num;
	unsigned long flags;

	iwqp = to_iwqp(ibqp);
	iwdev = iwqp->iwdev;
	spin_lock_irqsave(&iwdev->qptable_lock, flags);
	if (!atomic_dec_and_test(&iwqp->refcount)) {
		spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
		return;
	}

	qp_num = iwqp->ibqp.qp_num;
	iwdev->qp_table[qp_num] = NULL;
	spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return;

	cqp_request->callback_fcn = i40iw_free_qp;
	cqp_request->param = (void *)&iwqp->sc_qp;
	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_QP_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp;
	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.qp_destroy.remove_hash_idx = true;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		return;

	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
	i40iw_rem_devusecount(iwdev);
}

/**
 * i40iw_get_qp - get qp address
 * @device: iwarp device
 * @qpn: qp number
 */
struct ib_qp *i40iw_get_qp(struct ib_device *device, int qpn)
{
	struct i40iw_device *iwdev = to_iwdev(device);

	if ((qpn < IW_FIRST_QPN) || (qpn >= iwdev->max_qp))
		return NULL;

	return &iwdev->qp_table[qpn]->ibqp;
}

/**
 * i40iw_debug_buf - print debug msg and buffer if mask set
 * @dev: hardware control device structure
 * @mask: mask to compare if to print debug buffer
 * @desc: description to print before the buffer
 * @buf: points buffer addr
 * @size: size of buffer to print
 */
void i40iw_debug_buf(struct i40iw_sc_dev *dev,
		     enum i40iw_debug_flag mask,
		     char *desc,
		     u64 *buf,
		     u32 size)
{
	u32 i;

	if (!(dev->debug_mask & mask))
		return;
	i40iw_debug(dev, mask, "%s\n", desc);
	i40iw_debug(dev, mask, "starting address virt=%p phy=%llxh\n", buf,
		    (unsigned long long)virt_to_phys(buf));

	for (i = 0; i < size; i += 8)
		i40iw_debug(dev, mask, "index %03d val: %016llx\n", i, buf[i / 8]);
}

/**
 * i40iw_get_hw_addr - return hw addr
 * @par: points to shared dev
 */
u8 __iomem *i40iw_get_hw_addr(void *par)
{
	struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)par;

	return dev->hw->hw_addr;
}

/**
 * i40iw_remove_head - return head entry and remove from list
 * @list: list for entry
 */
void *i40iw_remove_head(struct list_head *list)
{
	struct list_head *entry;

	if (list_empty(list))
		return NULL;

	entry = (void *)list->next;
	list_del(entry);
	return (void *)entry;
}

/**
 * i40iw_allocate_dma_mem - Memory alloc helper fn
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 */
enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
					      struct i40iw_dma_mem *mem,
					      u64 size,
					      u32 alignment)
{
	struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;

	if (!mem)
		return I40IW_ERR_PARAM;
	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&pcidev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (!mem->va)
		return I40IW_ERR_NO_MEMORY;
	return 0;
}

/**
 * i40iw_free_dma_mem - Memory free helper fn
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 */
void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem)
{
	struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;

	if (!mem || !mem->va)
		return;

	dma_free_coherent(&pcidev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	mem->va = NULL;
}
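
/*
 * Editor's sketch (not in the original file): typical pairing of the two DMA
 * helpers. The 4096-byte size and 256-byte alignment are illustrative values
 * only.
 *
 *	struct i40iw_dma_mem mem;
 *
 *	if (i40iw_allocate_dma_mem(hw, &mem, 4096, 256))
 *		return I40IW_ERR_NO_MEMORY;
 *	// ... hand mem.pa to hardware, touch the buffer through mem.va ...
 *	i40iw_free_dma_mem(hw, &mem);	// also NULLs mem.va
 */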

/**
 * i40iw_allocate_virt_mem - virtual memory alloc helper fn
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 */
enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,
					       struct i40iw_virt_mem *mem,
					       u32 size)
{
	if (!mem)
		return I40IW_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return I40IW_ERR_NO_MEMORY;
}

/**
 * i40iw_free_virt_mem - virtual memory free helper fn
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 */
enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
					   struct i40iw_virt_mem *mem)
{
	if (!mem)
		return I40IW_ERR_PARAM;
	/*
	 * mem->va points to the parent of mem, so both mem and mem->va
	 * can not be touched once mem->va is freed
	 */
	kfree(mem->va);
	return 0;
}

/**
 * i40iw_cqp_sds_cmd - create cqp command for sd
 * @dev: hardware control device structure
 * @sdinfo: information for sd cqp
 */
enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
					 struct i40iw_update_sds_info *sdinfo)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
	       sizeof(cqp_info->in.u.update_pe_sds.info));
	cqp_info->cqp_cmd = OP_UPDATE_PE_SDS;
	cqp_info->post_sq = 1;
	cqp_info->in.u.update_pe_sds.dev = dev;
	cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Update SD's fail");
	return status;
}

/**
 * i40iw_qp_suspend_resume - cqp command for suspend/resume
 * @dev: hardware control device structure
 * @qp: hardware control qp
 * @suspend: flag if suspend or resume
 */
void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp_request *cqp_request;
	struct i40iw_sc_cqp *cqp = dev->cqp;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = (suspend) ? OP_SUSPEND : OP_RESUME;
	cqp_info->in.u.suspend_resume.cqp = cqp;
	cqp_info->in.u.suspend_resume.qp = qp;
	cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP QP Suspend/Resume fail");
}

/**
 * i40iw_term_modify_qp - modify qp for term message
 * @qp: hardware control qp
 * @next_state: qp's next state
 * @term: terminate code
 * @term_len: length
 */
void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len)
{
	struct i40iw_qp *iwqp;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	i40iw_next_iw_state(iwqp, next_state, 0, term, term_len);
}

/**
 * i40iw_terminate_done - after terminate is completed
 * @qp: hardware control qp
 * @timeout_occurred: indicates if terminate timer expired
 */
void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred)
{
	struct i40iw_qp *iwqp;
	u32 next_iwarp_state = I40IW_QP_STATE_ERROR;
	u8 hte = 0;
	bool first_time;
	unsigned long flags;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	spin_lock_irqsave(&iwqp->lock, flags);
	if (iwqp->hte_added) {
		iwqp->hte_added = 0;
		hte = 1;
	}
	first_time = !(qp->term_flags & I40IW_TERM_DONE);
	qp->term_flags |= I40IW_TERM_DONE;
	spin_unlock_irqrestore(&iwqp->lock, flags);
	if (first_time) {
		if (!timeout_occurred)
			i40iw_terminate_del_timer(qp);
		else
			next_iwarp_state = I40IW_QP_STATE_CLOSING;

		i40iw_next_iw_state(iwqp, next_iwarp_state, hte, 0, 0);
		i40iw_cm_disconn(iwqp);
	}
}

/**
 * i40iw_terminate_timeout - timeout happened
 * @t: points to the expired terminate timer
 */
static void i40iw_terminate_timeout(struct timer_list *t)
{
	struct i40iw_qp *iwqp = from_timer(iwqp, t, terminate_timer);
	struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;

	i40iw_terminate_done(qp, 1);
	i40iw_rem_ref(&iwqp->ibqp);
}

/**
 * i40iw_terminate_start_timer - start terminate timeout
 * @qp: hardware control qp
 */
void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
{
	struct i40iw_qp *iwqp;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	i40iw_add_ref(&iwqp->ibqp);
	timer_setup(&iwqp->terminate_timer, i40iw_terminate_timeout, 0);
	iwqp->terminate_timer.expires = jiffies + HZ;
	add_timer(&iwqp->terminate_timer);
}

/**
 * i40iw_terminate_del_timer - delete terminate timeout
 * @qp: hardware control qp
 */
void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
{
	struct i40iw_qp *iwqp;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	if (del_timer(&iwqp->terminate_timer))
		i40iw_rem_ref(&iwqp->ibqp);
}
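
/*
 * Editor's note (not in the original file): the terminate timer owns one qp
 * reference. i40iw_terminate_start_timer() takes it, and whoever kills the
 * timer drops it: the handler itself on expiry, or a successful del_timer()
 * above. The qp therefore cannot be freed while the timer is pending.
 *
 *	i40iw_terminate_start_timer(qp);	// ref++ and arm a 1 s timer
 *	// ... terminate completes in time ...
 *	i40iw_terminate_del_timer(qp);		// ref-- only if still pending
 */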

/**
 * i40iw_cqp_generic_worker - generic worker for cqp
 * @work: work pointer
 */
static void i40iw_cqp_generic_worker(struct work_struct *work)
{
	struct i40iw_virtchnl_work_info *work_info =
	    &((struct virtchnl_work *)work)->work_info;

	if (work_info->worker_vf_dev)
		work_info->callback_fcn(work_info->worker_vf_dev);
}

/**
 * i40iw_cqp_spawn_worker - spawn worker thread
 * @dev: device struct pointer
 * @work_info: work request info
 * @iw_vf_idx: virtual function index
 */
void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
			    struct i40iw_virtchnl_work_info *work_info,
			    u32 iw_vf_idx)
{
	struct virtchnl_work *work;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	work = &iwdev->virtchnl_w[iw_vf_idx];
	memcpy(&work->work_info, work_info, sizeof(*work_info));
	INIT_WORK(&work->work, i40iw_cqp_generic_worker);
	queue_work(iwdev->virtchnl_wq, &work->work);
}

/**
 * i40iw_cqp_manage_hmc_fcn_worker - worker to process hmc cqp completion
 * @work: work pointer for hmc info
 */
static void i40iw_cqp_manage_hmc_fcn_worker(struct work_struct *work)
{
	struct i40iw_cqp_request *cqp_request =
	    ((struct virtchnl_work *)work)->cqp_request;
	struct i40iw_ccq_cqe_info ccq_cqe_info;
	struct i40iw_hmc_fcn_info *hmcfcninfo =
			&cqp_request->info.in.u.manage_hmc_pm.info;
	struct i40iw_device *iwdev =
	    (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->back_dev;

	ccq_cqe_info.cqp = NULL;
	ccq_cqe_info.maj_err_code = cqp_request->compl_info.maj_err_code;
	ccq_cqe_info.min_err_code = cqp_request->compl_info.min_err_code;
	ccq_cqe_info.op_code = cqp_request->compl_info.op_code;
	ccq_cqe_info.op_ret_val = cqp_request->compl_info.op_ret_val;
	ccq_cqe_info.scratch = 0;
	ccq_cqe_info.error = cqp_request->compl_info.error;
	hmcfcninfo->callback_fcn(cqp_request->info.in.u.manage_hmc_pm.dev,
				 hmcfcninfo->cqp_callback_param, &ccq_cqe_info);
	i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
}

/**
 * i40iw_cqp_manage_hmc_fcn_callback - called function after cqp completion
 * @cqp_request: cqp request info struct for hmc fun
 * @unused: unused param of callback
 */
static void i40iw_cqp_manage_hmc_fcn_callback(struct i40iw_cqp_request *cqp_request,
					      u32 unused)
{
	struct virtchnl_work *work;
	struct i40iw_hmc_fcn_info *hmcfcninfo =
	    &cqp_request->info.in.u.manage_hmc_pm.info;
	struct i40iw_device *iwdev =
	    (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->
	    back_dev;

	if (hmcfcninfo && hmcfcninfo->callback_fcn) {
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s1\n", __func__);
		atomic_inc(&cqp_request->refcount);
		work = &iwdev->virtchnl_w[hmcfcninfo->iw_vf_idx];
		work->cqp_request = cqp_request;
		INIT_WORK(&work->work, i40iw_cqp_manage_hmc_fcn_worker);
		queue_work(iwdev->virtchnl_wq, &work->work);
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s2\n", __func__);
	} else {
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s: Something wrong\n", __func__);
	}
}

/**
 * i40iw_cqp_manage_hmc_fcn_cmd - issue cqp command to manage hmc
 * @dev: hardware control device structure
 * @hmcfcninfo: info for hmc
 */
enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev,
						    struct i40iw_hmc_fcn_info *hmcfcninfo)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s\n", __func__);
	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	cqp_request->callback_fcn = i40iw_cqp_manage_hmc_fcn_callback;
	cqp_request->param = hmcfcninfo;
	memcpy(&cqp_info->in.u.manage_hmc_pm.info, hmcfcninfo,
	       sizeof(*hmcfcninfo));
	cqp_info->in.u.manage_hmc_pm.dev = dev;
	cqp_info->cqp_cmd = OP_MANAGE_HMC_PM_FUNC_TABLE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.manage_hmc_pm.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Manage HMC fail");
	return status;
}

/**
 * i40iw_cqp_query_fpm_values_cmd - send cqp command for fpm
 * @dev: function device struct
 * @values_mem: buffer for fpm
 * @hmc_fn_id: function id for fpm
 */
enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev,
						      struct i40iw_dma_mem *values_mem,
						      u8 hmc_fn_id)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	cqp_request->param = NULL;
	cqp_info->in.u.query_fpm_values.cqp = dev->cqp;
	cqp_info->in.u.query_fpm_values.fpm_values_pa = values_mem->pa;
	cqp_info->in.u.query_fpm_values.fpm_values_va = values_mem->va;
	cqp_info->in.u.query_fpm_values.hmc_fn_id = hmc_fn_id;
	cqp_info->cqp_cmd = OP_QUERY_FPM_VALUES;
	cqp_info->post_sq = 1;
	cqp_info->in.u.query_fpm_values.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Query FPM fail");
	return status;
}

/**
 * i40iw_cqp_commit_fpm_values_cmd - commit fpm values in hw
 * @dev: hardware control device structure
 * @values_mem: buffer with fpm values
 * @hmc_fn_id: function id for fpm
 */
enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,
						       struct i40iw_dma_mem *values_mem,
						       u8 hmc_fn_id)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	cqp_request->param = NULL;
	cqp_info->in.u.commit_fpm_values.cqp = dev->cqp;
	cqp_info->in.u.commit_fpm_values.fpm_values_pa = values_mem->pa;
	cqp_info->in.u.commit_fpm_values.fpm_values_va = values_mem->va;
	cqp_info->in.u.commit_fpm_values.hmc_fn_id = hmc_fn_id;
	cqp_info->cqp_cmd = OP_COMMIT_FPM_VALUES;
	cqp_info->post_sq = 1;
	cqp_info->in.u.commit_fpm_values.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Commit FPM fail");
	return status;
}

/**
 * i40iw_vf_wait_vchnl_resp - wait for channel msg
 * @dev: function's device struct
 */
enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev)
{
	struct i40iw_device *iwdev = dev->back_dev;
	int timeout_ret;

	i40iw_debug(dev, I40IW_DEBUG_VIRT, "%s[%u] dev %p, iwdev %p\n",
		    __func__, __LINE__, dev, iwdev);

	atomic_set(&iwdev->vchnl_msgs, 2);
	timeout_ret = wait_event_timeout(iwdev->vchnl_waitq,
					 (atomic_read(&iwdev->vchnl_msgs) == 1),
					 I40IW_VCHNL_EVENT_TIMEOUT);
	atomic_dec(&iwdev->vchnl_msgs);
	if (!timeout_ret) {
		i40iw_pr_err("virt channel completion timeout = 0x%x\n", timeout_ret);
		atomic_set(&iwdev->vchnl_msgs, 0);
		dev->vchnl_up = false;
		return I40IW_ERR_TIMEOUT;
	}
	wake_up(&dev->vf_reqs);
	return 0;
}
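
/*
 * Editor's note (not in the original file): the handshake above encodes its
 * state in vchnl_msgs. The waiter sets it to 2, the response path is
 * expected to drop it to 1 (waking the waiter), and the final atomic_dec()
 * returns it to 0 ready for the next exchange.
 */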

/**
 * i40iw_cqp_cq_create_cmd - create a cq for the cqp
 * @dev: device pointer
 * @cq: pointer to created cq
 */
enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev,
					       struct i40iw_sc_cq *cq)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_CQ_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_create.cq = cq;
	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Create CQ fail");

	return status;
}

/**
 * i40iw_cqp_qp_create_cmd - create a qp for the cqp
 * @dev: device pointer
 * @qp: pointer to created qp
 */
enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev,
					       struct i40iw_sc_qp *qp)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_create_qp_info *qp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	qp_info = &cqp_request->info.in.u.qp_create.info;

	memset(qp_info, 0, sizeof(*qp_info));

	qp_info->cq_num_valid = true;
	qp_info->next_iwarp_state = I40IW_QP_STATE_RTS;

	cqp_info->cqp_cmd = OP_QP_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_create.qp = qp;
	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP QP create fail");
	return status;
}

/**
 * i40iw_cqp_cq_destroy_cmd - destroy the cqp cq
 * @dev: device pointer
 * @cq: pointer to cq
 */
void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	i40iw_cq_wq_destroy(iwdev, cq);
}

/**
 * i40iw_cqp_qp_destroy_cmd - destroy the cqp qp
 * @dev: device pointer
 * @qp: pointer to qp
 */
void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	memset(cqp_info, 0, sizeof(*cqp_info));

	cqp_info->cqp_cmd = OP_QP_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_destroy.qp = qp;
	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.qp_destroy.remove_hash_idx = true;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP QP_DESTROY fail");
}

/**
 * i40iw_ieq_mpa_crc_ae - generate AE for crc error
 * @dev: hardware control device structure
 * @qp: hardware control qp
 */
void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
	struct i40iw_gen_ae_info info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	i40iw_debug(dev, I40IW_DEBUG_AEQ, "%s entered\n", __func__);
	info.ae_code = I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR;
	info.ae_source = I40IW_AE_SOURCE_RQ;
	i40iw_gen_ae(iwdev, qp, &info, false);
}

/**
 * i40iw_init_hash_desc - initialize hash for crc calculation
 * @desc: double pointer to the allocated shash descriptor
 */
enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc)
{
	struct crypto_shash *tfm;
	struct shash_desc *tdesc;

	tfm = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(tfm))
		return I40IW_ERR_MPA_CRC;

	tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),
			GFP_KERNEL);
	if (!tdesc) {
		crypto_free_shash(tfm);
		return I40IW_ERR_MPA_CRC;
	}
	tdesc->tfm = tfm;
	*desc = tdesc;

	return 0;
}

/**
 * i40iw_free_hash_desc - free hash desc
 * @desc: to be freed
 */
void i40iw_free_hash_desc(struct shash_desc *desc)
{
	if (desc) {
		crypto_free_shash(desc->tfm);
		kfree(desc);
	}
}

/**
 * i40iw_alloc_query_fpm_buf - allocate buffer for fpm
 * @dev: hardware control device structure
 * @mem: buffer ptr for fpm to be allocated
 * @return: memory allocation status
 */
enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev,
						 struct i40iw_dma_mem *mem)
{
	enum i40iw_status_code status;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	status = i40iw_obj_aligned_mem(iwdev, mem, I40IW_QUERY_FPM_BUF_SIZE,
				       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
	return status;
}

/**
 * i40iw_ieq_check_mpacrc - check if mpa crc is OK
 * @desc: desc for hash
 * @addr: address of buffer for crc
 * @length: length of buffer
 * @value: value to be compared
 */
enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc,
					      void *addr,
					      u32 length,
					      u32 value)
{
	u32 crc = 0;
	int ret;
	enum i40iw_status_code ret_code = 0;

	crypto_shash_init(desc);
	ret = crypto_shash_update(desc, addr, length);
	if (!ret)
		crypto_shash_final(desc, (u8 *)&crc);
	if (crc != value) {
		i40iw_pr_err("mpa crc check fail\n");
		ret_code = I40IW_ERR_MPA_CRC;
	}
	return ret_code;
}
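
/*
 * Editor's sketch (not in the original file): how the three hash helpers fit
 * together. A descriptor is allocated once, reused per packet for the MPA
 * CRC32C check, and freed on teardown. 'received_crc' is a placeholder for
 * the trailer value pulled off the wire.
 *
 *	struct shash_desc *desc;
 *
 *	if (i40iw_init_hash_desc(&desc))
 *		return I40IW_ERR_MPA_CRC;
 *	if (i40iw_ieq_check_mpacrc(desc, payload, payload_len, received_crc))
 *		i40iw_ieq_mpa_crc_ae(dev, qp);	// raise async event on mismatch
 *	i40iw_free_hash_desc(desc);
 */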

/**
 * i40iw_ieq_get_qp - get qp based on quad in puda buffer
 * @dev: hardware control device structure
 * @buf: receive puda buffer on exception q
 */
struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
				     struct i40iw_puda_buf *buf)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_qp *iwqp;
	struct i40iw_cm_node *cm_node;
	u32 loc_addr[4], rem_addr[4];
	u16 loc_port, rem_port;
	struct ipv6hdr *ip6h;
	struct iphdr *iph = (struct iphdr *)buf->iph;
	struct tcphdr *tcph = (struct tcphdr *)buf->tcph;

	if (iph->version == 4) {
		memset(loc_addr, 0, sizeof(loc_addr));
		loc_addr[0] = ntohl(iph->daddr);
		memset(rem_addr, 0, sizeof(rem_addr));
		rem_addr[0] = ntohl(iph->saddr);
	} else {
		ip6h = (struct ipv6hdr *)buf->iph;
		i40iw_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32);
		i40iw_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32);
	}
	loc_port = ntohs(tcph->dest);
	rem_port = ntohs(tcph->source);

	cm_node = i40iw_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
				  loc_addr, false, true);
	if (!cm_node)
		return NULL;
	iwqp = cm_node->iwqp;
	return &iwqp->sc_qp;
}

/**
 * i40iw_ieq_update_tcpip_info - update tcpip in the buffer
 * @buf: puda to update
 * @length: length of buffer
 * @seqnum: seq number for tcp
 */
void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum)
{
	struct tcphdr *tcph;
	struct iphdr *iph;
	u16 iphlen;
	u16 packetsize;
	u8 *addr = (u8 *)buf->mem.va;

	iphlen = (buf->ipv4) ? 20 : 40;
	iph = (struct iphdr *)(addr + buf->maclen);
	tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
	packetsize = length + buf->tcphlen + iphlen;

	iph->tot_len = htons(packetsize);
	tcph->seq = htonl(seqnum);
}

/**
 * i40iw_puda_get_tcpip_info - get tcpip info from puda buffer
 * @info: to get information
 * @buf: puda buffer
 */
enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
						 struct i40iw_puda_buf *buf)
{
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct tcphdr *tcph;
	u16 iphlen;
	u16 pkt_len;
	u8 *mem = (u8 *)buf->mem.va;
	struct ethhdr *ethh = (struct ethhdr *)buf->mem.va;

	if (ethh->h_proto == htons(0x8100)) {
		info->vlan_valid = true;
		buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) & VLAN_VID_MASK;
	}
	buf->maclen = (info->vlan_valid) ? 18 : 14;
	iphlen = (info->l3proto) ? 40 : 20;
	buf->ipv4 = (info->l3proto) ? false : true;
	buf->iph = mem + buf->maclen;
	iph = (struct iphdr *)buf->iph;

	buf->tcph = buf->iph + iphlen;
	tcph = (struct tcphdr *)buf->tcph;

	if (buf->ipv4) {
		pkt_len = ntohs(iph->tot_len);
	} else {
		ip6h = (struct ipv6hdr *)buf->iph;
		pkt_len = ntohs(ip6h->payload_len) + iphlen;
	}

	buf->totallen = pkt_len + buf->maclen;

	if (info->payload_len < buf->totallen) {
		i40iw_pr_err("payload_len = 0x%x totallen expected 0x%x\n",
			     info->payload_len, buf->totallen);
		return I40IW_ERR_INVALID_SIZE;
	}

	buf->tcphlen = (tcph->doff) << 2;
	buf->datalen = pkt_len - iphlen - buf->tcphlen;
	buf->data = (buf->datalen) ? buf->tcph + buf->tcphlen : NULL;
	buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
	buf->seqnum = ntohl(tcph->seq);
	return 0;
}
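
/*
 * Editor's note (not in the original file): layout of a puda buffer after
 * i40iw_puda_get_tcpip_info(), with offsets measured from mem.va:
 *
 *	[ mac (14, or 18 w/ vlan) ][ ip (20 or 40) ][ tcp (doff * 4) ][ data ]
 *	^ mem.va                   ^ buf->iph       ^ buf->tcph       ^ buf->data
 */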

/**
 * i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
 * @t: points to the expired stats timer
 */
static void i40iw_hw_stats_timeout(struct timer_list *t)
{
	struct i40iw_vsi_pestat *pf_devstat = from_timer(pf_devstat, t,
						       stats_timer);
	struct i40iw_sc_vsi *sc_vsi = pf_devstat->vsi;
	struct i40iw_sc_dev *pf_dev = sc_vsi->dev;
	struct i40iw_vsi_pestat *vf_devstat = NULL;
	u16 iw_vf_idx;
	unsigned long flags;

	/* PF */
	i40iw_hw_stats_read_all(pf_devstat, &pf_devstat->hw_stats);

	for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
		spin_lock_irqsave(&pf_devstat->lock, flags);
		if (pf_dev->vf_dev[iw_vf_idx]) {
			if (pf_dev->vf_dev[iw_vf_idx]->stats_initialized) {
				vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->pestat;
				i40iw_hw_stats_read_all(vf_devstat, &vf_devstat->hw_stats);
			}
		}
		spin_unlock_irqrestore(&pf_devstat->lock, flags);
	}

	mod_timer(&pf_devstat->stats_timer,
		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}

/**
 * i40iw_hw_stats_start_timer - Start periodic stats timer
 * @vsi: pointer to the vsi structure
 */
void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi)
{
	struct i40iw_vsi_pestat *devstat = vsi->pestat;

	timer_setup(&devstat->stats_timer, i40iw_hw_stats_timeout, 0);
	mod_timer(&devstat->stats_timer,
		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}

/**
 * i40iw_hw_stats_stop_timer - Delete periodic stats timer
 * @vsi: pointer to the vsi structure
 */
void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi)
{
	struct i40iw_vsi_pestat *devstat = vsi->pestat;

	del_timer_sync(&devstat->stats_timer);
}