v6.8
   1/*
   2 * Copyright (c) 2016 Hisilicon Limited.
   3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
   4 *
   5 * This software is available to you under a choice of one of two
   6 * licenses.  You may choose to be licensed under the terms of the GNU
   7 * General Public License (GPL) Version 2, available from the file
   8 * COPYING in the main directory of this source tree, or the
   9 * OpenIB.org BSD license below:
  10 *
  11 *     Redistribution and use in source and binary forms, with or
  12 *     without modification, are permitted provided that the following
  13 *     conditions are met:
  14 *
  15 *      - Redistributions of source code must retain the above
  16 *        copyright notice, this list of conditions and the following
  17 *        disclaimer.
  18 *
  19 *      - Redistributions in binary form must reproduce the above
  20 *        copyright notice, this list of conditions and the following
  21 *        disclaimer in the documentation and/or other materials
  22 *        provided with the distribution.
  23 *
  24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31 * SOFTWARE.
  32 */
  33
  34#include "hns_roce_device.h"
  35#include "hns_roce_hem.h"
  36#include "hns_roce_common.h"
  37
  38#define HEM_INDEX_BUF			BIT(0)
  39#define HEM_INDEX_L0			BIT(1)
  40#define HEM_INDEX_L1			BIT(2)
  41struct hns_roce_hem_index {
  42	u64 buf;
  43	u64 l0;
  44	u64 l1;
   45	u32 inited; /* bitmask of HEM_INDEX_* levels that are initialized */
  46};
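/*
 * Editor's note (illustrative, not part of the original source): "inited" is
 * a bitmask of the HEM_INDEX_* flags above. Each allocation path sets the bit
 * for the level it actually allocated, so the error/free paths only undo the
 * levels whose bits are set, e.g.:
 *
 *	index->inited |= HEM_INDEX_L0;		- L0 BT was allocated
 *	...
 *	if (index->inited & HEM_INDEX_BUF)	- buffer HEM is present
 *		hns_roce_free_hem(hr_dev, table->hem[index->buf]);
 *
 * See alloc_mhop_hem() and free_mhop_hem() below for the real users.
 */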
  47
  48bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
  49{
  50	int hop_num = 0;
  51
  52	switch (type) {
  53	case HEM_TYPE_QPC:
  54		hop_num = hr_dev->caps.qpc_hop_num;
  55		break;
  56	case HEM_TYPE_MTPT:
  57		hop_num = hr_dev->caps.mpt_hop_num;
  58		break;
  59	case HEM_TYPE_CQC:
  60		hop_num = hr_dev->caps.cqc_hop_num;
  61		break;
  62	case HEM_TYPE_SRQC:
  63		hop_num = hr_dev->caps.srqc_hop_num;
  64		break;
  65	case HEM_TYPE_SCCC:
  66		hop_num = hr_dev->caps.sccc_hop_num;
  67		break;
  68	case HEM_TYPE_QPC_TIMER:
  69		hop_num = hr_dev->caps.qpc_timer_hop_num;
  70		break;
  71	case HEM_TYPE_CQC_TIMER:
  72		hop_num = hr_dev->caps.cqc_timer_hop_num;
  73		break;
  74	case HEM_TYPE_GMV:
  75		hop_num = hr_dev->caps.gmv_hop_num;
  76		break;
  77	default:
  78		return false;
  79	}
  80
  81	return hop_num;
  82}
  83
  84static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 hem_idx,
  85				    u32 bt_chunk_num, u64 hem_max_num)
  86{
  87	u64 start_idx = round_down(hem_idx, bt_chunk_num);
  88	u64 check_max_num = start_idx + bt_chunk_num;
  89	u64 i;
  90
  91	for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++)
  92		if (i != hem_idx && hem[i])
  93			return false;
  94
  95	return true;
  96}
  97
  98static bool hns_roce_check_bt_null(u64 **bt, u64 ba_idx, u32 bt_chunk_num)
  99{
 100	u64 start_idx = round_down(ba_idx, bt_chunk_num);
 101	int i;
 102
 103	for (i = 0; i < bt_chunk_num; i++)
 104		if (i != ba_idx && bt[start_idx + i])
 105			return false;
 106
 107	return true;
 108}
 109
 110static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
 111{
 112	if (check_whether_bt_num_3(table_type, hop_num))
 113		return 3;
 114	else if (check_whether_bt_num_2(table_type, hop_num))
 115		return 2;
 116	else if (check_whether_bt_num_1(table_type, hop_num))
 117		return 1;
 118	else
 119		return 0;
 120}
 121
 122static int get_hem_table_config(struct hns_roce_dev *hr_dev,
 123				struct hns_roce_hem_mhop *mhop,
 124				u32 type)
 125{
 126	struct device *dev = hr_dev->dev;
 127
 128	switch (type) {
 129	case HEM_TYPE_QPC:
 130		mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
 131					     + PAGE_SHIFT);
 132		mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
 133					     + PAGE_SHIFT);
 134		mhop->ba_l0_num = hr_dev->caps.qpc_bt_num;
 135		mhop->hop_num = hr_dev->caps.qpc_hop_num;
 136		break;
 137	case HEM_TYPE_MTPT:
 138		mhop->buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
 139					     + PAGE_SHIFT);
 140		mhop->bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
 141					     + PAGE_SHIFT);
 142		mhop->ba_l0_num = hr_dev->caps.mpt_bt_num;
 143		mhop->hop_num = hr_dev->caps.mpt_hop_num;
 144		break;
 145	case HEM_TYPE_CQC:
 146		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
 147					     + PAGE_SHIFT);
 148		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
 149					    + PAGE_SHIFT);
 150		mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
 151		mhop->hop_num = hr_dev->caps.cqc_hop_num;
 152		break;
 153	case HEM_TYPE_SCCC:
 154		mhop->buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
 155					     + PAGE_SHIFT);
 156		mhop->bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
 157					    + PAGE_SHIFT);
 158		mhop->ba_l0_num = hr_dev->caps.sccc_bt_num;
 159		mhop->hop_num = hr_dev->caps.sccc_hop_num;
 160		break;
 161	case HEM_TYPE_QPC_TIMER:
 162		mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
 163					     + PAGE_SHIFT);
 164		mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
 165					    + PAGE_SHIFT);
 166		mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num;
 167		mhop->hop_num = hr_dev->caps.qpc_timer_hop_num;
 168		break;
 169	case HEM_TYPE_CQC_TIMER:
 170		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
 171					     + PAGE_SHIFT);
 172		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
 173					    + PAGE_SHIFT);
 174		mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num;
 175		mhop->hop_num = hr_dev->caps.cqc_timer_hop_num;
 176		break;
 177	case HEM_TYPE_SRQC:
 178		mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
 179					     + PAGE_SHIFT);
 180		mhop->bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
 181					     + PAGE_SHIFT);
 182		mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
 183		mhop->hop_num = hr_dev->caps.srqc_hop_num;
 184		break;
 185	case HEM_TYPE_GMV:
 186		mhop->buf_chunk_size = 1 << (hr_dev->caps.gmv_buf_pg_sz +
 187					     PAGE_SHIFT);
 188		mhop->bt_chunk_size = 1 << (hr_dev->caps.gmv_ba_pg_sz +
 189					    PAGE_SHIFT);
 190		mhop->ba_l0_num = hr_dev->caps.gmv_bt_num;
 191		mhop->hop_num = hr_dev->caps.gmv_hop_num;
 192		break;
 193	default:
 194		dev_err(dev, "table %u not support multi-hop addressing!\n",
 195			type);
 196		return -EINVAL;
 197	}
 198
 199	return 0;
 200}
 201
 202int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
 203			   struct hns_roce_hem_table *table, unsigned long *obj,
 204			   struct hns_roce_hem_mhop *mhop)
 205{
 206	struct device *dev = hr_dev->dev;
 207	u32 chunk_ba_num;
 208	u32 chunk_size;
 209	u32 table_idx;
 210	u32 bt_num;
 211
 212	if (get_hem_table_config(hr_dev, mhop, table->type))
 213		return -EINVAL;
 214
 215	if (!obj)
 216		return 0;
 217
 218	/*
  219	 * QPC/MTPT/CQC/SRQC/SCCC allocate HEM for buffer pages.
  220	 * MTT/CQE allocate HEM for BT pages.
 221	 */
 222	bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
 223	chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
 224	chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
 225			      mhop->bt_chunk_size;
 226	table_idx = *obj / (chunk_size / table->obj_size);
 227	switch (bt_num) {
 228	case 3:
 229		mhop->l2_idx = table_idx & (chunk_ba_num - 1);
 230		mhop->l1_idx = table_idx / chunk_ba_num & (chunk_ba_num - 1);
 231		mhop->l0_idx = (table_idx / chunk_ba_num) / chunk_ba_num;
 232		break;
 233	case 2:
 234		mhop->l1_idx = table_idx & (chunk_ba_num - 1);
 235		mhop->l0_idx = table_idx / chunk_ba_num;
 236		break;
 237	case 1:
 238		mhop->l0_idx = table_idx;
 239		break;
 240	default:
 241		dev_err(dev, "table %u not support hop_num = %u!\n",
 242			table->type, mhop->hop_num);
 243		return -EINVAL;
 244	}
 245	if (mhop->l0_idx >= mhop->ba_l0_num)
 246		mhop->l0_idx %= mhop->ba_l0_num;
 247
 248	return 0;
 249}
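/*
 * Editor's note: a worked example of the decomposition above, using assumed
 * numbers (not mandated by the driver). Suppose obj_size = 512 and
 * buf_chunk_size = bt_chunk_size = 4096, so chunk_ba_num = 4096 / 8 = 512 and
 * each buffer chunk holds 8 objects. For obj = 10000 with a 3-level BT:
 *
 *	table_idx = 10000 / 8          = 1250
 *	l2_idx    = 1250 & 511         = 226
 *	l1_idx    = (1250 / 512) & 511 = 2
 *	l0_idx    = 1250 / 512 / 512   = 0
 *
 * i.e. L0 entry 0 points to an L1 chunk, L1 entry 2 points to an L2 chunk,
 * and L2 entry 226 points to the buffer chunk that holds this object.
 */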
 250
 251static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
 252					       int npages,
 253					       unsigned long hem_alloc_size,
 254					       gfp_t gfp_mask)
 255{
 256	struct hns_roce_hem_chunk *chunk = NULL;
 257	struct hns_roce_hem *hem;
 258	struct scatterlist *mem;
 259	int order;
 260	void *buf;
 261
 262	WARN_ON(gfp_mask & __GFP_HIGHMEM);
 263
 264	hem = kmalloc(sizeof(*hem),
 265		      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
 266	if (!hem)
 267		return NULL;
 268
 269	INIT_LIST_HEAD(&hem->chunk_list);
 270
 271	order = get_order(hem_alloc_size);
 272
 273	while (npages > 0) {
 274		if (!chunk) {
 275			chunk = kmalloc(sizeof(*chunk),
 276				gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
 277			if (!chunk)
 278				goto fail;
 279
 280			sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
 281			chunk->npages = 0;
 282			chunk->nsg = 0;
 283			memset(chunk->buf, 0, sizeof(chunk->buf));
 284			list_add_tail(&chunk->list, &hem->chunk_list);
 285		}
 286
 287		while (1 << order > npages)
 288			--order;
 289
 290		/*
  291		 * Allocate the memory in one shot. If that fails, do not fall
  292		 * back to smaller blocks; return failure directly.
 293		 */
 294		mem = &chunk->mem[chunk->npages];
 295		buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
 296				&sg_dma_address(mem), gfp_mask);
 297		if (!buf)
 298			goto fail;
 299
 300		chunk->buf[chunk->npages] = buf;
 301		sg_dma_len(mem) = PAGE_SIZE << order;
 302
 303		++chunk->npages;
 304		++chunk->nsg;
 305		npages -= 1 << order;
 306	}
 307
 308	return hem;
 309
 310fail:
 311	hns_roce_free_hem(hr_dev, hem);
 312	return NULL;
 313}
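/*
 * Editor's note: the loop above splits the request greedily into the largest
 * power-of-two DMA allocations that still fit. A hypothetical run with
 * hem_alloc_size = 128K (order 5 with 4K pages) and npages = 24:
 *
 *	pass 1: 1 << 5 = 32 > 24, so order drops to 4; 16 pages allocated
 *	pass 2: npages = 8, order drops to 3;           8 pages allocated
 *
 * leaving npages = 0 and two scatterlist entries in the chunk.
 */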
 314
 315void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
 316{
 317	struct hns_roce_hem_chunk *chunk, *tmp;
 318	int i;
 319
 320	if (!hem)
 321		return;
 322
 323	list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
 324		for (i = 0; i < chunk->npages; ++i)
 325			dma_free_coherent(hr_dev->dev,
 326				   sg_dma_len(&chunk->mem[i]),
 327				   chunk->buf[i],
 328				   sg_dma_address(&chunk->mem[i]));
 329		kfree(chunk);
 330	}
 331
 332	kfree(hem);
 333}
 334
 335static int calc_hem_config(struct hns_roce_dev *hr_dev,
 336			   struct hns_roce_hem_table *table, unsigned long obj,
 337			   struct hns_roce_hem_mhop *mhop,
 338			   struct hns_roce_hem_index *index)
 339{
 340	struct ib_device *ibdev = &hr_dev->ib_dev;
 341	unsigned long mhop_obj = obj;
 342	u32 l0_idx, l1_idx, l2_idx;
 343	u32 chunk_ba_num;
 344	u32 bt_num;
 345	int ret;
 346
 347	ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, mhop);
 348	if (ret)
 349		return ret;
 350
 351	l0_idx = mhop->l0_idx;
 352	l1_idx = mhop->l1_idx;
 353	l2_idx = mhop->l2_idx;
 354	chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
 355	bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
 356	switch (bt_num) {
 357	case 3:
 358		index->l1 = l0_idx * chunk_ba_num + l1_idx;
 359		index->l0 = l0_idx;
 360		index->buf = l0_idx * chunk_ba_num * chunk_ba_num +
 361			     l1_idx * chunk_ba_num + l2_idx;
 362		break;
 363	case 2:
 364		index->l0 = l0_idx;
 365		index->buf = l0_idx * chunk_ba_num + l1_idx;
 366		break;
 367	case 1:
 368		index->buf = l0_idx;
 369		break;
 370	default:
 371		ibdev_err(ibdev, "table %u not support mhop.hop_num = %u!\n",
 372			  table->type, mhop->hop_num);
 373		return -EINVAL;
 374	}
 375
 376	if (unlikely(index->buf >= table->num_hem)) {
  377		ibdev_err(ibdev, "table %u exceeds hem limit idx %llu, max %lu!\n",
 378			  table->type, index->buf, table->num_hem);
 379		return -EINVAL;
 380	}
 381
 382	return 0;
 383}
 384
 385static void free_mhop_hem(struct hns_roce_dev *hr_dev,
 386			  struct hns_roce_hem_table *table,
 387			  struct hns_roce_hem_mhop *mhop,
 388			  struct hns_roce_hem_index *index)
 389{
 390	u32 bt_size = mhop->bt_chunk_size;
 391	struct device *dev = hr_dev->dev;
 392
 393	if (index->inited & HEM_INDEX_BUF) {
 394		hns_roce_free_hem(hr_dev, table->hem[index->buf]);
 395		table->hem[index->buf] = NULL;
 396	}
 397
 398	if (index->inited & HEM_INDEX_L1) {
 399		dma_free_coherent(dev, bt_size, table->bt_l1[index->l1],
 400				  table->bt_l1_dma_addr[index->l1]);
 401		table->bt_l1[index->l1] = NULL;
 402	}
 403
 404	if (index->inited & HEM_INDEX_L0) {
 405		dma_free_coherent(dev, bt_size, table->bt_l0[index->l0],
 406				  table->bt_l0_dma_addr[index->l0]);
 407		table->bt_l0[index->l0] = NULL;
 408	}
 409}
 410
 411static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
 412			  struct hns_roce_hem_table *table,
 413			  struct hns_roce_hem_mhop *mhop,
 414			  struct hns_roce_hem_index *index)
 415{
 416	u32 bt_size = mhop->bt_chunk_size;
 417	struct device *dev = hr_dev->dev;
 418	struct hns_roce_hem_iter iter;
 419	gfp_t flag;
 420	u64 bt_ba;
 421	u32 size;
 422	int ret;
 423
 424	/* alloc L1 BA's chunk */
 425	if ((check_whether_bt_num_3(table->type, mhop->hop_num) ||
 426	     check_whether_bt_num_2(table->type, mhop->hop_num)) &&
 427	     !table->bt_l0[index->l0]) {
 428		table->bt_l0[index->l0] = dma_alloc_coherent(dev, bt_size,
 429					    &table->bt_l0_dma_addr[index->l0],
 430					    GFP_KERNEL);
 431		if (!table->bt_l0[index->l0]) {
 432			ret = -ENOMEM;
 433			goto out;
 434		}
 435		index->inited |= HEM_INDEX_L0;
 436	}
 437
 438	/* alloc L2 BA's chunk */
 439	if (check_whether_bt_num_3(table->type, mhop->hop_num) &&
 440	    !table->bt_l1[index->l1])  {
 441		table->bt_l1[index->l1] = dma_alloc_coherent(dev, bt_size,
 442					    &table->bt_l1_dma_addr[index->l1],
 443					    GFP_KERNEL);
 444		if (!table->bt_l1[index->l1]) {
 445			ret = -ENOMEM;
 446			goto err_alloc_hem;
 447		}
 448		index->inited |= HEM_INDEX_L1;
 449		*(table->bt_l0[index->l0] + mhop->l1_idx) =
 450					       table->bt_l1_dma_addr[index->l1];
 451	}
 452
 453	/*
  454	 * Allocate a buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
  455	 * Allocate a BT space chunk for MTT/CQE.
 456	 */
 457	size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
 458	flag = GFP_KERNEL | __GFP_NOWARN;
 459	table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT,
 460						    size, flag);
 461	if (!table->hem[index->buf]) {
 462		ret = -ENOMEM;
 463		goto err_alloc_hem;
 464	}
 465
 466	index->inited |= HEM_INDEX_BUF;
 467	hns_roce_hem_first(table->hem[index->buf], &iter);
 468	bt_ba = hns_roce_hem_addr(&iter);
 469	if (table->type < HEM_TYPE_MTT) {
 470		if (mhop->hop_num == 2)
 471			*(table->bt_l1[index->l1] + mhop->l2_idx) = bt_ba;
 472		else if (mhop->hop_num == 1)
 473			*(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
 474	} else if (mhop->hop_num == 2) {
 475		*(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
 476	}
 477
 478	return 0;
 479err_alloc_hem:
 480	free_mhop_hem(hr_dev, table, mhop, index);
 481out:
 482	return ret;
 483}
 484
 485static int set_mhop_hem(struct hns_roce_dev *hr_dev,
 486			struct hns_roce_hem_table *table, unsigned long obj,
 487			struct hns_roce_hem_mhop *mhop,
 488			struct hns_roce_hem_index *index)
 489{
 490	struct ib_device *ibdev = &hr_dev->ib_dev;
 491	u32 step_idx;
 492	int ret = 0;
 493
 494	if (index->inited & HEM_INDEX_L0) {
 495		ret = hr_dev->hw->set_hem(hr_dev, table, obj, 0);
 496		if (ret) {
 497			ibdev_err(ibdev, "set HEM step 0 failed!\n");
 498			goto out;
 499		}
 500	}
 501
 502	if (index->inited & HEM_INDEX_L1) {
 503		ret = hr_dev->hw->set_hem(hr_dev, table, obj, 1);
 504		if (ret) {
 505			ibdev_err(ibdev, "set HEM step 1 failed!\n");
 506			goto out;
 507		}
 508	}
 509
 510	if (index->inited & HEM_INDEX_BUF) {
 511		if (mhop->hop_num == HNS_ROCE_HOP_NUM_0)
 512			step_idx = 0;
 513		else
 514			step_idx = mhop->hop_num;
 515		ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx);
 516		if (ret)
 517			ibdev_err(ibdev, "set HEM step last failed!\n");
 518	}
 519out:
 520	return ret;
 521}
 522
 523static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
 524				   struct hns_roce_hem_table *table,
 525				   unsigned long obj)
 526{
 527	struct ib_device *ibdev = &hr_dev->ib_dev;
 528	struct hns_roce_hem_index index = {};
 529	struct hns_roce_hem_mhop mhop = {};
 530	int ret;
 531
 532	ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
 533	if (ret) {
 534		ibdev_err(ibdev, "calc hem config failed!\n");
 535		return ret;
 536	}
 537
 538	mutex_lock(&table->mutex);
 539	if (table->hem[index.buf]) {
 540		refcount_inc(&table->hem[index.buf]->refcount);
 541		goto out;
 542	}
 543
 544	ret = alloc_mhop_hem(hr_dev, table, &mhop, &index);
 545	if (ret) {
 546		ibdev_err(ibdev, "alloc mhop hem failed!\n");
 547		goto out;
 548	}
 549
 550	/* set HEM base address to hardware */
 551	if (table->type < HEM_TYPE_MTT) {
 552		ret = set_mhop_hem(hr_dev, table, obj, &mhop, &index);
 553		if (ret) {
 554			ibdev_err(ibdev, "set HEM address to HW failed!\n");
 555			goto err_alloc;
 556		}
 557	}
 558
 559	refcount_set(&table->hem[index.buf]->refcount, 1);
 560	goto out;
 561
 562err_alloc:
 563	free_mhop_hem(hr_dev, table, &mhop, &index);
 564out:
 565	mutex_unlock(&table->mutex);
 566	return ret;
 567}
 568
 569int hns_roce_table_get(struct hns_roce_dev *hr_dev,
 570		       struct hns_roce_hem_table *table, unsigned long obj)
 571{
 572	struct device *dev = hr_dev->dev;
 573	unsigned long i;
 574	int ret = 0;
 575
 576	if (hns_roce_check_whether_mhop(hr_dev, table->type))
 577		return hns_roce_table_mhop_get(hr_dev, table, obj);
 578
 579	i = obj / (table->table_chunk_size / table->obj_size);
 580
 581	mutex_lock(&table->mutex);
 582
 583	if (table->hem[i]) {
 584		refcount_inc(&table->hem[i]->refcount);
 585		goto out;
 586	}
 587
 588	table->hem[i] = hns_roce_alloc_hem(hr_dev,
 589				       table->table_chunk_size >> PAGE_SHIFT,
 590				       table->table_chunk_size,
 591				       GFP_KERNEL | __GFP_NOWARN);
 592	if (!table->hem[i]) {
 593		ret = -ENOMEM;
 594		goto out;
 595	}
 596
 597	/* Set HEM base address(128K/page, pa) to Hardware */
 598	ret = hr_dev->hw->set_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT);
 599	if (ret) {
 600		hns_roce_free_hem(hr_dev, table->hem[i]);
 601		table->hem[i] = NULL;
 602		dev_err(dev, "set HEM base address to HW failed, ret = %d.\n",
 603			ret);
 604		goto out;
 605	}
 606
 607	refcount_set(&table->hem[i]->refcount, 1);
 608out:
 609	mutex_unlock(&table->mutex);
 610	return ret;
 611}
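/*
 * Editor's note: a hypothetical caller pairs these helpers as follows (qpn
 * and the local variables are illustrative, not taken from this file):
 *
 *	ret = hns_roce_table_get(hr_dev, &hr_dev->qp_table.qp_table, qpn);
 *	if (ret)
 *		return ret;
 *	ctx = hns_roce_table_find(hr_dev, &hr_dev->qp_table.qp_table, qpn,
 *				  &dma_handle);
 *	...
 *	hns_roce_table_put(hr_dev, &hr_dev->qp_table.qp_table, qpn);
 *
 * The reference taken here is dropped again by hns_roce_table_put() below.
 */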
 612
 613static void clear_mhop_hem(struct hns_roce_dev *hr_dev,
 614			   struct hns_roce_hem_table *table, unsigned long obj,
 615			   struct hns_roce_hem_mhop *mhop,
 616			   struct hns_roce_hem_index *index)
 617{
 618	struct ib_device *ibdev = &hr_dev->ib_dev;
 619	u32 hop_num = mhop->hop_num;
 620	u32 chunk_ba_num;
 621	u32 step_idx;
 622	int ret;
 623
 624	index->inited = HEM_INDEX_BUF;
 625	chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
 626	if (check_whether_bt_num_2(table->type, hop_num)) {
 627		if (hns_roce_check_hem_null(table->hem, index->buf,
 628					    chunk_ba_num, table->num_hem))
 629			index->inited |= HEM_INDEX_L0;
 630	} else if (check_whether_bt_num_3(table->type, hop_num)) {
 631		if (hns_roce_check_hem_null(table->hem, index->buf,
 632					    chunk_ba_num, table->num_hem)) {
 633			index->inited |= HEM_INDEX_L1;
 634			if (hns_roce_check_bt_null(table->bt_l1, index->l1,
 635						   chunk_ba_num))
 636				index->inited |= HEM_INDEX_L0;
 637		}
 638	}
 639
 640	if (table->type < HEM_TYPE_MTT) {
 641		if (hop_num == HNS_ROCE_HOP_NUM_0)
 642			step_idx = 0;
 643		else
 644			step_idx = hop_num;
 645
 646		ret = hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx);
 647		if (ret)
 648			ibdev_warn(ibdev, "failed to clear hop%u HEM, ret = %d.\n",
 649				   hop_num, ret);
 650
 651		if (index->inited & HEM_INDEX_L1) {
 652			ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 1);
 653			if (ret)
 654				ibdev_warn(ibdev, "failed to clear HEM step 1, ret = %d.\n",
 655					   ret);
 656		}
 657
 658		if (index->inited & HEM_INDEX_L0) {
 659			ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 0);
 660			if (ret)
 661				ibdev_warn(ibdev, "failed to clear HEM step 0, ret = %d.\n",
 662					   ret);
 663		}
 664	}
 665}
 666
 667static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
 668				    struct hns_roce_hem_table *table,
 669				    unsigned long obj,
 670				    int check_refcount)
 671{
 672	struct ib_device *ibdev = &hr_dev->ib_dev;
 673	struct hns_roce_hem_index index = {};
 674	struct hns_roce_hem_mhop mhop = {};
 675	int ret;
 676
 677	ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
 678	if (ret) {
 679		ibdev_err(ibdev, "calc hem config failed!\n");
 680		return;
 681	}
 682
 683	if (!check_refcount)
 684		mutex_lock(&table->mutex);
 685	else if (!refcount_dec_and_mutex_lock(&table->hem[index.buf]->refcount,
 686					      &table->mutex))
 687		return;
 688
 689	clear_mhop_hem(hr_dev, table, obj, &mhop, &index);
 690	free_mhop_hem(hr_dev, table, &mhop, &index);
 691
 692	mutex_unlock(&table->mutex);
 693}
 694
 695void hns_roce_table_put(struct hns_roce_dev *hr_dev,
 696			struct hns_roce_hem_table *table, unsigned long obj)
 697{
 698	struct device *dev = hr_dev->dev;
 699	unsigned long i;
 700	int ret;
 701
 702	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
 703		hns_roce_table_mhop_put(hr_dev, table, obj, 1);
 704		return;
 705	}
 706
 707	i = obj / (table->table_chunk_size / table->obj_size);
 708
 709	if (!refcount_dec_and_mutex_lock(&table->hem[i]->refcount,
 710					 &table->mutex))
 711		return;
 712
 713	ret = hr_dev->hw->clear_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT);
 714	if (ret)
 715		dev_warn(dev, "failed to clear HEM base address, ret = %d.\n",
 716			 ret);
 717
 718	hns_roce_free_hem(hr_dev, table->hem[i]);
 719	table->hem[i] = NULL;
 720
 721	mutex_unlock(&table->mutex);
 722}
 723
 724void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
 725			  struct hns_roce_hem_table *table,
 726			  unsigned long obj, dma_addr_t *dma_handle)
 727{
 728	struct hns_roce_hem_chunk *chunk;
 729	struct hns_roce_hem_mhop mhop;
 730	struct hns_roce_hem *hem;
 731	unsigned long mhop_obj = obj;
 732	unsigned long obj_per_chunk;
 733	unsigned long idx_offset;
 734	int offset, dma_offset;
 735	void *addr = NULL;
 736	u32 hem_idx = 0;
 737	int length;
 738	int i, j;
 739
 740	mutex_lock(&table->mutex);
 741
 742	if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
 743		obj_per_chunk = table->table_chunk_size / table->obj_size;
 744		hem = table->hem[obj / obj_per_chunk];
 745		idx_offset = obj % obj_per_chunk;
 746		dma_offset = offset = idx_offset * table->obj_size;
 747	} else {
 748		u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
 749
 750		if (hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop))
 751			goto out;
 752		/* mtt mhop */
 753		i = mhop.l0_idx;
 754		j = mhop.l1_idx;
 755		if (mhop.hop_num == 2)
 756			hem_idx = i * (mhop.bt_chunk_size / BA_BYTE_LEN) + j;
 757		else if (mhop.hop_num == 1 ||
 758			 mhop.hop_num == HNS_ROCE_HOP_NUM_0)
 759			hem_idx = i;
 760
 761		hem = table->hem[hem_idx];
 762		dma_offset = offset = obj * seg_size % mhop.bt_chunk_size;
 763		if (mhop.hop_num == 2)
 764			dma_offset = offset = 0;
 765	}
 766
 767	if (!hem)
 768		goto out;
 769
 770	list_for_each_entry(chunk, &hem->chunk_list, list) {
 771		for (i = 0; i < chunk->npages; ++i) {
 772			length = sg_dma_len(&chunk->mem[i]);
 773			if (dma_handle && dma_offset >= 0) {
 774				if (length > (u32)dma_offset)
 775					*dma_handle = sg_dma_address(
 776						&chunk->mem[i]) + dma_offset;
 777				dma_offset -= length;
 778			}
 779
 780			if (length > (u32)offset) {
 781				addr = chunk->buf[i] + offset;
 782				goto out;
 783			}
 784			offset -= length;
 785		}
 786	}
 787
 788out:
 789	mutex_unlock(&table->mutex);
 790	return addr;
 791}
 792
 793int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
 794			    struct hns_roce_hem_table *table, u32 type,
 795			    unsigned long obj_size, unsigned long nobj)
 796{
 797	unsigned long obj_per_chunk;
 798	unsigned long num_hem;
 799
 800	if (!hns_roce_check_whether_mhop(hr_dev, type)) {
 801		table->table_chunk_size = hr_dev->caps.chunk_sz;
 802		obj_per_chunk = table->table_chunk_size / obj_size;
 803		num_hem = DIV_ROUND_UP(nobj, obj_per_chunk);
 804
 805		table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
 806		if (!table->hem)
 807			return -ENOMEM;
 808	} else {
 809		struct hns_roce_hem_mhop mhop = {};
 810		unsigned long buf_chunk_size;
 811		unsigned long bt_chunk_size;
 812		unsigned long bt_chunk_num;
 813		unsigned long num_bt_l0;
 814		u32 hop_num;
 815
 816		if (get_hem_table_config(hr_dev, &mhop, type))
 817			return -EINVAL;
 818
 819		buf_chunk_size = mhop.buf_chunk_size;
 820		bt_chunk_size = mhop.bt_chunk_size;
 821		num_bt_l0 = mhop.ba_l0_num;
 822		hop_num = mhop.hop_num;
 823
 824		obj_per_chunk = buf_chunk_size / obj_size;
 825		num_hem = DIV_ROUND_UP(nobj, obj_per_chunk);
 826		bt_chunk_num = bt_chunk_size / BA_BYTE_LEN;
 827
 828		if (type >= HEM_TYPE_MTT)
 829			num_bt_l0 = bt_chunk_num;
 830
 831		table->hem = kcalloc(num_hem, sizeof(*table->hem),
 832					 GFP_KERNEL);
 833		if (!table->hem)
 834			goto err_kcalloc_hem_buf;
 835
 836		if (check_whether_bt_num_3(type, hop_num)) {
 837			unsigned long num_bt_l1;
 838
 839			num_bt_l1 = DIV_ROUND_UP(num_hem, bt_chunk_num);
 840			table->bt_l1 = kcalloc(num_bt_l1,
 841					       sizeof(*table->bt_l1),
 842					       GFP_KERNEL);
 843			if (!table->bt_l1)
 844				goto err_kcalloc_bt_l1;
 845
 846			table->bt_l1_dma_addr = kcalloc(num_bt_l1,
 847						 sizeof(*table->bt_l1_dma_addr),
 848						 GFP_KERNEL);
 849
 850			if (!table->bt_l1_dma_addr)
 851				goto err_kcalloc_l1_dma;
 852		}
 853
 854		if (check_whether_bt_num_2(type, hop_num) ||
 855			check_whether_bt_num_3(type, hop_num)) {
 856			table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
 857					       GFP_KERNEL);
 858			if (!table->bt_l0)
 859				goto err_kcalloc_bt_l0;
 860
 861			table->bt_l0_dma_addr = kcalloc(num_bt_l0,
 862						 sizeof(*table->bt_l0_dma_addr),
 863						 GFP_KERNEL);
 864			if (!table->bt_l0_dma_addr)
 865				goto err_kcalloc_l0_dma;
 866		}
 867	}
 868
 869	table->type = type;
 870	table->num_hem = num_hem;
 871	table->obj_size = obj_size;
 872	mutex_init(&table->mutex);
 873
 874	return 0;
 875
 876err_kcalloc_l0_dma:
 877	kfree(table->bt_l0);
 878	table->bt_l0 = NULL;
 879
 880err_kcalloc_bt_l0:
 881	kfree(table->bt_l1_dma_addr);
 882	table->bt_l1_dma_addr = NULL;
 883
 884err_kcalloc_l1_dma:
 885	kfree(table->bt_l1);
 886	table->bt_l1 = NULL;
 887
 888err_kcalloc_bt_l1:
 889	kfree(table->hem);
 890	table->hem = NULL;
 891
 892err_kcalloc_hem_buf:
 893	return -ENOMEM;
 894}
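/*
 * Editor's note (hypothetical usage; the caps field names are assumptions,
 * not taken from this file): a table is typically sized from the device
 * capabilities and torn down with hns_roce_cleanup_hem_table(), e.g.:
 *
 *	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
 *				      HEM_TYPE_QPC, hr_dev->caps.qpc_sz,
 *				      hr_dev->caps.num_qps);
 */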
 895
 896static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
 897					    struct hns_roce_hem_table *table)
 898{
 899	struct hns_roce_hem_mhop mhop;
 900	u32 buf_chunk_size;
 901	u64 obj;
 902	int i;
 903
 904	if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
 905		return;
 906	buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
 907					mhop.bt_chunk_size;
 908
 909	for (i = 0; i < table->num_hem; ++i) {
 910		obj = i * buf_chunk_size / table->obj_size;
 911		if (table->hem[i])
 912			hns_roce_table_mhop_put(hr_dev, table, obj, 0);
 913	}
 914
 915	kfree(table->hem);
 916	table->hem = NULL;
 917	kfree(table->bt_l1);
 918	table->bt_l1 = NULL;
 919	kfree(table->bt_l1_dma_addr);
 920	table->bt_l1_dma_addr = NULL;
 921	kfree(table->bt_l0);
 922	table->bt_l0 = NULL;
 923	kfree(table->bt_l0_dma_addr);
 924	table->bt_l0_dma_addr = NULL;
 925}
 926
 927void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
 928				struct hns_roce_hem_table *table)
 929{
 930	struct device *dev = hr_dev->dev;
 931	unsigned long i;
 932	int obj;
 933	int ret;
 934
 935	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
 936		hns_roce_cleanup_mhop_hem_table(hr_dev, table);
 937		return;
 938	}
 939
 940	for (i = 0; i < table->num_hem; ++i)
 941		if (table->hem[i]) {
 942			obj = i * table->table_chunk_size / table->obj_size;
 943			ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 0);
 944			if (ret)
 945				dev_err(dev, "clear HEM base address failed, ret = %d.\n",
 946					ret);
 947
 948			hns_roce_free_hem(hr_dev, table->hem[i]);
 949		}
 950
 951	kfree(table->hem);
 952}
 953
 954void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
 955{
 956	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
 957		hns_roce_cleanup_hem_table(hr_dev,
 958					   &hr_dev->srq_table.table);
 959	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
 960	if (hr_dev->caps.qpc_timer_entry_sz)
 961		hns_roce_cleanup_hem_table(hr_dev,
 962					   &hr_dev->qpc_timer_table);
 963	if (hr_dev->caps.cqc_timer_entry_sz)
 964		hns_roce_cleanup_hem_table(hr_dev,
 965					   &hr_dev->cqc_timer_table);
 966	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
 967		hns_roce_cleanup_hem_table(hr_dev,
 968					   &hr_dev->qp_table.sccc_table);
 969	if (hr_dev->caps.trrl_entry_sz)
 970		hns_roce_cleanup_hem_table(hr_dev,
 971					   &hr_dev->qp_table.trrl_table);
 972
 973	if (hr_dev->caps.gmv_entry_sz)
 974		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->gmv_table);
 975
 976	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
 977	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
 978	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
 979}
 980
 981struct hns_roce_hem_item {
 982	struct list_head list; /* link all hems in the same bt level */
 983	struct list_head sibling; /* link all hems in last hop for mtt */
 984	void *addr;
 985	dma_addr_t dma_addr;
 986	size_t count; /* max ba numbers */
 987	int start; /* start buf offset in this hem */
 988	int end; /* end buf offset in this hem */
 989};
 990
 991/* All HEM items are linked in a tree structure */
 992struct hns_roce_hem_head {
 993	struct list_head branch[HNS_ROCE_MAX_BT_REGION];
 994	struct list_head root;
 995	struct list_head leaf;
 996};
 997
 998static struct hns_roce_hem_item *
 999hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
1000		    bool exist_bt)
1001{
1002	struct hns_roce_hem_item *hem;
1003
1004	hem = kzalloc(sizeof(*hem), GFP_KERNEL);
1005	if (!hem)
1006		return NULL;
1007
1008	if (exist_bt) {
1009		hem->addr = dma_alloc_coherent(hr_dev->dev, count * BA_BYTE_LEN,
1010					       &hem->dma_addr, GFP_KERNEL);
1011		if (!hem->addr) {
1012			kfree(hem);
1013			return NULL;
1014		}
1015	}
1016
1017	hem->count = count;
1018	hem->start = start;
1019	hem->end = end;
1020	INIT_LIST_HEAD(&hem->list);
1021	INIT_LIST_HEAD(&hem->sibling);
1022
1023	return hem;
1024}
1025
1026static void hem_list_free_item(struct hns_roce_dev *hr_dev,
1027			       struct hns_roce_hem_item *hem, bool exist_bt)
1028{
1029	if (exist_bt)
1030		dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
1031				  hem->addr, hem->dma_addr);
1032	kfree(hem);
1033}
1034
1035static void hem_list_free_all(struct hns_roce_dev *hr_dev,
1036			      struct list_head *head, bool exist_bt)
1037{
1038	struct hns_roce_hem_item *hem, *temp_hem;
1039
1040	list_for_each_entry_safe(hem, temp_hem, head, list) {
1041		list_del(&hem->list);
1042		hem_list_free_item(hr_dev, hem, exist_bt);
1043	}
1044}
1045
1046static void hem_list_link_bt(struct hns_roce_dev *hr_dev, void *base_addr,
1047			     u64 table_addr)
1048{
1049	*(u64 *)(base_addr) = table_addr;
1050}
1051
1052/* assign L0 table address to hem from root bt */
1053static void hem_list_assign_bt(struct hns_roce_dev *hr_dev,
1054			       struct hns_roce_hem_item *hem, void *cpu_addr,
1055			       u64 phy_addr)
1056{
1057	hem->addr = cpu_addr;
1058	hem->dma_addr = (dma_addr_t)phy_addr;
1059}
1060
1061static inline bool hem_list_page_is_in_range(struct hns_roce_hem_item *hem,
1062					     int offset)
1063{
1064	return (hem->start <= offset && offset <= hem->end);
1065}
1066
1067static struct hns_roce_hem_item *hem_list_search_item(struct list_head *ba_list,
1068						      int page_offset)
1069{
1070	struct hns_roce_hem_item *hem, *temp_hem;
1071	struct hns_roce_hem_item *found = NULL;
1072
1073	list_for_each_entry_safe(hem, temp_hem, ba_list, list) {
1074		if (hem_list_page_is_in_range(hem, page_offset)) {
1075			found = hem;
1076			break;
1077		}
1078	}
1079
1080	return found;
1081}
1082
1083static bool hem_list_is_bottom_bt(int hopnum, int bt_level)
1084{
1085	/*
1086	 * hopnum    base address table levels
1087	 * 0		L0(buf)
1088	 * 1		L0 -> buf
1089	 * 2		L0 -> L1 -> buf
1090	 * 3		L0 -> L1 -> L2 -> buf
1091	 */
1092	return bt_level >= (hopnum ? hopnum - 1 : hopnum);
1093}
1094
1095/*
1096 * calc base address entries num
 1097 * @hopnum: number of hops in multi-hop addressing
1098 * @bt_level: base address table level
1099 * @unit: ba entries per bt page
1100 */
1101static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
1102{
1103	u32 step;
1104	int max;
1105	int i;
1106
1107	if (hopnum <= bt_level)
1108		return 0;
1109	/*
1110	 * hopnum  bt_level   range
1111	 * 1	      0       unit
1112	 * ------------
1113	 * 2	      0       unit * unit
1114	 * 2	      1       unit
1115	 * ------------
1116	 * 3	      0       unit * unit * unit
1117	 * 3	      1       unit * unit
1118	 * 3	      2       unit
1119	 */
1120	step = 1;
1121	max = hopnum - bt_level;
1122	for (i = 0; i < max; i++)
1123		step = step * unit;
1124
1125	return step;
1126}
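/*
 * Editor's note: with an assumed unit of 512 (a 4K BT page holding 512 BAs of
 * BA_BYTE_LEN = 8 bytes), the table above works out to:
 *
 *	hem_list_calc_ba_range(2, 1, 512) = 512          buffer pages per L1 BT
 *	hem_list_calc_ba_range(2, 0, 512) = 512 * 512    buffer pages per L0 BT
 *	hem_list_calc_ba_range(3, 0, 512) = 512 * 512 * 512
 *
 * i.e. each additional level multiplies the reachable range by "unit".
 */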
1127
1128/*
1129 * calc the root ba entries which could cover all regions
1130 * @regions: buf region array
1131 * @region_cnt: array size of @regions
1132 * @unit: ba entries per bt page
1133 */
1134int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
1135				   int region_cnt, int unit)
1136{
1137	struct hns_roce_buf_region *r;
1138	int total = 0;
1139	int step;
1140	int i;
1141
1142	for (i = 0; i < region_cnt; i++) {
1143		r = (struct hns_roce_buf_region *)&regions[i];
1144		if (r->hopnum > 1) {
1145			step = hem_list_calc_ba_range(r->hopnum, 1, unit);
1146			if (step > 0)
1147				total += (r->count + step - 1) / step;
1148		} else {
1149			total += r->count;
1150		}
1151	}
1152
1153	return total;
1154}
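/*
 * Editor's note: a small assumed example. With unit = 512 and two regions of
 * hopnum 2 covering 1000 and 300 buffer pages, step = 512 at level 1, so the
 * regions need DIV_ROUND_UP(1000, 512) + DIV_ROUND_UP(300, 512) = 2 + 1 = 3
 * root BT entries. Regions with hopnum <= 1 contribute their page count
 * directly instead.
 */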
1155
1156static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
1157				 const struct hns_roce_buf_region *r, int unit,
1158				 int offset, struct list_head *mid_bt,
1159				 struct list_head *btm_bt)
1160{
1161	struct hns_roce_hem_item *hem_ptrs[HNS_ROCE_MAX_BT_LEVEL] = { NULL };
1162	struct list_head temp_list[HNS_ROCE_MAX_BT_LEVEL];
1163	struct hns_roce_hem_item *cur, *pre;
1164	const int hopnum = r->hopnum;
1165	int start_aligned;
1166	int distance;
1167	int ret = 0;
1168	int max_ofs;
1169	int level;
1170	u32 step;
1171	int end;
1172
1173	if (hopnum <= 1)
1174		return 0;
1175
1176	if (hopnum > HNS_ROCE_MAX_BT_LEVEL) {
1177		dev_err(hr_dev->dev, "invalid hopnum %d!\n", hopnum);
1178		return -EINVAL;
1179	}
1180
1181	if (offset < r->offset) {
1182		dev_err(hr_dev->dev, "invalid offset %d, min %u!\n",
1183			offset, r->offset);
1184		return -EINVAL;
1185	}
1186
1187	distance = offset - r->offset;
1188	max_ofs = r->offset + r->count - 1;
1189	for (level = 0; level < hopnum; level++)
1190		INIT_LIST_HEAD(&temp_list[level]);
1191
1192	/* config L1 bt to last bt and link them to corresponding parent */
1193	for (level = 1; level < hopnum; level++) {
1194		cur = hem_list_search_item(&mid_bt[level], offset);
1195		if (cur) {
1196			hem_ptrs[level] = cur;
1197			continue;
1198		}
1199
1200		step = hem_list_calc_ba_range(hopnum, level, unit);
1201		if (step < 1) {
1202			ret = -EINVAL;
1203			goto err_exit;
1204		}
1205
1206		start_aligned = (distance / step) * step + r->offset;
1207		end = min_t(int, start_aligned + step - 1, max_ofs);
1208		cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
1209					  true);
1210		if (!cur) {
1211			ret = -ENOMEM;
1212			goto err_exit;
1213		}
1214		hem_ptrs[level] = cur;
1215		list_add(&cur->list, &temp_list[level]);
1216		if (hem_list_is_bottom_bt(hopnum, level))
1217			list_add(&cur->sibling, &temp_list[0]);
1218
1219		/* link bt to parent bt */
1220		if (level > 1) {
1221			pre = hem_ptrs[level - 1];
1222			step = (cur->start - pre->start) / step * BA_BYTE_LEN;
1223			hem_list_link_bt(hr_dev, pre->addr + step,
1224					 cur->dma_addr);
1225		}
1226	}
1227
1228	list_splice(&temp_list[0], btm_bt);
1229	for (level = 1; level < hopnum; level++)
1230		list_splice(&temp_list[level], &mid_bt[level]);
1231
1232	return 0;
1233
1234err_exit:
1235	for (level = 1; level < hopnum; level++)
1236		hem_list_free_all(hr_dev, &temp_list[level], true);
1237
1238	return ret;
1239}
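/*
 * Editor's note: an assumed example of the alignment math above. For a region
 * with hopnum = 2, r->offset = 0, r->count = 2000, unit = 512 and a request
 * at offset = 1000:
 *
 *	step          = 512 (level 1)
 *	start_aligned = (1000 / 512) * 512 + 0 = 512
 *	end           = min(512 + 511, 1999)   = 1023
 *
 * so the level-1 BT allocated here covers buffer pages 512..1023 of the
 * region; later offsets in that window reuse it via hem_list_search_item().
 */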
1240
1241static struct hns_roce_hem_item *
1242alloc_root_hem(struct hns_roce_dev *hr_dev, int unit, int *max_ba_num,
1243	       const struct hns_roce_buf_region *regions, int region_cnt)
1244{
1245	const struct hns_roce_buf_region *r;
1246	struct hns_roce_hem_item *hem;
1247	int ba_num;
1248	int offset;
1249
1250	ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit);
1251	if (ba_num < 1)
1252		return ERR_PTR(-ENOMEM);
1253
1254	if (ba_num > unit)
1255		return ERR_PTR(-ENOBUFS);
1256
1257	offset = regions[0].offset;
 1258	/* point to the last region */
1259	r = &regions[region_cnt - 1];
1260	hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
1261				  ba_num, true);
1262	if (!hem)
1263		return ERR_PTR(-ENOMEM);
1264
1265	*max_ba_num = ba_num;
1266
1267	return hem;
1268}
1269
1270static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
1271			      u64 phy_base, const struct hns_roce_buf_region *r,
1272			      struct list_head *branch_head,
1273			      struct list_head *leaf_head)
1274{
1275	struct hns_roce_hem_item *hem;
1276
1277	hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
1278				  r->count, false);
1279	if (!hem)
1280		return -ENOMEM;
1281
1282	hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base);
1283	list_add(&hem->list, branch_head);
1284	list_add(&hem->sibling, leaf_head);
1285
1286	return r->count;
1287}
1288
1289static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
1290			   int unit, const struct hns_roce_buf_region *r,
1291			   const struct list_head *branch_head)
1292{
1293	struct hns_roce_hem_item *hem, *temp_hem;
1294	int total = 0;
1295	int offset;
1296	int step;
1297
1298	step = hem_list_calc_ba_range(r->hopnum, 1, unit);
1299	if (step < 1)
1300		return -EINVAL;
1301
1302	/* if exist mid bt, link L1 to L0 */
1303	list_for_each_entry_safe(hem, temp_hem, branch_head, list) {
1304		offset = (hem->start - r->offset) / step * BA_BYTE_LEN;
1305		hem_list_link_bt(hr_dev, cpu_base + offset, hem->dma_addr);
1306		total++;
1307	}
1308
1309	return total;
1310}
1311
1312static int
1313setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list,
1314	       int unit, int max_ba_num, struct hns_roce_hem_head *head,
1315	       const struct hns_roce_buf_region *regions, int region_cnt)
1316{
1317	const struct hns_roce_buf_region *r;
1318	struct hns_roce_hem_item *root_hem;
1319	void *cpu_base;
1320	u64 phy_base;
1321	int i, total;
1322	int ret;
1323
1324	root_hem = list_first_entry(&head->root,
1325				    struct hns_roce_hem_item, list);
1326	if (!root_hem)
1327		return -ENOMEM;
1328
1329	total = 0;
1330	for (i = 0; i < region_cnt && total < max_ba_num; i++) {
1331		r = &regions[i];
1332		if (!r->count)
1333			continue;
1334
1335		/* all regions's mid[x][0] shared the root_bt's trunk */
1336		cpu_base = root_hem->addr + total * BA_BYTE_LEN;
1337		phy_base = root_hem->dma_addr + total * BA_BYTE_LEN;
1338
1339		/* if hopnum is 0 or 1, cut a new fake hem from the root bt
 1340		 * whose address is shared by all regions.
1341		 */
1342		if (hem_list_is_bottom_bt(r->hopnum, 0))
1343			ret = alloc_fake_root_bt(hr_dev, cpu_base, phy_base, r,
1344						 &head->branch[i], &head->leaf);
1345		else
1346			ret = setup_middle_bt(hr_dev, cpu_base, unit, r,
1347					      &hem_list->mid_bt[i][1]);
1348
1349		if (ret < 0)
1350			return ret;
1351
1352		total += ret;
1353	}
1354
1355	list_splice(&head->leaf, &hem_list->btm_bt);
1356	list_splice(&head->root, &hem_list->root_bt);
1357	for (i = 0; i < region_cnt; i++)
1358		list_splice(&head->branch[i], &hem_list->mid_bt[i][0]);
1359
1360	return 0;
1361}
1362
1363static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
1364				  struct hns_roce_hem_list *hem_list, int unit,
1365				  const struct hns_roce_buf_region *regions,
1366				  int region_cnt)
1367{
1368	struct hns_roce_hem_item *root_hem;
1369	struct hns_roce_hem_head head;
1370	int max_ba_num;
1371	int ret;
1372	int i;
1373
1374	root_hem = hem_list_search_item(&hem_list->root_bt, regions[0].offset);
1375	if (root_hem)
1376		return 0;
1377
1378	max_ba_num = 0;
1379	root_hem = alloc_root_hem(hr_dev, unit, &max_ba_num, regions,
1380				  region_cnt);
1381	if (IS_ERR(root_hem))
1382		return PTR_ERR(root_hem);
1383
1384	/* List head for storing all allocated HEM items */
1385	INIT_LIST_HEAD(&head.root);
1386	INIT_LIST_HEAD(&head.leaf);
1387	for (i = 0; i < region_cnt; i++)
1388		INIT_LIST_HEAD(&head.branch[i]);
1389
1390	hem_list->root_ba = root_hem->dma_addr;
1391	list_add(&root_hem->list, &head.root);
1392	ret = setup_root_hem(hr_dev, hem_list, unit, max_ba_num, &head, regions,
1393			     region_cnt);
1394	if (ret) {
1395		for (i = 0; i < region_cnt; i++)
1396			hem_list_free_all(hr_dev, &head.branch[i], false);
1397
1398		hem_list_free_all(hr_dev, &head.root, true);
1399	}
1400
1401	return ret;
1402}
1403
1404/* construct the base address table and link them by address hop config */
1405int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
1406			      struct hns_roce_hem_list *hem_list,
1407			      const struct hns_roce_buf_region *regions,
1408			      int region_cnt, unsigned int bt_pg_shift)
1409{
1410	const struct hns_roce_buf_region *r;
1411	int ofs, end;
1412	int unit;
1413	int ret;
1414	int i;
1415
1416	if (region_cnt > HNS_ROCE_MAX_BT_REGION) {
 1417		dev_err(hr_dev->dev, "invalid region_cnt %d!\n",
1418			region_cnt);
1419		return -EINVAL;
1420	}
1421
1422	unit = (1 << bt_pg_shift) / BA_BYTE_LEN;
1423	for (i = 0; i < region_cnt; i++) {
1424		r = &regions[i];
1425		if (!r->count)
1426			continue;
1427
1428		end = r->offset + r->count;
1429		for (ofs = r->offset; ofs < end; ofs += unit) {
1430			ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs,
1431						    hem_list->mid_bt[i],
1432						    &hem_list->btm_bt);
1433			if (ret) {
1434				dev_err(hr_dev->dev,
1435					"alloc hem trunk fail ret = %d!\n", ret);
1436				goto err_alloc;
1437			}
1438		}
1439	}
1440
1441	ret = hem_list_alloc_root_bt(hr_dev, hem_list, unit, regions,
1442				     region_cnt);
1443	if (ret)
1444		dev_err(hr_dev->dev, "alloc hem root fail ret = %d!\n", ret);
1445	else
1446		return 0;
1447
1448err_alloc:
1449	hns_roce_hem_list_release(hr_dev, hem_list);
1450
1451	return ret;
1452}
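/*
 * Editor's note: a hypothetical end-to-end use of the hem_list API in this
 * file (local variable names are illustrative):
 *
 *	hns_roce_hem_list_init(&hem_list);
 *	ret = hns_roce_hem_list_request(hr_dev, &hem_list, regions,
 *					region_cnt, bt_pg_shift);
 *	if (ret)
 *		return ret;
 *	mtts = hns_roce_hem_list_find_mtt(hr_dev, &hem_list, offset, &mtt_cnt);
 *	...
 *	hns_roce_hem_list_release(hr_dev, &hem_list);
 */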
1453
1454void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
1455			       struct hns_roce_hem_list *hem_list)
1456{
1457	int i, j;
1458
1459	for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
1460		for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
1461			hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j],
1462					  j != 0);
1463
1464	hem_list_free_all(hr_dev, &hem_list->root_bt, true);
1465	INIT_LIST_HEAD(&hem_list->btm_bt);
1466	hem_list->root_ba = 0;
1467}
1468
1469void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list)
1470{
1471	int i, j;
1472
1473	INIT_LIST_HEAD(&hem_list->root_bt);
1474	INIT_LIST_HEAD(&hem_list->btm_bt);
1475	for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
1476		for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
1477			INIT_LIST_HEAD(&hem_list->mid_bt[i][j]);
1478}
1479
1480void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
1481				 struct hns_roce_hem_list *hem_list,
1482				 int offset, int *mtt_cnt)
1483{
1484	struct list_head *head = &hem_list->btm_bt;
1485	struct hns_roce_hem_item *hem, *temp_hem;
1486	void *cpu_base = NULL;
1487	int nr = 0;
1488
1489	list_for_each_entry_safe(hem, temp_hem, head, sibling) {
1490		if (hem_list_page_is_in_range(hem, offset)) {
1491			nr = offset - hem->start;
1492			cpu_base = hem->addr + nr * BA_BYTE_LEN;
1493			nr = hem->end + 1 - offset;
1494			break;
1495		}
1496	}
1497
1498	if (mtt_cnt)
1499		*mtt_cnt = nr;
1500
1501	return cpu_base;
1502}
v4.10.11
  1/*
  2 * Copyright (c) 2016 Hisilicon Limited.
  3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
  4 *
  5 * This software is available to you under a choice of one of two
  6 * licenses.  You may choose to be licensed under the terms of the GNU
  7 * General Public License (GPL) Version 2, available from the file
  8 * COPYING in the main directory of this source tree, or the
  9 * OpenIB.org BSD license below:
 10 *
 11 *     Redistribution and use in source and binary forms, with or
 12 *     without modification, are permitted provided that the following
 13 *     conditions are met:
 14 *
 15 *      - Redistributions of source code must retain the above
 16 *        copyright notice, this list of conditions and the following
 17 *        disclaimer.
 18 *
 19 *      - Redistributions in binary form must reproduce the above
 20 *        copyright notice, this list of conditions and the following
 21 *        disclaimer in the documentation and/or other materials
 22 *        provided with the distribution.
 23 *
 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 31 * SOFTWARE.
 32 */
 33
 34#include <linux/platform_device.h>
 35#include "hns_roce_device.h"
 36#include "hns_roce_hem.h"
 37#include "hns_roce_common.h"
 38
 39#define HNS_ROCE_HEM_ALLOC_SIZE		(1 << 17)
 40#define HNS_ROCE_TABLE_CHUNK_SIZE	(1 << 17)
 41
 42#define DMA_ADDR_T_SHIFT		12
 43#define BT_BA_SHIFT			32
 44
 45struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
 46					gfp_t gfp_mask)
 47{
 48	struct hns_roce_hem_chunk *chunk = NULL;
 49	struct hns_roce_hem *hem;
 50	struct scatterlist *mem;
 51	int order;
 52	void *buf;
 53
 54	WARN_ON(gfp_mask & __GFP_HIGHMEM);
 55
 56	hem = kmalloc(sizeof(*hem),
 57		      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
 58	if (!hem)
 59		return NULL;
 60
 61	hem->refcount = 0;
 62	INIT_LIST_HEAD(&hem->chunk_list);
 63
 64	order = get_order(HNS_ROCE_HEM_ALLOC_SIZE);
 65
 66	while (npages > 0) {
 67		if (!chunk) {
 68			chunk = kmalloc(sizeof(*chunk),
 69				gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
 70			if (!chunk)
 71				goto fail;
 72
 73			sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
 74			chunk->npages = 0;
 75			chunk->nsg = 0;
 76			list_add_tail(&chunk->list, &hem->chunk_list);
 77		}
 78
 79		while (1 << order > npages)
 80			--order;
 81
 82		/*
  83		 * Allocate the memory in one shot. If that fails, do not fall
  84		 * back to smaller blocks; return failure directly.
 85		 */
 86		mem = &chunk->mem[chunk->npages];
 87		buf = dma_alloc_coherent(&hr_dev->pdev->dev, PAGE_SIZE << order,
 88				&sg_dma_address(mem), gfp_mask);
 89		if (!buf)
 90			goto fail;
 91
 92		sg_set_buf(mem, buf, PAGE_SIZE << order);
 93		WARN_ON(mem->offset);
 94		sg_dma_len(mem) = PAGE_SIZE << order;
 95
 96		++chunk->npages;
 97		++chunk->nsg;
 98		npages -= 1 << order;
 99	}
100
101	return hem;
102
103fail:
104	hns_roce_free_hem(hr_dev, hem);
105	return NULL;
106}
107
108void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
109{
110	struct hns_roce_hem_chunk *chunk, *tmp;
111	int i;
112
113	if (!hem)
114		return;
115
116	list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
117		for (i = 0; i < chunk->npages; ++i)
118			dma_free_coherent(&hr_dev->pdev->dev,
119				   chunk->mem[i].length,
120				   lowmem_page_address(sg_page(&chunk->mem[i])),
121				   sg_dma_address(&chunk->mem[i]));
122		kfree(chunk);
123	}
124
125	kfree(hem);
126}
127
128static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
129			    struct hns_roce_hem_table *table, unsigned long obj)
130{
131	struct device *dev = &hr_dev->pdev->dev;
132	spinlock_t *lock = &hr_dev->bt_cmd_lock;
133	unsigned long end = 0;
134	unsigned long flags;
135	struct hns_roce_hem_iter iter;
136	void __iomem *bt_cmd;
137	u32 bt_cmd_h_val = 0;
138	u32 bt_cmd_val[2];
139	u32 bt_cmd_l = 0;
140	u64 bt_ba = 0;
141	int ret = 0;
142
143	/* Find the HEM(Hardware Entry Memory) entry */
144	unsigned long i = (obj & (table->num_obj - 1)) /
145			  (HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size);
146
147	switch (table->type) {
148	case HEM_TYPE_QPC:
149		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
150			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
151		break;
152	case HEM_TYPE_MTPT:
153		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
154			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
155			       HEM_TYPE_MTPT);
156		break;
157	case HEM_TYPE_CQC:
158		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
159			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
160		break;
161	case HEM_TYPE_SRQC:
162		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
163			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
164			       HEM_TYPE_SRQC);
165		break;
166	default:
167		return ret;
168	}
169	roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
170		       ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
171	roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
172	roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
173
 174	/* Currently iterate over only one chunk */
175	for (hns_roce_hem_first(table->hem[i], &iter);
176	     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
177		bt_ba = hns_roce_hem_addr(&iter) >> DMA_ADDR_T_SHIFT;
178
179		spin_lock_irqsave(lock, flags);
180
181		bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
182
183		end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
184		while (1) {
185			if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
186				if (!(time_before(jiffies, end))) {
 187					dev_err(dev, "Write bt_cmd err, hw_sync is not zero.\n");
188					spin_unlock_irqrestore(lock, flags);
189					return -EBUSY;
190				}
191			} else {
192				break;
193			}
194			msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
195		}
196
197		bt_cmd_l = (u32)bt_ba;
198		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
199			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S,
200			       bt_ba >> BT_BA_SHIFT);
201
202		bt_cmd_val[0] = bt_cmd_l;
203		bt_cmd_val[1] = bt_cmd_h_val;
204		hns_roce_write64_k(bt_cmd_val,
205				   hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
206		spin_unlock_irqrestore(lock, flags);
207	}
208
209	return ret;
210}
211
212int hns_roce_table_get(struct hns_roce_dev *hr_dev,
213		       struct hns_roce_hem_table *table, unsigned long obj)
214{
215	struct device *dev = &hr_dev->pdev->dev;
216	int ret = 0;
217	unsigned long i;
218
219	i = (obj & (table->num_obj - 1)) / (HNS_ROCE_TABLE_CHUNK_SIZE /
220	     table->obj_size);
221
222	mutex_lock(&table->mutex);
223
224	if (table->hem[i]) {
225		++table->hem[i]->refcount;
226		goto out;
227	}
228
229	table->hem[i] = hns_roce_alloc_hem(hr_dev,
230				       HNS_ROCE_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
231				       (table->lowmem ? GFP_KERNEL :
232					GFP_HIGHUSER) | __GFP_NOWARN);
233	if (!table->hem[i]) {
234		ret = -ENOMEM;
235		goto out;
236	}
237
238	/* Set HEM base address(128K/page, pa) to Hardware */
239	if (hns_roce_set_hem(hr_dev, table, obj)) {
240		ret = -ENODEV;
241		dev_err(dev, "set HEM base address to HW failed.\n");
242		goto out;
243	}
244
245	++table->hem[i]->refcount;
246out:
247	mutex_unlock(&table->mutex);
248	return ret;
249}
250
251void hns_roce_table_put(struct hns_roce_dev *hr_dev,
252			struct hns_roce_hem_table *table, unsigned long obj)
253{
254	struct device *dev = &hr_dev->pdev->dev;
255	unsigned long i;
256
257	i = (obj & (table->num_obj - 1)) /
258	    (HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size);
259
260	mutex_lock(&table->mutex);
261
262	if (--table->hem[i]->refcount == 0) {
263		/* Clear HEM base address */
264		if (hr_dev->hw->clear_hem(hr_dev, table, obj))
265			dev_warn(dev, "Clear HEM base address failed.\n");
266
267		hns_roce_free_hem(hr_dev, table->hem[i]);
268		table->hem[i] = NULL;
269	}
270
271	mutex_unlock(&table->mutex);
272}
273
274void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
275			  dma_addr_t *dma_handle)
276{
277	struct hns_roce_hem_chunk *chunk;
278	unsigned long idx;
279	int i;
280	int offset, dma_offset;
281	struct hns_roce_hem *hem;
282	struct page *page = NULL;
283
284	if (!table->lowmem)
285		return NULL;
286
287	mutex_lock(&table->mutex);
288	idx = (obj & (table->num_obj - 1)) * table->obj_size;
289	hem = table->hem[idx / HNS_ROCE_TABLE_CHUNK_SIZE];
290	dma_offset = offset = idx % HNS_ROCE_TABLE_CHUNK_SIZE;
291
292	if (!hem)
293		goto out;
294
295	list_for_each_entry(chunk, &hem->chunk_list, list) {
296		for (i = 0; i < chunk->npages; ++i) {
297			if (dma_handle && dma_offset >= 0) {
298				if (sg_dma_len(&chunk->mem[i]) >
299				    (u32)dma_offset)
300					*dma_handle = sg_dma_address(
301						&chunk->mem[i]) + dma_offset;
302				dma_offset -= sg_dma_len(&chunk->mem[i]);
303			}
304
305			if (chunk->mem[i].length > (u32)offset) {
306				page = sg_page(&chunk->mem[i]);
307				goto out;
308			}
309			offset -= chunk->mem[i].length;
310		}
311	}
312
313out:
314	mutex_unlock(&table->mutex);
315	return page ? lowmem_page_address(page) + offset : NULL;
316}
317
318int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
319			     struct hns_roce_hem_table *table,
320			     unsigned long start, unsigned long end)
321{
322	unsigned long inc = HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size;
323	unsigned long i = 0;
324	int ret = 0;
325
326	/* Allocate MTT entry memory according to chunk(128K) */
327	for (i = start; i <= end; i += inc) {
328		ret = hns_roce_table_get(hr_dev, table, i);
329		if (ret)
330			goto fail;
331	}
332
333	return 0;
334
335fail:
336	while (i > start) {
337		i -= inc;
338		hns_roce_table_put(hr_dev, table, i);
339	}
340	return ret;
341}
342
343void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
344			      struct hns_roce_hem_table *table,
345			      unsigned long start, unsigned long end)
346{
347	unsigned long i;
348
349	for (i = start; i <= end;
350		i += HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size)
351		hns_roce_table_put(hr_dev, table, i);
352}
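/*
 * Editor's note: a hypothetical caller of the range helpers above (the
 * first_seg/last_seg names are illustrative): MTT memory spanning several
 * chunks is pinned and released as a pair, e.g.:
 *
 *	ret = hns_roce_table_get_range(hr_dev, &hr_dev->mr_table.mtt_table,
 *				       first_seg, last_seg);
 *	...
 *	hns_roce_table_put_range(hr_dev, &hr_dev->mr_table.mtt_table,
 *				 first_seg, last_seg);
 */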
353
354int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
355			    struct hns_roce_hem_table *table, u32 type,
356			    unsigned long obj_size, unsigned long nobj,
357			    int use_lowmem)
358{
359	unsigned long obj_per_chunk;
360	unsigned long num_hem;
361
362	obj_per_chunk = HNS_ROCE_TABLE_CHUNK_SIZE / obj_size;
363	num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
364
365	table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
366	if (!table->hem)
367		return -ENOMEM;
368
369	table->type = type;
370	table->num_hem = num_hem;
371	table->num_obj = nobj;
372	table->obj_size = obj_size;
373	table->lowmem = use_lowmem;
374	mutex_init(&table->mutex);
375
376	return 0;
377}
378
379void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
380				struct hns_roce_hem_table *table)
381{
382	struct device *dev = &hr_dev->pdev->dev;
383	unsigned long i;
384
385	for (i = 0; i < table->num_hem; ++i)
386		if (table->hem[i]) {
387			if (hr_dev->hw->clear_hem(hr_dev, table,
388			    i * HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size))
389				dev_err(dev, "Clear HEM base address failed.\n");
390
391			hns_roce_free_hem(hr_dev, table->hem[i]);
392		}
393
394	kfree(table->hem);
395}
396
397void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
398{
399	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
400	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
401	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
402	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
403	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
404}