/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_status.h"
#include "i40iw_hmc.h"
#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include "i40iw_vf.h"
#include "i40iw_virtchnl.h"

 44
 45/**
 46 * i40iw_find_sd_index_limit - finds segment descriptor index limit
 47 * @hmc_info: pointer to the HMC configuration information structure
 48 * @type: type of HMC resources we're searching
 49 * @index: starting index for the object
 50 * @cnt: number of objects we're trying to create
 51 * @sd_idx: pointer to return index of the segment descriptor in question
 52 * @sd_limit: pointer to return the maximum number of segment descriptors
 53 *
 54 * This function calculates the segment descriptor index and index limit
 55 * for the resource defined by i40iw_hmc_rsrc_type.
 56 */
 57
static inline void i40iw_find_sd_index_limit(struct i40iw_hmc_info *hmc_info,
					     u32 type,
					     u32 idx,
					     u32 cnt,
					     u32 *sd_idx,
					     u32 *sd_limit)
{
	u64 fpm_addr, fpm_limit;

	fpm_addr = hmc_info->hmc_obj[type].base +
			hmc_info->hmc_obj[type].size * idx;
	fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
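	/*
	 * The limit rounds up to one past the last SD the range touches:
	 * e.g. with the 2M direct BP size, a range ending exactly at
	 * 0x200000 yields sd_limit = 1, i.e. only SD 0 is spanned.
	 */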
	*sd_idx = (u32)(fpm_addr / I40IW_HMC_DIRECT_BP_SIZE);
	*sd_limit = (u32)((fpm_limit - 1) / I40IW_HMC_DIRECT_BP_SIZE);
	*sd_limit += 1;
}

/**
 * i40iw_find_pd_index_limit - finds page descriptor index limit
 * @hmc_info: pointer to the HMC configuration information struct
 * @type: HMC resource type we're examining
 * @idx: starting index for the object
 * @cnt: number of objects we're trying to create
 * @pd_idx: pointer to return page descriptor index
 * @pd_limit: pointer to return page descriptor index limit
 *
 * Calculates the page descriptor index and index limit for the resource
 * defined by i40iw_hmc_rsrc_type.
 */
static inline void i40iw_find_pd_index_limit(struct i40iw_hmc_info *hmc_info,
					     u32 type,
					     u32 idx,
					     u32 cnt,
					     u32 *pd_idx,
					     u32 *pd_limit)
{
	u64 fpm_addr, fpm_limit;

	fpm_addr = hmc_info->hmc_obj[type].base +
			hmc_info->hmc_obj[type].size * idx;
	fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
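	/* Same rounding as the SD case, but against the 4K paged BP size. */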
	*pd_idx = (u32)(fpm_addr / I40IW_HMC_PAGED_BP_SIZE);
	*pd_limit = (u32)((fpm_limit - 1) / I40IW_HMC_PAGED_BP_SIZE);
	*pd_limit += 1;
}

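/*
 * A CQP update-SD request is a data/cmd word pair: the data word packs
 * the backing-page physical address with the BP count, the SD type
 * (0 = paged, 1 = direct) and the valid bit; the cmd word carries the
 * SD index and the write (PMSDWR) bit.  Bit 15 of the cmd word is set
 * unconditionally by this driver.
 */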
/**
 * i40iw_set_sd_entry - setup entry for sd programming
 * @pa: physical addr
 * @idx: sd index
 * @type: paged or direct sd
 * @entry: sd entry ptr
 */
static inline void i40iw_set_sd_entry(u64 pa,
				      u32 idx,
				      enum i40iw_sd_entry_type type,
				      struct update_sd_entry *entry)
{
	entry->data = pa | (I40IW_HMC_MAX_BP_COUNT << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
			(((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) <<
				I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |
			(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);
	entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15));
}

/**
 * i40iw_clr_sd_entry - setup entry for sd clear
 * @idx: sd index
 * @type: paged or direct sd
 * @entry: sd entry ptr
 */
static inline void i40iw_clr_sd_entry(u32 idx, enum i40iw_sd_entry_type type,
				      struct update_sd_entry *entry)
{
	entry->data = (I40IW_HMC_MAX_BP_COUNT <<
			I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
			(((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) <<
				I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);
	entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15));
}

/**
 * i40iw_hmc_sd_one - setup 1 sd entry for cqp
 * @dev: pointer to the device structure
 * @hmc_fn_id: hmc's function id
 * @pa: physical addr
 * @sd_idx: sd index
 * @type: paged or direct sd
 * @setsd: flag to set or clear sd
 */
enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev,
					u8 hmc_fn_id,
					u64 pa, u32 sd_idx,
					enum i40iw_sd_entry_type type,
					bool setsd)
{
	struct i40iw_update_sds_info sdinfo;

	sdinfo.cnt = 1;
	sdinfo.hmc_fn_id = hmc_fn_id;
	if (setsd)
		i40iw_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
	else
		i40iw_clr_sd_entry(sd_idx, type, sdinfo.entry);

	return dev->cqp->process_cqp_sds(dev, &sdinfo);
}
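
/*
 * Example (hypothetical values): programming a single direct SD for
 * the PF would look like
 *
 *	i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
 *			 sd_entry->u.bp.addr.pa, sd_idx,
 *			 I40IW_SD_TYPE_DIRECT, true);
 *
 * The add/delete paths below issue the same descriptors in bulk via
 * i40iw_hmc_sd_grp().
 */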

/**
 * i40iw_hmc_sd_grp - setup a group of sd entries for cqp
 * @dev: pointer to the device structure
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: sd index
 * @sd_cnt: number of sd entries
 * @setsd: flag to set or clear sd
 */
static enum i40iw_status_code i40iw_hmc_sd_grp(struct i40iw_sc_dev *dev,
					       struct i40iw_hmc_info *hmc_info,
					       u32 sd_index,
					       u32 sd_cnt,
					       bool setsd)
{
	struct i40iw_hmc_sd_entry *sd_entry;
	struct i40iw_update_sds_info sdinfo;
	u64 pa;
	u32 i;
	enum i40iw_status_code ret_code = 0;

	memset(&sdinfo, 0, sizeof(sdinfo));
	sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
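	/*
	 * Batch up to I40IW_MAX_SD_ENTRIES descriptors per CQP request,
	 * flushing and restarting the batch whenever the array fills.
	 */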
	for (i = sd_index; i < sd_index + sd_cnt; i++) {
		sd_entry = &hmc_info->sd_table.sd_entry[i];
		if (!sd_entry ||
		    (!sd_entry->valid && setsd) ||
		    (sd_entry->valid && !setsd))
			continue;
		if (setsd) {
			pa = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
			    sd_entry->u.pd_table.pd_page_addr.pa :
			    sd_entry->u.bp.addr.pa;
			i40iw_set_sd_entry(pa, i, sd_entry->entry_type,
					   &sdinfo.entry[sdinfo.cnt]);
		} else {
			i40iw_clr_sd_entry(i, sd_entry->entry_type,
					   &sdinfo.entry[sdinfo.cnt]);
		}
		sdinfo.cnt++;
		if (sdinfo.cnt == I40IW_MAX_SD_ENTRIES) {
			ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
			if (ret_code) {
				i40iw_debug(dev, I40IW_DEBUG_HMC,
					    "i40iw_hmc_sd_grp: sd_programming failed err=%d\n",
					    ret_code);
				return ret_code;
			}
			sdinfo.cnt = 0;
		}
	}
	if (sdinfo.cnt)
		ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);

	return ret_code;
}

/**
 * i40iw_vfdev_from_fpm - return vf dev ptr for hmc function id
 * @dev: pointer to the device structure
 * @hmc_fn_id: hmc's function id
 */
struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
{
	struct i40iw_vfdev *vf_dev = NULL;
	u16 idx;

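	/*
	 * A VF's PMF index doubles as its HMC function id, so scan the
	 * enabled-VF slots for a matching pmf_index.
	 */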
	for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) {
		if (dev->vf_dev[idx] &&
		    ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) {
			vf_dev = dev->vf_dev[idx];
			break;
		}
	}
	return vf_dev;
}

/**
 * i40iw_vf_hmcinfo_from_fpm - get ptr to hmc for func_id
 * @dev: pointer to the device structure
 * @hmc_fn_id: hmc's function id
 */
struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev,
						 u8 hmc_fn_id)
{
	struct i40iw_hmc_info *hmc_info = NULL;
	u16 idx;

	for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) {
		if (dev->vf_dev[idx] &&
		    ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) {
			hmc_info = &dev->vf_dev[idx]->hmc_info;
			break;
		}
	}
	return hmc_info;
}

/**
 * i40iw_hmc_finish_add_sd_reg - program sd entries for objects
 * @dev: pointer to the device structure
 * @info: create obj info
 */
static enum i40iw_status_code i40iw_hmc_finish_add_sd_reg(struct i40iw_sc_dev *dev,
							  struct i40iw_hmc_create_obj_info *info)
{
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return I40IW_ERR_INVALID_HMC_OBJ_INDEX;

	if ((info->start_idx + info->count) >
			info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return I40IW_ERR_INVALID_HMC_OBJ_COUNT;

	if (!info->add_sd_cnt)
		return 0;

	return i40iw_hmc_sd_grp(dev, info->hmc_info,
				info->hmc_info->sd_indexes[0],
				info->add_sd_cnt, true);
}

/**
 * i40iw_sc_create_hmc_obj - allocate backing store for hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to i40iw_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.
 */
enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev,
					       struct i40iw_hmc_create_obj_info *info)
{
	struct i40iw_hmc_sd_entry *sd_entry;
	u32 sd_idx, sd_lmt;
	u32 pd_idx = 0, pd_lmt = 0;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 i, j;
	bool pd_error = false;
	enum i40iw_status_code ret_code = 0;

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return I40IW_ERR_INVALID_HMC_OBJ_INDEX;

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: error type %u, start = %u, req cnt %u, cnt = %u\n",
			    __func__, info->rsrc_type, info->start_idx, info->count,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
	}

	if (!dev->is_pf)
		return i40iw_vchnl_vf_add_hmc_objs(dev, info->rsrc_type, 0, info->count);

	i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count,
				  &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		return I40IW_ERR_INVALID_SD_INDEX;
	}
	i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx, &pd_lmt);

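	/*
	 * Walk every SD the object range spans: add (or reuse) the SD
	 * entry, and for paged SDs belonging to this function's own HMC
	 * (PBLE excluded) populate the PD entries the range covers.
	 */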
	for (j = sd_idx; j < sd_lmt; j++) {
		ret_code = i40iw_add_sd_table_entry(dev->hw, info->hmc_info,
						    j,
						    info->entry_type,
						    I40IW_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			goto exit_sd_error;
		sd_entry = &info->hmc_info->sd_table.sd_entry[j];

		if ((sd_entry->entry_type == I40IW_SD_TYPE_PAGED) &&
		    ((dev->hmc_info == info->hmc_info) &&
		     (info->rsrc_type != I40IW_HMC_IW_PBLE))) {
			pd_idx1 = max(pd_idx, j * I40IW_HMC_MAX_BP_COUNT);
			pd_lmt1 = min(pd_lmt,
				      (j + 1) * I40IW_HMC_MAX_BP_COUNT);
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = i40iw_add_pd_table_entry(dev->hw, info->hmc_info,
								    i, NULL);
				if (ret_code) {
					pd_error = true;
					break;
				}
			}
			if (pd_error) {
				while (i && (i > pd_idx1)) {
					i40iw_remove_pd_bp(dev->hw, info->hmc_info, i - 1,
							   info->is_pf);
					i--;
				}
			}
		}
		if (sd_entry->valid)
			continue;

		info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
		info->add_sd_cnt++;
		sd_entry->valid = true;
	}
	return i40iw_hmc_finish_add_sd_reg(dev, info);

exit_sd_error:
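	/*
	 * Unwind in reverse, releasing whatever PD/SD state was set up
	 * for each SD added so far before giving up.
	 */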
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case I40IW_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx,
				      (j - 1) * I40IW_HMC_MAX_BP_COUNT);
			pd_lmt1 = min(pd_lmt, j * I40IW_HMC_MAX_BP_COUNT);
			for (i = pd_idx1; i < pd_lmt1; i++)
				i40iw_prep_remove_pd_page(info->hmc_info, i);
			break;
		case I40IW_SD_TYPE_DIRECT:
			i40iw_prep_remove_pd_page(info->hmc_info, j - 1);
			break;
		default:
			ret_code = I40IW_ERR_INVALID_SD_TYPE;
			break;
		}
		j--;
	}

	return ret_code;
}

/**
 * i40iw_finish_del_sd_reg - delete sd entries for objects
 * @dev: pointer to the device structure
 * @info: del obj info
 * @reset: true if called before reset
 */
static enum i40iw_status_code i40iw_finish_del_sd_reg(struct i40iw_sc_dev *dev,
						      struct i40iw_hmc_del_obj_info *info,
						      bool reset)
{
	struct i40iw_hmc_sd_entry *sd_entry;
	enum i40iw_status_code ret_code = 0;
	u32 i, sd_idx;
	struct i40iw_dma_mem *mem;

	if (dev->is_pf && !reset)
		ret_code = i40iw_hmc_sd_grp(dev, info->hmc_info,
					    info->hmc_info->sd_indexes[0],
					    info->del_sd_cnt, false);

	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd sd_grp\n", __func__);

	for (i = 0; i < info->del_sd_cnt; i++) {
		sd_idx = info->hmc_info->sd_indexes[i];
		sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
		if (!sd_entry)
			continue;
		mem = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
			&sd_entry->u.pd_table.pd_page_addr :
			&sd_entry->u.bp.addr;

		if (!mem || !mem->va)
			i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd mem\n", __func__);
		else
			i40iw_free_dma_mem(dev->hw, mem);
	}
	return ret_code;
}

/**
 * i40iw_sc_del_hmc_obj - remove pe hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to i40iw_hmc_del_obj_info struct
 * @reset: true if called before reset
 *
 * This will de-populate the SDs and PDs.  It frees
 * the memory for PDs and backing storage.  After this function returns,
 * the caller should deallocate the memory previously allocated for
 * book-keeping information about PDs and backing storage.
 */
enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_del_obj_info *info,
					    bool reset)
{
	struct i40iw_hmc_pd_table *pd_table;
	u32 sd_idx, sd_lmt;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 i, j;
	enum i40iw_status_code ret_code = 0;

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
			    __func__, info->start_idx, info->rsrc_type,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n",
			    __func__, info->start_idx, info->count,
			    info->rsrc_type,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
	}
	if (!dev->is_pf) {
		ret_code = i40iw_vchnl_vf_del_hmc_obj(dev, info->rsrc_type, 0,
						      info->count);
		if (info->rsrc_type != I40IW_HMC_IW_PBLE)
			return ret_code;
	}

	i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx, &pd_lmt);

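	/*
	 * First tear down the backing page behind every valid paged PD
	 * in the range; the covering SDs are collected afterwards and
	 * cleared in hardware by i40iw_finish_del_sd_reg().
	 */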
	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / I40IW_HMC_PD_CNT_IN_SD;

		if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
		    I40IW_SD_TYPE_PAGED)
			continue;

		rel_pd_idx = j % I40IW_HMC_PD_CNT_IN_SD;
		pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = i40iw_remove_pd_bp(dev->hw, info->hmc_info, j,
						      info->is_pf);
			if (ret_code) {
				i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error\n", __func__);
				return ret_code;
			}
		}
	}

	i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error invalid sd_idx\n", __func__);
		return I40IW_ERR_INVALID_SD_INDEX;
	}

	for (i = sd_idx; i < sd_lmt; i++) {
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case I40IW_SD_TYPE_DIRECT:
			ret_code = i40iw_prep_remove_sd_bp(info->hmc_info, i);
			if (!ret_code) {
				info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
				info->del_sd_cnt++;
			}
			break;
		case I40IW_SD_TYPE_PAGED:
			ret_code = i40iw_prep_remove_pd_page(info->hmc_info, i);
			if (!ret_code) {
				info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
				info->del_sd_cnt++;
			}
			break;
		default:
			break;
		}
	}
	return i40iw_finish_del_sd_reg(dev, info, reset);
}

/**
 * i40iw_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 */
enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw,
						struct i40iw_hmc_info *hmc_info,
						u32 sd_index,
						enum i40iw_sd_entry_type type,
						u64 direct_mode_sz)
{
	enum i40iw_status_code ret_code = 0;
	struct i40iw_hmc_sd_entry *sd_entry;
	bool dma_mem_alloc_done = false;
	struct i40iw_dma_mem mem;
	u64 alloc_len;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		if (type == I40IW_SD_TYPE_PAGED)
			alloc_len = I40IW_HMC_PAGED_BP_SIZE;
		else
			alloc_len = direct_mode_sz;

		/* allocate a 4K pd page or 2M backing page */
		ret_code = i40iw_allocate_dma_mem(hw, &mem, alloc_len,
						  I40IW_HMC_PD_BP_BUF_ALIGNMENT);
		if (ret_code)
			goto exit;
		dma_mem_alloc_done = true;
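		/*
		 * A paged SD also needs a host-side shadow of its 512
		 * children (I40IW_HMC_PD_CNT_IN_SD pd_entry slots)
		 * alongside the 4K PD page itself.
		 */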
		if (type == I40IW_SD_TYPE_PAGED) {
			ret_code = i40iw_allocate_virt_mem(hw,
							   &sd_entry->u.pd_table.pd_entry_virt_mem,
							   sizeof(struct i40iw_hmc_pd_entry) * 512);
			if (ret_code)
				goto exit;
			sd_entry->u.pd_table.pd_entry = (struct i40iw_hmc_pd_entry *)
							 sd_entry->u.pd_table.pd_entry_virt_mem.va;

			memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem, sizeof(struct i40iw_dma_mem));
		} else {
			memcpy(&sd_entry->u.bp.addr, &mem, sizeof(struct i40iw_dma_mem));
			sd_entry->u.bp.sd_pd_index = sd_index;
		}

		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;

		I40IW_INC_SD_REFCNT(&hmc_info->sd_table);
	}
	if (sd_entry->entry_type == I40IW_SD_TYPE_DIRECT)
		I40IW_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
	if (ret_code && dma_mem_alloc_done)
		i40iw_free_dma_mem(hw, &mem);

	return ret_code;
}

/**
 * i40iw_add_pd_table_entry - Adds page descriptor to the specified table
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @pd_index: which page descriptor index to manipulate
 * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
 *
 * This function:
 *	1. Initializes the pd entry
 *	2. Adds pd_entry in the pd_table
 *	3. Marks the entry valid in the i40iw_hmc_pd_entry structure
 *	4. Initializes the pd_entry's ref count to 1
 * assumptions:
 *	1. The memory for the pd should be pinned down, physically contiguous,
 *	   zeroed, and aligned on a 4K boundary.
 *	2. It should be 4K in size.
 */
enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw,
						struct i40iw_hmc_info *hmc_info,
						u32 pd_index,
						struct i40iw_dma_mem *rsrc_pg)
{
	enum i40iw_status_code ret_code = 0;
	struct i40iw_hmc_pd_table *pd_table;
	struct i40iw_hmc_pd_entry *pd_entry;
	struct i40iw_dma_mem mem;
	struct i40iw_dma_mem *page = &mem;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;
	u64 page_desc;

	if (pd_index / I40IW_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
		return I40IW_ERR_INVALID_PAGE_DESC_INDEX;

	sd_idx = pd_index / I40IW_HMC_PD_CNT_IN_SD;
	if (hmc_info->sd_table.sd_entry[sd_idx].entry_type != I40IW_SD_TYPE_PAGED)
		return 0;

	rel_pd_idx = pd_index % I40IW_HMC_PD_CNT_IN_SD;
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		if (rsrc_pg) {
			pd_entry->rsrc_pg = true;
			page = rsrc_pg;
		} else {
			ret_code = i40iw_allocate_dma_mem(hw, page,
							  I40IW_HMC_PAGED_BP_SIZE,
							  I40IW_HMC_PD_BP_BUF_ALIGNMENT);
			if (ret_code)
				return ret_code;
			pd_entry->rsrc_pg = false;
		}

		memcpy(&pd_entry->bp.addr, page, sizeof(struct i40iw_dma_mem));
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = I40IW_SD_TYPE_PAGED;
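		/*
		 * Bit 0 of the descriptor written into the PD page marks
		 * the backing page valid; the descriptor is zeroed again
		 * on removal.
		 */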
		page_desc = page->pa | 0x1;

		pd_addr = (u64 *)pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;

		memcpy(pd_addr, &page_desc, sizeof(*pd_addr));

		pd_entry->sd_index = sd_idx;
		pd_entry->valid = true;
		I40IW_INC_PD_REFCNT(pd_table);
		if (hmc_info->hmc_fn_id < I40IW_FIRST_VF_FPM_ID)
			I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, rel_pd_idx);
		else if (hw->hmc.hmc_fn_id != hmc_info->hmc_fn_id)
			I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, rel_pd_idx,
						   hmc_info->hmc_fn_id);
	}
	I40IW_INC_BP_REFCNT(&pd_entry->bp);

	return 0;
}

/**
 * i40iw_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 * @is_pf: distinguishes a VF from a PF
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) or in the
 *	   sd table (for direct address mode) invalid.
 *	2. Writes to register PMPDINV to invalidate the backing page in FV cache
 *	3. Decrements the ref count for the pd_entry
 * assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 */
enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw,
					  struct i40iw_hmc_info *hmc_info,
					  u32 idx,
					  bool is_pf)
{
	struct i40iw_hmc_pd_entry *pd_entry;
	struct i40iw_hmc_pd_table *pd_table;
	struct i40iw_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	struct i40iw_dma_mem *mem;
	u64 *pd_addr;

	sd_idx = idx / I40IW_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40IW_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt)
		return I40IW_ERR_INVALID_PAGE_DESC_INDEX;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (sd_entry->entry_type != I40IW_SD_TYPE_PAGED)
		return I40IW_ERR_INVALID_SD_TYPE;

	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	I40IW_DEC_BP_REFCNT(&pd_entry->bp);
	if (pd_entry->bp.ref_cnt)
		return 0;

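	/*
	 * Last reference dropped: zero the descriptor in the PD page,
	 * invalidate it in hardware, free the backing page unless it was
	 * caller-supplied, and release the pd_entry shadow array once the
	 * whole table is empty.
	 */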
	pd_entry->valid = false;
	I40IW_DEC_PD_REFCNT(pd_table);
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	memset(pd_addr, 0, sizeof(u64));
	if (is_pf)
		I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
	else
		I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx,
					   hmc_info->hmc_fn_id);

	if (!pd_entry->rsrc_pg) {
		mem = &pd_entry->bp.addr;
		if (!mem || !mem->va)
			return I40IW_ERR_PARAM;
		i40iw_free_dma_mem(hw, mem);
	}
	if (!pd_table->ref_cnt)
		i40iw_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);

	return 0;
}

/**
 * i40iw_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 */
enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx)
{
	struct i40iw_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40IW_DEC_BP_REFCNT(&sd_entry->u.bp);
	if (sd_entry->u.bp.ref_cnt)
		return I40IW_ERR_NOT_READY;

	I40IW_DEC_SD_REFCNT(&hmc_info->sd_table);
	sd_entry->valid = false;

	return 0;
}

/**
 * i40iw_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 */
enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info,
						 u32 idx)
{
	struct i40iw_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];

	if (sd_entry->u.pd_table.ref_cnt)
		return I40IW_ERR_NOT_READY;

	sd_entry->valid = false;
	I40IW_DEC_SD_REFCNT(&hmc_info->sd_table);

	return 0;
}

/**
 * i40iw_pf_init_vfhmc - initialize hmc_info for a vf driver instance
 * @dev: pointer to the device structure
 * @vf_hmc_fn_id: hmc function id of the vf driver
 * @vf_cnt_array: array of cnt values of iwarp hmc objects
 *
 * Called by the pf driver to initialize hmc_info for a vf driver instance.
 */
enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev,
					   u8 vf_hmc_fn_id,
					   u32 *vf_cnt_array)
{
	struct i40iw_hmc_info *hmc_info;
	enum i40iw_status_code ret_code = 0;
	u32 i;

	if ((vf_hmc_fn_id < I40IW_FIRST_VF_FPM_ID) ||
	    (vf_hmc_fn_id >= I40IW_FIRST_VF_FPM_ID +
	     I40IW_MAX_PE_ENABLED_VF_COUNT)) {
		i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: invalid vf_hmc_fn_id 0x%x\n",
			    __func__, vf_hmc_fn_id);
		return I40IW_ERR_INVALID_HMCFN_ID;
	}

	ret_code = i40iw_sc_init_iw_hmc(dev, vf_hmc_fn_id);
	if (ret_code)
		return ret_code;

	hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, vf_hmc_fn_id);

	for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) {
		if (vf_cnt_array)
			hmc_info->hmc_obj[i].cnt =
			    vf_cnt_array[i - I40IW_HMC_IW_QP];
		else
			hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
	}

	return 0;
}