// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pci_ids.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"
#include "icp_qat_hal.h"
#include "icp_qat_fw_loader_handle.h"

#define UWORD_CPYBUF_SIZE 1024U
#define INVLD_UWORD 0xffffffffffull
#define PID_MINOR_REV 0xf
#define PID_MAJOR_REV (0xf << 4)

static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
				 unsigned int ae, unsigned int image_num)
{
	struct icp_qat_uclo_aedata *ae_data;
	struct icp_qat_uclo_encapme *encap_image;
	struct icp_qat_uclo_page *page = NULL;
	struct icp_qat_uclo_aeslice *ae_slice = NULL;

	ae_data = &obj_handle->ae_data[ae];
	encap_image = &obj_handle->ae_uimage[image_num];
	ae_slice = &ae_data->ae_slices[ae_data->slice_num];
	ae_slice->encap_image = encap_image;

	if (encap_image->img_ptr) {
		ae_slice->ctx_mask_assigned =
					encap_image->img_ptr->ctx_assigned;
		ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
	} else {
		ae_slice->ctx_mask_assigned = 0;
	}
	ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
	if (!ae_slice->region)
		return -ENOMEM;
	ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
	if (!ae_slice->page)
		goto out_err;
	page = ae_slice->page;
	page->encap_page = encap_image->page;
	ae_slice->page->region = ae_slice->region;
	ae_data->slice_num++;
	return 0;
out_err:
	kfree(ae_slice->region);
	ae_slice->region = NULL;
	return -ENOMEM;
}

static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
{
	unsigned int i;

	if (!ae_data) {
		pr_err("QAT: bad argument, ae_data is NULL\n");
		return -EINVAL;
	}

	for (i = 0; i < ae_data->slice_num; i++) {
		kfree(ae_data->ae_slices[i].region);
		ae_data->ae_slices[i].region = NULL;
		kfree(ae_data->ae_slices[i].page);
		ae_data->ae_slices[i].page = NULL;
	}
	return 0;
}

static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
				 unsigned int str_offset)
{
	if (!str_table->table_len || str_offset > str_table->table_len)
		return NULL;
	return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
}

static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
{
	int maj = hdr->maj_ver & 0xff;
	int min = hdr->min_ver & 0xff;

	if (hdr->file_id != ICP_QAT_UOF_FID) {
		pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
		return -EINVAL;
	}
	if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
		pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
		       maj, min);
		return -EINVAL;
	}
	return 0;
}

static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
{
	int maj = suof_hdr->maj_ver & 0xff;
	int min = suof_hdr->min_ver & 0xff;

	if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
		pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
		return -EINVAL;
	}
	if (suof_hdr->fw_type != 0) {
		pr_err("QAT: unsupported firmware type\n");
		return -EINVAL;
	}
	if (suof_hdr->num_chunks <= 0x1) {
		pr_err("QAT: SUOF chunk amount is incorrect\n");
		return -EINVAL;
	}
	if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
		pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
		       maj, min);
		return -EINVAL;
	}
	return 0;
}

static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned int addr, unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		SRAM_WRITE(handle, addr, outval);
		num_in_bytes -= 4;
		ptr += 4;
		addr += 4;
	}
}

static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned char ae, unsigned int addr,
				      unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	addr >>= 0x2; /* convert to uword address */

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
		num_in_bytes -= 4;
		ptr += 4;
	}
}

static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
				   unsigned char ae,
				   struct icp_qat_uof_batch_init
				   *umem_init_header)
{
	struct icp_qat_uof_batch_init *umem_init;

	if (!umem_init_header)
		return;
	umem_init = umem_init_header->next;
	while (umem_init) {
		unsigned int addr, *value, size;

		ae = umem_init->ae;
		addr = umem_init->addr;
		value = umem_init->value;
		size = umem_init->size;
		qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
		umem_init = umem_init->next;
	}
}

static void
qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
				 struct icp_qat_uof_batch_init **base)
{
	struct icp_qat_uof_batch_init *umem_init;

	umem_init = *base;
	while (umem_init) {
		struct icp_qat_uof_batch_init *pre;

		pre = umem_init;
		umem_init = umem_init->next;
		kfree(pre);
	}
	*base = NULL;
}

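/* Parse the leading decimal digits of str into *num */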
static int qat_uclo_parse_num(char *str, unsigned int *num)
{
	char buf[16] = {0};
	unsigned long ae = 0;
	int i;

	strscpy(buf, str, sizeof(buf));
	for (i = 0; i < 16; i++) {
		if (!isdigit(buf[i])) {
			buf[i] = '\0';
			break;
		}
	}
	if ((kstrtoul(buf, 10, &ae)))
		return -EFAULT;

	*num = (unsigned int)ae;
	return 0;
}

static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
				     struct icp_qat_uof_initmem *init_mem,
				     unsigned int size_range, unsigned int *ae)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	char *str;

	if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
		pr_err("QAT: initmem is out of range\n");
		return -EINVAL;
	}
	if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
		pr_err("QAT: Memory scope for init_mem error\n");
		return -EINVAL;
	}
	str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
	if (!str) {
		pr_err("QAT: AE name assigned in UOF init table is NULL\n");
		return -EINVAL;
	}
	if (qat_uclo_parse_num(str, ae)) {
		pr_err("QAT: Parse num for AE number failed\n");
		return -EINVAL;
	}
	if (*ae >= ICP_QAT_UCLO_MAX_AE) {
		pr_err("QAT: ae %d out of range\n", *ae);
		return -EINVAL;
	}
	return 0;
}

static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
					   *handle, struct icp_qat_uof_initmem
					   *init_mem, unsigned int ae,
					   struct icp_qat_uof_batch_init
					   **init_tab_base)
{
	struct icp_qat_uof_batch_init *init_header, *tail;
	struct icp_qat_uof_batch_init *mem_init, *tail_old;
	struct icp_qat_uof_memvar_attr *mem_val_attr;
	unsigned int i, flag = 0;

	mem_val_attr =
		(struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
		sizeof(struct icp_qat_uof_initmem));

	init_header = *init_tab_base;
	if (!init_header) {
		init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
		if (!init_header)
			return -ENOMEM;
		init_header->size = 1;
		*init_tab_base = init_header;
		flag = 1;
	}
	tail_old = init_header;
	while (tail_old->next)
		tail_old = tail_old->next;
	tail = tail_old;
	for (i = 0; i < init_mem->val_attr_num; i++) {
		mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
		if (!mem_init)
			goto out_err;
		mem_init->ae = ae;
		mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
		mem_init->value = &mem_val_attr->value;
		mem_init->size = 4;
		mem_init->next = NULL;
		tail->next = mem_init;
		tail = mem_init;
		init_header->size += qat_hal_get_ins_num();
		mem_val_attr++;
	}
	return 0;
out_err:
	/* Do not free the list head unless we allocated it. */
	tail_old = tail_old->next;
	if (flag) {
		kfree(*init_tab_base);
		*init_tab_base = NULL;
	}

	while (tail_old) {
		mem_init = tail_old->next;
		kfree(tail_old);
		tail_old = mem_init;
	}
	return -ENOMEM;
}

static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
				  struct icp_qat_uof_initmem *init_mem)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae;

	if (qat_uclo_fetch_initmem_ae(handle, init_mem,
				      handle->chip_info->lm_size, &ae))
		return -EINVAL;
	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
					    &obj_handle->lm_init_tab[ae]))
		return -EINVAL;
	return 0;
}

static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
				  struct icp_qat_uof_initmem *init_mem)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae, ustore_size, uaddr, i;
	struct icp_qat_uclo_aedata *aed;

	ustore_size = obj_handle->ustore_phy_size;
	if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
		return -EINVAL;
	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
					    &obj_handle->umem_init_tab[ae]))
		return -EINVAL;
	/* set the highest ustore address referenced */
	uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
	aed = &obj_handle->ae_data[ae];
	for (i = 0; i < aed->slice_num; i++) {
		if (aed->ae_slices[i].encap_image->uwords_num < uaddr)
			aed->ae_slices[i].encap_image->uwords_num = uaddr;
	}
	return 0;
}

static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
				   struct icp_qat_uof_initmem *init_mem)
{
	switch (init_mem->region) {
	case ICP_QAT_UOF_LMEM_REGION:
		if (qat_uclo_init_lmem_seg(handle, init_mem))
			return -EINVAL;
		break;
	case ICP_QAT_UOF_UMEM_REGION:
		if (qat_uclo_init_umem_seg(handle, init_mem))
			return -EINVAL;
		break;
	default:
		pr_err("QAT: initmem region error. region type=0x%x\n",
		       init_mem->region);
		return -EINVAL;
	}
	return 0;
}

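/* Pad the control store outside the loaded page with the image's fill pattern */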
static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
				struct icp_qat_uclo_encapme *image)
{
	unsigned int i;
	struct icp_qat_uclo_encap_page *page;
	struct icp_qat_uof_image *uof_image;
	unsigned char ae;
	unsigned int ustore_size;
	unsigned int patt_pos;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned long cfg_ae_mask = handle->cfg_ae_mask;
	u64 *fill_data;

	uof_image = image->img_ptr;
	fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(u64),
			    GFP_KERNEL);
	if (!fill_data)
		return -ENOMEM;
	for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
		memcpy(&fill_data[i], &uof_image->fill_pattern,
		       sizeof(u64));
	page = image->page;

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		unsigned long ae_assigned = uof_image->ae_assigned;

		if (!test_bit(ae, &ae_assigned))
			continue;

		if (!test_bit(ae, &cfg_ae_mask))
			continue;

		ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
		patt_pos = page->beg_addr_p + page->micro_words_num;

		qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
				  page->beg_addr_p, &fill_data[0]);
		qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
				  ustore_size - patt_pos + 1,
				  &fill_data[page->beg_addr_p]);
	}
	kfree(fill_data);
	return 0;
}

static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
{
	int i, ae;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
	unsigned long ae_mask = handle->hal_handle->ae_mask;

	for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
		if (initmem->num_in_bytes) {
			if (qat_uclo_init_ae_memory(handle, initmem))
				return -EINVAL;
		}
		initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
			(uintptr_t)initmem +
			sizeof(struct icp_qat_uof_initmem)) +
			(sizeof(struct icp_qat_uof_memvar_attr) *
			initmem->val_attr_num));
	}

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		if (qat_hal_batch_wr_lm(handle, ae,
					obj_handle->lm_init_tab[ae])) {
			pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
			return -EINVAL;
		}
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->lm_init_tab[ae]);
		qat_uclo_batch_wr_umem(handle, ae,
				       obj_handle->umem_init_tab[ae]);
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->umem_init_tab[ae]);
	}
	return 0;
}

static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
				 char *chunk_id, void *cur)
{
	int i;
	struct icp_qat_uof_chunkhdr *chunk_hdr =
	    (struct icp_qat_uof_chunkhdr *)
	    ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));

	for (i = 0; i < obj_hdr->num_chunks; i++) {
		if ((cur < (void *)&chunk_hdr[i]) &&
		    !strncmp(chunk_hdr[i].chunk_id, chunk_id,
			     ICP_QAT_UOF_OBJID_LEN)) {
			return &chunk_hdr[i];
		}
	}
	return NULL;
}

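/* One byte of a CRC-16 computation using the CCITT polynomial 0x1021 */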
static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
{
	int i;
	unsigned int topbit = 1 << 0xF;
	unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);

	reg ^= inbyte << 0x8;
	for (i = 0; i < 0x8; i++) {
		if (reg & topbit)
			reg = (reg << 1) ^ 0x1021;
		else
			reg <<= 1;
	}
	return reg & 0xFFFF;
}

static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
{
	unsigned int chksum = 0;

	if (ptr)
		while (num--)
			chksum = qat_uclo_calc_checksum(chksum, *ptr++);
	return chksum;
}

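/*
 * Find the file chunk matching chunk_id, verify its checksum, and wrap it
 * in a freshly allocated object header.
 */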
static struct icp_qat_uclo_objhdr *
qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
		   char *chunk_id)
{
	struct icp_qat_uof_filechunkhdr *file_chunk;
	struct icp_qat_uclo_objhdr *obj_hdr;
	char *chunk;
	int i;

	file_chunk = (struct icp_qat_uof_filechunkhdr *)
		(buf + sizeof(struct icp_qat_uof_filehdr));
	for (i = 0; i < file_hdr->num_chunks; i++) {
		if (!strncmp(file_chunk->chunk_id, chunk_id,
			     ICP_QAT_UOF_OBJID_LEN)) {
			chunk = buf + file_chunk->offset;
			if (file_chunk->checksum != qat_uclo_calc_str_checksum(
				chunk, file_chunk->size))
				break;
			obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
			if (!obj_hdr)
				break;
			obj_hdr->file_buff = chunk;
			obj_hdr->checksum = file_chunk->checksum;
			obj_hdr->size = file_chunk->size;
			return obj_hdr;
		}
		file_chunk++;
	}
	return NULL;
}

static int
qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
			    struct icp_qat_uof_image *image)
{
	struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
	struct icp_qat_uof_objtable *neigh_reg_tab;
	struct icp_qat_uof_code_page *code_page;

	code_page = (struct icp_qat_uof_code_page *)
			((char *)image + sizeof(struct icp_qat_uof_image));
	uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
		     code_page->uc_var_tab_offset);
	imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
		      code_page->imp_var_tab_offset);
	imp_expr_tab = (struct icp_qat_uof_objtable *)
		       (encap_uof_obj->beg_uof +
		       code_page->imp_expr_tab_offset);
	if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
	    imp_expr_tab->entry_num) {
		pr_err("QAT: UOF can't contain imported variables to be parsed\n");
		return -EINVAL;
	}
	neigh_reg_tab = (struct icp_qat_uof_objtable *)
			(encap_uof_obj->beg_uof +
			code_page->neigh_reg_tab_offset);
	if (neigh_reg_tab->entry_num) {
		pr_err("QAT: UOF can't contain neighbor register table\n");
		return -EINVAL;
	}
	if (image->numpages > 1) {
		pr_err("QAT: UOF can't contain multiple pages\n");
		return -EINVAL;
	}
	if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
		pr_err("QAT: UOF can't use shared control store feature\n");
		return -EFAULT;
	}
	if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
		pr_err("QAT: UOF can't use reloadable feature\n");
		return -EFAULT;
	}
	return 0;
}

static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
				     *encap_uof_obj,
				     struct icp_qat_uof_image *img,
				     struct icp_qat_uclo_encap_page *page)
{
	struct icp_qat_uof_code_page *code_page;
	struct icp_qat_uof_code_area *code_area;
	struct icp_qat_uof_objtable *uword_block_tab;
	struct icp_qat_uof_uword_block *uwblock;
	int i;

	code_page = (struct icp_qat_uof_code_page *)
			((char *)img + sizeof(struct icp_qat_uof_image));
	page->def_page = code_page->def_page;
	page->page_region = code_page->page_region;
	page->beg_addr_v = code_page->beg_addr_v;
	page->beg_addr_p = code_page->beg_addr_p;
	code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
						code_page->code_area_offset);
	page->micro_words_num = code_area->micro_words_num;
	uword_block_tab = (struct icp_qat_uof_objtable *)
			  (encap_uof_obj->beg_uof +
			  code_area->uword_block_tab);
	page->uwblock_num = uword_block_tab->entry_num;
	uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
			sizeof(struct icp_qat_uof_objtable));
	page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
	for (i = 0; i < uword_block_tab->entry_num; i++)
		page->uwblock[i].micro_words =
		(uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
}

static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
			       struct icp_qat_uclo_encapme *ae_uimage,
			       int max_image)
{
	int i, j;
	struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
	struct icp_qat_uof_image *image;
	struct icp_qat_uof_objtable *ae_regtab;
	struct icp_qat_uof_objtable *init_reg_sym_tab;
	struct icp_qat_uof_objtable *sbreak_tab;
	struct icp_qat_uof_encap_obj *encap_uof_obj =
					&obj_handle->encap_uof_obj;

	for (j = 0; j < max_image; j++) {
		chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
						ICP_QAT_UOF_IMAG, chunk_hdr);
		if (!chunk_hdr)
			break;
		image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
						     chunk_hdr->offset);
		ae_regtab = (struct icp_qat_uof_objtable *)
			   (image->reg_tab_offset +
			   obj_handle->obj_hdr->file_buff);
		ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
		ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
			(((char *)ae_regtab) +
			sizeof(struct icp_qat_uof_objtable));
		init_reg_sym_tab = (struct icp_qat_uof_objtable *)
				   (image->init_reg_sym_tab +
				   obj_handle->obj_hdr->file_buff);
		ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
		ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
			(((char *)init_reg_sym_tab) +
			sizeof(struct icp_qat_uof_objtable));
		sbreak_tab = (struct icp_qat_uof_objtable *)
			(image->sbreak_tab + obj_handle->obj_hdr->file_buff);
		ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
		ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
				      (((char *)sbreak_tab) +
				      sizeof(struct icp_qat_uof_objtable));
		ae_uimage[j].img_ptr = image;
		if (qat_uclo_check_image_compat(encap_uof_obj, image))
			goto out_err;
		ae_uimage[j].page =
			kzalloc(sizeof(struct icp_qat_uclo_encap_page),
				GFP_KERNEL);
		if (!ae_uimage[j].page)
			goto out_err;
		qat_uclo_map_image_page(encap_uof_obj, image,
					ae_uimage[j].page);
	}
	return j;
out_err:
	for (i = 0; i < j; i++)
		kfree(ae_uimage[i].page);
	return 0;
}

static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
{
	int i, ae;
	int mflag = 0;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned long cfg_ae_mask = handle->cfg_ae_mask;

	for_each_set_bit(ae, &ae_mask, max_ae) {
		if (!test_bit(ae, &cfg_ae_mask))
			continue;

		for (i = 0; i < obj_handle->uimage_num; i++) {
			unsigned long ae_assigned = obj_handle->ae_uimage[i].img_ptr->ae_assigned;

			if (!test_bit(ae, &ae_assigned))
				continue;
			mflag = 1;
			if (qat_uclo_init_ae_data(obj_handle, ae, i))
				return -EINVAL;
		}
	}
	if (!mflag) {
		pr_err("QAT: uimage uses an AE that is not enabled\n");
		return -EINVAL;
	}
	return 0;
}

static struct icp_qat_uof_strtable *
qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
		       char *tab_name, struct icp_qat_uof_strtable *str_table)
{
	struct icp_qat_uof_chunkhdr *chunk_hdr;

	chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
					obj_hdr->file_buff, tab_name, NULL);
	if (chunk_hdr) {
		int hdr_size;

		memcpy(&str_table->table_len, obj_hdr->file_buff +
		       chunk_hdr->offset, sizeof(str_table->table_len));
		hdr_size = (char *)&str_table->strings - (char *)str_table;
		str_table->strings = (uintptr_t)obj_hdr->file_buff +
					chunk_hdr->offset + hdr_size;
		return str_table;
	}
	return NULL;
}

static void
qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
			   struct icp_qat_uclo_init_mem_table *init_mem_tab)
{
	struct icp_qat_uof_chunkhdr *chunk_hdr;

	chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
					ICP_QAT_UOF_IMEM, NULL);
	if (chunk_hdr) {
		memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
			chunk_hdr->offset, sizeof(unsigned int));
		init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
		(encap_uof_obj->beg_uof + chunk_hdr->offset +
		sizeof(unsigned int));
	}
}

static unsigned int
qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
{
	switch (handle->pci_dev->device) {
	case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
		return ICP_QAT_AC_895XCC_DEV_TYPE;
	case PCI_DEVICE_ID_INTEL_QAT_C62X:
		return ICP_QAT_AC_C62X_DEV_TYPE;
	case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
		return ICP_QAT_AC_C3XXX_DEV_TYPE;
	case ADF_4XXX_PCI_DEVICE_ID:
	case ADF_401XX_PCI_DEVICE_ID:
	case ADF_402XX_PCI_DEVICE_ID:
	case ADF_420XX_PCI_DEVICE_ID:
		return ICP_QAT_AC_4XXX_A_DEV_TYPE;
	default:
		pr_err("QAT: unsupported device 0x%x\n",
		       handle->pci_dev->device);
		return 0;
	}
}

static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
{
	unsigned int maj_ver, prod_type = obj_handle->prod_type;

	if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
		pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
		       obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
		       prod_type);
		return -EINVAL;
	}
	maj_ver = obj_handle->prod_rev & 0xff;
	if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver ||
	    obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) {
		pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
		return -EINVAL;
	}
	return 0;
}

static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
			     unsigned char ae, unsigned char ctx_mask,
			     enum icp_qat_uof_regtype reg_type,
			     unsigned short reg_addr, unsigned int value)
{
	switch (reg_type) {
	case ICP_GPA_ABS:
	case ICP_GPB_ABS:
		ctx_mask = 0;
		fallthrough;
	case ICP_GPA_REL:
	case ICP_GPB_REL:
		return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
					reg_addr, value);
	case ICP_SR_ABS:
	case ICP_DR_ABS:
	case ICP_SR_RD_ABS:
	case ICP_DR_RD_ABS:
		ctx_mask = 0;
		fallthrough;
	case ICP_SR_REL:
	case ICP_DR_REL:
	case ICP_SR_RD_REL:
	case ICP_DR_RD_REL:
		return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
					    reg_addr, value);
	case ICP_SR_WR_ABS:
	case ICP_DR_WR_ABS:
		ctx_mask = 0;
		fallthrough;
	case ICP_SR_WR_REL:
	case ICP_DR_WR_REL:
		return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
					    reg_addr, value);
	case ICP_NEIGH_REL:
		return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
	default:
		pr_err("QAT: UOF uses unsupported reg type 0x%x\n", reg_type);
		return -EFAULT;
	}
	return 0;
}

static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
				 unsigned int ae,
				 struct icp_qat_uclo_encapme *encap_ae)
{
	unsigned int i;
	unsigned char ctx_mask;
	struct icp_qat_uof_init_regsym *init_regsym;

	if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
	    ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;

	for (i = 0; i < encap_ae->init_regsym_num; i++) {
		unsigned int exp_res;

		init_regsym = &encap_ae->init_regsym[i];
		exp_res = init_regsym->value;
		switch (init_regsym->init_type) {
		case ICP_QAT_UOF_INIT_REG:
			qat_uclo_init_reg(handle, ae, ctx_mask,
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_REG_CTX:
			/* check if ctx is appropriate for the ctxMode */
			if (!((1 << init_regsym->ctx) & ctx_mask)) {
				pr_err("QAT: invalid ctx num = 0x%x\n",
				       init_regsym->ctx);
				return -EINVAL;
			}
			qat_uclo_init_reg(handle, ae,
					  (unsigned char)
					  (1 << init_regsym->ctx),
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_EXPR:
			pr_err("QAT: INIT_EXPR feature not supported\n");
			return -EINVAL;
		case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
			pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
			return -EINVAL;
		default:
			break;
		}
	}
	return 0;
}

static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	struct icp_qat_uclo_aedata *aed;
	unsigned int s, ae;

	if (obj_handle->global_inited)
		return 0;
	if (obj_handle->init_mem_tab.entry_num) {
		if (qat_uclo_init_memory(handle)) {
			pr_err("QAT: initialize memory failed\n");
			return -EINVAL;
		}
	}

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		aed = &obj_handle->ae_data[ae];
		for (s = 0; s < aed->slice_num; s++) {
			if (!aed->ae_slices[s].encap_image)
				continue;
			if (qat_uclo_init_reg_sym(handle, ae, aed->ae_slices[s].encap_image))
				return -EINVAL;
		}
	}
	obj_handle->global_inited = 1;
	return 0;
}

static int qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle,
			     struct icp_qat_uclo_objhandle *obj_handle,
			     unsigned char ae,
			     struct icp_qat_uof_image *uof_image)
{
	unsigned char mode;
	int ret;

	mode = ICP_QAT_CTX_MODE(uof_image->ae_mode);
	ret = qat_hal_set_ae_ctx_mode(handle, ae, mode);
	if (ret) {
		pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
		return ret;
	}
	if (handle->chip_info->nn) {
		mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
		ret = qat_hal_set_ae_nn_mode(handle, ae, mode);
		if (ret) {
			pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
			return ret;
		}
	}
	mode = ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode);
	ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, mode);
	if (ret) {
		pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
		return ret;
	}
	mode = ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode);
	ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, mode);
	if (ret) {
		pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
		return ret;
	}
	if (handle->chip_info->lm2lm3) {
		mode = ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode);
		ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, mode);
		if (ret) {
			pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n");
			return ret;
		}
		mode = ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode);
		ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, mode);
		if (ret) {
			pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n");
			return ret;
		}
		mode = ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode);
		qat_hal_set_ae_tindex_mode(handle, ae, mode);
	}
	return 0;
}

static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uof_image *uof_image;
	struct icp_qat_uclo_aedata *ae_data;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned long cfg_ae_mask = handle->cfg_ae_mask;
	unsigned char ae, s;
	int error;

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		if (!test_bit(ae, &cfg_ae_mask))
			continue;

		ae_data = &obj_handle->ae_data[ae];
		for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
				      ICP_QAT_UCLO_MAX_CTX); s++) {
			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
				continue;
			uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
			error = qat_hal_set_modes(handle, obj_handle, ae,
						  uof_image);
			if (error)
				return error;
		}
	}
	return 0;
}

static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	struct icp_qat_uclo_encapme *image;
	int a;

	for (a = 0; a < obj_handle->uimage_num; a++) {
		image = &obj_handle->ae_uimage[a];
		image->uwords_num = image->page->beg_addr_p +
					image->page->micro_words_num;
	}
}

static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae;

	obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
	obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
					     obj_handle->obj_hdr->file_buff;
	obj_handle->uword_in_bytes = 6;
	obj_handle->prod_type = qat_uclo_get_dev_type(handle);
	obj_handle->prod_rev = PID_MAJOR_REV |
			(PID_MINOR_REV & handle->hal_handle->revision_id);
	if (qat_uclo_check_uof_compat(obj_handle)) {
		pr_err("QAT: UOF incompatible\n");
		return -EINVAL;
	}
	obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64),
					GFP_KERNEL);
	if (!obj_handle->uword_buf)
		return -ENOMEM;
	obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
	if (!obj_handle->obj_hdr->file_buff ||
	    !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
				    &obj_handle->str_table)) {
		pr_err("QAT: UOF doesn't have effective images\n");
		goto out_err;
	}
	obj_handle->uimage_num =
		qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
				    ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
	if (!obj_handle->uimage_num)
		goto out_err;
	if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
		pr_err("QAT: Bad object\n");
		goto out_check_uof_aemask_err;
	}
	qat_uclo_init_uword_num(handle);
	qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
				   &obj_handle->init_mem_tab);
	if (qat_uclo_set_ae_mode(handle))
		goto out_check_uof_aemask_err;
	return 0;
out_check_uof_aemask_err:
	for (ae = 0; ae < obj_handle->uimage_num; ae++)
		kfree(obj_handle->ae_uimage[ae].page);
out_err:
	kfree(obj_handle->uword_buf);
	return -EFAULT;
}

static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
				      struct icp_qat_suof_filehdr *suof_ptr,
				      int suof_size)
{
	unsigned int check_sum = 0;
	unsigned int min_ver_offset = 0;
	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;

	suof_handle->file_id = ICP_QAT_SUOF_FID;
	suof_handle->suof_buf = (char *)suof_ptr;
	suof_handle->suof_size = suof_size;
	min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
					      min_ver);
	check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
					       min_ver_offset);
	if (check_sum != suof_ptr->check_sum) {
		pr_err("QAT: incorrect SUOF checksum\n");
		return -EINVAL;
	}
	suof_handle->check_sum = suof_ptr->check_sum;
	suof_handle->min_ver = suof_ptr->min_ver;
	suof_handle->maj_ver = suof_ptr->maj_ver;
	suof_handle->fw_type = suof_ptr->fw_type;
	return 0;
}

static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
			      struct icp_qat_suof_img_hdr *suof_img_hdr,
			      struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
	struct icp_qat_simg_ae_mode *ae_mode;
	struct icp_qat_suof_objhdr *suof_objhdr;

	suof_img_hdr->simg_buf  = (suof_handle->suof_buf +
				   suof_chunk_hdr->offset +
				   sizeof(*suof_objhdr));
	suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
				  (suof_handle->suof_buf +
				   suof_chunk_hdr->offset))->img_length;

	suof_img_hdr->css_header = suof_img_hdr->simg_buf;
	suof_img_hdr->css_key = (suof_img_hdr->css_header +
				 sizeof(struct icp_qat_css_hdr));
	suof_img_hdr->css_signature = suof_img_hdr->css_key +
				      ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
				      ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
	suof_img_hdr->css_simg = suof_img_hdr->css_signature +
				 ICP_QAT_CSS_SIGNATURE_LEN(handle);

	ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
	suof_img_hdr->ae_mask = ae_mode->ae_mask;
	suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
	suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
	suof_img_hdr->fw_type = ae_mode->fw_type;
}

static void
qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
			  struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
	char **sym_str = (char **)&suof_handle->sym_str;
	unsigned int *sym_size = &suof_handle->sym_size;
	struct icp_qat_suof_strtable *str_table_obj;

	*sym_size = *(unsigned int *)(uintptr_t)
		   (suof_chunk_hdr->offset + suof_handle->suof_buf);
	*sym_str = (char *)(uintptr_t)
		   (suof_handle->suof_buf + suof_chunk_hdr->offset +
		   sizeof(str_table_obj->tab_length));
}

static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
				      struct icp_qat_suof_img_hdr *img_hdr)
{
	struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
	unsigned int prod_rev, maj_ver, prod_type;

	prod_type = qat_uclo_get_dev_type(handle);
	img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
	prod_rev = PID_MAJOR_REV |
			 (PID_MINOR_REV & handle->hal_handle->revision_id);
	if (img_ae_mode->dev_type != prod_type) {
		pr_err("QAT: incompatible product type %x\n",
		       img_ae_mode->dev_type);
		return -EINVAL;
	}
	maj_ver = prod_rev & 0xff;
	if (maj_ver > img_ae_mode->devmax_ver ||
	    maj_ver < img_ae_mode->devmin_ver) {
		pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
		return -EINVAL;
	}
	return 0;
}

static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;

	kfree(sobj_handle->img_table.simg_hdr);
	sobj_handle->img_table.simg_hdr = NULL;
	kfree(handle->sobj_handle);
	handle->sobj_handle = NULL;
}

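/* Swap the image at img_id with the last table entry so it is handled last */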
static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
			      unsigned int img_id, unsigned int num_simgs)
{
	struct icp_qat_suof_img_hdr img_header;

	if (img_id != num_simgs - 1) {
		memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
		       sizeof(*suof_img_hdr));
		memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
		       sizeof(*suof_img_hdr));
		memcpy(&suof_img_hdr[img_id], &img_header,
		       sizeof(*suof_img_hdr));
	}
}

static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
			     struct icp_qat_suof_filehdr *suof_ptr,
			     int suof_size)
{
	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
	struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
	struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
	int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
	unsigned int i = 0;
	struct icp_qat_suof_img_hdr img_header;

	if (!suof_ptr || suof_size == 0) {
		pr_err("QAT: invalid SUOF pointer or size\n");
		return -EINVAL;
	}
	if (qat_uclo_check_suof_format(suof_ptr))
		return -EINVAL;
	ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
	if (ret)
		return ret;
	suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
			 ((uintptr_t)suof_ptr + sizeof(*suof_ptr));

	qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
	suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;

	if (suof_handle->img_table.num_simgs != 0) {
		suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs,
				       sizeof(img_header),
				       GFP_KERNEL);
		if (!suof_img_hdr)
			return -ENOMEM;
		suof_handle->img_table.simg_hdr = suof_img_hdr;

		for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
			qat_uclo_map_simg(handle, &suof_img_hdr[i],
					  &suof_chunk_hdr[1 + i]);
			ret = qat_uclo_check_simg_compat(handle,
							 &suof_img_hdr[i]);
			if (ret)
				return ret;
			suof_img_hdr[i].ae_mask &= handle->cfg_ae_mask;
			if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
				ae0_img = i;
		}

		if (!handle->chip_info->tgroup_share_ustore) {
			qat_uclo_tail_img(suof_img_hdr, ae0_img,
					  suof_handle->img_table.num_simgs);
		}
	}
	return 0;
}

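/* Combine the high and low 32-bit halves of a bus address */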
#define ADD_ADDR(high, low)  ((((u64)high) << 32) + low)
#define BITS_IN_DWORD 32

static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_fw_auth_desc *desc)
{
	u32 fcu_sts, retry = 0;
	u32 fcu_ctl_csr, fcu_sts_csr;
	u32 fcu_dram_hi_csr, fcu_dram_lo_csr;
	u64 bus_addr;

	bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
			   - sizeof(struct icp_qat_auth_chunk);

	fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
	fcu_sts_csr = handle->chip_info->fcu_sts_csr;
	fcu_dram_hi_csr = handle->chip_info->fcu_dram_addr_hi;
	fcu_dram_lo_csr = handle->chip_info->fcu_dram_addr_lo;

	SET_CAP_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD));
	SET_CAP_CSR(handle, fcu_dram_lo_csr, bus_addr);
	SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);

	do {
		msleep(FW_AUTH_WAIT_PERIOD);
		fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
		if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
			goto auth_fail;
		if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
			if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
				return 0;
	} while (retry++ < FW_AUTH_MAX_RETRY);
auth_fail:
	pr_err("QAT: authentication error (FCU_STATUS = 0x%x), retry = %d\n",
	       fcu_sts & FCU_AUTH_STS_MASK, retry);
	return -EINVAL;
}

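/*
 * Broadcast load applies only when the chip shares ustore within a thread
 * group and the image does not target any admin AE.
 */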
static bool qat_uclo_is_broadcast(struct icp_qat_fw_loader_handle *handle,
				  int imgid)
{
	struct icp_qat_suof_handle *sobj_handle;

	if (!handle->chip_info->tgroup_share_ustore)
		return false;

	sobj_handle = (struct icp_qat_suof_handle *)handle->sobj_handle;
	if (handle->hal_handle->admin_ae_mask &
	    sobj_handle->img_table.simg_hdr[imgid].ae_mask)
		return false;

	return true;
}

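/* Issue one FCU broadcast load for all AEs in the descriptor mask and poll for completion */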
static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
				      struct icp_qat_fw_auth_desc *desc)
{
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned long desc_ae_mask = desc->ae_mask;
	u32 fcu_sts, ae_broadcast_mask = 0;
	u32 fcu_loaded_csr, ae_loaded;
	u32 fcu_sts_csr, fcu_ctl_csr;
	unsigned int ae, retry = 0;

	if (handle->chip_info->tgroup_share_ustore) {
		fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
		fcu_sts_csr = handle->chip_info->fcu_sts_csr;
		fcu_loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
	} else {
		pr_err("Chip 0x%x doesn't support broadcast load\n",
		       handle->pci_dev->device);
		return -EINVAL;
	}

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		if (qat_hal_check_ae_active(handle, (unsigned char)ae)) {
			pr_err("QAT: Broadcast load failed. AE is not enabled or active.\n");
			return -EINVAL;
		}

		if (test_bit(ae, &desc_ae_mask))
			ae_broadcast_mask |= 1 << ae;
	}

	if (ae_broadcast_mask) {
		SET_CAP_CSR(handle, FCU_ME_BROADCAST_MASK_TYPE,
			    ae_broadcast_mask);

		SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_LOAD);

		do {
			msleep(FW_AUTH_WAIT_PERIOD);
			fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
			fcu_sts &= FCU_AUTH_STS_MASK;

			if (fcu_sts == FCU_STS_LOAD_FAIL) {
				pr_err("Broadcast load failed: 0x%x\n", fcu_sts);
				return -EINVAL;
			} else if (fcu_sts == FCU_STS_LOAD_DONE) {
				ae_loaded = GET_CAP_CSR(handle, fcu_loaded_csr);
				ae_loaded >>= handle->chip_info->fcu_loaded_ae_pos;

				if ((ae_loaded & ae_broadcast_mask) == ae_broadcast_mask)
					break;
			}
		} while (retry++ < FW_AUTH_MAX_RETRY);

		if (retry > FW_AUTH_MAX_RETRY) {
			pr_err("QAT: broadcast load failed timeout %d\n", retry);
			return -EINVAL;
		}
	}
	return 0;
}

static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
			       struct icp_firml_dram_desc *dram_desc,
			       unsigned int size)
{
	void *vptr;
	dma_addr_t ptr;

	vptr = dma_alloc_coherent(&handle->pci_dev->dev,
				  size, &ptr, GFP_KERNEL);
	if (!vptr)
		return -ENOMEM;
	dram_desc->dram_base_addr_v = vptr;
	dram_desc->dram_bus_addr = ptr;
	dram_desc->dram_size = size;
	return 0;
}

static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
			       struct icp_firml_dram_desc *dram_desc)
{
	if (handle && dram_desc && dram_desc->dram_base_addr_v) {
		dma_free_coherent(&handle->pci_dev->dev,
				  (size_t)(dram_desc->dram_size),
				  dram_desc->dram_base_addr_v,
				  dram_desc->dram_bus_addr);
	}

	if (dram_desc)
		memset(dram_desc, 0, sizeof(*dram_desc));
}

static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
				   struct icp_qat_fw_auth_desc **desc)
{
	struct icp_firml_dram_desc dram_desc;

	if (*desc) {
		dram_desc.dram_base_addr_v = *desc;
		dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
					   (*desc))->chunk_bus_addr;
		dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
				       (*desc))->chunk_size;
		qat_uclo_simg_free(handle, &dram_desc);
	}
}

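/* Sanity-check an image's CSS header length, declared size and firmware type before it is mapped */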
static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
				char *image, unsigned int size,
				unsigned int fw_type)
{
	char *fw_type_name = fw_type ? "MMP" : "AE";
	unsigned int css_dword_size = sizeof(u32);

	if (handle->chip_info->fw_auth) {
		struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
		unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle);

		if ((css_hdr->header_len * css_dword_size) != header_len)
			goto err;
		if ((css_hdr->size * css_dword_size) != size)
			goto err;
		if (fw_type != css_hdr->fw_type)
			goto err;
		if (size <= header_len)
			goto err;
		size -= header_len;
	}

	if (fw_type == CSS_AE_FIRMWARE) {
		if (size < sizeof(struct icp_qat_simg_ae_mode *) +
		    ICP_QAT_SIMG_AE_INIT_SEQ_LEN)
			goto err;
		if (size > ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)
			goto err;
	} else if (fw_type == CSS_MMP_FIRMWARE) {
		if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN)
			goto err;
	} else {
		pr_err("QAT: Unsupported firmware type\n");
		return -EINVAL;
	}
	return 0;

err:
	pr_err("QAT: Invalid %s firmware image\n", fw_type_name);
	return -EINVAL;
}

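/*
 * Copy the CSS header, public key, signature and image body into a
 * DMA-coherent buffer and fill in the descriptor the FCU authenticates.
 */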
static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
				char *image, unsigned int size,
				struct icp_qat_fw_auth_desc **desc)
{
	struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
	struct icp_qat_fw_auth_desc *auth_desc;
	struct icp_qat_auth_chunk *auth_chunk;
	u64 virt_addr, bus_addr, virt_base;
	unsigned int length, simg_offset = sizeof(*auth_chunk);
	struct icp_qat_simg_ae_mode *simg_ae_mode;
	struct icp_firml_dram_desc img_desc;

	if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) {
		pr_err("QAT: error, input image size overflow %d\n", size);
		return -EINVAL;
	}
	length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
		 ICP_QAT_CSS_AE_SIMG_LEN(handle) + simg_offset :
		 size + ICP_QAT_CSS_FWSK_PAD_LEN(handle) + simg_offset;
	if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
		pr_err("QAT: error, failed to allocate contiguous DRAM\n");
		return -ENOMEM;
	}

	auth_chunk = img_desc.dram_base_addr_v;
	auth_chunk->chunk_size = img_desc.dram_size;
	auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
	virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
	bus_addr  = img_desc.dram_bus_addr + simg_offset;
	auth_desc = img_desc.dram_base_addr_v;
	auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->css_hdr_low = (unsigned int)bus_addr;
	virt_addr = virt_base;

	memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
	/* pub key */
	bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
			   sizeof(*css_hdr);
	virt_addr = virt_addr + sizeof(*css_hdr);

	auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->fwsk_pub_low = (unsigned int)bus_addr;

	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + sizeof(*css_hdr)),
	       ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
	/* padding */
	memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
	       0, ICP_QAT_CSS_FWSK_PAD_LEN(handle));

	/* exponent */
	memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
	       ICP_QAT_CSS_FWSK_PAD_LEN(handle)),
	       (void *)(image + sizeof(*css_hdr) +
			ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
	       sizeof(unsigned int));

	/* signature */
	bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
			    auth_desc->fwsk_pub_low) +
		   ICP_QAT_CSS_FWSK_PUB_LEN(handle);
	virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(handle);
	auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->signature_low = (unsigned int)bus_addr;

	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + sizeof(*css_hdr) +
	       ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
	       ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)),
	       ICP_QAT_CSS_SIGNATURE_LEN(handle));

	bus_addr = ADD_ADDR(auth_desc->signature_high,
			    auth_desc->signature_low) +
		   ICP_QAT_CSS_SIGNATURE_LEN(handle);
	virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);

	auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->img_low = (unsigned int)bus_addr;
	auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle);
	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + ICP_QAT_AE_IMG_OFFSET(handle)),
	       auth_desc->img_len);
	virt_addr = virt_base;
	/* AE firmware */
	if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
	    CSS_AE_FIRMWARE) {
		auth_desc->img_ae_mode_data_high = auth_desc->img_high;
		auth_desc->img_ae_mode_data_low = auth_desc->img_low;
		bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
				    auth_desc->img_ae_mode_data_low) +
			   sizeof(struct icp_qat_simg_ae_mode);

		auth_desc->img_ae_init_data_high = (unsigned int)
						 (bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
		bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
		auth_desc->img_ae_insts_high = (unsigned int)
					     (bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
		virt_addr += sizeof(struct icp_qat_css_hdr);
		virt_addr += ICP_QAT_CSS_FWSK_PUB_LEN(handle);
		virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
		simg_ae_mode = (struct icp_qat_simg_ae_mode *)(uintptr_t)virt_addr;
		auth_desc->ae_mask = simg_ae_mode->ae_mask & handle->cfg_ae_mask;
	} else {
		auth_desc->img_ae_insts_high = auth_desc->img_high;
		auth_desc->img_ae_insts_low = auth_desc->img_low;
	}
	*desc = auth_desc;
	return 0;
}

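/* Load the authenticated image onto each AE in the descriptor's ae_mask, one AE at a time */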
static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_fw_auth_desc *desc)
{
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	u32 fcu_sts_csr, fcu_ctl_csr;
	u32 loaded_aes, loaded_csr;
	unsigned int i;
	u32 fcu_sts;

	fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
	fcu_sts_csr = handle->chip_info->fcu_sts_csr;
	loaded_csr = handle->chip_info->fcu_loaded_ae_csr;

	for_each_set_bit(i, &ae_mask, handle->hal_handle->ae_max_num) {
		int retry = 0;

		if (!((desc->ae_mask >> i) & 0x1))
			continue;
		if (qat_hal_check_ae_active(handle, i)) {
			pr_err("QAT: AE %d is active\n", i);
			return -EINVAL;
		}
		SET_CAP_CSR(handle, fcu_ctl_csr,
			    (FCU_CTRL_CMD_LOAD |
			    (1 << FCU_CTRL_BROADCAST_POS) |
			    (i << FCU_CTRL_AE_POS)));

		do {
			msleep(FW_AUTH_WAIT_PERIOD);
			fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
			if ((fcu_sts & FCU_AUTH_STS_MASK) ==
			    FCU_STS_LOAD_DONE) {
				loaded_aes = GET_CAP_CSR(handle, loaded_csr);
				loaded_aes >>= handle->chip_info->fcu_loaded_ae_pos;
				if (loaded_aes & (1 << i))
					break;
			}
		} while (retry++ < FW_AUTH_MAX_RETRY);
		if (retry > FW_AUTH_MAX_RETRY) {
			pr_err("QAT: firmware load failed timeout %x\n", retry);
			return -EINVAL;
		}
	}
	return 0;
}

static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
				 void *addr_ptr, int mem_size)
{
	struct icp_qat_suof_handle *suof_handle;

	suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
	if (!suof_handle)
		return -ENOMEM;
	handle->sobj_handle = suof_handle;
	if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
		qat_uclo_del_suof(handle);
		pr_err("QAT: map SUOF failed\n");
		return -EINVAL;
	}
	return 0;
}

int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
		       void *addr_ptr, int mem_size)
{
	struct icp_qat_fw_auth_desc *desc = NULL;
	int status = 0;
	int ret;

	ret = qat_uclo_check_image(handle, addr_ptr, mem_size, CSS_MMP_FIRMWARE);
	if (ret)
		return ret;

	if (handle->chip_info->fw_auth) {
		status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc);
		if (!status)
			status = qat_uclo_auth_fw(handle, desc);
		qat_uclo_ummap_auth_fw(handle, &desc);
	} else {
		if (handle->chip_info->mmp_sram_size < mem_size) {
			pr_err("QAT: MMP size is too large: 0x%x\n", mem_size);
			return -EFBIG;
		}
		qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
	}
	return status;
}

static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
				void *addr_ptr, int mem_size)
{
	struct icp_qat_uof_filehdr *filehdr;
	struct icp_qat_uclo_objhandle *objhdl;

	objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
	if (!objhdl)
		return -ENOMEM;
	objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
	if (!objhdl->obj_buf)
		goto out_objbuf_err;
	filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
	if (qat_uclo_check_uof_format(filehdr))
		goto out_objhdr_err;
	objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
					     ICP_QAT_UOF_OBJS);
	if (!objhdl->obj_hdr) {
		pr_err("QAT: object file chunk is null\n");
		goto out_objhdr_err;
	}
	handle->obj_handle = objhdl;
	if (qat_uclo_parse_uof_obj(handle))
		goto out_overlay_obj_err;
	return 0;

out_overlay_obj_err:
	handle->obj_handle = NULL;
	kfree(objhdl->obj_hdr);
out_objhdr_err:
	kfree(objhdl->obj_buf);
out_objbuf_err:
	kfree(objhdl);
	return -ENOMEM;
}

1651static int qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle,
1652				     struct icp_qat_mof_file_hdr *mof_ptr,
1653				     u32 mof_size)
1654{
1655	struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
1656	unsigned int min_ver_offset;
1657	unsigned int checksum;
1658
1659	mobj_handle->file_id = ICP_QAT_MOF_FID;
1660	mobj_handle->mof_buf = (char *)mof_ptr;
1661	mobj_handle->mof_size = mof_size;
1662
1663	min_ver_offset = mof_size - offsetof(struct icp_qat_mof_file_hdr,
1664					     min_ver);
1665	checksum = qat_uclo_calc_str_checksum(&mof_ptr->min_ver,
1666					      min_ver_offset);
1667	if (checksum != mof_ptr->checksum) {
1668		pr_err("QAT: incorrect MOF checksum\n");
1669		return -EINVAL;
1670	}
1671
1672	mobj_handle->checksum = mof_ptr->checksum;
1673	mobj_handle->min_ver = mof_ptr->min_ver;
1674	mobj_handle->maj_ver = mof_ptr->maj_ver;
1675	return 0;
1676}
1677
1678static void qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle)
1679{
1680	struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
1681
1682	kfree(mobj_handle->obj_table.obj_hdr);
1683	mobj_handle->obj_table.obj_hdr = NULL;
1684	kfree(handle->mobj_handle);
1685	handle->mobj_handle = NULL;
1686}
1687
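/* Look up an object by name in the MOF object table. */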
1688static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle,
1689					const char *obj_name, char **obj_ptr,
1690					unsigned int *obj_size)
1691{
1692	struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr;
1693	unsigned int i;
1694
1695	for (i = 0; i < mobj_handle->obj_table.num_objs; i++) {
1696		if (!strncmp(obj_hdr[i].obj_name, obj_name,
1697			     ICP_QAT_SUOF_OBJ_NAME_LEN)) {
1698			*obj_ptr  = obj_hdr[i].obj_buf;
1699			*obj_size = obj_hdr[i].obj_size;
1700			return 0;
1701		}
1702	}
1703
1704	pr_err("QAT: object %s not found inside MOF\n", obj_name);
1705	return -EINVAL;
1706}
1707
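/*
 * Fill in one object table entry from an object chunk header: resolve
 * the chunk's buffer inside the UOF or SUOF region, its size, and its
 * name in the symbol string table.
 */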
1708static int qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle,
1709				     struct icp_qat_mof_objhdr *mobj_hdr,
1710				     struct icp_qat_mof_obj_chunkhdr *obj_chunkhdr)
1711{
1712	u8 *obj;
1713
1714	if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_UOF_IMAG,
1715		     ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
1716		obj = mobj_handle->uobjs_hdr + obj_chunkhdr->offset;
1717	} else if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_SUOF_IMAG,
1718			    ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
1719		obj = mobj_handle->sobjs_hdr + obj_chunkhdr->offset;
1720	} else {
1721		pr_err("QAT: unsupported chunk id\n");
1722		return -EINVAL;
1723	}
1724	mobj_hdr->obj_buf = obj;
1725	mobj_hdr->obj_size = (unsigned int)obj_chunkhdr->size;
1726	mobj_hdr->obj_name = obj_chunkhdr->name + mobj_handle->sym_str;
1727	return 0;
1728}
1729
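/*
 * Build the MOF object table: count the UOF and SUOF chunks, allocate
 * one table entry per chunk, map every chunk and verify that none of
 * them was skipped.
 */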
1730static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
1731{
1732	struct icp_qat_mof_obj_chunkhdr *uobj_chunkhdr;
1733	struct icp_qat_mof_obj_chunkhdr *sobj_chunkhdr;
1734	struct icp_qat_mof_obj_hdr *uobj_hdr;
1735	struct icp_qat_mof_obj_hdr *sobj_hdr;
1736	struct icp_qat_mof_objhdr *mobj_hdr;
1737	unsigned int uobj_chunk_num = 0;
1738	unsigned int sobj_chunk_num = 0;
1739	unsigned int *valid_chunk;
1740	int ret, i;
1741
1742	uobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->uobjs_hdr;
1743	sobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->sobjs_hdr;
1744	if (uobj_hdr)
1745		uobj_chunk_num = uobj_hdr->num_chunks;
1746	if (sobj_hdr)
1747		sobj_chunk_num = sobj_hdr->num_chunks;
1748
1749	mobj_hdr = kcalloc(uobj_chunk_num + sobj_chunk_num, sizeof(*mobj_hdr),
1750			   GFP_KERNEL);
1751	if (!mobj_hdr)
1752		return -ENOMEM;
1753
1754	mobj_handle->obj_table.obj_hdr = mobj_hdr;
1755	valid_chunk = &mobj_handle->obj_table.num_objs;
1756	uobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
1757			 ((uintptr_t)uobj_hdr + sizeof(*uobj_hdr));
1758	sobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
1759			((uintptr_t)sobj_hdr + sizeof(*sobj_hdr));
1760
1761	/* map uof objects */
1762	for (i = 0; i < uobj_chunk_num; i++) {
1763		ret = qat_uclo_map_obj_from_mof(mobj_handle,
1764						&mobj_hdr[*valid_chunk],
1765						&uobj_chunkhdr[i]);
1766		if (ret)
1767			return ret;
1768		(*valid_chunk)++;
1769	}
1770
1771	/* map suof objects */
1772	for (i = 0; i < sobj_chunk_num; i++) {
1773		ret = qat_uclo_map_obj_from_mof(mobj_handle,
1774						&mobj_hdr[*valid_chunk],
1775						&sobj_chunkhdr[i]);
1776		if (ret)
1777			return ret;
1778		(*valid_chunk)++;
1779	}
1780
1781	if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunk) {
1782		pr_err("QAT: inconsistent UOF/SUOF chunk count\n");
1783		return -EINVAL;
1784	}
1785	return 0;
1786}
1787
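/*
 * Record the position and size of the symbol string table carried by
 * the SYM_OBJS chunk; object names are offsets into this table.
 */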
1788static void qat_uclo_map_mof_symobjs(struct icp_qat_mof_handle *mobj_handle,
1789				     struct icp_qat_mof_chunkhdr *mof_chunkhdr)
1790{
1791	char **sym_str = (char **)&mobj_handle->sym_str;
1792	unsigned int *sym_size = &mobj_handle->sym_size;
1793	struct icp_qat_mof_str_table *str_table_obj;
1794
1795	*sym_size = *(unsigned int *)(uintptr_t)
1796		    (mof_chunkhdr->offset + mobj_handle->mof_buf);
1797	*sym_str = (char *)(uintptr_t)
1798		   (mobj_handle->mof_buf + mof_chunkhdr->offset +
1799		    sizeof(str_table_obj->tab_len));
1800}
1801
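/* Route a top-level MOF chunk by id: symbol table, UOF or SUOF objects. */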
1802static void qat_uclo_map_mof_chunk(struct icp_qat_mof_handle *mobj_handle,
1803				   struct icp_qat_mof_chunkhdr *mof_chunkhdr)
1804{
1805	char *chunk_id = mof_chunkhdr->chunk_id;
1806
1807	if (!strncmp(chunk_id, ICP_QAT_MOF_SYM_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
1808		qat_uclo_map_mof_symobjs(mobj_handle, mof_chunkhdr);
1809	else if (!strncmp(chunk_id, ICP_QAT_UOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
1810		mobj_handle->uobjs_hdr = mobj_handle->mof_buf +
1811					 mof_chunkhdr->offset;
1812	else if (!strncmp(chunk_id, ICP_QAT_SUOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
1813		mobj_handle->sobjs_hdr = mobj_handle->mof_buf +
1814					 mof_chunkhdr->offset;
1815}
1816
1817static int qat_uclo_check_mof_format(struct icp_qat_mof_file_hdr *mof_hdr)
1818{
1819	int maj = mof_hdr->maj_ver & 0xff;
1820	int min = mof_hdr->min_ver & 0xff;
1821
1822	if (mof_hdr->file_id != ICP_QAT_MOF_FID) {
1823		pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id);
1824		return -EINVAL;
1825	}
1826
1827	if (mof_hdr->num_chunks <= 0x1) {
1828		pr_err("QAT: MOF chunk count is incorrect\n");
1829		return -EINVAL;
1830	}
1831	if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) {
1832		pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n",
1833		       maj, min);
1834		return -EINVAL;
1835	}
1836	return 0;
1837}
1838
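/*
 * Map a MOF image and locate the named object inside it. Plain UOF and
 * SUOF images, identified by their file id, are passed through as-is.
 */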
1839static int qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle,
1840				struct icp_qat_mof_file_hdr *mof_ptr,
1841				u32 mof_size, const char *obj_name,
1842				char **obj_ptr, unsigned int *obj_size)
1843{
1844	struct icp_qat_mof_chunkhdr *mof_chunkhdr;
1845	unsigned int file_id = mof_ptr->file_id;
1846	struct icp_qat_mof_handle *mobj_handle;
1847	unsigned short chunks_num;
1848	unsigned int i;
1849	int ret;
1850
1851	if (file_id == ICP_QAT_UOF_FID || file_id == ICP_QAT_SUOF_FID) {
1852		if (obj_ptr)
1853			*obj_ptr = (char *)mof_ptr;
1854		if (obj_size)
1855			*obj_size = mof_size;
1856		return 0;
1857	}
1858	if (qat_uclo_check_mof_format(mof_ptr))
1859		return -EINVAL;
1860
1861	mobj_handle = kzalloc(sizeof(*mobj_handle), GFP_KERNEL);
1862	if (!mobj_handle)
1863		return -ENOMEM;
1864
1865	handle->mobj_handle = mobj_handle;
1866	ret = qat_uclo_map_mof_file_hdr(handle, mof_ptr, mof_size);
1867	if (ret)
1868		return ret;
1869
1870	mof_chunkhdr = (void *)mof_ptr + sizeof(*mof_ptr);
1871	chunks_num = mof_ptr->num_chunks;
1872
1873	/* Parse MOF file chunks */
1874	for (i = 0; i < chunks_num; i++)
1875		qat_uclo_map_mof_chunk(mobj_handle, &mof_chunkhdr[i]);
1876
1877	/* The symbol table and at least one of uobjs/sobjs must be present */
1878	if (!mobj_handle->sym_str ||
1879	    (!mobj_handle->uobjs_hdr && !mobj_handle->sobjs_hdr))
1880		return -EINVAL;
1881
1882	ret = qat_uclo_map_objs_from_mof(mobj_handle);
1883	if (ret)
1884		return ret;
1885
1886	/* Seek specified uof object in MOF */
1887	return qat_uclo_seek_obj_inside_mof(mobj_handle, obj_name,
1888					    obj_ptr, obj_size);
1889}
1890
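/*
 * Map a firmware image for loading: optionally extract the named object
 * from a MOF container, then map it as SUOF or UOF depending on whether
 * the device authenticates its firmware.
 */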
1891int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
1892		     void *addr_ptr, u32 mem_size, const char *obj_name)
1893{
1894	char *obj_addr;
1895	u32 obj_size;
1896	int ret;
1897
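	/* ae_mask must be wide enough to hold a bit for every possible AE */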
1898	BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
1899		     (sizeof(handle->hal_handle->ae_mask) * 8));
1900
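	/* reject buffers too small to contain a valid file header */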
1901	if (!handle || !addr_ptr || mem_size < 24)
1902		return -EINVAL;
1903
1904	if (obj_name) {
1905		ret = qat_uclo_map_mof_obj(handle, addr_ptr, mem_size, obj_name,
1906					   &obj_addr, &obj_size);
1907		if (ret)
1908			return ret;
1909	} else {
1910		obj_addr = addr_ptr;
1911		obj_size = mem_size;
1912	}
1913
1914	return (handle->chip_info->fw_auth) ?
1915			qat_uclo_map_suof_obj(handle, obj_addr, obj_size) :
1916			qat_uclo_map_uof_obj(handle, obj_addr, obj_size);
1917}
1918
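/* Release all MOF, SUOF and UOF state attached to the loader handle. */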
1919void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle)
1920{
1921	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1922	unsigned int a;
1923
1924	if (handle->mobj_handle)
1925		qat_uclo_del_mof(handle);
1926	if (handle->sobj_handle)
1927		qat_uclo_del_suof(handle);
1928	if (!obj_handle)
1929		return;
1930
1931	kfree(obj_handle->uword_buf);
1932	for (a = 0; a < obj_handle->uimage_num; a++)
1933		kfree(obj_handle->ae_uimage[a].page);
1934
1935	for (a = 0; a < handle->hal_handle->ae_max_num; a++)
1936		qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
1937
1938	kfree(obj_handle->obj_hdr);
1939	kfree(obj_handle->obj_buf);
1940	kfree(obj_handle);
1941	handle->obj_handle = NULL;
1942}
1943
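/*
 * Fetch the microword for one ustore address from the encapsulated
 * page's uwblocks; words flagged as invalid are replaced with the
 * image's fill pattern.
 */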
1944static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
1945				 struct icp_qat_uclo_encap_page *encap_page,
1946				 u64 *uword, unsigned int addr_p,
1947				 unsigned int raddr, u64 fill)
1948{
1949	unsigned int i, addr;
1950	u64 uwrd = 0;
1951
1952	if (!encap_page) {
1953		*uword = fill;
1954		return;
1955	}
1956	addr = (encap_page->page_region) ? raddr : addr_p;
1957	for (i = 0; i < encap_page->uwblock_num; i++) {
1958		if (addr >= encap_page->uwblock[i].start_addr &&
1959		    addr <= encap_page->uwblock[i].start_addr +
1960		    encap_page->uwblock[i].words_num - 1) {
1961			addr -= encap_page->uwblock[i].start_addr;
1962			addr *= obj_handle->uword_in_bytes;
1963			memcpy(&uwrd, (void *)(((uintptr_t)
1964			       encap_page->uwblock[i].micro_words) + addr),
1965			       obj_handle->uword_in_bytes);
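			/* keep only the low 44 bits of the microword */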
1966			uwrd = uwrd & GENMASK_ULL(43, 0);
1967		}
1968	}
1969	*uword = uwrd;
1970	if (*uword == INVLD_UWORD)
1971		*uword = fill;
1972}
1973
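/*
 * Stream one page into an AE's ustore: assemble microwords into the
 * copy buffer, up to UWORD_CPYBUF_SIZE words at a time, and write each
 * batch with qat_hal_wr_uwords().
 */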
1974static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
1975					struct icp_qat_uclo_encap_page
1976					*encap_page, unsigned int ae)
1977{
1978	unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
1979	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1980	u64 fill_pat;
1981
1982	/* load the page starting at appropriate ustore address */
1983	/* get fill-pattern from an image -- they are all the same */
1984	memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
1985	       sizeof(u64));
1986	uw_physical_addr = encap_page->beg_addr_p;
1987	uw_relative_addr = 0;
1988	words_num = encap_page->micro_words_num;
1989	while (words_num) {
1990		cpylen = min(words_num, UWORD_CPYBUF_SIZE);
1991
1992		/* load the buffer */
1993		for (i = 0; i < cpylen; i++)
1994			qat_uclo_fill_uwords(obj_handle, encap_page,
1995					     &obj_handle->uword_buf[i],
1996					     uw_physical_addr + i,
1997					     uw_relative_addr + i, fill_pat);
1998
1999		/* copy the buffer to ustore */
2000		qat_hal_wr_uwords(handle, (unsigned char)ae,
2001				  uw_physical_addr, cpylen,
2002				  obj_handle->uword_buf);
2003
2004		uw_physical_addr += cpylen;
2005		uw_relative_addr += cpylen;
2006		words_num -= cpylen;
2007	}
2008}
2009
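/*
 * For every AE the image is assigned to, find the matching slice, write
 * its default page into ustore, and point the assigned contexts' program
 * counters at the image entry address.
 */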
2010static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
2011				    struct icp_qat_uof_image *image)
2012{
2013	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
2014	unsigned long ae_mask = handle->hal_handle->ae_mask;
2015	unsigned long cfg_ae_mask = handle->cfg_ae_mask;
2016	unsigned long ae_assigned = image->ae_assigned;
2017	struct icp_qat_uclo_aedata *aed;
2018	unsigned int ctx_mask, s;
2019	struct icp_qat_uclo_page *page;
2020	unsigned char ae;
2021	int ctx;
2022
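	/* all eight contexts in 8-ctx mode, otherwise every other context */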
2023	if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
2024		ctx_mask = 0xff;
2025	else
2026		ctx_mask = 0x55;
2027	/* Load the default page and set the PC of each assigned context
2028	 * to the image entry point address. */
2029	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
2030		if (!test_bit(ae, &cfg_ae_mask))
2031			continue;
2032
2033		if (!test_bit(ae, &ae_assigned))
2034			continue;
2035
2036		aed = &obj_handle->ae_data[ae];
2037		/* find the slice to which this image is assigned */
2038		for (s = 0; s < aed->slice_num; s++) {
2039			if (image->ctx_assigned &
2040			    aed->ae_slices[s].ctx_mask_assigned)
2041				break;
2042		}
2043		if (s >= aed->slice_num)
2044			continue;
2045		page = aed->ae_slices[s].page;
2046		if (!page->encap_page->def_page)
2047			continue;
2048		qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
2049
2050		page = aed->ae_slices[s].page;
2051		for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
2052			aed->ae_slices[s].cur_page[ctx] =
2053					(ctx_mask & (1 << ctx)) ? page : NULL;
2054		qat_hal_set_live_ctx(handle, (unsigned char)ae,
2055				     image->ctx_assigned);
2056		qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
2057			       image->entry_address);
2058	}
2059}
2060
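/*
 * Authenticate and load every sub-image of a SUOF, using a broadcast
 * load where the image requires it; the auth descriptor is unmapped on
 * both the success and the error path.
 */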
2061static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
2062{
2063	unsigned int i;
2064	struct icp_qat_fw_auth_desc *desc = NULL;
2065	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
2066	struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
2067	int ret;
2068
2069	for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
2070		ret = qat_uclo_check_image(handle, simg_hdr[i].simg_buf,
2071					   simg_hdr[i].simg_len,
2072					   CSS_AE_FIRMWARE);
2073		if (ret)
2074			return ret;
2075
2076		if (qat_uclo_map_auth_fw(handle,
2077					 (char *)simg_hdr[i].simg_buf,
2078					 (unsigned int)
2079					 simg_hdr[i].simg_len,
2080					 &desc))
2081			goto wr_err;
2082		if (qat_uclo_auth_fw(handle, desc))
2083			goto wr_err;
2084		if (qat_uclo_is_broadcast(handle, i)) {
2085			if (qat_uclo_broadcast_load_fw(handle, desc))
2086				goto wr_err;
2087		} else {
2088			if (qat_uclo_load_fw(handle, desc))
2089				goto wr_err;
2090		}
2091		qat_uclo_ummap_auth_fw(handle, &desc);
2092	}
2093	return 0;
2094wr_err:
2095	qat_uclo_ummap_auth_fw(handle, &desc);
2096	return -EINVAL;
2097}
2098
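/*
 * Write all mapped UOF images: initialize the global variables, then
 * set up ustore and write the code pages of every image.
 */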
2099static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
2100{
2101	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
2102	unsigned int i;
2103
2104	if (qat_uclo_init_globals(handle))
2105		return -EINVAL;
2106	for (i = 0; i < obj_handle->uimage_num; i++) {
2107		if (!obj_handle->ae_uimage[i].img_ptr)
2108			return -EINVAL;
2109		if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
2110			return -EINVAL;
2111		qat_uclo_wr_uimage_page(handle,
2112					obj_handle->ae_uimage[i].img_ptr);
2113	}
2114	return 0;
2115}
2116
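/* Write all firmware images, signed (SUOF) or unsigned (UOF). */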
2117int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
2118{
2119	return (handle->chip_info->fw_auth) ? qat_uclo_wr_suof_img(handle) :
2120					      qat_uclo_wr_uof_img(handle);
2121}
2122
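/* Restrict firmware loading to the AEs in cfg_ae_mask; must be non-zero. */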
2123int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
2124			     unsigned int cfg_ae_mask)
2125{
2126	if (!cfg_ae_mask)
2127		return -EINVAL;
2128
2129	handle->cfg_ae_mask = cfg_ae_mask;
2130	return 0;
2131}