/*
 *
 *  sep_main.c - Security Processor Driver main group of functions
 *
 *  Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
 *  Contributions(c) 2009-2011 Discretix. All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the Free
 *  Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc., 59
 *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 *  CONTACTS:
 *
 *  Mark Allyn		mark.a.allyn@intel.com
 *  Jayant Mangalampalli jayant.mangalampalli@intel.com
 *
 *  CHANGES:
 *
 *  2009.06.26	Initial publish
 *  2010.09.14  Upgrade to Medfield
 *  2011.01.21  Move to sep_main.c to allow for sep_crypto.c
 *  2011.02.22  Enable kernel crypto operation
 *
 *  Please note that this driver is based on information in the Discretix
 *  CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
 *  Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
 *  Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
 *  Overview and Integration Guide.
 */
/* #define DEBUG */
/* #define SEP_PERF_DEBUG */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/kdev_t.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/current.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/async.h>
#include <linux/crypto.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/hash.h>

#include "sep_driver_hw_defs.h"
#include "sep_driver_config.h"
#include "sep_driver_api.h"
#include "sep_dev.h"
#include "sep_crypto.h"

#define CREATE_TRACE_POINTS
#include "sep_trace_events.h"

/*
 * Let's not spend cycles iterating over message
 * area contents if debugging is not enabled
 */
#ifdef DEBUG
#define sep_dump_message(sep)	_sep_dump_message(sep)
#else
#define sep_dump_message(sep)
#endif

/**
 * Currently, there is only one SEP device per platform;
 * in the event that future platforms have more than one SEP
 * device, this will become a linked list
 */

struct sep_device *sep_dev;

/**
 * sep_queue_status_remove - Removes transaction from status queue
 * @sep: SEP device
 * @queue_elem: pointer to the queue element to remove
 *
 * This function removes information about a transaction from the queue.
 */
void sep_queue_status_remove(struct sep_device *sep,
				      struct sep_queue_info **queue_elem)
{
	unsigned long lck_flags;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
		current->pid);

	if (!queue_elem || !(*queue_elem)) {
		dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
					current->pid, __func__);
		return;
	}

	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
	list_del(&(*queue_elem)->list);
	sep->sep_queue_num--;
	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	kfree(*queue_elem);
	*queue_elem = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
		current->pid);
	return;
}

/**
 * sep_queue_status_add - Adds transaction to status queue
 * @sep: SEP device
 * @opcode: transaction opcode
 * @size: input data size
 * @pid: pid of current process
 * @name: current process name
 * @name_len: length of name (current process)
 *
 * This function adds information about a started transaction to the status
 * queue.
 */
struct sep_queue_info *sep_queue_status_add(
						struct sep_device *sep,
						u32 opcode,
						u32 size,
						u32 pid,
						u8 *name, size_t name_len)
{
	unsigned long lck_flags;
	struct sep_queue_info *my_elem = NULL;

	my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);

	if (!my_elem)
		return NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);

	my_elem->data.opcode = opcode;
	my_elem->data.size = size;
	my_elem->data.pid = pid;

	if (name_len > TASK_COMM_LEN)
		name_len = TASK_COMM_LEN;

	memcpy(&my_elem->data.name, name, name_len);

	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);

	list_add_tail(&my_elem->list, &sep->sep_queue_status);
	sep->sep_queue_num++;

	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	return my_elem;
}

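/*
 * Illustrative pairing of the two queue-status helpers above; this is
 * a sketch rather than an actual call site in this driver, and
 * "opcode" and "size" stand in for real transaction parameters:
 *
 *	struct sep_queue_info *elem;
 *
 *	elem = sep_queue_status_add(sep, opcode, size, current->pid,
 *				    (u8 *)current->comm,
 *				    strlen(current->comm));
 *	if (!elem)
 *		return -ENOMEM;
 *	(... perform the transaction ...)
 *	sep_queue_status_remove(sep, &elem);
 *
 * sep_queue_status_remove() also resets elem to NULL, so a later call
 * with the same pointer is a harmless no-op.
 */
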
/**
 *	sep_allocate_dmatables_region - Allocates buffer for MLLI/DMA tables
 *	@sep: SEP device
 *	@dmatables_region: Destination pointer for the buffer
 *	@dma_ctx: DMA context for the transaction
 *	@table_count: Number of MLLI/DMA tables to create
 *
 *	The buffer created will not work as-is for DMA operations;
 *	it needs to be copied over to the appropriate place in the
 *	shared area.
 */
static int sep_allocate_dmatables_region(struct sep_device *sep,
					 void **dmatables_region,
					 struct sep_dma_context *dma_ctx,
					 const u32 table_count)
{
	const size_t new_len =
		SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;

	void *tmp_region = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
				current->pid, dma_ctx);
	dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
				current->pid, dmatables_region);

	if (!dma_ctx || !dmatables_region) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] dma context/region uninitialized\n",
			current->pid);
		return -EINVAL;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
				current->pid, new_len);
	dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
				dma_ctx->dmatables_len);
	tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
	if (!tmp_region) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] no mem for dma tables region\n",
				current->pid);
		return -ENOMEM;
	}

	/* Were there any previous tables that need to be preserved ? */
	if (*dmatables_region) {
		memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
		kfree(*dmatables_region);
		*dmatables_region = NULL;
	}

	*dmatables_region = tmp_region;

	dma_ctx->dmatables_len += new_len;

	return 0;
}

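/*
 * The helper above behaves like a grow-only realloc: each call
 * reserves another SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1 bytes,
 * copies any previously staged tables to the front of the new buffer
 * and advances dma_ctx->dmatables_len. Successive DCBs of one
 * transaction therefore end up staged back to back in
 * *dmatables_region, ready to be copied into the shared area in a
 * single pass.
 */
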
/**
 *	sep_wait_transaction - Used for synchronizing transactions
 *	@sep: SEP device
 */
int sep_wait_transaction(struct sep_device *sep)
{
	int error = 0;
	DEFINE_WAIT(wait);

	if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
				&sep->in_use_flags)) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no transactions, returning\n",
				current->pid);
		goto end_function_setpid;
	}

	/*
	 * Looping is needed even for exclusive waitq entries
	 * due to process wakeup latencies: a previous process
	 * might have already created another transaction.
	 */
	for (;;) {
		/*
		 * Exclusive waitq entry, so that only one process is
		 * woken up from the queue at a time.
		 */
		prepare_to_wait_exclusive(&sep->event_transactions,
					  &wait,
					  TASK_INTERRUPTIBLE);
		if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
					  &sep->in_use_flags)) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] no transactions, breaking\n",
					current->pid);
			break;
		}
		dev_dbg(&sep->pdev->dev,
			"[PID%d] transactions ongoing, sleeping\n",
				current->pid);
		schedule();
		dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);

		if (signal_pending(current)) {
			dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
							current->pid);
			error = -EINTR;
			goto end_function;
		}
	}
end_function_setpid:
	/*
	 * The pid_doing_transaction indicates that this process
	 * now owns the facilities to perform a transaction with
	 * the SEP. While this process is performing a transaction,
	 * no other process who has the SEP device open can perform
	 * any transactions. This method allows more than one process
	 * to have the device open at any given time, which provides
	 * finer granularity for device utilization by multiple
	 * processes.
	 */
	/* Only one process is able to progress here at a time */
	sep->pid_doing_transaction = current->pid;

end_function:
	finish_wait(&sep->event_transactions, &wait);

	return error;
}

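/*
 * How the transaction lock is meant to be used (illustrative sketch
 * only; the real sequencing is spread across sep_mmap(), the ioctl
 * paths and sep_end_transaction_handler()):
 *
 *	error = sep_wait_transaction(sep);	(claim the device)
 *	if (error)
 *		return error;			(interrupted by a signal)
 *	(... build DMA tables, send command, poll for reply ...)
 *	sep_end_transaction_handler(sep, &dma_ctx, call_status,
 *				    &my_queue_elem);	(release)
 *
 * Between the two calls, sep->pid_doing_transaction identifies the
 * owner, and sep_check_transaction_owner() gates every other entry
 * point.
 */
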
/**
 * sep_check_transaction_owner - Checks if current process owns transaction
 * @sep: SEP device
 */
static inline int sep_check_transaction_owner(struct sep_device *sep)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
		current->pid,
		sep->pid_doing_transaction);

	if ((sep->pid_doing_transaction == 0) ||
		(current->pid != sep->pid_doing_transaction)) {
		return -EACCES;
	}

	/* We own the transaction */
	return 0;
}

#ifdef DEBUG

/**
 * sep_dump_message - dump the message that is pending
 * @sep: SEP device
 *
 * This prints a dump only when DEBUG is set; it also
 * respects the kernel's debug print enabling.
 */
static void _sep_dump_message(struct sep_device *sep)
{
	int count;

	u32 *p = sep->shared_addr;

	for (count = 0; count < 10 * 4; count += 4)
		dev_dbg(&sep->pdev->dev,
			"[PID%d] Word %d of the message is %x\n",
				current->pid, count/4, *p++);
}

#endif

/**
 * sep_map_and_alloc_shared_area - allocate shared block
 * @sep: security processor
 *
 * Allocates the coherent shared area of sep->shared_size bytes.
 */
static int sep_map_and_alloc_shared_area(struct sep_device *sep)
{
	sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
		sep->shared_size,
		&sep->shared_bus, GFP_KERNEL);

	if (!sep->shared_addr) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] shared memory dma_alloc_coherent failed\n",
				current->pid);
		return -ENOMEM;
	}
	dev_dbg(&sep->pdev->dev,
		"[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
				current->pid,
				sep->shared_size, sep->shared_addr,
				(unsigned long long)sep->shared_bus);
	return 0;
}

/**
 * sep_unmap_and_free_shared_area - free shared block
 * @sep: security processor
 */
static void sep_unmap_and_free_shared_area(struct sep_device *sep)
{
	dma_free_coherent(&sep->pdev->dev, sep->shared_size,
				sep->shared_addr, sep->shared_bus);
}

#ifdef DEBUG

/**
 * sep_shared_bus_to_virt - convert bus/virt addresses
 * @sep: pointer to struct sep_device
 * @bus_address: address to convert
 *
 * Returns virtual address inside the shared area according
 * to the bus address.
 */
static void *sep_shared_bus_to_virt(struct sep_device *sep,
						dma_addr_t bus_address)
{
	return sep->shared_addr + (bus_address - sep->shared_bus);
}

#endif

/**
 * sep_open - device open method
 * @inode: inode of SEP device
 * @filp: file handle to SEP device
 *
 * Open method for the SEP device. Called when userspace opens
 * the SEP device node.
 *
 * Returns zero on success, otherwise an error code.
 */
static int sep_open(struct inode *inode, struct file *filp)
{
	struct sep_device *sep;
	struct sep_private_data *priv;

	dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);

	if (filp->f_flags & O_NONBLOCK)
		return -ENOTSUPP;

	/*
	 * Get the SEP device structure and use it for the
	 * private_data field in filp for other methods
	 */

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	sep = sep_dev;
	priv->device = sep;
	filp->private_data = priv;

	dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
					current->pid, priv);

	/* Anyone can open; locking takes place at transaction level */
	return 0;
}

/**
 * sep_free_dma_table_data_handler - free DMA table
 * @sep: pointer to struct sep_device
 * @dma_ctx: dma context
 *
 * Handles the request to free the DMA table for synchronic actions
 */
int sep_free_dma_table_data_handler(struct sep_device *sep,
					   struct sep_dma_context **dma_ctx)
{
	int count;
	int dcb_counter;
	/* Pointer to the current dma_resource struct */
	struct sep_dma_resource *dma;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_free_dma_table_data_handler\n",
			current->pid);

	if (!dma_ctx || !(*dma_ctx)) {
		/* No context or context already freed */
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no DMA context or context already freed\n",
				current->pid);

		return 0;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
					current->pid,
					(*dma_ctx)->nr_dcb_creat);

	for (dcb_counter = 0;
	     dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
		dma = &(*dma_ctx)->dma_res_arr[dcb_counter];

		/* Unmap and free input map array */
		if (dma->in_map_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->in_map_array[count].dma_addr,
					dma->in_map_array[count].size,
					DMA_TO_DEVICE);
			}
			kfree(dma->in_map_array);
		}

		/*
		 * Output is handled differently. If
		 * this was a secure dma into restricted memory,
		 * then we skip this step altogether as restricted
		 * memory is not available to the o/s at all.
		 */
		if (((*dma_ctx)->secure_dma == false) &&
			(dma->out_map_array)) {

			for (count = 0; count < dma->out_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->out_map_array[count].dma_addr,
					dma->out_map_array[count].size,
					DMA_FROM_DEVICE);
			}
			kfree(dma->out_map_array);
		}

		/* Free page cache for input */
		if (dma->in_page_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				flush_dcache_page(dma->in_page_array[count]);
				page_cache_release(dma->in_page_array[count]);
			}
			kfree(dma->in_page_array);
		}

		/* Again, we do this only for non secure dma */
		if (((*dma_ctx)->secure_dma == false) &&
			(dma->out_page_array)) {

			for (count = 0; count < dma->out_num_pages; count++) {
				if (!PageReserved(dma->out_page_array[count]))
					SetPageDirty(
						dma->out_page_array[count]);

				flush_dcache_page(dma->out_page_array[count]);
				page_cache_release(dma->out_page_array[count]);
			}
			kfree(dma->out_page_array);
		}

		/*
		 * Note that here we use in_map_num_entries because we
		 * don't have a page array; the page array is generated
		 * only in sep_lock_user_pages(), which is not called
		 * for kernel crypto, and kernel crypto is what the sg
		 * (scatter gather) path is used for exclusively.
		 */
		if (dma->src_sg) {
			dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
				dma->in_map_num_entries, DMA_TO_DEVICE);
			dma->src_sg = NULL;
		}

		if (dma->dst_sg) {
			dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
				dma->in_map_num_entries, DMA_FROM_DEVICE);
			dma->dst_sg = NULL;
		}

		/* Reset all the values */
		dma->in_page_array = NULL;
		dma->out_page_array = NULL;
		dma->in_num_pages = 0;
		dma->out_num_pages = 0;
		dma->in_map_array = NULL;
		dma->out_map_array = NULL;
		dma->in_map_num_entries = 0;
		dma->out_map_num_entries = 0;
	}

	(*dma_ctx)->nr_dcb_creat = 0;
	(*dma_ctx)->num_lli_tables_created = 0;

	kfree(*dma_ctx);
	*dma_ctx = NULL;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_free_dma_table_data_handler end\n",
			current->pid);

	return 0;
}

/**
 * sep_end_transaction_handler - end transaction
 * @sep: pointer to struct sep_device
 * @dma_ctx: DMA context
 * @call_status: Call status
 * @my_queue_elem: queue element of the transaction's status-queue entry
 *
 * This API handles the end transaction request.
 */
static int sep_end_transaction_handler(struct sep_device *sep,
				       struct sep_dma_context **dma_ctx,
				       struct sep_call_status *call_status,
				       struct sep_queue_info **my_queue_elem)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid);

	/*
	 * Extraneous transaction clearing would mess up PM
	 * device usage counters and SEP would get suspended
	 * just before we send a command to SEP in the next
	 * transaction
	 */
	if (sep_check_transaction_owner(sep)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n",
						current->pid);
		return 0;
	}

	/* Update queue status */
	sep_queue_status_remove(sep, my_queue_elem);

	/* Check that all the DMA resources were freed */
	if (dma_ctx)
		sep_free_dma_table_data_handler(sep, dma_ctx);

	/* Reset call status for next transaction */
	if (call_status)
		call_status->status = 0;

	/*
	 * Clear the message area to avoid next transaction reading
	 * sensitive results from previous transaction
	 */
	memset(sep->shared_addr, 0,
	       SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/* start suspend delay */
#ifdef SEP_ENABLE_RUNTIME_PM
	if (sep->in_use) {
		sep->in_use = 0;
		pm_runtime_mark_last_busy(&sep->pdev->dev);
		pm_runtime_put_autosuspend(&sep->pdev->dev);
	}
#endif

	clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
	sep->pid_doing_transaction = 0;

	/* Now it's safe for next process to proceed */
	dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n",
					current->pid);
	clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
	wake_up(&sep->event_transactions);

	return 0;
}

/**
 * sep_release - close a SEP device
 * @inode: inode of SEP device
 * @filp: file handle being closed
 *
 * Called on the final close of a SEP device.
 */
static int sep_release(struct inode *inode, struct file *filp)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;

	dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);

	sep_end_transaction_handler(sep, dma_ctx, call_status,
		my_queue_elem);

	kfree(filp->private_data);

	return 0;
}

/**
 * sep_mmap - maps the shared area to user space
 * @filp: pointer to struct file
 * @vma: pointer to vm_area_struct
 *
 * Called on an mmap of our space via the normal SEP device
 */
static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
	dma_addr_t bus_addr;
	unsigned long error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);

	/* Set the transaction busy (own the device) */
	/*
	 * Problem for multithreaded applications is that here we're
	 * possibly going to sleep while holding a write lock on
	 * current->mm->mmap_sem, which will cause deadlock for ongoing
	 * transaction trying to create DMA tables
	 */
	error = sep_wait_transaction(sep);
	if (error)
		/* Interrupted by signal, don't clear transaction */
		goto end_function;

	/*
	 * Clear the message area to avoid next transaction reading
	 * sensitive results from previous transaction
	 */
	memset(sep->shared_addr, 0,
	       SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/*
	 * Check that the size of the mapped range does not exceed the
	 * size of the message shared area
	 */
	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
		error = -EINVAL;
		goto end_function_with_error;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
					current->pid, sep->shared_addr);

	/* Get bus address */
	bus_addr = sep->shared_bus;

	if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
		vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] remap_page_range failed\n",
						current->pid);
		error = -EAGAIN;
		goto end_function_with_error;
	}

	/* Update call status */
	set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);

	goto end_function;

end_function_with_error:
	/* Clear our transaction */
	sep_end_transaction_handler(sep, NULL, call_status,
		my_queue_elem);

end_function:
	return error;
}

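/*
 * From user space, the sequence implied by sep_mmap() is roughly the
 * following (hypothetical snippet; the device node name and the size
 * the caller maps are assumptions, not taken from this file):
 *
 *	int fd = open("/dev/sep", O_RDWR);
 *	void *msg = mmap(NULL, msg_area_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *
 * The mmap() call itself claims the transaction lock via
 * sep_wait_transaction(), so a well-behaved client maps once per
 * transaction and lets release/end-transaction drop the lock.
 */
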
/**
 * sep_poll - poll handler
 * @filp:	pointer to struct file
 * @wait:	pointer to poll_table
 *
 * Called by the OS when the kernel is asked to do a poll on
 * a SEP file handle.
 */
static unsigned int sep_poll(struct file *filp, poll_table *wait)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	u32 mask = 0;
	u32 retval = 0;
	u32 retval2 = 0;
	unsigned long lock_irq_flag;

	/* Am I the process that owns the transaction? */
	if (sep_check_transaction_owner(sep)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
						current->pid);
		mask = POLLERR;
		goto end_function;
	}

	/* Check if send command or send_reply were activated previously */
	if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
			  &call_status->status)) {
		dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
						current->pid);
		mask = POLLERR;
		goto end_function;
	}

	/* Add the event to the polling wait table */
	dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
					current->pid);

	poll_wait(filp, &sep->event_interrupt, wait);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] poll: send_ct is %lx reply ct is %lx\n",
			current->pid, sep->send_ct, sep->reply_ct);

	/* Check if error occurred during poll */
	retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	if ((retval2 != 0x0) && (retval2 != 0x8)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
						current->pid, retval2);
		mask |= POLLERR;
		goto end_function;
	}

	spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);

	if (sep->send_ct == sep->reply_ct) {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] poll: data ready check (GPR2)  %x\n",
				current->pid, retval);

		/* Check if printf request */
		if ((retval >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: SEP printf request\n",
					current->pid);
			goto end_function;
		}

		/* Check if this is a SEP reply or a request */
		if (retval >> 31) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: SEP request\n",
					current->pid);
		} else {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: normal return\n",
					current->pid);
			sep_dump_message(sep);
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
					current->pid);
			mask |= POLLIN | POLLRDNORM;
		}
		set_bit(SEP_LEGACY_POLL_DONE_OFFSET, &call_status->status);
	} else {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] poll; no reply; returning mask of 0\n",
				current->pid);
		mask = 0;
	}

end_function:
	return mask;
}

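/*
 * Interpretation of the GPR values tested above, as implied by the
 * checks themselves rather than by any datasheet: GPR3 acts as an
 * error latch, where anything other than 0x0 or 0x8 is treated as a
 * fault; in GPR2, bit 31 set means SEP is making a request of the
 * host, bit 30 set means SEP wants a printf serviced, and neither bit
 * set means a normal reply is ready in the message area.
 */
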
/**
 * sep_time_address - address in SEP memory of time
 * @sep: SEP device we want the address from
 *
 * Return the address of the two dwords in memory used for time
 * setting.
 */
static u32 *sep_time_address(struct sep_device *sep)
{
	return sep->shared_addr +
		SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
}

/**
 * sep_set_time - set the SEP time
 * @sep: the SEP we are setting the time for
 *
 * Calculates time and sets it at the predefined address.
 * Called with the SEP mutex held.
 */
static unsigned long sep_set_time(struct sep_device *sep)
{
	struct timeval time;
	u32 *time_addr;	/* Address of time as seen by the kernel */

	do_gettimeofday(&time);

	/* Set value in the SYSTEM MEMORY offset */
	time_addr = sep_time_address(sep);

	time_addr[0] = SEP_TIME_VAL_TOKEN;
	time_addr[1] = time.tv_sec;

	dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
					current->pid, time.tv_sec);
	dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
					current->pid, time_addr);
	dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
					current->pid, sep->shared_addr);

	return time.tv_sec;
}

/**
 * sep_send_command_handler - kick off a command
 * @sep: SEP being signalled
 *
 * This function raises an interrupt to SEP, signalling that it has a
 * new command from the host.
 *
 * Note that this function does fall under the ioctl lock
 */
int sep_send_command_handler(struct sep_device *sep)
{
	unsigned long lock_irq_flag;
	u32 *msg_pool;
	int error = 0;

	/* Basic sanity check; set msg pool to start of shared area */
	msg_pool = (u32 *)sep->shared_addr;
	msg_pool += 2;

	/* Look for start msg token */
	if (*msg_pool != SEP_START_MSG_TOKEN) {
		dev_warn(&sep->pdev->dev, "start message token not present\n");
		error = -EPROTO;
		goto end_function;
	}

	/* Do we have a reasonable size? */
	msg_pool += 1;
	if ((*msg_pool < 2) ||
		(*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {

		dev_warn(&sep->pdev->dev, "invalid message size\n");
		error = -EPROTO;
		goto end_function;
	}

	/* Does the command look reasonable? */
	msg_pool += 1;
	if (*msg_pool < 2) {
		dev_warn(&sep->pdev->dev, "invalid message opcode\n");
		error = -EPROTO;
		goto end_function;
	}

#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
	dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
					current->pid,
					sep->pdev->dev.power.runtime_status);
	sep->in_use = 1; /* device is about to be used */
	pm_runtime_get_sync(&sep->pdev->dev);
#endif

	if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
		error = -EPROTO;
		goto end_function;
	}
	sep->in_use = 1; /* device is about to be used */
	sep_set_time(sep);

	sep_dump_message(sep);

	/* Update counter */
	spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
	sep->send_ct++;
	spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
			current->pid, sep->send_ct, sep->reply_ct);

	/* Send interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);

end_function:
	return error;
}

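/*
 * Shared-area message layout assumed by the sanity checks above: the
 * first two 32-bit words are skipped, word 2 must hold
 * SEP_START_MSG_TOKEN, word 3 the total message size in bytes
 * (between 2 and SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES), and word 4
 * the opcode (which must be >= 2). Only after these checks does the
 * driver ring the GPR0 doorbell with 0x2.
 */
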
/**
 *	sep_crypto_dma - DMA map a scatterlist
 *	@sep: pointer to struct sep_device
 *	@sg: pointer to struct scatterlist
 *	@dma_maps: pointer to place a pointer to array of dma maps
 *	 This is filled in; anything previously there will be lost
 *	 The structure for dma maps is sep_dma_map
 *	@direction: DMA direction of the transfer
 *	@returns number of dma maps on success; negative on error
 *
 *	This creates the dma table from the scatterlist
 *	It is used only for kernel crypto as it works with scatterlists
 *	representation of data buffers
 */
static int sep_crypto_dma(
	struct sep_device *sep,
	struct scatterlist *sg,
	struct sep_dma_map **dma_maps,
	enum dma_data_direction direction)
{
	struct scatterlist *temp_sg;

	u32 count_segment;
	u32 count_mapped;
	struct sep_dma_map *sep_dma;
	int ct1;

	if (sg->length == 0)
		return 0;

	/* Count the segments */
	temp_sg = sg;
	count_segment = 0;
	while (temp_sg) {
		count_segment += 1;
		temp_sg = scatterwalk_sg_next(temp_sg);
	}
	dev_dbg(&sep->pdev->dev,
		"There are (hex) %x segments in sg\n", count_segment);

	/* DMA map segments */
	count_mapped = dma_map_sg(&sep->pdev->dev, sg,
		count_segment, direction);

	dev_dbg(&sep->pdev->dev,
		"There are (hex) %x maps in sg\n", count_mapped);

	if (count_mapped == 0) {
		dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n");
		return -ENOMEM;
	}

	sep_dma = kmalloc(sizeof(struct sep_dma_map) *
		count_mapped, GFP_ATOMIC);

	if (sep_dma == NULL) {
		dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n");
		return -ENOMEM;
	}

	for_each_sg(sg, temp_sg, count_mapped, ct1) {
		sep_dma[ct1].dma_addr = sg_dma_address(temp_sg);
		sep_dma[ct1].size = sg_dma_len(temp_sg);
		dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n",
			ct1, (unsigned long)sep_dma[ct1].dma_addr,
			(unsigned long)sep_dma[ct1].size);
	}

	*dma_maps = sep_dma;
	return count_mapped;
}

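/*
 * Note that count_mapped can legitimately be smaller than
 * count_segment: dma_map_sg() is allowed to coalesce adjacent
 * segments, which is why the map array above is sized and iterated by
 * the mapped count rather than by the segment count.
 */
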
/**
 *	sep_crypto_lli - build LLI entries from a scatterlist
 *	@sep: pointer to struct sep_device
 *	@sg: pointer to struct scatterlist
 *	@maps: pointer to place a pointer to array of dma maps
 *	 This is filled in; anything previously there will be lost
 *	 The structure for dma maps is sep_dma_map
 *	@llis: pointer to place a pointer to array of lli entries
 *	 This is filled in; anything previously there will be lost
 *	 The structure for lli entries is sep_lli_entry
 *	@data_size: total data size
 *	@direction: DMA direction of the transfer
 *	@returns number of dma maps on success; negative on error
 *
 *	This creates the LLI table from the scatterlist
 *	It is only used for kernel crypto as it works exclusively
 *	with scatterlists (struct scatterlist) representation of
 *	data buffers
 */
static int sep_crypto_lli(
	struct sep_device *sep,
	struct scatterlist *sg,
	struct sep_dma_map **maps,
	struct sep_lli_entry **llis,
	u32 data_size,
	enum dma_data_direction direction)
{
	int ct1;
	struct sep_lli_entry *sep_lli;
	struct sep_dma_map *sep_map;

	int nbr_ents;

	nbr_ents = sep_crypto_dma(sep, sg, maps, direction);
	if (nbr_ents <= 0) {
		dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n",
			nbr_ents);
		return nbr_ents;
	}

	sep_map = *maps;

	sep_lli = kmalloc(sizeof(struct sep_lli_entry) * nbr_ents, GFP_ATOMIC);

	if (sep_lli == NULL) {
		dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n");

		kfree(*maps);
		*maps = NULL;
		return -ENOMEM;
	}

	for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) {
		sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr;

		/* Maximum for page is total data size */
		if (sep_map[ct1].size > data_size)
			sep_map[ct1].size = data_size;

		sep_lli[ct1].block_size = (u32)sep_map[ct1].size;
	}

	*llis = sep_lli;
	return nbr_ents;
}

/**
 *	sep_lock_kernel_pages - map kernel pages for DMA
 *	@sep: pointer to struct sep_device
 *	@kernel_virt_addr: address of data buffer in kernel
 *	@data_size: size of data
 *	@lli_array_ptr: lli array
 *	@in_out_flag: input into device or output from device
 *	@dma_ctx: pointer to struct sep_dma_context
 *
 *	This function locks all the physical pages of the kernel virtual
 *	buffer and constructs a basic lli array, where each entry holds
 *	the physical page address and the size that the application data
 *	holds in this page.
 *	This function is used only during kernel crypto module calls from
 *	within the kernel (when ioctl is not used).
 *
 *	This is used only for kernel crypto. Kernel pages
 *	are handled differently as they are done via
 *	scatter gather lists (struct scatterlist)
 */
static int sep_lock_kernel_pages(struct sep_device *sep,
	unsigned long kernel_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag,
	struct sep_dma_context *dma_ctx)
{
	int num_pages;	/* int, not u32: sep_crypto_lli() can return a negative error */
	struct scatterlist *sg;

	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;

	enum dma_data_direction direction;

	lli_array = NULL;
	map_array = NULL;

	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		direction = DMA_TO_DEVICE;
		sg = dma_ctx->src_sg;
	} else {
		direction = DMA_FROM_DEVICE;
		sg = dma_ctx->dst_sg;
	}

	num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array,
		data_size, direction);

	if (num_pages <= 0) {
		dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n",
			num_pages);
		return -ENOMEM;
	}

	/* Put mapped kernel sg into kernel resource array */

	/* Set output params according to the in_out flag */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
								NULL;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
								map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg =
			dma_ctx->src_sg;
	} else {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
								NULL;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
								map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
					out_map_num_entries = num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
			dma_ctx->dst_sg;
	}

	return 0;
}

/**
 * sep_lock_user_pages - lock and map user pages for DMA
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user memory data buffer
 * @data_size: size of data buffer
 * @lli_array_ptr: lli array
 * @in_out_flag: input or output to device
 * @dma_ctx: pointer to struct sep_dma_context
 *
 * This function locks all the physical pages of the application
 * virtual buffer and constructs a basic lli array, where each entry
 * holds the physical page address and the size that the application
 * data holds in these physical pages
 */
static int sep_lock_user_pages(struct sep_device *sep,
	u32 app_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag,
	struct sep_dma_context *dma_ctx)
{
	int error = 0;
	u32 count;
	int result;
	/* The page of the end address of the user space buffer */
	u32 end_page;
	/* The page of the start address of the user space buffer */
	u32 start_page;
	/* The range in pages */
	u32 num_pages;
	/* Array of pointers to page */
	struct page **page_array;
	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;

	/* Set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] lock user pages app_virt_addr is %x\n",
			current->pid, app_virt_addr);

	dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
					current->pid, data_size);
	dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
					current->pid, start_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
					current->pid, end_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
					current->pid, num_pages);

	/* Allocate array of pages structure pointers */
	page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
	if (!page_array) {
		error = -ENOMEM;
		goto end_function;
	}
	map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
	if (!map_array) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] kmalloc for map_array failed\n",
				current->pid);
		error = -ENOMEM;
		goto end_function_with_error1;
	}

	lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
		GFP_ATOMIC);

	if (!lli_array) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] kmalloc for lli_array failed\n",
				current->pid);
		error = -ENOMEM;
		goto end_function_with_error2;
	}

	/* Convert the application virtual address into a set of physical */
	down_read(&current->mm->mmap_sem);
	result = get_user_pages(current, current->mm, app_virt_addr,
		num_pages,
		((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
		0, page_array, NULL);

	up_read(&current->mm->mmap_sem);

	/* Check the number of pages locked - if not all then exit with error */
	if (result != num_pages) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] not all pages locked by get_user_pages, "
			"result 0x%X, num_pages 0x%X\n",
				current->pid, result, num_pages);
		error = -ENOMEM;
		goto end_function_with_error3;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
					current->pid);

	/*
	 * Fill the array using page array data and
	 * map the pages - this action will also flush the cache as needed
	 */
	for (count = 0; count < num_pages; count++) {
		/* Fill the map array */
		map_array[count].dma_addr =
			dma_map_page(&sep->pdev->dev, page_array[count],
			0, PAGE_SIZE, DMA_BIDIRECTIONAL);

		map_array[count].size = PAGE_SIZE;

		/* Fill the lli array entry */
		lli_array[count].bus_address = (u32)map_array[count].dma_addr;
		lli_array[count].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is %08lx, "
			"lli_array[%x].block_size is (hex) %x\n", current->pid,
			count, (unsigned long)lli_array[count].bus_address,
			count, lli_array[count].block_size);
	}

	/* Check the offset for the first page */
	lli_array[0].bus_address =
		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

	/* Check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size =
			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After check if page 0 has all data\n",
		current->pid);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] lli_array[0].bus_address is (hex) %08lx, "
		"lli_array[0].block_size is (hex) %x\n",
		current->pid,
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);

	/* Check the size of the last page */
	if (num_pages > 1) {
		lli_array[num_pages - 1].block_size =
			(app_virt_addr + data_size) & (~PAGE_MASK);
		if (lli_array[num_pages - 1].block_size == 0)
			lli_array[num_pages - 1].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] After last page size adjustment\n",
			current->pid);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
			"lli_array[%x].block_size is (hex) %x\n",
			current->pid,
			num_pages - 1,
			(unsigned long)lli_array[num_pages - 1].bus_address,
			num_pages - 1,
			lli_array[num_pages - 1].block_size);
	}

	/* Set output params according to the in_out flag */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
								page_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
								map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
	} else {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
								page_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
								map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
					out_map_num_entries = num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
	}
	goto end_function;

end_function_with_error3:
	/* Free lli array */
	kfree(lli_array);

end_function_with_error2:
	kfree(map_array);

end_function_with_error1:
	/* Free page array */
	kfree(page_array);

end_function:
	return error;
}

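/*
 * Worked example of the first/last page fix-ups above (hypothetical
 * numbers, PAGE_SIZE = 0x1000): for app_virt_addr = 0x1234 and
 * data_size = 0x3000, start_page = 1, end_page = 4, so num_pages = 4.
 * Entry 0 gets bus_address + 0x234 and block_size
 * 0x1000 - 0x234 = 0xdcc; entries 1 and 2 keep 0x1000; entry 3 is
 * trimmed to (0x1234 + 0x3000) & ~PAGE_MASK = 0x234. The block sizes
 * sum back to data_size: 0xdcc + 0x1000 + 0x1000 + 0x234 = 0x3000.
 */
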
/**
 *	sep_lli_table_secure_dma - get lli array for IMR addresses
 *	@sep: pointer to struct sep_device
 *	@app_virt_addr: user memory data buffer
 *	@data_size: size of data buffer
 *	@lli_array_ptr: lli array
 *	@in_out_flag: not used
 *	@dma_ctx: pointer to struct sep_dma_context
 *
 *	This function creates lli tables for outputting data to
 *	IMR memory, which is memory that cannot be accessed by the
 *	x86 processor.
 */
static int sep_lli_table_secure_dma(struct sep_device *sep,
	u32 app_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag,
	struct sep_dma_context *dma_ctx)
{
	int error = 0;
	u32 count;
	/* The page of the end address of the user space buffer */
	u32 end_page;
	/* The page of the start address of the user space buffer */
	u32 start_page;
	/* The range in pages */
	u32 num_pages;
	/* Array of lli */
	struct sep_lli_entry *lli_array;

	/* Set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] lock user pages app_virt_addr is %x\n",
		current->pid, app_virt_addr);

	dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
		current->pid, data_size);
	dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
		current->pid, start_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
		current->pid, end_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
		current->pid, num_pages);

	lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
		GFP_ATOMIC);

	if (!lli_array) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] kmalloc for lli_array failed\n",
			current->pid);
		return -ENOMEM;
	}

	/*
	 * Fill the lli_array
	 */
	start_page = start_page << PAGE_SHIFT;
	for (count = 0; count < num_pages; count++) {
		/* Fill the lli array entry */
		lli_array[count].bus_address = start_page;
		lli_array[count].block_size = PAGE_SIZE;

		start_page += PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is %08lx, "
			"lli_array[%x].block_size is (hex) %x\n",
			current->pid,
			count, (unsigned long)lli_array[count].bus_address,
			count, lli_array[count].block_size);
	}

	/* Check the offset for the first page */
	lli_array[0].bus_address =
		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

	/* Check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size =
			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After check if page 0 has all data\n"
		"lli_array[0].bus_address is (hex) %08lx, "
		"lli_array[0].block_size is (hex) %x\n",
		current->pid,
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);

	/* Check the size of the last page */
	if (num_pages > 1) {
		lli_array[num_pages - 1].block_size =
			(app_virt_addr + data_size) & (~PAGE_MASK);
		if (lli_array[num_pages - 1].block_size == 0)
			lli_array[num_pages - 1].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] After last page size adjustment\n"
			"lli_array[%x].bus_address is (hex) %08lx, "
			"lli_array[%x].block_size is (hex) %x\n",
			current->pid, num_pages - 1,
			(unsigned long)lli_array[num_pages - 1].bus_address,
			num_pages - 1,
			lli_array[num_pages - 1].block_size);
	}
	*lli_array_ptr = lli_array;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;

	return error;
}

/**
 * sep_calculate_lli_table_max_size - size the LLI table
 * @sep: pointer to struct sep_device
 * @lli_in_array_ptr: pointer to the lli array to consume
 * @num_array_entries: number of entries in the lli array
 * @last_table_flag: set if this turns out to be the last table
 *
 * This function calculates the size of data that can be inserted into
 * the lli table from this array, such that either the table is full
 * (all entries are entered), or there are no more entries in the
 * lli array
 */
static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
	struct sep_lli_entry *lli_in_array_ptr,
	u32 num_array_entries,
	u32 *last_table_flag)
{
	u32 counter;
	/* Table data size */
	u32 table_data_size = 0;
	/* Data size for the next table */
	u32 next_table_data_size;

	*last_table_flag = 0;

	/*
	 * Calculate the data in the out lli table till we fill the whole
	 * table or till the data has ended
	 */
	for (counter = 0;
		(counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
			(counter < num_array_entries); counter++)
		table_data_size += lli_in_array_ptr[counter].block_size;

	/*
	 * Check if we reached the last entry,
	 * meaning this is the last table to build,
	 * and no need to check the block alignment
	 */
	if (counter == num_array_entries) {
		/* Set the last table flag */
		*last_table_flag = 1;
		goto end_function;
	}

	/*
	 * Calculate the data size of the next table.
	 * Stop if no entries are left or if the data size exceeds the
	 * DMA restriction
	 */
	next_table_data_size = 0;
	for (; counter < num_array_entries; counter++) {
		next_table_data_size += lli_in_array_ptr[counter].block_size;
		if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
			break;
	}

	/*
	 * Check if the next table data size is less than the DMA restriction.
	 * If it is - recalculate the current table size, so that the next
	 * table data size will be adequate for DMA
	 */
	if (next_table_data_size &&
		next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)

		table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
			next_table_data_size);

end_function:
	return table_data_size;
}

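/*
 * Example of the DMA-restriction adjustment above (made-up numbers;
 * assume SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE is 0x1000): if the
 * current table fills to 0x5000 and only 0x400 bytes of array entries
 * remain, the function hands back 0x5000 - (0x1000 - 0x400) = 0x4400,
 * leaving 0x1000 bytes for the next table so that it, too, satisfies
 * the DMA minimum.
 */
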
/**
 * sep_build_lli_table - build an lli array for the given table
 * @sep: pointer to struct sep_device
 * @lli_array_ptr: pointer to lli array
 * @lli_table_ptr: pointer to lli table
 * @num_processed_entries_ptr: pointer to number of lli array entries consumed
 * @num_table_entries_ptr: pointer to number of entries written to the table
 * @table_data_size: total data size
 *
 * Builds an lli table from the lli_array according to
 * the given size of data
 */
static void sep_build_lli_table(struct sep_device *sep,
	struct sep_lli_entry	*lli_array_ptr,
	struct sep_lli_entry	*lli_table_ptr,
	u32 *num_processed_entries_ptr,
	u32 *num_table_entries_ptr,
	u32 table_data_size)
{
	/* Current table data size */
	u32 curr_table_data_size;
	/* Counter of lli array entry */
	u32 array_counter;

	/* Init current table data size and lli array entry counter */
	curr_table_data_size = 0;
	array_counter = 0;
	*num_table_entries_ptr = 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] build lli table table_data_size: (hex) %x\n",
			current->pid, table_data_size);

	/* Fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* Update the number of entries in table */
		(*num_table_entries_ptr)++;

		lli_table_ptr->bus_address =
			cpu_to_le32(lli_array_ptr[array_counter].bus_address);

		lli_table_ptr->block_size =
			cpu_to_le32(lli_array_ptr[array_counter].block_size);

		curr_table_data_size += lli_array_ptr[array_counter].block_size;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr is %p\n",
				current->pid, lli_table_ptr);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address: %08lx\n",
				current->pid,
				(unsigned long)lli_table_ptr->bus_address);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
				current->pid, lli_table_ptr->block_size);

		/* Check for overflow of the table data */
		if (curr_table_data_size > table_data_size) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] curr_table_data_size too large\n",
					current->pid);

			/* Update the size of block in the table */
			lli_table_ptr->block_size =
				cpu_to_le32(lli_table_ptr->block_size) -
				(curr_table_data_size - table_data_size);

			/* Update the physical address in the lli array */
			lli_array_ptr[array_counter].bus_address +=
				cpu_to_le32(lli_table_ptr->block_size);

			/* Update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size =
				(curr_table_data_size - table_data_size);
		} else
			/* Advance to the next entry in the lli_array */
			array_counter++;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address is %08lx\n",
				current->pid,
				(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
				current->pid,
				lli_table_ptr->block_size);

		/* Move to the next entry in table */
		lli_table_ptr++;
	}

	/* Set the info entry to default */
	lli_table_ptr->bus_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	/* Set the output parameter */
	*num_processed_entries_ptr += array_counter;
}

/**
 * sep_shared_area_virt_to_bus - map shared area to bus address
 * @sep: pointer to struct sep_device
 * @virt_address: virtual address to convert
 *
 * This function returns the physical address inside shared area according
 * to the virtual address. It can be either on the external RAM device
 * (ioremapped), or on the system RAM
 * This implementation is for the external RAM
 */
static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
	void *virt_address)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
					current->pid, virt_address);
	dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
		current->pid,
		(unsigned long)
		sep->shared_bus + (virt_address - sep->shared_addr));

	return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
}

/**
 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
 * @sep: pointer to struct sep_device
 * @bus_address: bus address to convert
 *
 * This function returns the virtual address inside shared area
 * according to the physical address. It can be either on the
 * external RAM device (ioremapped), or on the system RAM
 * This implementation is for the external RAM
 */
static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
	dma_addr_t bus_address)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
		current->pid,
		(unsigned long)bus_address, (unsigned long)(sep->shared_addr +
			(size_t)(bus_address - sep->shared_bus)));

	return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
}

/**
 * sep_debug_print_lli_tables - dump LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_ptr: pointer to sep_lli_entry
 * @num_table_entries: number of entries
 * @table_data_size: total data size
 *
 * Walk the list of the created tables and print all the data
 */
1750static void sep_debug_print_lli_tables(struct sep_device *sep,
1751	struct sep_lli_entry *lli_table_ptr,
1752	unsigned long num_table_entries,
1753	unsigned long table_data_size)
1754{
1755#ifdef DEBUG
1756	unsigned long table_count = 1;
1757	unsigned long entries_count = 0;
1758
1759	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
1760					current->pid);
1761	if (num_table_entries == 0) {
1762		dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
1763			current->pid);
1764		return;
1765	}
1766
1767	while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
1768		dev_dbg(&sep->pdev->dev,
1769			"[PID%d] lli table %08lx, "
1770			"table_data_size is (hex) %lx\n",
1771				current->pid, table_count, table_data_size);
1772		dev_dbg(&sep->pdev->dev,
1773			"[PID%d] num_table_entries is (hex) %lx\n",
1774				current->pid, num_table_entries);
1775
1776		/* Print entries of the table (without info entry) */
1777		for (entries_count = 0; entries_count < num_table_entries;
1778			entries_count++, lli_table_ptr++) {
1779
1780			dev_dbg(&sep->pdev->dev,
1781				"[PID%d] lli_table_ptr address is %08lx\n",
1782				current->pid,
1783				(unsigned long) lli_table_ptr);
1784
1785			dev_dbg(&sep->pdev->dev,
1786				"[PID%d] phys address is %08lx "
1787				"block size is (hex) %x\n", current->pid,
1788				(unsigned long)lli_table_ptr->bus_address,
1789				lli_table_ptr->block_size);
1790		}
1791
1792		/* Point to the info entry */
1793		lli_table_ptr--;
1794
1795		dev_dbg(&sep->pdev->dev,
1796			"[PID%d] phys lli_table_ptr->block_size "
1797			"is (hex) %x\n",
1798			current->pid,
1799			lli_table_ptr->block_size);
1800
1801		dev_dbg(&sep->pdev->dev,
1802			"[PID%d] phys lli_table_ptr->physical_address "
1803			"is %08lx\n",
1804			current->pid,
1805			(unsigned long)lli_table_ptr->bus_address);
1806
1807
1808		table_data_size = lli_table_ptr->block_size & 0xffffff;
1809		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
1810
1811		dev_dbg(&sep->pdev->dev,
1812			"[PID%d] phys table_data_size is "
1813			"(hex) %lx num_table_entries is"
1814			" %lx bus_address is %lx\n",
1815				current->pid,
1816				table_data_size,
1817				num_table_entries,
1818				(unsigned long)lli_table_ptr->bus_address);
1819
1820		if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
1821			lli_table_ptr = (struct sep_lli_entry *)
1822				sep_shared_bus_to_virt(sep,
1823				(unsigned long)lli_table_ptr->bus_address);
1824
1825		table_count++;
1826	}
1827	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
1828					current->pid);
1829#endif
1830}
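
/*
 * Decoding sketch for the info entries printed above: the last entry of
 * each table links to the next one, packing the next table's entry
 * count into the top 8 bits of block_size and its data size into the
 * low 24 bits. These helpers are ours, for illustration only; the
 * driver open-codes the masks:
 */
static inline u32 sep_lli_info_num_entries(struct sep_lli_entry *info)
{
	/* Top 8 bits hold the entry count of the table pointed to */
	return (info->block_size >> 24) & 0xff;
}

static inline u32 sep_lli_info_data_size(struct sep_lli_entry *info)
{
	/* Low 24 bits hold that table's data size in bytes */
	return info->block_size & 0xffffff;
}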
1831
1832
1833/**
1834 * sep_prepare_empty_lli_table - create a blank LLI table
1835 * @sep: pointer to struct sep_device
1836 * @lli_table_addr_ptr: pointer to lli table
1837 * @num_entries_ptr: pointer to number of entries
1838 * @table_data_size_ptr: pointer to table data size
1839 * @dmatables_region: Optional buffer for DMA tables
1840 * @dma_ctx: DMA context
1841 *
1842 * This function creates empty lli tables when there is no data
1843 */
1844static void sep_prepare_empty_lli_table(struct sep_device *sep,
1845		dma_addr_t *lli_table_addr_ptr,
1846		u32 *num_entries_ptr,
1847		u32 *table_data_size_ptr,
1848		void **dmatables_region,
1849		struct sep_dma_context *dma_ctx)
1850{
1851	struct sep_lli_entry *lli_table_ptr;
1852
1853	/* Find the area for new table */
1854	lli_table_ptr =
1855		(struct sep_lli_entry *)(sep->shared_addr +
1856		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1857		dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1858			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1859
1860	if (dmatables_region && *dmatables_region)
1861		lli_table_ptr = *dmatables_region;
1862
1863	lli_table_ptr->bus_address = 0;
1864	lli_table_ptr->block_size = 0;
1865
1866	lli_table_ptr++;
1867	lli_table_ptr->bus_address = 0xFFFFFFFF;
1868	lli_table_ptr->block_size = 0;
1869
1870	/* Set the output parameter value */
1871	*lli_table_addr_ptr = sep->shared_bus +
1872		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1873		dma_ctx->num_lli_tables_created *
1874		sizeof(struct sep_lli_entry) *
1875		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1876
1877	/* Set the num of entries and table data size for empty table */
1878	*num_entries_ptr = 2;
1879	*table_data_size_ptr = 0;
1880
1881	/* Update the number of created tables */
1882	dma_ctx->num_lli_tables_created++;
1883}
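
/*
 * Resulting layout, for reference: an "empty" table is two entries, a
 * null data entry followed by the usual terminating info entry:
 *
 *	entry 0: bus_address = 0x00000000, block_size = 0  (no data)
 *	entry 1: bus_address = 0xFFFFFFFF, block_size = 0  (info/terminator)
 */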
1884
1885/**
1886 * sep_prepare_input_dma_table - prepare input DMA mappings
1887 * @sep: pointer to struct sep_device
1888 * @data_size: size of the data to map, in bytes
1889 * @block_size: block size of the operation
1890 * @lli_table_ptr: returned bus address of the first LLI table
1891 * @num_entries_ptr: returned number of entries in the first table
1892 * @table_data_size_ptr: returned data size of the first table
1893 * @is_kva: set for kernel data (kernel crypto call)
1894 *
1895 * This function prepares only the input DMA table for synchronous
1896 * symmetric operations (HASH)
1897 * Note that all bus addresses that are passed to the SEP
1898 * are in 32 bit format; the SEP is a 32 bit device
1899 */
1900static int sep_prepare_input_dma_table(struct sep_device *sep,
1901	unsigned long app_virt_addr,
1902	u32 data_size,
1903	u32 block_size,
1904	dma_addr_t *lli_table_ptr,
1905	u32 *num_entries_ptr,
1906	u32 *table_data_size_ptr,
1907	bool is_kva,
1908	void **dmatables_region,
1909	struct sep_dma_context *dma_ctx
1910)
1911{
1912	int error = 0;
1913	/* Pointer to the info entry of the table - the last entry */
1914	struct sep_lli_entry *info_entry_ptr;
1915	/* Array of pointers to page */
1916	struct sep_lli_entry *lli_array_ptr;
1917	/* Points to the first entry to be processed in the lli_in_array */
1918	u32 current_entry = 0;
1919	/* Num entries in the virtual buffer */
1920	u32 sep_lli_entries = 0;
1921	/* Lli table pointer */
1922	struct sep_lli_entry *in_lli_table_ptr;
1923	/* The total data in one table */
1924	u32 table_data_size = 0;
1925	/* Flag for last table */
1926	u32 last_table_flag = 0;
1927	/* Number of entries in lli table */
1928	u32 num_entries_in_table = 0;
1929	/* Next table address */
1930	void *lli_table_alloc_addr = NULL;
1931	void *dma_lli_table_alloc_addr = NULL;
1932	void *dma_in_lli_table_ptr = NULL;
1933
1934	dev_dbg(&sep->pdev->dev, "[PID%d] prepare input dma "
1935				 "tbl data size: (hex) %x\n",
1936					current->pid, data_size);
1937
1938	dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
1939					current->pid, block_size);
1940
1941	/* Initialize the pages pointers */
1942	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
1943	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;
1944
1945	/* Set the kernel address for first table to be allocated */
1946	lli_table_alloc_addr = (void *)(sep->shared_addr +
1947		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1948		dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1949		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1950
1951	if (data_size == 0) {
1952		if (dmatables_region) {
1953			error = sep_allocate_dmatables_region(sep,
1954						dmatables_region,
1955						dma_ctx,
1956						1);
1957			if (error)
1958				return error;
1959		}
1960		/* Special case - create an empty table - 2 entries, zero data */
1961		sep_prepare_empty_lli_table(sep, lli_table_ptr,
1962				num_entries_ptr, table_data_size_ptr,
1963				dmatables_region, dma_ctx);
1964		goto update_dcb_counter;
1965	}
1966
1967	/* Check if the pages are in Kernel Virtual Address layout */
1968	if (is_kva == true)
1969		error = sep_lock_kernel_pages(sep, app_virt_addr,
1970			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
1971			dma_ctx);
1972	else
1973		/*
1974		 * Lock the pages of the user buffer
1975		 * and translate them to an LLI array
1976		 */
1977		error = sep_lock_user_pages(sep, app_virt_addr,
1978			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
1979			dma_ctx);
1980
1981	if (error)
1982		goto end_function;
1983
1984	dev_dbg(&sep->pdev->dev,
1985		"[PID%d] output sep_in_num_pages is (hex) %x\n",
1986		current->pid,
1987		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
1988
1989	current_entry = 0;
1990	info_entry_ptr = NULL;
1991
1992	sep_lli_entries =
1993		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;
1994
1995	dma_lli_table_alloc_addr = lli_table_alloc_addr;
1996	if (dmatables_region) {
1997		error = sep_allocate_dmatables_region(sep,
1998					dmatables_region,
1999					dma_ctx,
2000					sep_lli_entries);
2001		if (error)
2002			return error;
2003		lli_table_alloc_addr = *dmatables_region;
2004	}
2005
2006	/* Loop till all the entries in the input array are processed */
2007	while (current_entry < sep_lli_entries) {
2008
2009		/* Set the new input and output tables */
2010		in_lli_table_ptr =
2011			(struct sep_lli_entry *)lli_table_alloc_addr;
2012		dma_in_lli_table_ptr =
2013			(struct sep_lli_entry *)dma_lli_table_alloc_addr;
2014
2015		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2016			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2017		dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2018			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2019
2020		if (dma_lli_table_alloc_addr >
2021			((void *)sep->shared_addr +
2022			SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2023			SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2024
2025			error = -ENOMEM;
2026			goto end_function_error;
2027
2028		}
2029
2030		/* Update the number of created tables */
2031		dma_ctx->num_lli_tables_created++;
2032
2033		/* Calculate the maximum size of data for input table */
2034		table_data_size = sep_calculate_lli_table_max_size(sep,
2035			&lli_array_ptr[current_entry],
2036			(sep_lli_entries - current_entry),
2037			&last_table_flag);
2038
2039		/*
2040		 * If this is not the last table -
2041		 * then align it to the block size
2042		 */
2043		if (!last_table_flag)
2044			table_data_size =
2045				(table_data_size / block_size) * block_size;
2046
2047		dev_dbg(&sep->pdev->dev,
2048			"[PID%d] output table_data_size is (hex) %x\n",
2049				current->pid,
2050				table_data_size);
2051
2052		/* Construct input lli table */
2053		sep_build_lli_table(sep, &lli_array_ptr[current_entry],
2054			in_lli_table_ptr,
2055			&current_entry, &num_entries_in_table, table_data_size);
2056
2057		if (info_entry_ptr == NULL) {
2058
2059			/* Set the output parameters to physical addresses */
2060			*lli_table_ptr = sep_shared_area_virt_to_bus(sep,
2061				dma_in_lli_table_ptr);
2062			*num_entries_ptr = num_entries_in_table;
2063			*table_data_size_ptr = table_data_size;
2064
2065			dev_dbg(&sep->pdev->dev,
2066				"[PID%d] output lli_table_in_ptr is %08lx\n",
2067				current->pid,
2068				(unsigned long)*lli_table_ptr);
2069
2070		} else {
2071			/* Update the info entry of the previous in table */
2072			info_entry_ptr->bus_address =
2073				sep_shared_area_virt_to_bus(sep,
2074							dma_in_lli_table_ptr);
2075			info_entry_ptr->block_size =
2076				((num_entries_in_table) << 24) |
2077				(table_data_size);
2078		}
2079		/* Save the pointer to the info entry of the current tables */
2080		info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
2081	}
2082	/* Print input tables */
2083	if (!dmatables_region) {
2084		sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
2085			sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
2086			*num_entries_ptr, *table_data_size_ptr);
2087	}
2088
2089	/* The array of the pages */
2090	kfree(lli_array_ptr);
2091
2092update_dcb_counter:
2093	/* Update DCB counter */
2094	dma_ctx->nr_dcb_creat++;
2095	goto end_function;
2096
2097end_function_error:
2098	/* Free all the allocated resources */
2099	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
2100	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
2101	kfree(lli_array_ptr);
2102	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
2103	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2104
2105end_function:
2106	return error;
2107
2108}
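
/*
 * Chaining sketch for the function above: only the first table's bus
 * address is returned to the caller; every later table is reached via
 * the info (last) entry of the table before it, whose block_size packs
 * (num_entries << 24) | table_data_size for the table it points to:
 *
 *	caller -> table 0 ... info -> table 1 ... info -> ... -> 0xffffffff
 */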
2109
2110/**
2111 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
2112 * @sep: pointer to struct sep_device
2113 * @lli_in_array: array of input LLI entries
2114 * @sep_in_lli_entries: number of entries in @lli_in_array
2115 * @lli_out_array: array of output LLI entries
2116 * @sep_out_lli_entries: number of entries in @lli_out_array
2117 * @block_size: block size of the operation
2118 * @lli_table_in_ptr: returned bus address of the first input table
2119 * @lli_table_out_ptr: returned bus address of the first output table
2120 * @in_num_entries_ptr: returned number of entries in the first input table
2121 * @out_num_entries_ptr: returned number of entries in the first output table
2122 * @table_data_size_ptr: returned data size of the first table pair
2123 *
2124 * This function creates the input and output DMA tables for
2125 * symmetric operations (AES/DES) according to the block
2126 * size, from the given LLI arrays
2127 * Note that all bus addresses that are passed to the SEP
2128 * are in 32 bit format; the SEP is a 32 bit device
2129 */
2130static int sep_construct_dma_tables_from_lli(
2131	struct sep_device *sep,
2132	struct sep_lli_entry *lli_in_array,
2133	u32	sep_in_lli_entries,
2134	struct sep_lli_entry *lli_out_array,
2135	u32	sep_out_lli_entries,
2136	u32	block_size,
2137	dma_addr_t *lli_table_in_ptr,
2138	dma_addr_t *lli_table_out_ptr,
2139	u32	*in_num_entries_ptr,
2140	u32	*out_num_entries_ptr,
2141	u32	*table_data_size_ptr,
2142	void	**dmatables_region,
2143	struct sep_dma_context *dma_ctx)
2144{
2145	/* Points to the area where next lli table can be allocated */
2146	void *lli_table_alloc_addr = NULL;
2147	/*
2148	 * Points to the area in shared region where next lli table
2149	 * can be allocated
2150	 */
2151	void *dma_lli_table_alloc_addr = NULL;
2152	/* Input lli table in dmatables_region or shared region */
2153	struct sep_lli_entry *in_lli_table_ptr = NULL;
2154	/* Input lli table location in the shared region */
2155	struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
2156	/* Output lli table in dmatables_region or shared region */
2157	struct sep_lli_entry *out_lli_table_ptr = NULL;
2158	/* Output lli table location in the shared region */
2159	struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
2160	/* Pointer to the info entry of the table - the last entry */
2161	struct sep_lli_entry *info_in_entry_ptr = NULL;
2162	/* Pointer to the info entry of the table - the last entry */
2163	struct sep_lli_entry *info_out_entry_ptr = NULL;
2164	/* Points to the first entry to be processed in the lli_in_array */
2165	u32 current_in_entry = 0;
2166	/* Points to the first entry to be processed in the lli_out_array */
2167	u32 current_out_entry = 0;
2168	/* Max size of the input table */
2169	u32 in_table_data_size = 0;
2170	/* Max size of the output table */
2171	u32 out_table_data_size = 0;
2172	/* Flag that signifies if this is the last table built */
2173	u32 last_table_flag = 0;
2174	/* The data size that should be in table */
2175	u32 table_data_size = 0;
2176	/* Number of entries in the input table */
2177	u32 num_entries_in_table = 0;
2178	/* Number of entries in the output table */
2179	u32 num_entries_out_table = 0;
2180
2181	if (!dma_ctx) {
2182		dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
2183		return -EINVAL;
2184	}
2185
2186	/* Initiate to point after the message area */
2187	lli_table_alloc_addr = (void *)(sep->shared_addr +
2188		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2189		(dma_ctx->num_lli_tables_created *
2190		(sizeof(struct sep_lli_entry) *
2191		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
2192	dma_lli_table_alloc_addr = lli_table_alloc_addr;
2193
2194	if (dmatables_region) {
2195		/* 2 for both in+out table */
2196		if (sep_allocate_dmatables_region(sep,
2197					dmatables_region,
2198					dma_ctx,
2199					2*sep_in_lli_entries))
2200			return -ENOMEM;
2201		lli_table_alloc_addr = *dmatables_region;
2202	}
2203
2204	/* Loop till all the entries in the input array are processed */
2205	while (current_in_entry < sep_in_lli_entries) {
2206		/* Set the new input and output tables */
2207		in_lli_table_ptr =
2208			(struct sep_lli_entry *)lli_table_alloc_addr;
2209		dma_in_lli_table_ptr =
2210			(struct sep_lli_entry *)dma_lli_table_alloc_addr;
2211
2212		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2213			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2214		dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2215			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2216
2217		/* Set the first output tables */
2218		out_lli_table_ptr =
2219			(struct sep_lli_entry *)lli_table_alloc_addr;
2220		dma_out_lli_table_ptr =
2221			(struct sep_lli_entry *)dma_lli_table_alloc_addr;
2222
2223		/* Check if the DMA table area limit was overrun */
2224		if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
2225			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
2226			((void *)sep->shared_addr +
2227			SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2228			SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2229
2230			dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
2231			return -ENOMEM;
2232		}
2233
2234		/* Update the number of the lli tables created */
2235		dma_ctx->num_lli_tables_created += 2;
2236
2237		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2238			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2239		dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2240			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2241
2242		/* Calculate the maximum size of data for input table */
2243		in_table_data_size =
2244			sep_calculate_lli_table_max_size(sep,
2245			&lli_in_array[current_in_entry],
2246			(sep_in_lli_entries - current_in_entry),
2247			&last_table_flag);
2248
2249		/* Calculate the maximum size of data for output table */
2250		out_table_data_size =
2251			sep_calculate_lli_table_max_size(sep,
2252			&lli_out_array[current_out_entry],
2253			(sep_out_lli_entries - current_out_entry),
2254			&last_table_flag);
2255
2256		if (!last_table_flag) {
2257			in_table_data_size = (in_table_data_size /
2258				block_size) * block_size;
2259			out_table_data_size = (out_table_data_size /
2260				block_size) * block_size;
2261		}
2262
2263		table_data_size = in_table_data_size;
2264		if (table_data_size > out_table_data_size)
2265			table_data_size = out_table_data_size;
2266
2267		dev_dbg(&sep->pdev->dev,
2268			"[PID%d] construct tables from lli"
2269			" in_table_data_size is (hex) %x\n", current->pid,
2270			in_table_data_size);
2271
2272		dev_dbg(&sep->pdev->dev,
2273			"[PID%d] construct tables from lli"
2274			" out_table_data_size is (hex) %x\n", current->pid,
2275			out_table_data_size);
2276
2277		/* Construct input lli table */
2278		sep_build_lli_table(sep, &lli_in_array[current_in_entry],
2279			in_lli_table_ptr,
2280			&current_in_entry,
2281			&num_entries_in_table,
2282			table_data_size);
2283
2284		/* Construct output lli table */
2285		sep_build_lli_table(sep, &lli_out_array[current_out_entry],
2286			out_lli_table_ptr,
2287			&current_out_entry,
2288			&num_entries_out_table,
2289			table_data_size);
2290
2291		/* If info entry is null - this is the first table built */
2292		if (info_in_entry_ptr == NULL) {
2293			/* Set the output parameters to physical addresses */
2294			*lli_table_in_ptr =
2295			sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr);
2296
2297			*in_num_entries_ptr = num_entries_in_table;
2298
2299			*lli_table_out_ptr =
2300				sep_shared_area_virt_to_bus(sep,
2301				dma_out_lli_table_ptr);
2302
2303			*out_num_entries_ptr = num_entries_out_table;
2304			*table_data_size_ptr = table_data_size;
2305
2306			dev_dbg(&sep->pdev->dev,
2307				"[PID%d] output lli_table_in_ptr is %08lx\n",
2308				current->pid,
2309				(unsigned long)*lli_table_in_ptr);
2310			dev_dbg(&sep->pdev->dev,
2311				"[PID%d] output lli_table_out_ptr is %08lx\n",
2312				current->pid,
2313				(unsigned long)*lli_table_out_ptr);
2314		} else {
2315			/* Update the info entry of the previous in table */
2316			info_in_entry_ptr->bus_address =
2317				sep_shared_area_virt_to_bus(sep,
2318				dma_in_lli_table_ptr);
2319
2320			info_in_entry_ptr->block_size =
2321				((num_entries_in_table) << 24) |
2322				(table_data_size);
2323
2324			/* Update the info entry of the previous out table */
2325			info_out_entry_ptr->bus_address =
2326				sep_shared_area_virt_to_bus(sep,
2327				dma_out_lli_table_ptr);
2328
2329			info_out_entry_ptr->block_size =
2330				((num_entries_out_table) << 24) |
2331				(table_data_size);
2332
2333			dev_dbg(&sep->pdev->dev,
2334				"[PID%d] output lli_table_in_ptr:%08lx %08x\n",
2335				current->pid,
2336				(unsigned long)info_in_entry_ptr->bus_address,
2337				info_in_entry_ptr->block_size);
2338
2339			dev_dbg(&sep->pdev->dev,
2340				"[PID%d] output lli_table_out_ptr:"
2341				"%08lx  %08x\n",
2342				current->pid,
2343				(unsigned long)info_out_entry_ptr->bus_address,
2344				info_out_entry_ptr->block_size);
2345		}
2346
2347		/* Save the pointer to the info entry of the current tables */
2348		info_in_entry_ptr = in_lli_table_ptr +
2349			num_entries_in_table - 1;
2350		info_out_entry_ptr = out_lli_table_ptr +
2351			num_entries_out_table - 1;
2352
2353		dev_dbg(&sep->pdev->dev,
2354			"[PID%d] output num_entries_out_table is %x\n",
2355			current->pid,
2356			(u32)num_entries_out_table);
2357		dev_dbg(&sep->pdev->dev,
2358			"[PID%d] output info_in_entry_ptr is %lx\n",
2359			current->pid,
2360			(unsigned long)info_in_entry_ptr);
2361		dev_dbg(&sep->pdev->dev,
2362			"[PID%d] output info_out_entry_ptr is %lx\n",
2363			current->pid,
2364			(unsigned long)info_out_entry_ptr);
2365	}
2366
2367	/* Print input tables */
2368	if (!dmatables_region) {
2369		sep_debug_print_lli_tables(
2370			sep,
2371			(struct sep_lli_entry *)
2372			sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
2373			*in_num_entries_ptr,
2374			*table_data_size_ptr);
2375	}
2376
2377	/* Print output tables */
2378	if (!dmatables_region) {
2379		sep_debug_print_lli_tables(
2380			sep,
2381			(struct sep_lli_entry *)
2382			sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
2383			*out_num_entries_ptr,
2384			*table_data_size_ptr);
2385	}
2386
2387	return 0;
2388}
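
/*
 * Worked example of the sizing logic above (numbers are illustrative):
 * with block_size 16, an input chunk of 100 bytes and an output chunk
 * of 80 bytes align down to 96 and 80 respectively; table_data_size
 * becomes the smaller value, 80, so the input and output tables of a
 * pair always describe the same number of bytes.
 */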
2389
2390/**
2391 * sep_prepare_input_output_dma_table - prepare DMA I/O table
2392 * @app_virt_in_addr: virtual address of the input buffer
2393 * @app_virt_out_addr: virtual address of the output buffer
2394 * @data_size: size of the data to map, in bytes
2395 * @block_size: block size of the operation
2396 * @lli_table_in_ptr: returned bus address of the first input table
2397 * @lli_table_out_ptr: returned bus address of the first output table
2398 * @in_num_entries_ptr: returned number of entries in the first input table
2399 * @out_num_entries_ptr: returned number of entries in the first output table
2400 * @table_data_size_ptr: returned data size of the first table pair
2401 * @is_kva: set for kernel data; used only for kernel crypto module
2402 *
2403 * This function builds input and output DMA tables for synchronous
2404 * symmetric operations (AES, DES, HASH). It also ensures that each
2405 * table's data size is a multiple of the block size
2406 * Note that all bus addresses that are passed to the SEP
2407 * are in 32 bit format; the SEP is a 32 bit device
2408 */
2409static int sep_prepare_input_output_dma_table(struct sep_device *sep,
2410	unsigned long app_virt_in_addr,
2411	unsigned long app_virt_out_addr,
2412	u32 data_size,
2413	u32 block_size,
2414	dma_addr_t *lli_table_in_ptr,
2415	dma_addr_t *lli_table_out_ptr,
2416	u32 *in_num_entries_ptr,
2417	u32 *out_num_entries_ptr,
2418	u32 *table_data_size_ptr,
2419	bool is_kva,
2420	void **dmatables_region,
2421	struct sep_dma_context *dma_ctx)
2422
2423{
2424	int error = 0;
2425	/* Array of pointers of page */
2426	struct sep_lli_entry *lli_in_array;
2427	/* Array of pointers of page */
2428	struct sep_lli_entry *lli_out_array;
2429
2430	if (!dma_ctx) {
2431		error = -EINVAL;
2432		goto end_function;
2433	}
2434
2435	if (data_size == 0) {
2436		/* Prepare empty table for input and output */
2437		if (dmatables_region) {
2438			error = sep_allocate_dmatables_region(
2439					sep,
2440					dmatables_region,
2441					dma_ctx,
2442					2);
2443			if (error)
2444				goto end_function;
2445		}
2446		sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
2447			in_num_entries_ptr, table_data_size_ptr,
2448			dmatables_region, dma_ctx);
2449
2450		sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
2451			out_num_entries_ptr, table_data_size_ptr,
2452			dmatables_region, dma_ctx);
2453
2454		goto update_dcb_counter;
2455	}
2456
2457	/* Initialize the pages pointers */
2458	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2459	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2460
2461	/* Lock the pages of the buffer and translate them to an LLI array */
2462	if (is_kva == true) {
2463		dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
2464						current->pid);
2465		error = sep_lock_kernel_pages(sep, app_virt_in_addr,
2466				data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2467				dma_ctx);
2468		if (error) {
2469			dev_warn(&sep->pdev->dev,
2470				"[PID%d] sep_lock_kernel_pages for input "
2471				"virtual buffer failed\n", current->pid);
2472
2473			goto end_function;
2474		}
2475
2476		dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n",
2477						current->pid);
2478		error = sep_lock_kernel_pages(sep, app_virt_out_addr,
2479				data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2480				dma_ctx);
2481
2482		if (error) {
2483			dev_warn(&sep->pdev->dev,
2484				"[PID%d] sep_lock_kernel_pages for output "
2485				"virtual buffer failed\n", current->pid);
2486
2487			goto end_function_free_lli_in;
2488		}
2489
2490	}
2491
2492	else {
2493		dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n",
2494						current->pid);
2495		error = sep_lock_user_pages(sep, app_virt_in_addr,
2496				data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2497				dma_ctx);
2498		if (error) {
2499			dev_warn(&sep->pdev->dev,
2500				"[PID%d] sep_lock_user_pages for input "
2501				"virtual buffer failed\n", current->pid);
2502
2503			goto end_function;
2504		}
2505
2506		if (dma_ctx->secure_dma == true) {
2507			/* secure_dma requires use of non accessible memory */
2508			dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
2509				current->pid);
2510			error = sep_lli_table_secure_dma(sep,
2511				app_virt_out_addr, data_size, &lli_out_array,
2512				SEP_DRIVER_OUT_FLAG, dma_ctx);
2513			if (error) {
2514				dev_warn(&sep->pdev->dev,
2515					"[PID%d] secure dma table setup "
2516					"for output virtual buffer failed\n",
2517					current->pid);
2518
2519				goto end_function_free_lli_in;
2520			}
2521		} else {
2522			/* For normal, non-secure dma */
2523			dev_dbg(&sep->pdev->dev, "[PID%d] not in secure_dma\n",
2524				current->pid);
2525
2526			dev_dbg(&sep->pdev->dev,
2527				"[PID%d] Locking user output pages\n",
2528				current->pid);
2529
2530			error = sep_lock_user_pages(sep, app_virt_out_addr,
2531				data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2532				dma_ctx);
2533
2534			if (error) {
2535				dev_warn(&sep->pdev->dev,
2536					"[PID%d] sep_lock_user_pages"
2537					" for output virtual buffer failed\n",
2538					current->pid);
2539
2540				goto end_function_free_lli_in;
2541			}
2542		}
2543	}
2544
2545	dev_dbg(&sep->pdev->dev, "[PID%d] After lock; prep input output dma "
2546		"table sep_in_num_pages is (hex) %x\n", current->pid,
2547		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
2548
2549	dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n",
2550		current->pid,
2551		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages);
2552
2553	dev_dbg(&sep->pdev->dev, "[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP"
2554		" is (hex) %x\n", current->pid,
2555		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
2556
2557	/* Call the function that creates tables from the lli arrays */
2558	dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n",
2559					current->pid);
2560	error = sep_construct_dma_tables_from_lli(
2561			sep, lli_in_array,
2562			dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2563								in_num_pages,
2564			lli_out_array,
2565			dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2566								out_num_pages,
2567			block_size, lli_table_in_ptr, lli_table_out_ptr,
2568			in_num_entries_ptr, out_num_entries_ptr,
2569			table_data_size_ptr, dmatables_region, dma_ctx);
2570
2571	if (error) {
2572		dev_warn(&sep->pdev->dev,
2573			"[PID%d] sep_construct_dma_tables_from_lli failed\n",
2574			current->pid);
2575		goto end_function_with_error;
2576	}
2577
2578	kfree(lli_out_array);
2579	kfree(lli_in_array);
2580
2581update_dcb_counter:
2582	/* Update DCB counter */
2583	dma_ctx->nr_dcb_creat++;
2584
2585	goto end_function;
2586
2587end_function_with_error:
2588	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array);
2589	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
2590	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array);
2591	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2592	kfree(lli_out_array);
2593
2594
2595end_function_free_lli_in:
2596	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
2597	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
2598	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
2599	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2600	kfree(lli_in_array);
2601
2602end_function:
2603
2604	return error;
2605
2606}
2607
2608/**
2609 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2610 * @app_in_address: unsigned long; for data buffer in (user space)
2611 * @app_out_address: unsigned long; for data buffer out (user space)
2612 * @data_in_size: u32; for size of data
2613 * @block_size: u32; for block size
2614 * @tail_block_size: u32; for size of tail block
2615 * @isapplet: bool; to indicate external app
2616 * @is_kva: bool; kernel buffer; only used for kernel crypto module
2617 * @secure_dma: indicates whether this is secure_dma using IMR
2618 *
2619 * This function prepares the linked DMA tables and puts the
2620 * address of the linked list of tables into a DCB (data control
2621 * block), the address of which is known by the SEP hardware
2622 * Note that all bus addresses that are passed to the SEP
2623 * are in 32 bit format; the SEP is a 32 bit device
2624 */
2625int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2626	unsigned long  app_in_address,
2627	unsigned long  app_out_address,
2628	u32  data_in_size,
2629	u32  block_size,
2630	u32  tail_block_size,
2631	bool isapplet,
2632	bool	is_kva,
2633	bool	secure_dma,
2634	struct sep_dcblock *dcb_region,
2635	void **dmatables_region,
2636	struct sep_dma_context **dma_ctx,
2637	struct scatterlist *src_sg,
2638	struct scatterlist *dst_sg)
2639{
2640	int error = 0;
2641	/* Size of tail */
2642	u32 tail_size = 0;
2643	/* Address of the created DCB table */
2644	struct sep_dcblock *dcb_table_ptr = NULL;
2645	/* The physical address of the first input DMA table */
2646	dma_addr_t in_first_mlli_address = 0;
2647	/* Number of entries in the first input DMA table */
2648	u32  in_first_num_entries = 0;
2649	/* The physical address of the first output DMA table */
2650	dma_addr_t  out_first_mlli_address = 0;
2651	/* Number of entries in the first output DMA table */
2652	u32  out_first_num_entries = 0;
2653	/* Data in the first input/output table */
2654	u32  first_data_size = 0;
2655
2656	dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n",
2657		current->pid, app_in_address);
2658
2659	dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n",
2660		current->pid, app_out_address);
2661
2662	dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n",
2663		current->pid, data_in_size);
2664
2665	dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n",
2666		current->pid, block_size);
2667
2668	dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n",
2669		current->pid, tail_block_size);
2670
2671	dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n",
2672		current->pid, isapplet);
2673
2674	dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n",
2675		current->pid, is_kva);
2676
2677	dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n",
2678		current->pid, src_sg);
2679
2680	dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n",
2681		current->pid, dst_sg);
2682
2683	if (!dma_ctx) {
2684		dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n",
2685						current->pid);
2686		error = -EINVAL;
2687		goto end_function;
2688	}
2689
2690	if (*dma_ctx) {
2691		/* In case there are multiple DCBs for this transaction */
2692		dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already set\n",
2693						current->pid);
2694	} else {
2695		*dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL);
2696		if (!(*dma_ctx)) {
2697			dev_dbg(&sep->pdev->dev,
2698				"[PID%d] Not enough memory for DMA context\n",
2699				current->pid);
2700			error = -ENOMEM;
2701			goto end_function;
2702		}
2703		dev_dbg(&sep->pdev->dev,
2704			"[PID%d] Created DMA context addr at 0x%p\n",
2705			current->pid, *dma_ctx);
2706	}
2707
2708	(*dma_ctx)->secure_dma = secure_dma;
2709
2710	/* these are for kernel crypto only */
2711	(*dma_ctx)->src_sg = src_sg;
2712	(*dma_ctx)->dst_sg = dst_sg;
2713
2714	if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2715		/* No more DCBs to allocate */
2716		dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n",
2717						current->pid);
2718		error = -ENOSPC;
2719		goto end_function_error;
2720	}
2721
2722	/* Allocate new DCB */
2723	if (dcb_region) {
2724		dcb_table_ptr = dcb_region;
2725	} else {
2726		dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2727			SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2728			((*dma_ctx)->nr_dcb_creat *
2729						sizeof(struct sep_dcblock)));
2730	}
2731
2732	/* Set the default values in the DCB */
2733	dcb_table_ptr->input_mlli_address = 0;
2734	dcb_table_ptr->input_mlli_num_entries = 0;
2735	dcb_table_ptr->input_mlli_data_size = 0;
2736	dcb_table_ptr->output_mlli_address = 0;
2737	dcb_table_ptr->output_mlli_num_entries = 0;
2738	dcb_table_ptr->output_mlli_data_size = 0;
2739	dcb_table_ptr->tail_data_size = 0;
2740	dcb_table_ptr->out_vr_tail_pt = 0;
2741
2742	if (isapplet == true) {
2743
2744		/* Check if there is enough data for DMA operation */
2745		if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2746			if (is_kva == true) {
2747				error = -ENODEV;
2748				goto end_function_error;
2749			} else {
2750				if (copy_from_user(dcb_table_ptr->tail_data,
2751					(void __user *)app_in_address,
2752					data_in_size)) {
2753					error = -EFAULT;
2754					goto end_function_error;
2755				}
2756			}
2757
2758			dcb_table_ptr->tail_data_size = data_in_size;
2759
2760			/* Set the output user-space address for mem2mem op */
2761			if (app_out_address)
2762				dcb_table_ptr->out_vr_tail_pt =
2763				(aligned_u64)app_out_address;
2764
2765			/*
2766			 * Update both data length parameters in order to avoid
2767			 * second data copy and allow building of empty mlli
2768			 * tables
2769			 */
2770			tail_size = 0x0;
2771			data_in_size = 0x0;
2772
2773		} else {
2774			if (!app_out_address) {
2775				tail_size = data_in_size % block_size;
2776				if (!tail_size) {
2777					if (tail_block_size == block_size)
2778						tail_size = block_size;
2779				}
2780			} else {
2781				tail_size = 0;
2782			}
2783		}
2784		if (tail_size) {
2785			if (tail_size > sizeof(dcb_table_ptr->tail_data))
2786				return -EINVAL;
2787			if (is_kva == true) {
2788				error = -ENODEV;
2789				goto end_function_error;
2790			} else {
2791				/* We have tail data - copy it to DCB */
2792				if (copy_from_user(dcb_table_ptr->tail_data,
2793					(void __user *)(app_in_address +
2794					data_in_size - tail_size), tail_size)) {
2795					error = -EFAULT;
2796					goto end_function_error;
2797				}
2798			}
2799			if (app_out_address)
2800				/*
2801				 * Calculate the output address
2802				 * according to tail data size
2803				 */
2804				dcb_table_ptr->out_vr_tail_pt =
2805					(aligned_u64)app_out_address +
2806					data_in_size - tail_size;
2807
2808			/* Save the real tail data size */
2809			dcb_table_ptr->tail_data_size = tail_size;
2810			/*
2811			 * Update the data size without the tail
2812			 * data size AKA data for the dma
2813			 */
2814			data_in_size = (data_in_size - tail_size);
2815		}
2816	}
2817	/* Check if we need to build only input table or input/output */
2818	if (app_out_address) {
2819		/* Prepare input/output tables */
2820		error = sep_prepare_input_output_dma_table(sep,
2821				app_in_address,
2822				app_out_address,
2823				data_in_size,
2824				block_size,
2825				&in_first_mlli_address,
2826				&out_first_mlli_address,
2827				&in_first_num_entries,
2828				&out_first_num_entries,
2829				&first_data_size,
2830				is_kva,
2831				dmatables_region,
2832				*dma_ctx);
2833	} else {
2834		/* Prepare input tables */
2835		error = sep_prepare_input_dma_table(sep,
2836				app_in_address,
2837				data_in_size,
2838				block_size,
2839				&in_first_mlli_address,
2840				&in_first_num_entries,
2841				&first_data_size,
2842				is_kva,
2843				dmatables_region,
2844				*dma_ctx);
2845	}
2846
2847	if (error) {
2848		dev_warn(&sep->pdev->dev,
2849			"prepare DMA table call failed "
2850			"from prepare DCB call\n");
2851		goto end_function_error;
2852	}
2853
2854	/* Set the DCB values */
2855	dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2856	dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2857	dcb_table_ptr->input_mlli_data_size = first_data_size;
2858	dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2859	dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2860	dcb_table_ptr->output_mlli_data_size = first_data_size;
2861
2862	goto end_function;
2863
2864end_function_error:
2865	kfree(*dma_ctx);
2866	*dma_ctx = NULL;
2867
2868end_function:
2869	return error;
2870
2871}
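
/*
 * Tail sizing above, by example (numbers are illustrative): for an
 * applet request with no output buffer, data_in_size 100 and block_size
 * 16 give tail_size 4, so 96 bytes go through DMA and the last 4 bytes
 * are copied into the DCB's tail_data. If data_in_size were 96 (an
 * exact multiple) and tail_block_size equalled block_size, one full
 * 16-byte block would be treated as tail instead.
 */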
2872
2873
2874/**
2875 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2876 * @sep: pointer to struct sep_device
2877 * @isapplet: indicates external application (used for kernel access)
2878 * @is_kva: indicates kernel addresses (only used for kernel crypto)
2879 *
2880 * This function frees the DMA tables and DCB
2881 */
2882static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2883	bool is_kva, struct sep_dma_context **dma_ctx)
2884{
2885	struct sep_dcblock *dcb_table_ptr;
2886	unsigned long pt_hold;
2887	void *tail_pt;
2888
2889	int i = 0;
2890	int error = 0;
2891	int error_temp = 0;
2892
2893	dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n",
2894					current->pid);
2895
2896	if (dma_ctx && *dma_ctx && ((*dma_ctx)->secure_dma == false) && (isapplet == true)) {
2897		dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
2898			current->pid);
2899
2900		/* Tail stuff is only for non secure_dma */
2901		/* Set pointer to first DCB table */
2902		dcb_table_ptr = (struct sep_dcblock *)
2903			(sep->shared_addr +
2904			SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2905
2906		/*
2907		 * Go over each DCB and see if
2908		 * tail pointer must be updated
2909		 */
2910		for (i = 0; dma_ctx && *dma_ctx &&
2911			i < (*dma_ctx)->nr_dcb_creat; i++, dcb_table_ptr++) {
2912			if (dcb_table_ptr->out_vr_tail_pt) {
2913				pt_hold = (unsigned long)dcb_table_ptr->
2914					out_vr_tail_pt;
2915				tail_pt = (void *)pt_hold;
2916				if (is_kva == true) {
2917					error = -ENODEV;
2918					break;
2919				} else {
2920					error_temp = copy_to_user(
2921						(void __user *)tail_pt,
2922						dcb_table_ptr->tail_data,
2923						dcb_table_ptr->tail_data_size);
2924				}
2925				if (error_temp) {
2926					/* Release the DMA resource */
2927					error = -EFAULT;
2928					break;
2929				}
2930			}
2931		}
2932	}
2933
2934	/* Free the output pages, if any */
2935	sep_free_dma_table_data_handler(sep, dma_ctx);
2936
2937	dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n",
2938					current->pid);
2939
2940	return error;
2941}
2942
2943/**
2944 * sep_prepare_dcb_handler - prepare a control block
2945 * @sep: pointer to struct sep_device
2946 * @arg: pointer to user parameters
2947 * @secure_dma: indicate whether we are using secure_dma on IMR
2948 *
2949 * This function copies the DCB build arguments from user space and
2950 * prepares the DCB and the DMA tables for the transaction.
2951 */
2952static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg,
2953				   bool secure_dma,
2954				   struct sep_dma_context **dma_ctx)
2955{
2956	int error;
2957	/* Command arguments; on stack, as a static buffer would race */
2958	struct build_dcb_struct command_args;
2959
2960	/* Get the command arguments */
2961	if (copy_from_user(&command_args, (void __user *)arg,
2962					sizeof(struct build_dcb_struct))) {
2963		error = -EFAULT;
2964		goto end_function;
2965	}
2966
2967	dev_dbg(&sep->pdev->dev,
2968		"[PID%d] prep dcb handler app_in_address is %08llx\n",
2969			current->pid, command_args.app_in_address);
2970	dev_dbg(&sep->pdev->dev,
2971		"[PID%d] app_out_address is %08llx\n",
2972			current->pid, command_args.app_out_address);
2973	dev_dbg(&sep->pdev->dev,
2974		"[PID%d] data_size is %x\n",
2975			current->pid, command_args.data_in_size);
2976	dev_dbg(&sep->pdev->dev,
2977		"[PID%d] block_size is %x\n",
2978			current->pid, command_args.block_size);
2979	dev_dbg(&sep->pdev->dev,
2980		"[PID%d] tail block_size is %x\n",
2981			current->pid, command_args.tail_block_size);
2982	dev_dbg(&sep->pdev->dev,
2983		"[PID%d] is_applet is %x\n",
2984			current->pid, command_args.is_applet);
2985
2986	if (!command_args.app_in_address) {
2987		dev_warn(&sep->pdev->dev,
2988			"[PID%d] null app_in_address\n", current->pid);
2989		error = -EINVAL;
2990		goto end_function;
2991	}
2992
2993	error = sep_prepare_input_output_dma_table_in_dcb(sep,
2994			(unsigned long)command_args.app_in_address,
2995			(unsigned long)command_args.app_out_address,
2996			command_args.data_in_size, command_args.block_size,
2997			command_args.tail_block_size,
2998			command_args.is_applet, false,
2999			secure_dma, NULL, NULL, dma_ctx, NULL, NULL);
3000
3001end_function:
3002	return error;
3003
3004}
3005
3006/**
3007 * sep_free_dcb_handler - free control block resources
3008 * @sep: pointer to struct sep_device
3009 *
3010 * This function frees the DCB resources and updates the needed
3011 * user-space buffers.
3012 */
3013static int sep_free_dcb_handler(struct sep_device *sep,
3014				struct sep_dma_context **dma_ctx)
3015{
3016	if (!dma_ctx || !(*dma_ctx)) {
3017		dev_dbg(&sep->pdev->dev,
3018			"[PID%d] no dma context defined, nothing to free\n",
3019			current->pid);
3020		return -EINVAL;
3021	}
3022
3023	dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
3024		current->pid,
3025		(*dma_ctx)->nr_dcb_creat);
3026
3027	return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
3028}
3029
3030/**
3031 * sep_ioctl - ioctl handler for sep device
3032 * @filp: pointer to struct file
3033 * @cmd: command
3034 * @arg: pointer to argument structure
3035 *
3036 * Implement the ioctl methods available on the SEP device.
3037 */
3038static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3039{
3040	struct sep_private_data * const private_data = filp->private_data;
3041	struct sep_call_status *call_status = &private_data->call_status;
3042	struct sep_device *sep = private_data->device;
3043	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
3044	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
3045	int error = 0;
3046
3047	dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n",
3048		current->pid, cmd);
3049	dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n",
3050		current->pid, *dma_ctx);
3051
3052	/* Make sure we own this device */
3053	error = sep_check_transaction_owner(sep);
3054	if (error) {
3055		dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n",
3056			current->pid);
3057		goto end_function;
3058	}
3059
3060	/* Check that sep_mmap has been called before */
3061	if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET,
3062				&call_status->status)) {
3063		dev_dbg(&sep->pdev->dev,
3064			"[PID%d] mmap not called\n", current->pid);
3065		error = -EPROTO;
3066		goto end_function;
3067	}
3068
3069	/* Check that the command is for SEP device */
3070	if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
3071		error = -ENOTTY;
3072		goto end_function;
3073	}
3074
3075	switch (cmd) {
3076	case SEP_IOCSENDSEPCOMMAND:
3077		dev_dbg(&sep->pdev->dev,
3078			"[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
3079			current->pid);
3080		if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3081				  &call_status->status)) {
3082			dev_warn(&sep->pdev->dev,
3083				"[PID%d] send msg already done\n",
3084				current->pid);
3085			error = -EPROTO;
3086			goto end_function;
3087		}
3088		/* Send command to SEP */
3089		error = sep_send_command_handler(sep);
3090		if (!error)
3091			set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3092				&call_status->status);
3093		dev_dbg(&sep->pdev->dev,
3094			"[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
3095			current->pid);
3096		break;
3097	case SEP_IOCENDTRANSACTION:
3098		dev_dbg(&sep->pdev->dev,
3099			"[PID%d] SEP_IOCENDTRANSACTION start\n",
3100			current->pid);
3101		error = sep_end_transaction_handler(sep, dma_ctx, call_status,
3102						    my_queue_elem);
3103		dev_dbg(&sep->pdev->dev,
3104			"[PID%d] SEP_IOCENDTRANSACTION end\n",
3105			current->pid);
3106		break;
3107	case SEP_IOCPREPAREDCB:
3108		dev_dbg(&sep->pdev->dev,
3109			"[PID%d] SEP_IOCPREPAREDCB start\n",
3110			current->pid);
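		/* Fall through - the common handling below checks cmd */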
3111	case SEP_IOCPREPAREDCB_SECURE_DMA:
3112		dev_dbg(&sep->pdev->dev,
3113			"[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
3114			current->pid);
3115		if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3116				  &call_status->status)) {
3117			dev_dbg(&sep->pdev->dev,
3118				"[PID%d] dcb prep needed before send msg\n",
3119				current->pid);
3120			error = -EPROTO;
3121			goto end_function;
3122		}
3123
3124		if (!arg) {
3125			dev_dbg(&sep->pdev->dev,
3126				"[PID%d] dcb null arg\n", current->pid);
3127			error = -EINVAL;
3128			goto end_function;
3129		}
3130
3131		if (cmd == SEP_IOCPREPAREDCB) {
3132			/* No secure dma */
3133			dev_dbg(&sep->pdev->dev,
3134				"[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
3135				current->pid);
3136
3137			error = sep_prepare_dcb_handler(sep, arg, false,
3138				dma_ctx);
3139		} else {
3140			/* Secure dma */
3141			dev_dbg(&sep->pdev->dev,
3142				"[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA (with secure_dma)\n",
3143				current->pid);
3144
3145			error = sep_prepare_dcb_handler(sep, arg, true,
3146				dma_ctx);
3147		}
3148		dev_dbg(&sep->pdev->dev, "[PID%d] dcb's end\n",
3149			current->pid);
3150		break;
3151	case SEP_IOCFREEDCB:
3152		dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB start\n",
3153			current->pid);
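		/* Fall through - both free variants share the handler below */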
3154	case SEP_IOCFREEDCB_SECURE_DMA:
3155		dev_dbg(&sep->pdev->dev,
3156			"[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
3157			current->pid);
3158		error = sep_free_dcb_handler(sep, dma_ctx);
3159		dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n",
3160			current->pid);
3161		break;
3162	default:
3163		error = -ENOTTY;
3164		dev_dbg(&sep->pdev->dev, "[PID%d] default end\n",
3165			current->pid);
3166		break;
3167	}
3168
3169end_function:
3170	dev_dbg(&sep->pdev->dev, "[PID%d] ioctl end\n", current->pid);
3171
3172	return error;
3173}
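
/*
 * Expected user-space call sequence, pieced together from the checks in
 * sep_ioctl() above (a sketch only; "fd" is a hypothetical descriptor
 * for the SEP device node):
 *
 *	mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, SEP_IOCPREPAREDCB, &dcb_args);   -- before send, per DCB
 *	ioctl(fd, SEP_IOCSENDSEPCOMMAND, 0);       -- once per transaction
 *	... wait for the SEP reply ...
 *	ioctl(fd, SEP_IOCFREEDCB, 0);
 *	ioctl(fd, SEP_IOCENDTRANSACTION, 0);
 */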
3174
3175/**
3176 * sep_inthandler - interrupt handler for sep device
3177 * @irq: interrupt
3178 * @dev_id: device id
3179 */
3180static irqreturn_t sep_inthandler(int irq, void *dev_id)
3181{
3182	unsigned long lock_irq_flag;
3183	u32 reg_val, reg_val2 = 0;
3184	struct sep_device *sep = dev_id;
3185	irqreturn_t int_error = IRQ_HANDLED;
3186
3187	/* Are we in power save? */
3188#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
3189	if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
3190		dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
3191		return IRQ_NONE;
3192	}
3193#endif
3194
3195	if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
3196		dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
3197		return IRQ_NONE;
3198	}
3199
3200	/* Read the IRR register to check if this is a SEP interrupt */
3201	reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
3202
3203	dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);
3204
3205	if (reg_val & (0x1 << 13)) {
3206
3207		/* Lock and update the counter of reply messages */
3208		spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
3209		sep->reply_ct++;
3210		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
3211
3212		dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
3213					sep->send_ct, sep->reply_ct);
3214
3215		/* Is this a kernel client request */
3216		if (sep->in_kernel) {
3217			tasklet_schedule(&sep->finish_tasklet);
3218			goto finished_interrupt;
3219		}
3220
3221		/* Is this printf or daemon request? */
3222		reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
3223		dev_dbg(&sep->pdev->dev,
3224			"SEP Interrupt - GPR2 is %08x\n", reg_val2);
3225
3226		clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
3227
3228		if ((reg_val2 >> 30) & 0x1) {
3229			dev_dbg(&sep->pdev->dev, "int: printf request\n");
3230		} else if (reg_val2 >> 31) {
3231			dev_dbg(&sep->pdev->dev, "int: daemon request\n");
3232		} else {
3233			dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
3234			wake_up(&sep->event_interrupt);
3235		}
3236	} else {
3237		dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
3238		int_error = IRQ_NONE;
3239	}
3240
3241finished_interrupt:
3242
3243	if (int_error == IRQ_HANDLED)
3244		sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
3245
3246	return int_error;
3247}
3248
3249/**
3250 * sep_reconfig_shared_area - reconfigure shared area
3251 * @sep: pointer to struct sep_device
3252 *
3253 * Reconfig the shared area between HOST and SEP - needed in case
3254 * the DX_CC_Init function was called before OS loading.
3255 */
3256static int sep_reconfig_shared_area(struct sep_device *sep)
3257{
3258	int ret_val;
3259
3260	/* use to limit waiting for SEP */
3261	unsigned long end_time;
3262
3263	/* Send the new SHARED MESSAGE AREA to the SEP */
3264	dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
3265				(unsigned long long)sep->shared_bus);
3266
3267	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
3268
3269	/* Poll for SEP response */
3270	ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3271
3272	end_time = jiffies + (WAIT_TIME * HZ);
3273
3274	while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
3275		(ret_val != sep->shared_bus))
3276		ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3277
3278	/* Check the return value (register) */
3279	if (ret_val != sep->shared_bus) {
3280		dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
3281		dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
3282		ret_val = -ENOMEM;
3283	} else
3284		ret_val = 0;
3285
3286	dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
3287
3288	return ret_val;
3289}
3290
3291/**
3292 *	sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
3293 *						contexts into use
3294 *	@sep: SEP device
3295 *	@dcb_region: DCB region copy
3296 *	@dmatables_region: MLLI/DMA tables copy
3297 *	@dma_ctx: DMA context for current transaction
3298 */
3299ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
3300					struct sep_dcblock **dcb_region,
3301					void **dmatables_region,
3302					struct sep_dma_context *dma_ctx)
3303{
3304	void *dmaregion_free_start = NULL;
3305	void *dmaregion_free_end = NULL;
3306	void *dcbregion_free_start = NULL;
3307	void *dcbregion_free_end = NULL;
3308	ssize_t error = 0;
3309
3310	dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n",
3311		current->pid);
3312
3313	if (1 > dma_ctx->nr_dcb_creat) {
3314		dev_warn(&sep->pdev->dev,
3315			 "[PID%d] invalid number of dcbs to activate 0x%08X\n",
3316			 current->pid, dma_ctx->nr_dcb_creat);
3317		error = -EINVAL;
3318		goto end_function;
3319	}
3320
3321	dmaregion_free_start = sep->shared_addr
3322				+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES;
3323	dmaregion_free_end = dmaregion_free_start
3324				+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
3325
3326	if (dmaregion_free_start
3327	     + dma_ctx->dmatables_len > dmaregion_free_end) {
3328		error = -ENOMEM;
3329		goto end_function;
3330	}
3331	memcpy(dmaregion_free_start,
3332	       *dmatables_region,
3333	       dma_ctx->dmatables_len);
3334	/* Free MLLI table copy */
3335	kfree(*dmatables_region);
3336	*dmatables_region = NULL;
3337
3338	/* Copy the thread's DCB table copy to the DCB table region */
3339	dcbregion_free_start = sep->shared_addr +
3340				SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES;
3341	dcbregion_free_end = dcbregion_free_start +
3342				(SEP_MAX_NUM_SYNC_DMA_OPS *
3343					sizeof(struct sep_dcblock)) - 1;
3344
3345	if (dcbregion_free_start
3346	     + (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock))
3347	     > dcbregion_free_end) {
3348		error = -ENOMEM;
3349		goto end_function;
3350	}
3351
3352	memcpy(dcbregion_free_start,
3353	       *dcb_region,
3354	       dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock));
3355
3356	/* Print the tables */
3357	dev_dbg(&sep->pdev->dev, "activate: input table\n");
3358	sep_debug_print_lli_tables(sep,
3359		(struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3360		(*dcb_region)->input_mlli_address),
3361		(*dcb_region)->input_mlli_num_entries,
3362		(*dcb_region)->input_mlli_data_size);
3363
3364	dev_dbg(&sep->pdev->dev, "activate: output table\n");
3365	sep_debug_print_lli_tables(sep,
3366		(struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3367		(*dcb_region)->output_mlli_address),
3368		(*dcb_region)->output_mlli_num_entries,
3369		(*dcb_region)->output_mlli_data_size);
3370
3371	dev_dbg(&sep->pdev->dev,
3372		 "[PID%d] printing activated tables\n", current->pid);
3373
3374end_function:
3375	kfree(*dmatables_region);
3376	*dmatables_region = NULL;
3377
3378	kfree(*dcb_region);
3379	*dcb_region = NULL;
3380
3381	return error;
3382}
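
/*
 * Shared-area regions used above, for orientation (offsets are the
 * driver's configuration constants, not literal values):
 *
 *	message area:     shared_addr + 0
 *	DCB table region: shared_addr + SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES
 *	MLLI/DMA tables:  shared_addr + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
 */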
3383
3384/**
3385 *	sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
3386 *	@sep: SEP device
3387 *	@dcb_region: DCB region buf to create for current transaction
3388 *	@dmatables_region: MLLI/DMA tables buf to create for current transaction
3389 *	@dma_ctx: DMA context buf to create for current transaction
3390 *	@user_dcb_args: User arguments for DCB/MLLI creation
3391 *	@num_dcbs: Number of DCBs to create
3392 *	@secure_dma: Indicate use of IMR restricted memory secure dma
3393 */
3394static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
3395			struct sep_dcblock **dcb_region,
3396			void **dmatables_region,
3397			struct sep_dma_context **dma_ctx,
3398			const struct build_dcb_struct __user *user_dcb_args,
3399			const u32 num_dcbs, bool secure_dma)
3400{
3401	int error = 0;
3402	int i = 0;
3403	struct build_dcb_struct *dcb_args = NULL;
3404
3405	dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3406		current->pid);
3407
3408	if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) {
3409		error = -EINVAL;
3410		goto end_function;
3411	}
3412
3413	if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3414		dev_warn(&sep->pdev->dev,
3415			 "[PID%d] invalid number of dcbs 0x%08X\n",
3416			 current->pid, num_dcbs);
3417		error = -EINVAL;
3418		goto end_function;
3419	}
3420
3421	dcb_args = kzalloc(num_dcbs * sizeof(struct build_dcb_struct),
3422			   GFP_KERNEL);
3423	if (!dcb_args) {
3424		dev_warn(&sep->pdev->dev, "[PID%d] no memory for dcb args\n",
3425			 current->pid);
3426		error = -ENOMEM;
3427		goto end_function;
3428	}
3429
3430	if (copy_from_user(dcb_args,
3431			user_dcb_args,
3432			num_dcbs * sizeof(struct build_dcb_struct))) {
3433		error = -EINVAL;
3434		goto end_function;
3435	}
3436
3437	/* Allocate thread-specific memory for DCB */
3438	*dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
3439			      GFP_KERNEL);
3440	if (!(*dcb_region)) {
3441		error = -ENOMEM;
3442		goto end_function;
3443	}
3444
3445	/* Prepare DCB and MLLI table into the allocated regions */
3446	for (i = 0; i < num_dcbs; i++) {
3447		error = sep_prepare_input_output_dma_table_in_dcb(sep,
3448				(unsigned long)dcb_args[i].app_in_address,
3449				(unsigned long)dcb_args[i].app_out_address,
3450				dcb_args[i].data_in_size,
3451				dcb_args[i].block_size,
3452				dcb_args[i].tail_block_size,
3453				dcb_args[i].is_applet,
3454				false, secure_dma,
3455				*dcb_region, dmatables_region,
3456				dma_ctx,
3457				NULL,
3458				NULL);
3459		if (error) {
3460			dev_warn(&sep->pdev->dev,
3461				 "[PID%d] dma table creation failed\n",
3462				 current->pid);
3463			goto end_function;
3464		}
3465
3466		if (dcb_args[i].app_in_address != 0)
3467			(*dma_ctx)->input_data_len += dcb_args[i].data_in_size;
3468	}
3469
3470end_function:
3471	kfree(dcb_args);
3472	return error;
3473
3474}
3475
3476/**
3477 *	sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
3478 *      for kernel crypto
3479 *	@sep: SEP device
3480 *	@dcb_region: DCB region buf to create for current transaction
3481 *	@dmatables_region: MLLI/DMA tables buf to create for current transaction
3482 *	@dma_ctx: DMA context buf to create for current transaction
3483 *	@user_dcb_args: User arguments for DCB/MLLI creation
3484 *	@num_dcbs: Number of DCBs to create
3485 *	This does the same thing as sep_create_dcb_dmatables_context
3486 *	except that it is used only for the kernel crypto operation. It is
3487 *	separate because there is no user data involved; the dcb data structure
3488 *	is specific for kernel crypto (build_dcb_struct_kernel)
3489 */
3490int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
3491			struct sep_dcblock **dcb_region,
3492			void **dmatables_region,
3493			struct sep_dma_context **dma_ctx,
3494			const struct build_dcb_struct_kernel *dcb_data,
3495			const u32 num_dcbs)
3496{
3497	int error = 0;
3498	int i = 0;
3499
3500	dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3501		current->pid);
3502
3503	if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) {
3504		error = -EINVAL;
3505		goto end_function;
3506	}
3507
3508	if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3509		dev_warn(&sep->pdev->dev,
3510			 "[PID%d] invalid number of dcbs 0x%08X\n",
3511			 current->pid, num_dcbs);
3512		error = -EINVAL;
3513		goto end_function;
3514	}
3515
 3516	dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %u\n",
3517		current->pid, num_dcbs);
3518
3519	/* Allocate thread-specific memory for DCB */
 3520	*dcb_region = kcalloc(num_dcbs, sizeof(struct sep_dcblock),
 3521			      GFP_KERNEL);
3522	if (!(*dcb_region)) {
3523		error = -ENOMEM;
3524		goto end_function;
3525	}
3526
3527	/* Prepare DCB and MLLI table into the allocated regions */
3528	for (i = 0; i < num_dcbs; i++) {
3529		error = sep_prepare_input_output_dma_table_in_dcb(sep,
3530				(unsigned long)dcb_data->app_in_address,
3531				(unsigned long)dcb_data->app_out_address,
3532				dcb_data->data_in_size,
3533				dcb_data->block_size,
3534				dcb_data->tail_block_size,
3535				dcb_data->is_applet,
3536				true,
3537				false,
3538				*dcb_region, dmatables_region,
3539				dma_ctx,
3540				dcb_data->src_sg,
3541				dcb_data->dst_sg);
3542		if (error) {
3543			dev_warn(&sep->pdev->dev,
3544				 "[PID%d] dma table creation failed\n",
3545				 current->pid);
3546			goto end_function;
3547		}
3548	}
3549
3550end_function:
3551	return error;
3552
3553}
3554
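/*
 * Illustrative sketch (not part of the driver): the kernel-crypto caller
 * (sep_crypto.c) describes its buffers with scatterlists instead of user
 * addresses; note that is_kva is passed as true above. A hypothetical
 * single-DCB setup, using only the fields dereferenced above, could be:
 *
 *	struct build_dcb_struct_kernel kdcb = {0};
 *
 *	kdcb.app_in_address  = (unsigned long)in_kva;  // kernel VA
 *	kdcb.app_out_address = (unsigned long)out_kva; // kernel VA
 *	kdcb.data_in_size    = nbytes;
 *	kdcb.block_size      = crypto_tfm_alg_blocksize(tfm);
 *	kdcb.tail_block_size = 0;
 *	kdcb.is_applet       = 0;
 *	kdcb.src_sg          = req->src;               // struct scatterlist *
 *	kdcb.dst_sg          = req->dst;
 *
 *	error = sep_create_dcb_dmatables_context_kernel(sep, &dcb_region,
 *			&dmatables_region, &dma_ctx, &kdcb, 1);
 */
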
3555/**
3556 *	sep_activate_msgarea_context - Takes the message area context into use
3557 *	@sep: SEP device
3558 *	@msg_region: Message area context buf
3559 *	@msg_len: Message area context buffer size
3560 */
3561static ssize_t sep_activate_msgarea_context(struct sep_device *sep,
3562					    void **msg_region,
3563					    const size_t msg_len)
3564{
3565	dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n",
3566		current->pid);
3567
3568	if (!msg_region || !(*msg_region) ||
3569	    SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) {
3570		dev_warn(&sep->pdev->dev,
3571			 "[PID%d] invalid act msgarea len 0x%08zX\n",
3572			 current->pid, msg_len);
3573		return -EINVAL;
3574	}
3575
3576	memcpy(sep->shared_addr, *msg_region, msg_len);
3577
3578	return 0;
3579}
3580
3581/**
3582 *	sep_create_msgarea_context - Creates message area context
3583 *	@sep: SEP device
3584 *	@msg_region: Msg area region buf to create for current transaction
3585 *	@msg_user: Content for msg area region from user
3586 *	@msg_len: Message area size
3587 */
3588static ssize_t sep_create_msgarea_context(struct sep_device *sep,
3589					  void **msg_region,
3590					  const void __user *msg_user,
3591					  const size_t msg_len)
3592{
3593	int error = 0;
3594
3595	dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n",
3596		current->pid);
3597
3598	if (!msg_region ||
3599	    !msg_user ||
3600	    SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len ||
3601	    SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) {
3602		dev_warn(&sep->pdev->dev,
3603			 "[PID%d] invalid creat msgarea len 0x%08zX\n",
3604			 current->pid, msg_len);
3605		error = -EINVAL;
3606		goto end_function;
3607	}
3608
3609	/* Allocate thread-specific memory for message buffer */
3610	*msg_region = kzalloc(msg_len, GFP_KERNEL);
3611	if (!(*msg_region)) {
3612		dev_warn(&sep->pdev->dev,
3613			 "[PID%d] no mem for msgarea context\n",
3614			 current->pid);
3615		error = -ENOMEM;
3616		goto end_function;
3617	}
3618
3619	/* Copy input data to write() to allocated message buffer */
3620	if (copy_from_user(*msg_region, msg_user, msg_len)) {
 3621		error = -EFAULT;
3622		goto end_function;
3623	}
3624
3625end_function:
3626	if (error && msg_region) {
3627		kfree(*msg_region);
3628		*msg_region = NULL;
3629	}
3630
3631	return error;
3632}
3633
3634
3635/**
3636 *	sep_read - Returns results of an operation for fastcall interface
3637 *	@filp: File pointer
3638 *	@buf_user: User buffer for storing results
3639 *	@count_user: User buffer size
3640 *	@offset: File offset, not supported
3641 *
3642 *	The implementation does not support reading in chunks, all data must be
3643 *	consumed during a single read system call.
3644 */
3645static ssize_t sep_read(struct file *filp,
3646			char __user *buf_user, size_t count_user,
3647			loff_t *offset)
3648{
3649	struct sep_private_data * const private_data = filp->private_data;
3650	struct sep_call_status *call_status = &private_data->call_status;
3651	struct sep_device *sep = private_data->device;
3652	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
3653	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
3654	ssize_t error = 0, error_tmp = 0;
3655
3656	/* Am I the process that owns the transaction? */
3657	error = sep_check_transaction_owner(sep);
3658	if (error) {
3659		dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n",
3660			current->pid);
3661		goto end_function;
3662	}
3663
 3664	/* Check that the user has called the necessary APIs */
3665	if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET,
3666			&call_status->status)) {
3667		dev_warn(&sep->pdev->dev,
3668			 "[PID%d] fastcall write not called\n",
3669			 current->pid);
3670		error = -EPROTO;
3671		goto end_function_error;
3672	}
3673
3674	if (!buf_user) {
3675		dev_warn(&sep->pdev->dev,
3676			 "[PID%d] null user buffer\n",
3677			 current->pid);
3678		error = -EINVAL;
3679		goto end_function_error;
3680	}
3681
3682
3683	/* Wait for SEP to finish */
3684	wait_event(sep->event_interrupt,
3685		   test_bit(SEP_WORKING_LOCK_BIT,
3686			    &sep->in_use_flags) == 0);
3687
3688	sep_dump_message(sep);
3689
3690	dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08zX\n",
3691		current->pid, count_user);
3692
 3693	/* In case the user has allocated a bigger buffer */
3694	if (count_user > SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
3695		count_user = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES;
3696
3697	if (copy_to_user(buf_user, sep->shared_addr, count_user)) {
3698		error = -EFAULT;
3699		goto end_function_error;
3700	}
3701
3702	dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid);
3703	error = count_user;
3704
3705end_function_error:
3706	/* Copy possible tail data to user and free DCB and MLLIs */
3707	error_tmp = sep_free_dcb_handler(sep, dma_ctx);
3708	if (error_tmp)
3709		dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n",
3710			current->pid);
3711
3712	/* End the transaction, wakeup pending ones */
3713	error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status,
3714		my_queue_elem);
3715	if (error_tmp)
3716		dev_warn(&sep->pdev->dev,
3717			 "[PID%d] ending transaction failed\n",
3718			 current->pid);
3719
3720end_function:
3721	return error;
3722}
3723
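/*
 * Illustrative user-space sketch (not part of the driver): since results
 * cannot be read in chunks, the caller should offer a buffer that covers
 * the whole reply in a single read(); the capping logic above truncates
 * anything beyond the shared message area size. The buffer size and the
 * descriptor sep_fd are hypothetical.
 *
 *	unsigned char reply[8192];   // >= shared message area size (assumed)
 *	ssize_t n = read(sep_fd, reply, sizeof(reply));
 *	if (n < 0)
 *		perror("sep read");
 */
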
3724/**
3725 *	sep_fastcall_args_get - Gets fastcall params from user
 3726 *	@sep: SEP device
3727 *	@args: Parameters buffer
3728 *	@buf_user: User buffer for operation parameters
3729 *	@count_user: User buffer size
3730 */
3731static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
3732					    struct sep_fastcall_hdr *args,
3733					    const char __user *buf_user,
3734					    const size_t count_user)
3735{
3736	ssize_t error = 0;
3737	size_t actual_count = 0;
3738
3739	if (!buf_user) {
3740		dev_warn(&sep->pdev->dev,
3741			 "[PID%d] null user buffer\n",
3742			 current->pid);
3743		error = -EINVAL;
3744		goto end_function;
3745	}
3746
3747	if (count_user < sizeof(struct sep_fastcall_hdr)) {
3748		dev_warn(&sep->pdev->dev,
3749			 "[PID%d] too small message size 0x%08zX\n",
3750			 current->pid, count_user);
3751		error = -EINVAL;
3752		goto end_function;
3753	}
3754
3755
3756	if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) {
3757		error = -EFAULT;
3758		goto end_function;
3759	}
3760
3761	if (SEP_FC_MAGIC != args->magic) {
3762		dev_warn(&sep->pdev->dev,
3763			 "[PID%d] invalid fastcall magic 0x%08X\n",
3764			 current->pid, args->magic);
3765		error = -EINVAL;
3766		goto end_function;
3767	}
3768
3769	dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
3770		current->pid, args->num_dcbs);
3771	dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
3772		current->pid, args->msg_len);
3773
3774	if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
3775	    SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
3776		dev_warn(&sep->pdev->dev,
3777			 "[PID%d] invalid message length\n",
3778			 current->pid);
3779		error = -EINVAL;
3780		goto end_function;
3781	}
3782
3783	actual_count = sizeof(struct sep_fastcall_hdr)
3784			+ args->msg_len
3785			+ (args->num_dcbs * sizeof(struct build_dcb_struct));
3786
3787	if (actual_count != count_user) {
 3788		dev_warn(&sep->pdev->dev,
 3789			 "[PID%d] inconsistent message sizes 0x%08zX vs 0x%08zX\n",
 3790			 current->pid, actual_count,
 3791			 count_user);
3792		error = -EMSGSIZE;
3793		goto end_function;
3794	}
3795
3796end_function:
3797	return error;
3798}
3799
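/*
 * For reference, the buffer layout that the size check above enforces for
 * a single fastcall write() is (see sep_write() below for the parsing
 * order):
 *
 *	+--------------------------------------+
 *	| struct sep_fastcall_hdr              |  magic, num_dcbs,
 *	|                                      |  msg_len, secure_dma
 *	+--------------------------------------+
 *	| num_dcbs * struct build_dcb_struct   |  may be empty
 *	+--------------------------------------+
 *	| message area, msg_len bytes          |
 *	+--------------------------------------+
 *
 * count_user must equal the sum of the three parts exactly, otherwise
 * -EMSGSIZE is returned.
 */
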
3800/**
3801 *	sep_write - Starts an operation for fastcall interface
3802 *	@filp: File pointer
3803 *	@buf_user: User buffer for operation parameters
3804 *	@count_user: User buffer size
3805 *	@offset: File offset, not supported
3806 *
3807 *	The implementation does not support writing in chunks,
3808 *	all data must be given during a single write system call.
3809 */
3810static ssize_t sep_write(struct file *filp,
3811			 const char __user *buf_user, size_t count_user,
3812			 loff_t *offset)
3813{
3814	struct sep_private_data * const private_data = filp->private_data;
3815	struct sep_call_status *call_status = &private_data->call_status;
3816	struct sep_device *sep = private_data->device;
3817	struct sep_dma_context *dma_ctx = NULL;
3818	struct sep_fastcall_hdr call_hdr = {0};
3819	void *msg_region = NULL;
3820	void *dmatables_region = NULL;
3821	struct sep_dcblock *dcb_region = NULL;
3822	ssize_t error = 0;
3823	struct sep_queue_info *my_queue_elem = NULL;
3824	bool my_secure_dma; /* are we using secure_dma (IMR)? */
3825
3826	dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n",
3827		current->pid, sep);
3828	dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n",
3829		current->pid, private_data);
3830
3831	error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user);
3832	if (error)
3833		goto end_function;
3834
3835	buf_user += sizeof(struct sep_fastcall_hdr);
3836
3837	if (call_hdr.secure_dma == 0)
3838		my_secure_dma = false;
3839	else
3840		my_secure_dma = true;
3841
3842	/*
3843	 * Controlling driver memory usage by limiting amount of
3844	 * buffers created. Only SEP_DOUBLEBUF_USERS_LIMIT number
3845	 * of threads can progress further at a time
3846	 */
 3847	dev_dbg(&sep->pdev->dev,
 3848		"[PID%d] waiting for double buffering region access\n", current->pid);
3849	error = down_interruptible(&sep->sep_doublebuf);
3850	dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n",
3851					current->pid);
3852	if (error) {
3853		/* Signal received */
3854		goto end_function_error;
3855	}
3856
3857
3858	/*
3859	 * Prepare contents of the shared area regions for
3860	 * the operation into temporary buffers
3861	 */
3862	if (0 < call_hdr.num_dcbs) {
3863		error = sep_create_dcb_dmatables_context(sep,
3864				&dcb_region,
3865				&dmatables_region,
3866				&dma_ctx,
3867				(const struct build_dcb_struct __user *)
3868					buf_user,
3869				call_hdr.num_dcbs, my_secure_dma);
3870		if (error)
3871			goto end_function_error_doublebuf;
3872
3873		buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct);
3874	}
3875
3876	error = sep_create_msgarea_context(sep,
3877					   &msg_region,
3878					   buf_user,
3879					   call_hdr.msg_len);
3880	if (error)
3881		goto end_function_error_doublebuf;
3882
3883	dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n",
3884							current->pid);
3885	my_queue_elem = sep_queue_status_add(sep,
3886				((struct sep_msgarea_hdr *)msg_region)->opcode,
3887				(dma_ctx) ? dma_ctx->input_data_len : 0,
3888				     current->pid,
3889				     current->comm, sizeof(current->comm));
3890
3891	if (!my_queue_elem) {
 3892		dev_dbg(&sep->pdev->dev,
 3893			"[PID%d] updating queue status error\n", current->pid);
3894		error = -ENOMEM;
3895		goto end_function_error_doublebuf;
3896	}
3897
3898	/* Wait until current process gets the transaction */
3899	error = sep_wait_transaction(sep);
3900
3901	if (error) {
3902		/* Interrupted by signal, don't clear transaction */
3903		dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n",
3904			current->pid);
3905		sep_queue_status_remove(sep, &my_queue_elem);
3906		goto end_function_error_doublebuf;
3907	}
3908
3909	dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n",
3910		current->pid);
3911	private_data->my_queue_elem = my_queue_elem;
3912
3913	/* Activate shared area regions for the transaction */
3914	error = sep_activate_msgarea_context(sep, &msg_region,
3915					     call_hdr.msg_len);
3916	if (error)
3917		goto end_function_error_clear_transact;
3918
3919	sep_dump_message(sep);
3920
3921	if (0 < call_hdr.num_dcbs) {
3922		error = sep_activate_dcb_dmatables_context(sep,
3923				&dcb_region,
3924				&dmatables_region,
3925				dma_ctx);
3926		if (error)
3927			goto end_function_error_clear_transact;
3928	}
3929
3930	/* Send command to SEP */
3931	error = sep_send_command_handler(sep);
3932	if (error)
3933		goto end_function_error_clear_transact;
3934
3935	/* Store DMA context for the transaction */
3936	private_data->dma_ctx = dma_ctx;
3937	/* Update call status */
3938	set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);
3939	error = count_user;
3940
3941	up(&sep->sep_doublebuf);
3942	dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3943		current->pid);
3944
3945	goto end_function;
3946
3947end_function_error_clear_transact:
3948	sep_end_transaction_handler(sep, &dma_ctx, call_status,
3949						&private_data->my_queue_elem);
3950
3951end_function_error_doublebuf:
3952	up(&sep->sep_doublebuf);
3953	dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3954		current->pid);
3955
3956end_function_error:
3957	if (dma_ctx)
3958		sep_free_dma_table_data_handler(sep, &dma_ctx);
3959
3960end_function:
3961	kfree(dcb_region);
3962	kfree(dmatables_region);
3963	kfree(msg_region);
3964
3965	return error;
3966}
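
/*
 * Illustrative user-space sketch (not part of the driver): a complete
 * fastcall transaction is one write() carrying header + DCBs + message,
 * then one read() for the results. The struct layouts come from the
 * driver API header; the device node name, MSG_LEN, dcb, msg and reply
 * are all hypothetical (see the DCB sketch after
 * sep_create_dcb_dmatables_context() above).
 *
 *	struct sep_fastcall_hdr hdr = {
 *		.magic      = SEP_FC_MAGIC,
 *		.num_dcbs   = 1,
 *		.msg_len    = MSG_LEN,
 *		.secure_dma = 0,
 *	};
 *	unsigned char buf[sizeof(hdr) + sizeof(dcb) + MSG_LEN];
 *
 *	int fd = open("/dev/sep", O_RDWR);             // node name assumed
 *	memcpy(buf, &hdr, sizeof(hdr));
 *	memcpy(buf + sizeof(hdr), &dcb, sizeof(dcb));
 *	memcpy(buf + sizeof(hdr) + sizeof(dcb), msg, MSG_LEN);
 *	if (write(fd, buf, sizeof(buf)) < 0)           // starts the operation
 *		return -1;
 *	read(fd, reply, sizeof(reply));                // blocks until done
 */
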
3967/**
3968 *	sep_seek - Handler for seek system call
3969 *	@filp: File pointer
3970 *	@offset: File offset
3971 *	@origin: Options for offset
3972 *
3973 *	Fastcall interface does not support seeking, all reads
3974 *	and writes are from/to offset zero
3975 */
3976static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
3977{
 3978	return -ESPIPE;
3979}
3980
3981
3982
3983/**
3984 * sep_file_operations - file operation on sep device
3985 * @sep_ioctl:	ioctl handler from user space call
3986 * @sep_poll:	poll handler
3987 * @sep_open:	handles sep device open request
3988 * @sep_release:handles sep device release request
3989 * @sep_mmap:	handles memory mapping requests
3990 * @sep_read:	handles read request on sep device
3991 * @sep_write:	handles write request on sep device
3992 * @sep_seek:	handles seek request on sep device
3993 */
3994static const struct file_operations sep_file_operations = {
3995	.owner = THIS_MODULE,
3996	.unlocked_ioctl = sep_ioctl,
3997	.poll = sep_poll,
3998	.open = sep_open,
3999	.release = sep_release,
4000	.mmap = sep_mmap,
4001	.read = sep_read,
4002	.write = sep_write,
4003	.llseek = sep_seek,
4004};
4005
4006/**
 4007 * sep_sysfs_read - read sysfs entry per given arguments
4008 * @filp: file pointer
4009 * @kobj: kobject pointer
4010 * @attr: binary file attributes
4011 * @buf: read to this buffer
4012 * @pos: offset to read
4013 * @count: amount of data to read
4014 *
4015 * This function is to read sysfs entries for sep driver per given arguments.
4016 */
4017static ssize_t
4018sep_sysfs_read(struct file *filp, struct kobject *kobj,
4019		struct bin_attribute *attr,
4020		char *buf, loff_t pos, size_t count)
4021{
4022	unsigned long lck_flags;
4023	size_t nleft = count;
4024	struct sep_device *sep = sep_dev;
4025	struct sep_queue_info *queue_elem = NULL;
4026	u32 queue_num = 0;
4027	u32 i = 1;
4028
4029	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
4030
4031	queue_num = sep->sep_queue_num;
4032	if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
4033		queue_num = SEP_DOUBLEBUF_USERS_LIMIT;
4034
4035
4036	if (count < sizeof(queue_num)
4037			+ (queue_num * sizeof(struct sep_queue_data))) {
4038		spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4039		return -EINVAL;
4040	}
4041
4042	memcpy(buf, &queue_num, sizeof(queue_num));
4043	buf += sizeof(queue_num);
4044	nleft -= sizeof(queue_num);
4045
4046	list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
4047		if (i++ > queue_num)
4048			break;
4049
4050		memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
4051		nleft -= sizeof(queue_elem->data);
4052		buf += sizeof(queue_elem->data);
4053	}
4054	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4055
4056	return count - nleft;
4057}
4058
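/*
 * Illustrative user-space sketch (not part of the driver): the blob
 * produced above is a u32 element count followed by that many struct
 * sep_queue_data records. The sysfs path below is an assumption; the
 * attribute is registered on the misc device in
 * sep_register_driver_with_fs().
 *
 *	unsigned char blob[4096];
 *	uint32_t nelems;
 *
 *	int fd = open("/sys/class/misc/sep/queue_status", O_RDONLY);
 *	ssize_t n = read(fd, blob, sizeof(blob));      // one-shot read
 *
 *	memcpy(&nelems, blob, sizeof(nelems));
 *	// records start at blob + sizeof(nelems), each
 *	// sizeof(struct sep_queue_data) bytes long
 */
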
4059/**
 4060 * queue_status - sysfs binary attribute for queue status
4061 * @attr: attributes (name & permissions)
4062 * @read: function pointer to read this file
 4063 * @size: maximum size of binary attribute
4064 */
4065static const struct bin_attribute queue_status = {
4066	.attr = {.name = "queue_status", .mode = 0444},
4067	.read = sep_sysfs_read,
4068	.size = sizeof(u32)
4069		+ (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
4070};
4071
4072/**
4073 * sep_register_driver_with_fs - register misc devices
4074 * @sep: pointer to struct sep_device
4075 *
4076 * This function registers the driver with the file system
4077 */
4078static int sep_register_driver_with_fs(struct sep_device *sep)
4079{
4080	int ret_val;
4081
4082	sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
4083	sep->miscdev_sep.name = SEP_DEV_NAME;
4084	sep->miscdev_sep.fops = &sep_file_operations;
4085
4086	ret_val = misc_register(&sep->miscdev_sep);
4087	if (ret_val) {
4088		dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
4089			ret_val);
4090		return ret_val;
4091	}
4092
4093	ret_val = device_create_bin_file(sep->miscdev_sep.this_device,
4094								&queue_status);
 4095	if (ret_val) {
 4096		dev_warn(&sep->pdev->dev, "sysfs attribute1 fails for SEP %x\n",
 4097			ret_val);
 4098		misc_deregister(&sep->miscdev_sep);
 4099		return ret_val;
 4100	}
 4101
 4102	return ret_val;
 4103}
 4104
4105/**
 4106 * sep_probe - probe a matching PCI device
 4107 * @pdev:	pci_device
 4108 * @ent:	pci_device_id
 4109 *
 4110 * Attempt to set up and configure a SEP device that has been
 4111 * discovered by the PCI layer. Allocates all required resources.
4112 */
4113static int __devinit sep_probe(struct pci_dev *pdev,
4114	const struct pci_device_id *ent)
4115{
4116	int error = 0;
4117	struct sep_device *sep = NULL;
4118
4119	if (sep_dev != NULL) {
4120		dev_dbg(&pdev->dev, "only one SEP supported.\n");
4121		return -EBUSY;
4122	}
4123
4124	/* Enable the device */
4125	error = pci_enable_device(pdev);
4126	if (error) {
4127		dev_warn(&pdev->dev, "error enabling pci device\n");
4128		goto end_function;
4129	}
4130
4131	/* Allocate the sep_device structure for this device */
 4132	sep_dev = kzalloc(sizeof(struct sep_device), GFP_KERNEL);
4133	if (sep_dev == NULL) {
4134		dev_warn(&pdev->dev,
4135			"can't kmalloc the sep_device structure\n");
4136		error = -ENOMEM;
4137		goto end_function_disable_device;
4138	}
4139
4140	/*
4141	 * We're going to use another variable for actually
4142	 * working with the device; this way, if we have
4143	 * multiple devices in the future, it would be easier
4144	 * to make appropriate changes
4145	 */
4146	sep = sep_dev;
4147
4148	sep->pdev = pci_dev_get(pdev);
4149
4150	init_waitqueue_head(&sep->event_transactions);
4151	init_waitqueue_head(&sep->event_interrupt);
4152	spin_lock_init(&sep->snd_rply_lck);
4153	spin_lock_init(&sep->sep_queue_lock);
4154	sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT);
4155
4156	INIT_LIST_HEAD(&sep->sep_queue_status);
4157
 4158	dev_dbg(&sep->pdev->dev,
 4159		"sep probe: PCI obtained, device being prepared\n");
4160
4161	/* Set up our register area */
4162	sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
4163	if (!sep->reg_physical_addr) {
4164		dev_warn(&sep->pdev->dev, "Error getting register start\n");
4165		error = -ENODEV;
4166		goto end_function_free_sep_dev;
4167	}
4168
4169	sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
4170	if (!sep->reg_physical_end) {
4171		dev_warn(&sep->pdev->dev, "Error getting register end\n");
4172		error = -ENODEV;
4173		goto end_function_free_sep_dev;
4174	}
4175
4176	sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
4177		(size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
4178	if (!sep->reg_addr) {
4179		dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
4180		error = -ENODEV;
4181		goto end_function_free_sep_dev;
4182	}
4183
4184	dev_dbg(&sep->pdev->dev,
4185		"Register area start %llx end %llx virtual %p\n",
4186		(unsigned long long)sep->reg_physical_addr,
4187		(unsigned long long)sep->reg_physical_end,
4188		sep->reg_addr);
4189
4190	/* Allocate the shared area */
4191	sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
4192		SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
4193		SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
4194		SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
4195		SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
4196
4197	if (sep_map_and_alloc_shared_area(sep)) {
4198		error = -ENOMEM;
4199		/* Allocation failed */
4200		goto end_function_error;
4201	}
4202
4203	/* Clear ICR register */
4204	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4205
4206	/* Set the IMR register - open only GPR 2 */
4207	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4208
4209	/* Read send/receive counters from SEP */
4210	sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4211	sep->reply_ct &= 0x3FFFFFFF;
4212	sep->send_ct = sep->reply_ct;
4213
4214	/* Get the interrupt line */
4215	error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
4216		"sep_driver", sep);
4217
4218	if (error)
4219		goto end_function_deallocate_sep_shared_area;
4220
4221	/* The new chip requires a shared area reconfigure */
4222	error = sep_reconfig_shared_area(sep);
4223	if (error)
4224		goto end_function_free_irq;
4225
4226	sep->in_use = 1;
4227
4228	/* Finally magic up the device nodes */
4229	/* Register driver with the fs */
4230	error = sep_register_driver_with_fs(sep);
4231
4232	if (error) {
4233		dev_err(&sep->pdev->dev, "error registering dev file\n");
4234		goto end_function_free_irq;
4235	}
4236
 4237	sep->in_use = 0; /* done touching the device */
4238#ifdef SEP_ENABLE_RUNTIME_PM
4239	pm_runtime_put_noidle(&sep->pdev->dev);
4240	pm_runtime_allow(&sep->pdev->dev);
4241	pm_runtime_set_autosuspend_delay(&sep->pdev->dev,
4242		SUSPEND_DELAY);
4243	pm_runtime_use_autosuspend(&sep->pdev->dev);
4244	pm_runtime_mark_last_busy(&sep->pdev->dev);
4245	sep->power_save_setup = 1;
4246#endif
4247	/* register kernel crypto driver */
4248#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4249	error = sep_crypto_setup();
4250	if (error) {
4251		dev_err(&sep->pdev->dev, "crypto setup failed\n");
4252		goto end_function_free_irq;
4253	}
4254#endif
4255	goto end_function;
4256
4257end_function_free_irq:
4258	free_irq(pdev->irq, sep);
4259
4260end_function_deallocate_sep_shared_area:
4261	/* De-allocate shared area */
4262	sep_unmap_and_free_shared_area(sep);
4263
4264end_function_error:
4265	iounmap(sep->reg_addr);
4266
4267end_function_free_sep_dev:
4268	pci_dev_put(sep_dev->pdev);
4269	kfree(sep_dev);
4270	sep_dev = NULL;
4271
4272end_function_disable_device:
4273	pci_disable_device(pdev);
4274
4275end_function:
4276	return error;
4277}
4278
4279/**
4280 * sep_remove -	handles removing device from pci subsystem
4281 * @pdev:	pointer to pci device
4282 *
4283 * This function will handle removing our sep device from pci subsystem on exit
4284 * or unloading this module. It should free up all used resources, and unmap if
4285 * any memory regions mapped.
4286 */
4287static void sep_remove(struct pci_dev *pdev)
4288{
4289	struct sep_device *sep = sep_dev;
4290
4291	/* Unregister from fs */
4292	misc_deregister(&sep->miscdev_sep);
4293
4294	/* Unregister from kernel crypto */
4295#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4296	sep_crypto_takedown();
4297#endif
4298	/* Free the irq */
4299	free_irq(sep->pdev->irq, sep);
4300
4301	/* Free the shared area  */
4302	sep_unmap_and_free_shared_area(sep_dev);
4303	iounmap(sep_dev->reg_addr);
4304
4305#ifdef SEP_ENABLE_RUNTIME_PM
4306	if (sep->in_use) {
4307		sep->in_use = 0;
4308		pm_runtime_forbid(&sep->pdev->dev);
4309		pm_runtime_get_noresume(&sep->pdev->dev);
4310	}
4311#endif
4312	pci_dev_put(sep_dev->pdev);
4313	kfree(sep_dev);
4314	sep_dev = NULL;
4315}
4316
4317/* Initialize struct pci_device_id for our driver */
4318static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
4319	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
4320	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
 4321	{0}
4322};
4323
4324/* Export our pci_device_id structure to user space */
4325MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
4326
4327#ifdef SEP_ENABLE_RUNTIME_PM
4328
4329/**
 4330 * sep_pci_resume - resume routine while waking up from S3 state
 4331 * @dev:	pointer to sep device
 4332 *
 4333 * This function is used to wake up the sep driver while the system awakes
 4334 * from S3 state, i.e. suspend to RAM. The RAM is intact.
4335 * Notes - revisit with more understanding of pm, ICR/IMR & counters.
4336 */
4337static int sep_pci_resume(struct device *dev)
4338{
4339	struct sep_device *sep = sep_dev;
4340
4341	dev_dbg(&sep->pdev->dev, "pci resume called\n");
4342
4343	if (sep->power_state == SEP_DRIVER_POWERON)
4344		return 0;
4345
4346	/* Clear ICR register */
4347	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4348
4349	/* Set the IMR register - open only GPR 2 */
4350	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4351
4352	/* Read send/receive counters from SEP */
4353	sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4354	sep->reply_ct &= 0x3FFFFFFF;
4355	sep->send_ct = sep->reply_ct;
4356
4357	sep->power_state = SEP_DRIVER_POWERON;
4358
4359	return 0;
4360}
4361
4362/**
 4363 * sep_pci_suspend - suspend routine while going to S3 state
 4364 * @dev:	pointer to sep device
 4365 *
 4366 * This function is used to suspend the sep driver while the system goes to
 4367 * S3 state, i.e. suspend to RAM. The RAM is intact and powered on during suspend.
4368 * Notes - revisit with more understanding of pm, ICR/IMR
4369 */
4370static int sep_pci_suspend(struct device *dev)
4371{
4372	struct sep_device *sep = sep_dev;
4373
4374	dev_dbg(&sep->pdev->dev, "pci suspend called\n");
4375	if (sep->in_use == 1)
4376		return -EAGAIN;
4377
4378	sep->power_state = SEP_DRIVER_POWEROFF;
4379
4380	/* Clear ICR register */
4381	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4382
4383	/* Set the IMR to block all */
4384	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF);
4385
4386	return 0;
4387}
4388
4389/**
4390 * sep_pm_runtime_resume - runtime resume routine
4391 * @dev:	pointer to sep device
4392 *
4393 * Notes - revisit with more understanding of pm, ICR/IMR & counters
4394 */
4395static int sep_pm_runtime_resume(struct device *dev)
4396{
4397
4398	u32 retval2;
4399	u32 delay_count;
4400	struct sep_device *sep = sep_dev;
4401
4402	dev_dbg(&sep->pdev->dev, "pm runtime resume called\n");
4403
 4404	/*
4405	 * Wait until the SCU boot is ready
4406	 * This is done by iterating SCU_DELAY_ITERATION (10
4407	 * microseconds each) up to SCU_DELAY_MAX (50) times.
4408	 * This bit can be set in a random time that is less
4409	 * than 500 microseconds after each power resume
4410	 */
4411	retval2 = 0;
4412	delay_count = 0;
4413	while ((!retval2) && (delay_count < SCU_DELAY_MAX)) {
4414		retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
4415		retval2 &= 0x00000008;
4416		if (!retval2) {
4417			udelay(SCU_DELAY_ITERATION);
4418			delay_count += 1;
4419		}
4420	}
4421
4422	if (!retval2) {
4423		dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n");
4424		return -EINVAL;
4425	}
4426
4427	/* Clear ICR register */
4428	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4429
4430	/* Set the IMR register - open only GPR 2 */
4431	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4432
4433	/* Read send/receive counters from SEP */
4434	sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4435	sep->reply_ct &= 0x3FFFFFFF;
4436	sep->send_ct = sep->reply_ct;
4437
4438	return 0;
4439}
4440
4441/**
4442 * sep_pm_runtime_suspend - runtime suspend routine
4443 * @dev:	pointer to sep device
4444 *
4445 * Notes - revisit with more understanding of pm
4446 */
4447static int sep_pm_runtime_suspend(struct device *dev)
4448{
4449	struct sep_device *sep = sep_dev;
4450
4451	dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n");
4452
4453	/* Clear ICR register */
4454	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4455	return 0;
4456}
4457
4458/**
4459 * sep_pm - power management for sep driver
4460 * @sep_pm_runtime_resume:	resume- no communication with cpu & main memory
4461 * @sep_pm_runtime_suspend:	suspend- no communication with cpu & main memory
4462 * @sep_pci_suspend:		suspend - main memory is still ON
4463 * @sep_pci_resume:		resume - main memory is still ON
4464 */
4465static const struct dev_pm_ops sep_pm = {
4466	.runtime_resume = sep_pm_runtime_resume,
4467	.runtime_suspend = sep_pm_runtime_suspend,
4468	.resume = sep_pci_resume,
4469	.suspend = sep_pci_suspend,
4470};
4471#endif /* SEP_ENABLE_RUNTIME_PM */
4472
4473/**
4474 * sep_pci_driver - registers this device with pci subsystem
4475 * @name:	name identifier for this driver
4476 * @sep_pci_id_tbl:	pointer to struct pci_device_id table
4477 * @sep_probe:	pointer to probe function in PCI driver
4478 * @sep_remove:	pointer to remove function in PCI driver
4479 */
4480static struct pci_driver sep_pci_driver = {
4481#ifdef SEP_ENABLE_RUNTIME_PM
4482	.driver = {
4483		.pm = &sep_pm,
4484	},
4485#endif
4486	.name = "sep_sec_driver",
4487	.id_table = sep_pci_id_tbl,
4488	.probe = sep_probe,
4489	.remove = sep_remove
4490};
4491
4492/**
4493 * sep_init - init function
4494 *
4495 * Module load time. Register the PCI device driver.
4496 */
4497
4498static int __init sep_init(void)
4499{
4500	return pci_register_driver(&sep_pci_driver);
4501}
4502
4503
4504/**
4505 * sep_exit - called to unload driver
4506 *
 4507 * Unregister the driver. The device will perform all the cleanup required.
4508 */
4509static void __exit sep_exit(void)
4510{
4511	pci_unregister_driver(&sep_pci_driver);
4512}
4513
4514
4515module_init(sep_init);
4516module_exit(sep_exit);
4517
4518MODULE_LICENSE("GPL");