   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Keystone Queue Manager subsystem driver
   4 *
   5 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
   6 * Authors:	Sandeep Nair <sandeep_n@ti.com>
   7 *		Cyril Chemparathy <cyril@ti.com>
   8 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
   9 */
  10
  11#include <linux/debugfs.h>
  12#include <linux/dma-mapping.h>
  13#include <linux/firmware.h>
  14#include <linux/interrupt.h>
  15#include <linux/io.h>
  16#include <linux/module.h>
  17#include <linux/of_address.h>
  18#include <linux/of_device.h>
  19#include <linux/of_irq.h>
  20#include <linux/pm_runtime.h>
  21#include <linux/slab.h>
  22#include <linux/soc/ti/knav_qmss.h>
  23
  24#include "knav_qmss.h"
  25
  26static struct knav_device *kdev;
  27static DEFINE_MUTEX(knav_dev_lock);
  28
  29/* Queue manager register indices in DTS */
  30#define KNAV_QUEUE_PEEK_REG_INDEX	0
  31#define KNAV_QUEUE_STATUS_REG_INDEX	1
  32#define KNAV_QUEUE_CONFIG_REG_INDEX	2
  33#define KNAV_QUEUE_REGION_REG_INDEX	3
  34#define KNAV_QUEUE_PUSH_REG_INDEX	4
  35#define KNAV_QUEUE_POP_REG_INDEX	5
  36
  37/* Queue manager register indices in DTS for QMSS in K2G NAVSS.
  38 * There are no status and vbusm push registers on this version
   39 * of QMSS. Push registers are the same as pop registers, so all
   40 * indices above 1 are redefined below.
  41 */
  42#define KNAV_L_QUEUE_CONFIG_REG_INDEX	1
  43#define KNAV_L_QUEUE_REGION_REG_INDEX	2
  44#define KNAV_L_QUEUE_PUSH_REG_INDEX	3
  45
  46/* PDSP register indices in DTS */
  47#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX	0
  48#define KNAV_QUEUE_PDSP_REGS_REG_INDEX	1
  49#define KNAV_QUEUE_PDSP_INTD_REG_INDEX	2
  50#define KNAV_QUEUE_PDSP_CMD_REG_INDEX	3
  51
  52#define knav_queue_idx_to_inst(kdev, idx)			\
  53	(kdev->instances + (idx << kdev->inst_shift))
  54
  55#define for_each_handle_rcu(qh, inst)			\
  56	list_for_each_entry_rcu(qh, &inst->handles, list)
  57
  58#define for_each_instance(idx, inst, kdev)		\
  59	for (idx = 0, inst = kdev->instances;		\
  60	     idx < (kdev)->num_queues_in_use;			\
  61	     idx++, inst = knav_queue_idx_to_inst(kdev, idx))
  62
   63/* All firmware file names end up here. List the firmware file names
   64 * below, newest first. The array is searched from the start until a
   65 * firmware file is found.
  66 */
  67const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};
  68
  69static bool device_ready;
  70bool knav_qmss_device_ready(void)
  71{
  72	return device_ready;
  73}
  74EXPORT_SYMBOL_GPL(knav_qmss_device_ready);
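
/*
 * Usage sketch (editor's illustration, not part of this driver): a QMSS
 * client can use knav_qmss_device_ready() to defer its own probe until
 * this driver has finished initializing. The function name foo_probe()
 * below is hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		if (!knav_qmss_device_ready())
 *			return -EPROBE_DEFER;
 *
 *		... set up queues and descriptor pools ...
 *
 *		return 0;
 *	}
 */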
  75
  76/**
   77 * knav_queue_notify() - qmss queue notifier call
  78 *
  79 * @inst:		qmss queue instance like accumulator
  80 */
  81void knav_queue_notify(struct knav_queue_inst *inst)
  82{
  83	struct knav_queue *qh;
  84
  85	if (!inst)
  86		return;
  87
  88	rcu_read_lock();
  89	for_each_handle_rcu(qh, inst) {
  90		if (atomic_read(&qh->notifier_enabled) <= 0)
  91			continue;
  92		if (WARN_ON(!qh->notifier_fn))
  93			continue;
  94		this_cpu_inc(qh->stats->notifies);
  95		qh->notifier_fn(qh->notifier_fn_arg);
  96	}
  97	rcu_read_unlock();
  98}
  99EXPORT_SYMBOL_GPL(knav_queue_notify);
 100
 101static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
 102{
 103	struct knav_queue_inst *inst = _instdata;
 104
 105	knav_queue_notify(inst);
 106	return IRQ_HANDLED;
 107}
 108
 109static int knav_queue_setup_irq(struct knav_range_info *range,
 110			  struct knav_queue_inst *inst)
 111{
 112	unsigned queue = inst->id - range->queue_base;
 113	int ret = 0, irq;
 114
 115	if (range->flags & RANGE_HAS_IRQ) {
 116		irq = range->irqs[queue].irq;
 117		ret = request_irq(irq, knav_queue_int_handler, 0,
 118					inst->irq_name, inst);
 119		if (ret)
 120			return ret;
 121		disable_irq(irq);
 122		if (range->irqs[queue].cpu_mask) {
 123			ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
 124			if (ret) {
 125				dev_warn(range->kdev->dev,
 126					 "Failed to set IRQ affinity\n");
 127				return ret;
 128			}
 129		}
 130	}
 131	return ret;
 132}
 133
 134static void knav_queue_free_irq(struct knav_queue_inst *inst)
 135{
 136	struct knav_range_info *range = inst->range;
 137	unsigned queue = inst->id - inst->range->queue_base;
 138	int irq;
 139
 140	if (range->flags & RANGE_HAS_IRQ) {
 141		irq = range->irqs[queue].irq;
 142		irq_set_affinity_hint(irq, NULL);
 143		free_irq(irq, inst);
 144	}
 145}
 146
 147static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
 148{
 149	return !list_empty(&inst->handles);
 150}
 151
 152static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
 153{
 154	return inst->range->flags & RANGE_RESERVED;
 155}
 156
 157static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
 158{
 159	struct knav_queue *tmp;
 160
 161	rcu_read_lock();
 162	for_each_handle_rcu(tmp, inst) {
 163		if (tmp->flags & KNAV_QUEUE_SHARED) {
 164			rcu_read_unlock();
 165			return true;
 166		}
 167	}
 168	rcu_read_unlock();
 169	return false;
 170}
 171
 172static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
 173						unsigned type)
 174{
 175	if ((type == KNAV_QUEUE_QPEND) &&
 176	    (inst->range->flags & RANGE_HAS_IRQ)) {
 177		return true;
 178	} else if ((type == KNAV_QUEUE_ACC) &&
 179		(inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
 180		return true;
 181	} else if ((type == KNAV_QUEUE_GP) &&
 182		!(inst->range->flags &
 183			(RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
 184		return true;
 185	}
 186	return false;
 187}
 188
 189static inline struct knav_queue_inst *
 190knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
 191{
 192	struct knav_queue_inst *inst;
 193	int idx;
 194
 195	for_each_instance(idx, inst, kdev) {
 196		if (inst->id == id)
 197			return inst;
 198	}
 199	return NULL;
 200}
 201
 202static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
 203{
 204	if (kdev->base_id <= id &&
 205	    kdev->base_id + kdev->num_queues > id) {
 206		id -= kdev->base_id;
 207		return knav_queue_match_id_to_inst(kdev, id);
 208	}
 209	return NULL;
 210}
 211
 212static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
 213				      const char *name, unsigned flags)
 214{
 215	struct knav_queue *qh;
 216	unsigned id;
 217	int ret = 0;
 218
 219	qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
 220	if (!qh)
 221		return ERR_PTR(-ENOMEM);
 222
 223	qh->stats = alloc_percpu(struct knav_queue_stats);
 224	if (!qh->stats) {
 225		ret = -ENOMEM;
 226		goto err;
 227	}
 228
 229	qh->flags = flags;
 230	qh->inst = inst;
 231	id = inst->id - inst->qmgr->start_queue;
 232	qh->reg_push = &inst->qmgr->reg_push[id];
 233	qh->reg_pop = &inst->qmgr->reg_pop[id];
 234	qh->reg_peek = &inst->qmgr->reg_peek[id];
 235
 236	/* first opener? */
 237	if (!knav_queue_is_busy(inst)) {
 238		struct knav_range_info *range = inst->range;
 239
 240		inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
 241		if (range->ops && range->ops->open_queue)
 242			ret = range->ops->open_queue(range, inst, flags);
 243
 244		if (ret)
 245			goto err;
 246	}
 247	list_add_tail_rcu(&qh->list, &inst->handles);
 248	return qh;
 249
 250err:
 251	if (qh->stats)
 252		free_percpu(qh->stats);
 253	devm_kfree(inst->kdev->dev, qh);
 254	return ERR_PTR(ret);
 255}
 256
 257static struct knav_queue *
 258knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
 259{
 260	struct knav_queue_inst *inst;
 261	struct knav_queue *qh;
 262
 263	mutex_lock(&knav_dev_lock);
 264
 265	qh = ERR_PTR(-ENODEV);
 266	inst = knav_queue_find_by_id(id);
 267	if (!inst)
 268		goto unlock_ret;
 269
 270	qh = ERR_PTR(-EEXIST);
 271	if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
 272		goto unlock_ret;
 273
 274	qh = ERR_PTR(-EBUSY);
 275	if ((flags & KNAV_QUEUE_SHARED) &&
 276	    (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
 277		goto unlock_ret;
 278
 279	qh = __knav_queue_open(inst, name, flags);
 280
 281unlock_ret:
 282	mutex_unlock(&knav_dev_lock);
 283
 284	return qh;
 285}
 286
 287static struct knav_queue *knav_queue_open_by_type(const char *name,
 288						unsigned type, unsigned flags)
 289{
 290	struct knav_queue_inst *inst;
 291	struct knav_queue *qh = ERR_PTR(-EINVAL);
 292	int idx;
 293
 294	mutex_lock(&knav_dev_lock);
 295
 296	for_each_instance(idx, inst, kdev) {
 297		if (knav_queue_is_reserved(inst))
 298			continue;
 299		if (!knav_queue_match_type(inst, type))
 300			continue;
 301		if (knav_queue_is_busy(inst))
 302			continue;
 303		qh = __knav_queue_open(inst, name, flags);
 304		goto unlock_ret;
 305	}
 306
 307unlock_ret:
 308	mutex_unlock(&knav_dev_lock);
 309	return qh;
 310}
 311
 312static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
 313{
 314	struct knav_range_info *range = inst->range;
 315
 316	if (range->ops && range->ops->set_notify)
 317		range->ops->set_notify(range, inst, enabled);
 318}
 319
 320static int knav_queue_enable_notifier(struct knav_queue *qh)
 321{
 322	struct knav_queue_inst *inst = qh->inst;
 323	bool first;
 324
 325	if (WARN_ON(!qh->notifier_fn))
 326		return -EINVAL;
 327
 328	/* Adjust the per handle notifier count */
 329	first = (atomic_inc_return(&qh->notifier_enabled) == 1);
 330	if (!first)
 331		return 0; /* nothing to do */
 332
 333	/* Now adjust the per instance notifier count */
 334	first = (atomic_inc_return(&inst->num_notifiers) == 1);
 335	if (first)
 336		knav_queue_set_notify(inst, true);
 337
 338	return 0;
 339}
 340
 341static int knav_queue_disable_notifier(struct knav_queue *qh)
 342{
 343	struct knav_queue_inst *inst = qh->inst;
 344	bool last;
 345
 346	last = (atomic_dec_return(&qh->notifier_enabled) == 0);
 347	if (!last)
 348		return 0; /* nothing to do */
 349
 350	last = (atomic_dec_return(&inst->num_notifiers) == 0);
 351	if (last)
 352		knav_queue_set_notify(inst, false);
 353
 354	return 0;
 355}
 356
 357static int knav_queue_set_notifier(struct knav_queue *qh,
 358				struct knav_queue_notify_config *cfg)
 359{
 360	knav_queue_notify_fn old_fn = qh->notifier_fn;
 361
 362	if (!cfg)
 363		return -EINVAL;
 364
 365	if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
 366		return -ENOTSUPP;
 367
 368	if (!cfg->fn && old_fn)
 369		knav_queue_disable_notifier(qh);
 370
 371	qh->notifier_fn = cfg->fn;
 372	qh->notifier_fn_arg = cfg->fn_arg;
 373
 374	if (cfg->fn && !old_fn)
 375		knav_queue_enable_notifier(qh);
 376
 377	return 0;
 378}
 379
 380static int knav_gp_set_notify(struct knav_range_info *range,
 381			       struct knav_queue_inst *inst,
 382			       bool enabled)
 383{
 384	unsigned queue;
 385
 386	if (range->flags & RANGE_HAS_IRQ) {
 387		queue = inst->id - range->queue_base;
 388		if (enabled)
 389			enable_irq(range->irqs[queue].irq);
 390		else
 391			disable_irq_nosync(range->irqs[queue].irq);
 392	}
 393	return 0;
 394}
 395
 396static int knav_gp_open_queue(struct knav_range_info *range,
 397				struct knav_queue_inst *inst, unsigned flags)
 398{
 399	return knav_queue_setup_irq(range, inst);
 400}
 401
 402static int knav_gp_close_queue(struct knav_range_info *range,
 403				struct knav_queue_inst *inst)
 404{
 405	knav_queue_free_irq(inst);
 406	return 0;
 407}
 408
 409struct knav_range_ops knav_gp_range_ops = {
 410	.set_notify	= knav_gp_set_notify,
 411	.open_queue	= knav_gp_open_queue,
 412	.close_queue	= knav_gp_close_queue,
 413};
 414
 415
 416static int knav_queue_get_count(void *qhandle)
 417{
 418	struct knav_queue *qh = qhandle;
 419	struct knav_queue_inst *inst = qh->inst;
 420
 421	return readl_relaxed(&qh->reg_peek[0].entry_count) +
 422		atomic_read(&inst->desc_count);
 423}
 424
 425static void knav_queue_debug_show_instance(struct seq_file *s,
 426					struct knav_queue_inst *inst)
 427{
 428	struct knav_device *kdev = inst->kdev;
 429	struct knav_queue *qh;
 430	int cpu = 0;
 431	int pushes = 0;
 432	int pops = 0;
 433	int push_errors = 0;
 434	int pop_errors = 0;
 435	int notifies = 0;
 436
 437	if (!knav_queue_is_busy(inst))
 438		return;
 439
 440	seq_printf(s, "\tqueue id %d (%s)\n",
 441		   kdev->base_id + inst->id, inst->name);
 442	for_each_handle_rcu(qh, inst) {
 443		for_each_possible_cpu(cpu) {
 444			pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
 445			pops += per_cpu_ptr(qh->stats, cpu)->pops;
 446			push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
 447			pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
 448			notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
 449		}
 450
 451		seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n",
 452				qh,
 453				pushes,
 454				pops,
 455				knav_queue_get_count(qh),
 456				notifies,
 457				push_errors,
 458				pop_errors);
 459	}
 460}
 461
 462static int knav_queue_debug_show(struct seq_file *s, void *v)
 463{
 464	struct knav_queue_inst *inst;
 465	int idx;
 466
 467	mutex_lock(&knav_dev_lock);
 468	seq_printf(s, "%s: %u-%u\n",
 469		   dev_name(kdev->dev), kdev->base_id,
 470		   kdev->base_id + kdev->num_queues - 1);
 471	for_each_instance(idx, inst, kdev)
 472		knav_queue_debug_show_instance(s, inst);
 473	mutex_unlock(&knav_dev_lock);
 474
 475	return 0;
 476}
 477
 478static int knav_queue_debug_open(struct inode *inode, struct file *file)
 479{
 480	return single_open(file, knav_queue_debug_show, NULL);
 481}
 482
 483static const struct file_operations knav_queue_debug_ops = {
 484	.open		= knav_queue_debug_open,
 485	.read		= seq_read,
 486	.llseek		= seq_lseek,
 487	.release	= single_release,
 488};
 489
 490static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
 491					u32 flags)
 492{
 493	unsigned long end;
 494	u32 val = 0;
 495
 496	end = jiffies + msecs_to_jiffies(timeout);
 497	while (time_after(end, jiffies)) {
 498		val = readl_relaxed(addr);
 499		if (flags)
 500			val &= flags;
 501		if (!val)
 502			break;
 503		cpu_relax();
 504	}
 505	return val ? -ETIMEDOUT : 0;
 506}
 507
 508
 509static int knav_queue_flush(struct knav_queue *qh)
 510{
 511	struct knav_queue_inst *inst = qh->inst;
 512	unsigned id = inst->id - inst->qmgr->start_queue;
 513
 514	atomic_set(&inst->desc_count, 0);
 515	writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
 516	return 0;
 517}
 518
 519/**
 520 * knav_queue_open()	- open a hardware queue
 521 * @name		- name to give the queue handle
  522 * @id			- desired queue number if any or specifies the type
 523 *			  of queue
 524 * @flags		- the following flags are applicable to queues:
 525 *	KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
 526 *			     exclusive by default.
 527 *			     Subsequent attempts to open a shared queue should
 528 *			     also have this flag.
 529 *
 530 * Returns a handle to the open hardware queue if successful. Use IS_ERR()
 531 * to check the returned value for error codes.
 532 */
 533void *knav_queue_open(const char *name, unsigned id,
 534					unsigned flags)
 535{
 536	struct knav_queue *qh = ERR_PTR(-EINVAL);
 537
 538	switch (id) {
 539	case KNAV_QUEUE_QPEND:
 540	case KNAV_QUEUE_ACC:
 541	case KNAV_QUEUE_GP:
 542		qh = knav_queue_open_by_type(name, id, flags);
 543		break;
 544
 545	default:
 546		qh = knav_queue_open_by_id(name, id, flags);
 547		break;
 548	}
 549	return qh;
 550}
 551EXPORT_SYMBOL_GPL(knav_queue_open);
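
/*
 * Usage sketch (editor's illustration, not part of this driver): open a
 * general purpose queue by type, or a specific queue by number. The name
 * "foo-gp" and the queue number 8704 are arbitrary examples.
 *
 *	void *q;
 *
 *	q = knav_queue_open("foo-gp", KNAV_QUEUE_GP, 0);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *
 *	... push and pop descriptors ...
 *
 *	knav_queue_close(q);
 *
 * A well-known queue number can be opened (and shared) instead:
 *
 *	q = knav_queue_open("foo-rx", 8704, KNAV_QUEUE_SHARED);
 */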
 552
 553/**
 554 * knav_queue_close()	- close a hardware queue handle
  555 * @qhandle		- handle to close
 556 */
 557void knav_queue_close(void *qhandle)
 558{
 559	struct knav_queue *qh = qhandle;
 560	struct knav_queue_inst *inst = qh->inst;
 561
 562	while (atomic_read(&qh->notifier_enabled) > 0)
 563		knav_queue_disable_notifier(qh);
 564
 565	mutex_lock(&knav_dev_lock);
 566	list_del_rcu(&qh->list);
 567	mutex_unlock(&knav_dev_lock);
 568	synchronize_rcu();
 569	if (!knav_queue_is_busy(inst)) {
 570		struct knav_range_info *range = inst->range;
 571
 572		if (range->ops && range->ops->close_queue)
 573			range->ops->close_queue(range, inst);
 574	}
 575	free_percpu(qh->stats);
 576	devm_kfree(inst->kdev->dev, qh);
 577}
 578EXPORT_SYMBOL_GPL(knav_queue_close);
 579
 580/**
 581 * knav_queue_device_control()	- Perform control operations on a queue
  582 * @qhandle			- queue handle
 583 * @cmd				- control commands
 584 * @arg				- command argument
 585 *
 586 * Returns 0 on success, errno otherwise.
 587 */
 588int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
 589				unsigned long arg)
 590{
 591	struct knav_queue *qh = qhandle;
 592	struct knav_queue_notify_config *cfg;
 593	int ret;
 594
 595	switch ((int)cmd) {
 596	case KNAV_QUEUE_GET_ID:
 597		ret = qh->inst->kdev->base_id + qh->inst->id;
 598		break;
 599
 600	case KNAV_QUEUE_FLUSH:
 601		ret = knav_queue_flush(qh);
 602		break;
 603
 604	case KNAV_QUEUE_SET_NOTIFIER:
 605		cfg = (void *)arg;
 606		ret = knav_queue_set_notifier(qh, cfg);
 607		break;
 608
 609	case KNAV_QUEUE_ENABLE_NOTIFY:
 610		ret = knav_queue_enable_notifier(qh);
 611		break;
 612
 613	case KNAV_QUEUE_DISABLE_NOTIFY:
 614		ret = knav_queue_disable_notifier(qh);
 615		break;
 616
 617	case KNAV_QUEUE_GET_COUNT:
 618		ret = knav_queue_get_count(qh);
 619		break;
 620
 621	default:
 622		ret = -ENOTSUPP;
 623		break;
 624	}
 625	return ret;
 626}
 627EXPORT_SYMBOL_GPL(knav_queue_device_control);
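
/*
 * Usage sketch (editor's illustration, not part of this driver): install a
 * notification callback on an open queue handle. foo_notify() and foo_dev
 * are hypothetical client names; the callback has the knav_queue_notify_fn
 * signature declared alongside this API.
 *
 *	struct knav_queue_notify_config notify_cfg;
 *	int ret;
 *
 *	notify_cfg.fn = foo_notify;
 *	notify_cfg.fn_arg = foo_dev;
 *	ret = knav_queue_device_control(q, KNAV_QUEUE_SET_NOTIFIER,
 *					(unsigned long)&notify_cfg);
 *
 * Notifications can then be masked and unmasked around processing with
 * KNAV_QUEUE_DISABLE_NOTIFY and KNAV_QUEUE_ENABLE_NOTIFY, and the current
 * queue occupancy read back with KNAV_QUEUE_GET_COUNT.
 */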
 628
 629
 630
 631/**
 632 * knav_queue_push()	- push data (or descriptor) to the tail of a queue
  633 * @qhandle		- hardware queue handle
  634 * @dma			- DMA address of the data (descriptor) to push
 635 * @size		- size of data to push
 636 * @flags		- can be used to pass additional information
 637 *
 638 * Returns 0 on success, errno otherwise.
 639 */
 640int knav_queue_push(void *qhandle, dma_addr_t dma,
 641					unsigned size, unsigned flags)
 642{
 643	struct knav_queue *qh = qhandle;
 644	u32 val;
 645
 646	val = (u32)dma | ((size / 16) - 1);
 647	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);
 648
 649	this_cpu_inc(qh->stats->pushes);
 650	return 0;
 651}
 652EXPORT_SYMBOL_GPL(knav_queue_push);
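
/*
 * Usage sketch (editor's illustration, not part of this driver): push a
 * descriptor that has already been mapped for the device (see
 * knav_pool_desc_map() below); "dma" and "dma_sz" come from that call.
 * Note that the hardware encodes the size in 16-byte units, as in the
 * (size / 16) - 1 term above.
 *
 *	ret = knav_queue_push(q, dma, dma_sz, 0);
 *	if (ret)
 *		... handle the error ...
 */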
 653
 654/**
 655 * knav_queue_pop()	- pop data (or descriptor) from the head of a queue
  656 * @qhandle		- hardware queue handle
  657 * @size		- (optional) size of the data popped
 658 *
 659 * Returns a DMA address on success, 0 on failure.
 660 */
 661dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
 662{
 663	struct knav_queue *qh = qhandle;
 664	struct knav_queue_inst *inst = qh->inst;
 665	dma_addr_t dma;
 666	u32 val, idx;
 667
 668	/* are we accumulated? */
 669	if (inst->descs) {
 670		if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
 671			atomic_inc(&inst->desc_count);
 672			return 0;
 673		}
 674		idx  = atomic_inc_return(&inst->desc_head);
 675		idx &= ACC_DESCS_MASK;
 676		val = inst->descs[idx];
 677	} else {
 678		val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
 679		if (unlikely(!val))
 680			return 0;
 681	}
 682
 683	dma = val & DESC_PTR_MASK;
 684	if (size)
 685		*size = ((val & DESC_SIZE_MASK) + 1) * 16;
 686
 687	this_cpu_inc(qh->stats->pops);
 688	return dma;
 689}
 690EXPORT_SYMBOL_GPL(knav_queue_pop);
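
/*
 * Usage sketch (editor's illustration, not part of this driver): drain all
 * pending descriptors from a queue, for example from a notifier callback
 * or on shutdown. "pool" is the descriptor pool the descriptors came from.
 *
 *	dma_addr_t dma;
 *	unsigned dma_sz;
 *	void *desc;
 *
 *	while ((dma = knav_queue_pop(q, &dma_sz))) {
 *		desc = knav_pool_desc_unmap(pool, dma, dma_sz);
 *		... process or free the descriptor ...
 *	}
 */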
 691
 692/* carve out descriptors and push into queue */
 693static void kdesc_fill_pool(struct knav_pool *pool)
 694{
 695	struct knav_region *region;
 696	int i;
 697
 698	region = pool->region;
 699	pool->desc_size = region->desc_size;
 700	for (i = 0; i < pool->num_desc; i++) {
 701		int index = pool->region_offset + i;
 702		dma_addr_t dma_addr;
 703		unsigned dma_size;
 704		dma_addr = region->dma_start + (region->desc_size * index);
 705		dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
 706		dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
 707					   DMA_TO_DEVICE);
 708		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
 709	}
 710}
 711
 712/* pop out descriptors and close the queue */
 713static void kdesc_empty_pool(struct knav_pool *pool)
 714{
 715	dma_addr_t dma;
 716	unsigned size;
 717	void *desc;
 718	int i;
 719
 720	if (!pool->queue)
 721		return;
 722
 723	for (i = 0;; i++) {
 724		dma = knav_queue_pop(pool->queue, &size);
 725		if (!dma)
 726			break;
 727		desc = knav_pool_desc_dma_to_virt(pool, dma);
 728		if (!desc) {
 729			dev_dbg(pool->kdev->dev,
 730				"couldn't unmap desc, continuing\n");
 731			continue;
 732		}
 733	}
 734	WARN_ON(i != pool->num_desc);
 735	knav_queue_close(pool->queue);
 736}
 737
 738
 739/* Get the DMA address of a descriptor */
 740dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
 741{
 742	struct knav_pool *pool = ph;
 743	return pool->region->dma_start + (virt - pool->region->virt_start);
 744}
 745EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);
 746
 747void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
 748{
 749	struct knav_pool *pool = ph;
 750	return pool->region->virt_start + (dma - pool->region->dma_start);
 751}
 752EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
 753
 754/**
 755 * knav_pool_create()	- Create a pool of descriptors
 756 * @name		- name to give the pool handle
  757 * @num_desc		- number of descriptors in the pool
 758 * @region_id		- QMSS region id from which the descriptors are to be
 759 *			  allocated.
 760 *
 761 * Returns a pool handle on success.
 762 * Use IS_ERR_OR_NULL() to identify error values on return.
 763 */
 764void *knav_pool_create(const char *name,
 765					int num_desc, int region_id)
 766{
 767	struct knav_region *reg_itr, *region = NULL;
 768	struct knav_pool *pool, *pi;
 769	struct list_head *node;
 770	unsigned last_offset;
 771	bool slot_found;
 772	int ret;
 773
 774	if (!kdev)
 775		return ERR_PTR(-EPROBE_DEFER);
 776
 777	if (!kdev->dev)
 778		return ERR_PTR(-ENODEV);
 779
 780	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
 781	if (!pool) {
 782		dev_err(kdev->dev, "out of memory allocating pool\n");
 783		return ERR_PTR(-ENOMEM);
 784	}
 785
 786	for_each_region(kdev, reg_itr) {
 787		if (reg_itr->id != region_id)
 788			continue;
 789		region = reg_itr;
 790		break;
 791	}
 792
 793	if (!region) {
 794		dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
 795		ret = -EINVAL;
 796		goto err;
 797	}
 798
 799	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
 800	if (IS_ERR_OR_NULL(pool->queue)) {
 801		dev_err(kdev->dev,
 802			"failed to open queue for pool(%s), error %ld\n",
 803			name, PTR_ERR(pool->queue));
 804		ret = PTR_ERR(pool->queue);
 805		goto err;
 806	}
 807
 808	pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
 809	pool->kdev = kdev;
 810	pool->dev = kdev->dev;
 811
 812	mutex_lock(&knav_dev_lock);
 813
 814	if (num_desc > (region->num_desc - region->used_desc)) {
 815		dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
 816			region_id, name);
 817		ret = -ENOMEM;
 818		goto err_unlock;
 819	}
 820
  821	/* Region maintains a sorted (by region offset) list of pools;
  822	 * use the first free slot which is large enough to accommodate
  823	 * the request.
 824	 */
 825	last_offset = 0;
 826	slot_found = false;
 827	node = &region->pools;
 828	list_for_each_entry(pi, &region->pools, region_inst) {
 829		if ((pi->region_offset - last_offset) >= num_desc) {
 830			slot_found = true;
 831			break;
 832		}
 833		last_offset = pi->region_offset + pi->num_desc;
 834	}
 835	node = &pi->region_inst;
 836
 837	if (slot_found) {
 838		pool->region = region;
 839		pool->num_desc = num_desc;
 840		pool->region_offset = last_offset;
 841		region->used_desc += num_desc;
 842		list_add_tail(&pool->list, &kdev->pools);
 843		list_add_tail(&pool->region_inst, node);
 844	} else {
 845		dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
 846			name, region_id);
 847		ret = -ENOMEM;
 848		goto err_unlock;
 849	}
 850
 851	mutex_unlock(&knav_dev_lock);
 852	kdesc_fill_pool(pool);
 853	return pool;
 854
 855err_unlock:
 856	mutex_unlock(&knav_dev_lock);
 857err:
 858	kfree(pool->name);
 859	devm_kfree(kdev->dev, pool);
 860	return ERR_PTR(ret);
 861}
 862EXPORT_SYMBOL_GPL(knav_pool_create);
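
/*
 * Usage sketch (editor's illustration, not part of this driver): carve a
 * pool of descriptors out of a descriptor region. The pool name and the
 * region id 12 are arbitrary examples; the region itself comes from the
 * "descriptor-regions" device tree data parsed below.
 *
 *	void *pool;
 *
 *	pool = knav_pool_create("foo-rx-pool", 1024, 12);
 *	if (IS_ERR_OR_NULL(pool))
 *		return PTR_ERR(pool);
 *
 *	... knav_pool_desc_get()/knav_pool_desc_put() as needed ...
 *
 *	knav_pool_destroy(pool);
 */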
 863
 864/**
 865 * knav_pool_destroy()	- Free a pool of descriptors
  866 * @ph			- pool handle
 867 */
 868void knav_pool_destroy(void *ph)
 869{
 870	struct knav_pool *pool = ph;
 871
 872	if (!pool)
 873		return;
 874
 875	if (!pool->region)
 876		return;
 877
 878	kdesc_empty_pool(pool);
 879	mutex_lock(&knav_dev_lock);
 880
 881	pool->region->used_desc -= pool->num_desc;
 882	list_del(&pool->region_inst);
 883	list_del(&pool->list);
 884
 885	mutex_unlock(&knav_dev_lock);
 886	kfree(pool->name);
 887	devm_kfree(kdev->dev, pool);
 888}
 889EXPORT_SYMBOL_GPL(knav_pool_destroy);
 890
 891
 892/**
 893 * knav_pool_desc_get()	- Get a descriptor from the pool
  894 * @ph				- pool handle
 895 *
 896 * Returns descriptor from the pool.
 897 */
 898void *knav_pool_desc_get(void *ph)
 899{
 900	struct knav_pool *pool = ph;
 901	dma_addr_t dma;
 902	unsigned size;
 903	void *data;
 904
 905	dma = knav_queue_pop(pool->queue, &size);
 906	if (unlikely(!dma))
 907		return ERR_PTR(-ENOMEM);
 908	data = knav_pool_desc_dma_to_virt(pool, dma);
 909	return data;
 910}
 911EXPORT_SYMBOL_GPL(knav_pool_desc_get);
 912
 913/**
 914 * knav_pool_desc_put()	- return a descriptor to the pool
  915 * @ph				- pool handle
 * @desc			- descriptor to return to the pool
 916 */
 917void knav_pool_desc_put(void *ph, void *desc)
 918{
 919	struct knav_pool *pool = ph;
 920	dma_addr_t dma;
 921	dma = knav_pool_desc_virt_to_dma(pool, desc);
 922	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
 923}
 924EXPORT_SYMBOL_GPL(knav_pool_desc_put);
 925
 926/**
 927 * knav_pool_desc_map()	- Map descriptor for DMA transfer
  928 * @ph				- pool handle
 929 * @desc			- address of descriptor to map
 930 * @size			- size of descriptor to map
 931 * @dma				- DMA address return pointer
  932 * @dma_sz			- adjusted size return pointer
 933 *
 934 * Returns 0 on success, errno otherwise.
 935 */
 936int knav_pool_desc_map(void *ph, void *desc, unsigned size,
 937					dma_addr_t *dma, unsigned *dma_sz)
 938{
 939	struct knav_pool *pool = ph;
 940	*dma = knav_pool_desc_virt_to_dma(pool, desc);
 941	size = min(size, pool->region->desc_size);
 942	size = ALIGN(size, SMP_CACHE_BYTES);
 943	*dma_sz = size;
 944	dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);
 945
 946	/* Ensure the descriptor reaches to the memory */
  947	/* Ensure the descriptor reaches memory */
 948
 949	return 0;
 950}
 951EXPORT_SYMBOL_GPL(knav_pool_desc_map);
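
/*
 * Usage sketch (editor's illustration, not part of this driver): a typical
 * submit path takes a free descriptor from the pool, fills it in, maps it
 * for the device and pushes it to a hardware queue. struct foo_desc is a
 * hypothetical client-defined descriptor layout.
 *
 *	struct foo_desc *desc;
 *	dma_addr_t dma;
 *	unsigned dma_sz;
 *	int ret;
 *
 *	desc = knav_pool_desc_get(pool);
 *	if (IS_ERR_OR_NULL(desc))
 *		return -ENOMEM;
 *
 *	... fill in the descriptor fields ...
 *
 *	ret = knav_pool_desc_map(pool, desc, sizeof(*desc), &dma, &dma_sz);
 *	if (!ret)
 *		ret = knav_queue_push(q, dma, dma_sz, 0);
 */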
 952
 953/**
 954 * knav_pool_desc_unmap()	- Unmap descriptor after DMA transfer
  955 * @ph				- pool handle
 956 * @dma				- DMA address of descriptor to unmap
 957 * @dma_sz			- size of descriptor to unmap
 958 *
  959 * Returns descriptor address on success; use IS_ERR_OR_NULL() to identify
 960 * error values on return.
 961 */
 962void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
 963{
 964	struct knav_pool *pool = ph;
 965	unsigned desc_sz;
 966	void *desc;
 967
 968	desc_sz = min(dma_sz, pool->region->desc_size);
 969	desc = knav_pool_desc_dma_to_virt(pool, dma);
 970	dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
 971	prefetch(desc);
 972	return desc;
 973}
 974EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
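
/*
 * Usage sketch (editor's illustration, not part of this driver): the
 * matching completion path pops a descriptor address from a queue, unmaps
 * it back to a CPU pointer, processes it and returns it to the pool.
 *
 *	dma = knav_queue_pop(compl_q, &dma_sz);
 *	if (!dma)
 *		return;
 *	desc = knav_pool_desc_unmap(pool, dma, dma_sz);
 *	... process the completed descriptor ...
 *	knav_pool_desc_put(pool, desc);
 */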
 975
 976/**
 977 * knav_pool_count()	- Get the number of descriptors in pool.
  978 * @ph			- pool handle
 979 * Returns number of elements in the pool.
 980 */
 981int knav_pool_count(void *ph)
 982{
 983	struct knav_pool *pool = ph;
 984	return knav_queue_get_count(pool->queue);
 985}
 986EXPORT_SYMBOL_GPL(knav_pool_count);
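
/*
 * Usage sketch (editor's illustration, not part of this driver): since the
 * free descriptors of a pool sit on a hardware queue, knav_pool_count()
 * can be used to throttle submission when descriptors run low. The
 * low-water mark of 16 is an arbitrary example.
 *
 *	if (knav_pool_count(pool) < 16)
 *		... hold off new submissions until descriptors are returned ...
 */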
 987
 988static void knav_queue_setup_region(struct knav_device *kdev,
 989					struct knav_region *region)
 990{
 991	unsigned hw_num_desc, hw_desc_size, size;
 992	struct knav_reg_region __iomem  *regs;
 993	struct knav_qmgr_info *qmgr;
 994	struct knav_pool *pool;
 995	int id = region->id;
 996	struct page *page;
 997
 998	/* unused region? */
 999	if (!region->num_desc) {
1000		dev_warn(kdev->dev, "unused region %s\n", region->name);
1001		return;
1002	}
1003
1004	/* get hardware descriptor value */
1005	hw_num_desc = ilog2(region->num_desc - 1) + 1;
1006
1007	/* did we force fit ourselves into nothingness? */
1008	if (region->num_desc < 32) {
1009		region->num_desc = 0;
1010		dev_warn(kdev->dev, "too few descriptors in region %s\n",
1011			 region->name);
1012		return;
1013	}
1014
1015	size = region->num_desc * region->desc_size;
1016	region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
1017						GFP_DMA32);
1018	if (!region->virt_start) {
1019		region->num_desc = 0;
1020		dev_err(kdev->dev, "memory alloc failed for region %s\n",
1021			region->name);
1022		return;
1023	}
1024	region->virt_end = region->virt_start + size;
1025	page = virt_to_page(region->virt_start);
1026
1027	region->dma_start = dma_map_page(kdev->dev, page, 0, size,
1028					 DMA_BIDIRECTIONAL);
1029	if (dma_mapping_error(kdev->dev, region->dma_start)) {
1030		dev_err(kdev->dev, "dma map failed for region %s\n",
1031			region->name);
1032		goto fail;
1033	}
1034	region->dma_end = region->dma_start + size;
1035
1036	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
1037	if (!pool) {
1038		dev_err(kdev->dev, "out of memory allocating dummy pool\n");
1039		goto fail;
1040	}
1041	pool->num_desc = 0;
1042	pool->region_offset = region->num_desc;
1043	list_add(&pool->region_inst, &region->pools);
1044
1045	dev_dbg(kdev->dev,
1046		"region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
1047		region->name, id, region->desc_size, region->num_desc,
1048		region->link_index, &region->dma_start, &region->dma_end,
1049		region->virt_start, region->virt_end);
1050
1051	hw_desc_size = (region->desc_size / 16) - 1;
1052	hw_num_desc -= 5;
1053
1054	for_each_qmgr(kdev, qmgr) {
1055		regs = qmgr->reg_region + id;
1056		writel_relaxed((u32)region->dma_start, &regs->base);
1057		writel_relaxed(region->link_index, &regs->start_index);
1058		writel_relaxed(hw_desc_size << 16 | hw_num_desc,
1059			       &regs->size_count);
1060	}
1061	return;
1062
1063fail:
1064	if (region->dma_start)
1065		dma_unmap_page(kdev->dev, region->dma_start, size,
1066				DMA_BIDIRECTIONAL);
1067	if (region->virt_start)
1068		free_pages_exact(region->virt_start, size);
1069	region->num_desc = 0;
1070	return;
1071}
1072
1073static const char *knav_queue_find_name(struct device_node *node)
1074{
1075	const char *name;
1076
1077	if (of_property_read_string(node, "label", &name) < 0)
1078		name = node->name;
1079	if (!name)
1080		name = "unknown";
1081	return name;
1082}
1083
1084static int knav_queue_setup_regions(struct knav_device *kdev,
1085					struct device_node *regions)
1086{
1087	struct device *dev = kdev->dev;
1088	struct knav_region *region;
1089	struct device_node *child;
1090	u32 temp[2];
1091	int ret;
1092
1093	for_each_child_of_node(regions, child) {
1094		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
1095		if (!region) {
1096			dev_err(dev, "out of memory allocating region\n");
1097			return -ENOMEM;
1098		}
1099
1100		region->name = knav_queue_find_name(child);
1101		of_property_read_u32(child, "id", &region->id);
1102		ret = of_property_read_u32_array(child, "region-spec", temp, 2);
1103		if (!ret) {
1104			region->num_desc  = temp[0];
1105			region->desc_size = temp[1];
1106		} else {
1107			dev_err(dev, "invalid region info %s\n", region->name);
1108			devm_kfree(dev, region);
1109			continue;
1110		}
1111
1112		if (!of_get_property(child, "link-index", NULL)) {
1113			dev_err(dev, "No link info for %s\n", region->name);
1114			devm_kfree(dev, region);
1115			continue;
1116		}
1117		ret = of_property_read_u32(child, "link-index",
1118					   &region->link_index);
1119		if (ret) {
1120			dev_err(dev, "link index not found for %s\n",
1121				region->name);
1122			devm_kfree(dev, region);
1123			continue;
1124		}
1125
1126		INIT_LIST_HEAD(&region->pools);
1127		list_add_tail(&region->list, &kdev->regions);
1128	}
1129	if (list_empty(&kdev->regions)) {
1130		dev_err(dev, "no valid region information found\n");
1131		return -ENODEV;
1132	}
1133
1134	/* Next, we run through the regions and set things up */
1135	for_each_region(kdev, region)
1136		knav_queue_setup_region(kdev, region);
1137
1138	return 0;
1139}
1140
1141static int knav_get_link_ram(struct knav_device *kdev,
1142				       const char *name,
1143				       struct knav_link_ram_block *block)
1144{
1145	struct platform_device *pdev = to_platform_device(kdev->dev);
1146	struct device_node *node = pdev->dev.of_node;
1147	u32 temp[2];
1148
1149	/*
1150	 * Note: link ram resources are specified in "entry" sized units. In
 1151	 * reality, although entries are ~40 bits in hardware, we treat them as
1152	 * 64-bit entities here.
1153	 *
1154	 * For example, to specify the internal link ram for Keystone-I class
1155	 * devices, we would set the linkram0 resource to 0x80000-0x83fff.
1156	 *
1157	 * This gets a bit weird when other link rams are used.  For example,
1158	 * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
1159	 * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
1160	 * which accounts for 64-bits per entry, for 16K entries.
1161	 */
 1162	if (!of_property_read_u32_array(node, name, temp, 2)) {
1163		if (temp[0]) {
1164			/*
 1165			 * queue_base specified => using internal or on-chip
 1166			 * link ram. WARNING: we do not "reserve" this block.
1167			 */
1168			block->dma = (dma_addr_t)temp[0];
1169			block->virt = NULL;
1170			block->size = temp[1];
1171		} else {
1172			block->size = temp[1];
 1173			/* queue_base not specified => allocate requested size */
1174			block->virt = dmam_alloc_coherent(kdev->dev,
1175						  8 * block->size, &block->dma,
1176						  GFP_KERNEL);
1177			if (!block->virt) {
1178				dev_err(kdev->dev, "failed to alloc linkram\n");
1179				return -ENOMEM;
1180			}
1181		}
1182	} else {
1183		return -ENODEV;
1184	}
1185	return 0;
1186}
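
/*
 * Editor's note: in the device tree this corresponds to the "linkram0" and
 * "linkram1" properties, each a <base size> pair where size is in link ram
 * entries and a base of 0 asks the driver to allocate the block itself. A
 * hypothetical node might contain, for example:
 *
 *	linkram0 = <0x00100000 0x8000>;
 *	linkram1 = <0x0 0x10000>;
 */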
1187
1188static int knav_queue_setup_link_ram(struct knav_device *kdev)
1189{
1190	struct knav_link_ram_block *block;
1191	struct knav_qmgr_info *qmgr;
1192
1193	for_each_qmgr(kdev, qmgr) {
1194		block = &kdev->link_rams[0];
1195		dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
1196			&block->dma, block->virt, block->size);
1197		writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
1198		if (kdev->version == QMSS_66AK2G)
1199			writel_relaxed(block->size,
1200				       &qmgr->reg_config->link_ram_size0);
1201		else
1202			writel_relaxed(block->size - 1,
1203				       &qmgr->reg_config->link_ram_size0);
1204		block++;
1205		if (!block->size)
1206			continue;
1207
1208		dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
1209			&block->dma, block->virt, block->size);
1210		writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
1211	}
1212
1213	return 0;
1214}
1215
1216static int knav_setup_queue_range(struct knav_device *kdev,
1217					struct device_node *node)
1218{
1219	struct device *dev = kdev->dev;
1220	struct knav_range_info *range;
1221	struct knav_qmgr_info *qmgr;
1222	u32 temp[2], start, end, id, index;
1223	int ret, i;
1224
1225	range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
1226	if (!range) {
1227		dev_err(dev, "out of memory allocating range\n");
1228		return -ENOMEM;
1229	}
1230
1231	range->kdev = kdev;
1232	range->name = knav_queue_find_name(node);
1233	ret = of_property_read_u32_array(node, "qrange", temp, 2);
1234	if (!ret) {
1235		range->queue_base = temp[0] - kdev->base_id;
1236		range->num_queues = temp[1];
1237	} else {
1238		dev_err(dev, "invalid queue range %s\n", range->name);
1239		devm_kfree(dev, range);
1240		return -EINVAL;
1241	}
1242
1243	for (i = 0; i < RANGE_MAX_IRQS; i++) {
1244		struct of_phandle_args oirq;
1245
1246		if (of_irq_parse_one(node, i, &oirq))
1247			break;
1248
1249		range->irqs[i].irq = irq_create_of_mapping(&oirq);
1250		if (range->irqs[i].irq == IRQ_NONE)
1251			break;
1252
1253		range->num_irqs++;
1254
1255		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
1256			unsigned long mask;
1257			int bit;
1258
1259			range->irqs[i].cpu_mask = devm_kzalloc(dev,
1260							       cpumask_size(), GFP_KERNEL);
1261			if (!range->irqs[i].cpu_mask)
1262				return -ENOMEM;
1263
1264			mask = (oirq.args[2] & 0x0000ff00) >> 8;
1265			for_each_set_bit(bit, &mask, BITS_PER_LONG)
1266				cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
1267		}
1268	}
1269
1270	range->num_irqs = min(range->num_irqs, range->num_queues);
1271	if (range->num_irqs)
1272		range->flags |= RANGE_HAS_IRQ;
1273
1274	if (of_get_property(node, "qalloc-by-id", NULL))
1275		range->flags |= RANGE_RESERVED;
1276
1277	if (of_get_property(node, "accumulator", NULL)) {
1278		ret = knav_init_acc_range(kdev, node, range);
1279		if (ret < 0) {
1280			devm_kfree(dev, range);
1281			return ret;
1282		}
1283	} else {
1284		range->ops = &knav_gp_range_ops;
1285	}
1286
1287	/* set threshold to 1, and flush out the queues */
1288	for_each_qmgr(kdev, qmgr) {
1289		start = max(qmgr->start_queue, range->queue_base);
1290		end   = min(qmgr->start_queue + qmgr->num_queues,
1291			    range->queue_base + range->num_queues);
1292		for (id = start; id < end; id++) {
1293			index = id - qmgr->start_queue;
1294			writel_relaxed(THRESH_GTE | 1,
1295				       &qmgr->reg_peek[index].ptr_size_thresh);
1296			writel_relaxed(0,
1297				       &qmgr->reg_push[index].ptr_size_thresh);
1298		}
1299	}
1300
1301	list_add_tail(&range->list, &kdev->queue_ranges);
1302	dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
1303		range->name, range->queue_base,
1304		range->queue_base + range->num_queues - 1,
1305		range->num_irqs,
1306		(range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
1307		(range->flags & RANGE_RESERVED) ? ", reserved" : "",
1308		(range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
1309	kdev->num_queues_in_use += range->num_queues;
1310	return 0;
1311}
1312
1313static int knav_setup_queue_pools(struct knav_device *kdev,
1314				   struct device_node *queue_pools)
1315{
1316	struct device_node *type, *range;
1317	int ret;
1318
1319	for_each_child_of_node(queue_pools, type) {
1320		for_each_child_of_node(type, range) {
1321			ret = knav_setup_queue_range(kdev, range);
1322			/* return value ignored, we init the rest... */
1323		}
1324	}
1325
1326	/* ... and barf if they all failed! */
1327	if (list_empty(&kdev->queue_ranges)) {
1328		dev_err(kdev->dev, "no valid queue range found\n");
1329		return -ENODEV;
1330	}
1331	return 0;
1332}
1333
1334static void knav_free_queue_range(struct knav_device *kdev,
1335				  struct knav_range_info *range)
1336{
1337	if (range->ops && range->ops->free_range)
1338		range->ops->free_range(range);
1339	list_del(&range->list);
1340	devm_kfree(kdev->dev, range);
1341}
1342
1343static void knav_free_queue_ranges(struct knav_device *kdev)
1344{
1345	struct knav_range_info *range;
1346
1347	for (;;) {
1348		range = first_queue_range(kdev);
1349		if (!range)
1350			break;
1351		knav_free_queue_range(kdev, range);
1352	}
1353}
1354
1355static void knav_queue_free_regions(struct knav_device *kdev)
1356{
1357	struct knav_region *region;
1358	struct knav_pool *pool, *tmp;
1359	unsigned size;
1360
1361	for (;;) {
1362		region = first_region(kdev);
1363		if (!region)
1364			break;
1365		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
1366			knav_pool_destroy(pool);
1367
1368		size = region->virt_end - region->virt_start;
1369		if (size)
1370			free_pages_exact(region->virt_start, size);
1371		list_del(&region->list);
1372		devm_kfree(kdev->dev, region);
1373	}
1374}
1375
1376static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
1377					struct device_node *node, int index)
1378{
1379	struct resource res;
1380	void __iomem *regs;
1381	int ret;
1382
1383	ret = of_address_to_resource(node, index, &res);
1384	if (ret) {
1385		dev_err(kdev->dev, "Can't translate of node(%pOFn) address for index(%d)\n",
1386			node, index);
1387		return ERR_PTR(ret);
1388	}
1389
1390	regs = devm_ioremap_resource(kdev->dev, &res);
1391	if (IS_ERR(regs))
1392		dev_err(kdev->dev, "Failed to map register base for index(%d) node(%pOFn)\n",
1393			index, node);
1394	return regs;
1395}
1396
1397static int knav_queue_init_qmgrs(struct knav_device *kdev,
1398					struct device_node *qmgrs)
1399{
1400	struct device *dev = kdev->dev;
1401	struct knav_qmgr_info *qmgr;
1402	struct device_node *child;
1403	u32 temp[2];
1404	int ret;
1405
1406	for_each_child_of_node(qmgrs, child) {
1407		qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
1408		if (!qmgr) {
1409			dev_err(dev, "out of memory allocating qmgr\n");
1410			return -ENOMEM;
1411		}
1412
1413		ret = of_property_read_u32_array(child, "managed-queues",
1414						 temp, 2);
1415		if (!ret) {
1416			qmgr->start_queue = temp[0];
1417			qmgr->num_queues = temp[1];
1418		} else {
1419			dev_err(dev, "invalid qmgr queue range\n");
1420			devm_kfree(dev, qmgr);
1421			continue;
1422		}
1423
1424		dev_info(dev, "qmgr start queue %d, number of queues %d\n",
1425			 qmgr->start_queue, qmgr->num_queues);
1426
1427		qmgr->reg_peek =
1428			knav_queue_map_reg(kdev, child,
1429					   KNAV_QUEUE_PEEK_REG_INDEX);
1430
1431		if (kdev->version == QMSS) {
1432			qmgr->reg_status =
1433				knav_queue_map_reg(kdev, child,
1434						   KNAV_QUEUE_STATUS_REG_INDEX);
1435		}
1436
1437		qmgr->reg_config =
1438			knav_queue_map_reg(kdev, child,
1439					   (kdev->version == QMSS_66AK2G) ?
1440					   KNAV_L_QUEUE_CONFIG_REG_INDEX :
1441					   KNAV_QUEUE_CONFIG_REG_INDEX);
1442		qmgr->reg_region =
1443			knav_queue_map_reg(kdev, child,
1444					   (kdev->version == QMSS_66AK2G) ?
1445					   KNAV_L_QUEUE_REGION_REG_INDEX :
1446					   KNAV_QUEUE_REGION_REG_INDEX);
1447
1448		qmgr->reg_push =
1449			knav_queue_map_reg(kdev, child,
1450					   (kdev->version == QMSS_66AK2G) ?
1451					    KNAV_L_QUEUE_PUSH_REG_INDEX :
1452					    KNAV_QUEUE_PUSH_REG_INDEX);
1453
1454		if (kdev->version == QMSS) {
1455			qmgr->reg_pop =
1456				knav_queue_map_reg(kdev, child,
1457						   KNAV_QUEUE_POP_REG_INDEX);
1458		}
1459
1460		if (IS_ERR(qmgr->reg_peek) ||
1461		    ((kdev->version == QMSS) &&
1462		    (IS_ERR(qmgr->reg_status) || IS_ERR(qmgr->reg_pop))) ||
1463		    IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
1464		    IS_ERR(qmgr->reg_push)) {
1465			dev_err(dev, "failed to map qmgr regs\n");
1466			if (kdev->version == QMSS) {
1467				if (!IS_ERR(qmgr->reg_status))
1468					devm_iounmap(dev, qmgr->reg_status);
1469				if (!IS_ERR(qmgr->reg_pop))
1470					devm_iounmap(dev, qmgr->reg_pop);
1471			}
1472			if (!IS_ERR(qmgr->reg_peek))
1473				devm_iounmap(dev, qmgr->reg_peek);
1474			if (!IS_ERR(qmgr->reg_config))
1475				devm_iounmap(dev, qmgr->reg_config);
1476			if (!IS_ERR(qmgr->reg_region))
1477				devm_iounmap(dev, qmgr->reg_region);
1478			if (!IS_ERR(qmgr->reg_push))
1479				devm_iounmap(dev, qmgr->reg_push);
1480			devm_kfree(dev, qmgr);
1481			continue;
1482		}
1483
1484		/* Use same push register for pop as well */
1485		if (kdev->version == QMSS_66AK2G)
1486			qmgr->reg_pop = qmgr->reg_push;
1487
1488		list_add_tail(&qmgr->list, &kdev->qmgrs);
1489		dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
1490			 qmgr->start_queue, qmgr->num_queues,
1491			 qmgr->reg_peek, qmgr->reg_status,
1492			 qmgr->reg_config, qmgr->reg_region,
1493			 qmgr->reg_push, qmgr->reg_pop);
1494	}
1495	return 0;
1496}
1497
1498static int knav_queue_init_pdsps(struct knav_device *kdev,
1499					struct device_node *pdsps)
1500{
1501	struct device *dev = kdev->dev;
1502	struct knav_pdsp_info *pdsp;
1503	struct device_node *child;
1504
1505	for_each_child_of_node(pdsps, child) {
1506		pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
1507		if (!pdsp) {
1508			dev_err(dev, "out of memory allocating pdsp\n");
1509			return -ENOMEM;
1510		}
1511		pdsp->name = knav_queue_find_name(child);
1512		pdsp->iram =
1513			knav_queue_map_reg(kdev, child,
1514					   KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
1515		pdsp->regs =
1516			knav_queue_map_reg(kdev, child,
1517					   KNAV_QUEUE_PDSP_REGS_REG_INDEX);
1518		pdsp->intd =
1519			knav_queue_map_reg(kdev, child,
1520					   KNAV_QUEUE_PDSP_INTD_REG_INDEX);
1521		pdsp->command =
1522			knav_queue_map_reg(kdev, child,
1523					   KNAV_QUEUE_PDSP_CMD_REG_INDEX);
1524
1525		if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
1526		    IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
1527			dev_err(dev, "failed to map pdsp %s regs\n",
1528				pdsp->name);
1529			if (!IS_ERR(pdsp->command))
1530				devm_iounmap(dev, pdsp->command);
1531			if (!IS_ERR(pdsp->iram))
1532				devm_iounmap(dev, pdsp->iram);
1533			if (!IS_ERR(pdsp->regs))
1534				devm_iounmap(dev, pdsp->regs);
1535			if (!IS_ERR(pdsp->intd))
1536				devm_iounmap(dev, pdsp->intd);
1537			devm_kfree(dev, pdsp);
1538			continue;
1539		}
1540		of_property_read_u32(child, "id", &pdsp->id);
1541		list_add_tail(&pdsp->list, &kdev->pdsps);
1542		dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
1543			pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
1544			pdsp->intd);
1545	}
1546	return 0;
1547}
1548
1549static int knav_queue_stop_pdsp(struct knav_device *kdev,
1550			  struct knav_pdsp_info *pdsp)
1551{
1552	u32 val, timeout = 1000;
1553	int ret;
1554
1555	val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
1556	writel_relaxed(val, &pdsp->regs->control);
1557	ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
1558					PDSP_CTRL_RUNNING);
1559	if (ret < 0) {
1560		dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
1561		return ret;
1562	}
1563	pdsp->loaded = false;
1564	pdsp->started = false;
1565	return 0;
1566}
1567
1568static int knav_queue_load_pdsp(struct knav_device *kdev,
1569			  struct knav_pdsp_info *pdsp)
1570{
1571	int i, ret, fwlen;
1572	const struct firmware *fw;
1573	bool found = false;
1574	u32 *fwdata;
1575
1576	for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
1577		if (knav_acc_firmwares[i]) {
1578			ret = request_firmware_direct(&fw,
1579						      knav_acc_firmwares[i],
1580						      kdev->dev);
1581			if (!ret) {
1582				found = true;
1583				break;
1584			}
1585		}
1586	}
1587
1588	if (!found) {
1589		dev_err(kdev->dev, "failed to get firmware for pdsp\n");
1590		return -ENODEV;
1591	}
1592
1593	dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
1594		 knav_acc_firmwares[i]);
1595
1596	writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
1597	/* download the firmware */
1598	fwdata = (u32 *)fw->data;
1599	fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
1600	for (i = 0; i < fwlen; i++)
1601		writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);
1602
1603	release_firmware(fw);
1604	return 0;
1605}
1606
1607static int knav_queue_start_pdsp(struct knav_device *kdev,
1608			   struct knav_pdsp_info *pdsp)
1609{
1610	u32 val, timeout = 1000;
1611	int ret;
1612
1613	/* write a command for sync */
1614	writel_relaxed(0xffffffff, pdsp->command);
1615	while (readl_relaxed(pdsp->command) != 0xffffffff)
1616		cpu_relax();
1617
1618	/* soft reset the PDSP */
1619	val  = readl_relaxed(&pdsp->regs->control);
1620	val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
1621	writel_relaxed(val, &pdsp->regs->control);
1622
1623	/* enable pdsp */
1624	val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
1625	writel_relaxed(val, &pdsp->regs->control);
1626
1627	/* wait for command register to clear */
1628	ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
1629	if (ret < 0) {
1630		dev_err(kdev->dev,
1631			"timed out on pdsp %s command register wait\n",
1632			pdsp->name);
1633		return ret;
1634	}
1635	return 0;
1636}
1637
1638static void knav_queue_stop_pdsps(struct knav_device *kdev)
1639{
1640	struct knav_pdsp_info *pdsp;
1641
1642	/* disable all pdsps */
1643	for_each_pdsp(kdev, pdsp)
1644		knav_queue_stop_pdsp(kdev, pdsp);
1645}
1646
1647static int knav_queue_start_pdsps(struct knav_device *kdev)
1648{
1649	struct knav_pdsp_info *pdsp;
1650	int ret;
1651
1652	knav_queue_stop_pdsps(kdev);
 1653	/* Now load them all. We return success even if a pdsp is not
 1654	 * loaded, as acc channels are optional and depend on firmware
 1655	 * availability in the system. We set the loaded and started
 1656	 * flags here; the acc range init code checks them and only sets
 1657	 * up a range if its pdsp has started.
1658	 */
1659	for_each_pdsp(kdev, pdsp) {
1660		ret = knav_queue_load_pdsp(kdev, pdsp);
1661		if (!ret)
1662			pdsp->loaded = true;
1663	}
1664
1665	for_each_pdsp(kdev, pdsp) {
1666		if (pdsp->loaded) {
1667			ret = knav_queue_start_pdsp(kdev, pdsp);
1668			if (!ret)
1669				pdsp->started = true;
1670		}
1671	}
1672	return 0;
1673}
1674
1675static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
1676{
1677	struct knav_qmgr_info *qmgr;
1678
1679	for_each_qmgr(kdev, qmgr) {
1680		if ((id >= qmgr->start_queue) &&
1681		    (id < qmgr->start_queue + qmgr->num_queues))
1682			return qmgr;
1683	}
1684	return NULL;
1685}
1686
1687static int knav_queue_init_queue(struct knav_device *kdev,
1688					struct knav_range_info *range,
1689					struct knav_queue_inst *inst,
1690					unsigned id)
1691{
1692	char irq_name[KNAV_NAME_SIZE];
1693	inst->qmgr = knav_find_qmgr(id);
1694	if (!inst->qmgr)
1695		return -1;
1696
1697	INIT_LIST_HEAD(&inst->handles);
1698	inst->kdev = kdev;
1699	inst->range = range;
1700	inst->irq_num = -1;
1701	inst->id = id;
1702	scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
1703	inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);
1704
1705	if (range->ops && range->ops->init_queue)
1706		return range->ops->init_queue(range, inst);
1707	else
1708		return 0;
1709}
1710
1711static int knav_queue_init_queues(struct knav_device *kdev)
1712{
1713	struct knav_range_info *range;
1714	int size, id, base_idx;
1715	int idx = 0, ret = 0;
1716
1717	/* how much do we need for instance data? */
1718	size = sizeof(struct knav_queue_inst);
1719
1720	/* round this up to a power of 2, keep the index to instance
1721	 * arithmetic fast.
 1722	 */
1723	kdev->inst_shift = order_base_2(size);
1724	size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
1725	kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
1726	if (!kdev->instances)
1727		return -ENOMEM;
1728
1729	for_each_queue_range(kdev, range) {
1730		if (range->ops && range->ops->init_range)
1731			range->ops->init_range(range);
1732		base_idx = idx;
1733		for (id = range->queue_base;
1734		     id < range->queue_base + range->num_queues; id++, idx++) {
1735			ret = knav_queue_init_queue(kdev, range,
1736					knav_queue_idx_to_inst(kdev, idx), id);
1737			if (ret < 0)
1738				return ret;
1739		}
1740		range->queue_base_inst =
1741			knav_queue_idx_to_inst(kdev, base_idx);
1742	}
1743	return 0;
1744}
1745
1746/* Match table for of_platform binding */
1747static const struct of_device_id keystone_qmss_of_match[] = {
1748	{
1749		.compatible = "ti,keystone-navigator-qmss",
1750	},
1751	{
1752		.compatible = "ti,66ak2g-navss-qm",
1753		.data	= (void *)QMSS_66AK2G,
1754	},
1755	{},
1756};
1757MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);
1758
1759static int knav_queue_probe(struct platform_device *pdev)
1760{
1761	struct device_node *node = pdev->dev.of_node;
1762	struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
1763	const struct of_device_id *match;
1764	struct device *dev = &pdev->dev;
1765	u32 temp[2];
1766	int ret;
1767
1768	if (!node) {
1769		dev_err(dev, "device tree info unavailable\n");
1770		return -ENODEV;
1771	}
1772
1773	kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
1774	if (!kdev) {
1775		dev_err(dev, "memory allocation failed\n");
1776		return -ENOMEM;
1777	}
1778
1779	match = of_match_device(of_match_ptr(keystone_qmss_of_match), dev);
1780	if (match && match->data)
1781		kdev->version = QMSS_66AK2G;
1782
1783	platform_set_drvdata(pdev, kdev);
1784	kdev->dev = dev;
1785	INIT_LIST_HEAD(&kdev->queue_ranges);
1786	INIT_LIST_HEAD(&kdev->qmgrs);
1787	INIT_LIST_HEAD(&kdev->pools);
1788	INIT_LIST_HEAD(&kdev->regions);
1789	INIT_LIST_HEAD(&kdev->pdsps);
1790
1791	pm_runtime_enable(&pdev->dev);
1792	ret = pm_runtime_get_sync(&pdev->dev);
1793	if (ret < 0) {
1794		dev_err(dev, "Failed to enable QMSS\n");
1795		return ret;
1796	}
1797
1798	if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
1799		dev_err(dev, "queue-range not specified\n");
1800		ret = -ENODEV;
1801		goto err;
1802	}
1803	kdev->base_id    = temp[0];
1804	kdev->num_queues = temp[1];
1805
1806	/* Initialize queue managers using device tree configuration */
1807	qmgrs =  of_get_child_by_name(node, "qmgrs");
1808	if (!qmgrs) {
1809		dev_err(dev, "queue manager info not specified\n");
1810		ret = -ENODEV;
1811		goto err;
1812	}
1813	ret = knav_queue_init_qmgrs(kdev, qmgrs);
1814	of_node_put(qmgrs);
1815	if (ret)
1816		goto err;
1817
1818	/* get pdsp configuration values from device tree */
1819	pdsps =  of_get_child_by_name(node, "pdsps");
1820	if (pdsps) {
1821		ret = knav_queue_init_pdsps(kdev, pdsps);
1822		if (ret)
1823			goto err;
1824
1825		ret = knav_queue_start_pdsps(kdev);
1826		if (ret)
1827			goto err;
1828	}
1829	of_node_put(pdsps);
1830
1831	/* get usable queue range values from device tree */
1832	queue_pools = of_get_child_by_name(node, "queue-pools");
1833	if (!queue_pools) {
1834		dev_err(dev, "queue-pools not specified\n");
1835		ret = -ENODEV;
1836		goto err;
1837	}
1838	ret = knav_setup_queue_pools(kdev, queue_pools);
1839	of_node_put(queue_pools);
1840	if (ret)
1841		goto err;
1842
1843	ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
1844	if (ret) {
1845		dev_err(kdev->dev, "could not setup linking ram\n");
1846		goto err;
1847	}
1848
1849	ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
1850	if (ret) {
1851		/*
1852		 * nothing really, we have one linking ram already, so we just
1853		 * live within our means
1854		 */
1855	}
1856
1857	ret = knav_queue_setup_link_ram(kdev);
1858	if (ret)
1859		goto err;
1860
1861	regions =  of_get_child_by_name(node, "descriptor-regions");
1862	if (!regions) {
 1863		dev_err(dev, "descriptor-regions not specified\n");
		ret = -ENODEV;
 1864		goto err;
1865	}
1866	ret = knav_queue_setup_regions(kdev, regions);
1867	of_node_put(regions);
1868	if (ret)
1869		goto err;
1870
1871	ret = knav_queue_init_queues(kdev);
1872	if (ret < 0) {
1873		dev_err(dev, "hwqueue initialization failed\n");
1874		goto err;
1875	}
1876
1877	debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
1878			    &knav_queue_debug_ops);
1879	device_ready = true;
1880	return 0;
1881
1882err:
1883	knav_queue_stop_pdsps(kdev);
1884	knav_queue_free_regions(kdev);
1885	knav_free_queue_ranges(kdev);
1886	pm_runtime_put_sync(&pdev->dev);
1887	pm_runtime_disable(&pdev->dev);
1888	return ret;
1889}
1890
1891static int knav_queue_remove(struct platform_device *pdev)
1892{
1893	/* TODO: Free resources */
1894	pm_runtime_put_sync(&pdev->dev);
1895	pm_runtime_disable(&pdev->dev);
1896	return 0;
1897}
1898
1899static struct platform_driver keystone_qmss_driver = {
1900	.probe		= knav_queue_probe,
1901	.remove		= knav_queue_remove,
1902	.driver		= {
1903		.name	= "keystone-navigator-qmss",
1904		.of_match_table = keystone_qmss_of_match,
1905	},
1906};
1907module_platform_driver(keystone_qmss_driver);
1908
1909MODULE_LICENSE("GPL v2");
1910MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
1911MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
1912MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");