   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Keystone Queue Manager subsystem driver
   4 *
   5 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
   6 * Authors:	Sandeep Nair <sandeep_n@ti.com>
   7 *		Cyril Chemparathy <cyril@ti.com>
   8 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
   9 */
  10
  11#include <linux/debugfs.h>
  12#include <linux/dma-mapping.h>
  13#include <linux/firmware.h>
  14#include <linux/interrupt.h>
  15#include <linux/io.h>
  16#include <linux/module.h>
  17#include <linux/of.h>
  18#include <linux/of_address.h>
  19#include <linux/of_irq.h>
  20#include <linux/platform_device.h>
  21#include <linux/pm_runtime.h>
  22#include <linux/property.h>
  23#include <linux/slab.h>
  24#include <linux/soc/ti/knav_qmss.h>
  25
  26#include "knav_qmss.h"
  27
  28static struct knav_device *kdev;
  29static DEFINE_MUTEX(knav_dev_lock);
  30#define knav_dev_lock_held() \
  31	lockdep_is_held(&knav_dev_lock)
  32
  33/* Queue manager register indices in DTS */
  34#define KNAV_QUEUE_PEEK_REG_INDEX	0
  35#define KNAV_QUEUE_STATUS_REG_INDEX	1
  36#define KNAV_QUEUE_CONFIG_REG_INDEX	2
  37#define KNAV_QUEUE_REGION_REG_INDEX	3
  38#define KNAV_QUEUE_PUSH_REG_INDEX	4
  39#define KNAV_QUEUE_POP_REG_INDEX	5
  40
   41/* Queue manager register indices in DTS for QMSS in K2G NAVSS.
   42 * There are no status and vbusm push registers on this version
   43 * of QMSS. The push registers are the same as the pop registers,
   44 * so all indices above 1 are redefined.
   45 */
  46#define KNAV_L_QUEUE_CONFIG_REG_INDEX	1
  47#define KNAV_L_QUEUE_REGION_REG_INDEX	2
  48#define KNAV_L_QUEUE_PUSH_REG_INDEX	3
  49
  50/* PDSP register indices in DTS */
  51#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX	0
  52#define KNAV_QUEUE_PDSP_REGS_REG_INDEX	1
  53#define KNAV_QUEUE_PDSP_INTD_REG_INDEX	2
  54#define KNAV_QUEUE_PDSP_CMD_REG_INDEX	3
  55
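/*
 * Instances are laid out as a packed array with a power-of-2 stride of
 * (1 << kdev->inst_shift) bytes per instance (see order_base_2() in
 * knav_queue_init_queues()), so index-to-instance translation below is
 * just a shift and an add on the byte-addressed base pointer.
 */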
  56#define knav_queue_idx_to_inst(kdev, idx)			\
  57	(kdev->instances + (idx << kdev->inst_shift))
  58
  59#define for_each_handle_rcu(qh, inst)				\
  60	list_for_each_entry_rcu(qh, &inst->handles, list,	\
  61				knav_dev_lock_held())
  62
  63#define for_each_instance(idx, inst, kdev)		\
  64	for (idx = 0, inst = kdev->instances;		\
  65	     idx < (kdev)->num_queues_in_use;			\
  66	     idx++, inst = knav_queue_idx_to_inst(kdev, idx))
  67
   68/* All firmware file names end up here. List the firmware file names
   69 * below, newest first. The search starts at the beginning of the
   70 * array and stops at the first firmware file found.
   71 */
  72static const char * const knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};
  73
  74static bool device_ready;
  75bool knav_qmss_device_ready(void)
  76{
  77	return device_ready;
  78}
  79EXPORT_SYMBOL_GPL(knav_qmss_device_ready);
  80
  81/**
   82 * knav_queue_notify() - qmss queue notifier call
  83 *
  84 * @inst:		- qmss queue instance like accumulator
  85 */
  86void knav_queue_notify(struct knav_queue_inst *inst)
  87{
  88	struct knav_queue *qh;
  89
  90	if (!inst)
  91		return;
  92
  93	rcu_read_lock();
  94	for_each_handle_rcu(qh, inst) {
  95		if (atomic_read(&qh->notifier_enabled) <= 0)
  96			continue;
  97		if (WARN_ON(!qh->notifier_fn))
  98			continue;
  99		this_cpu_inc(qh->stats->notifies);
 100		qh->notifier_fn(qh->notifier_fn_arg);
 101	}
 102	rcu_read_unlock();
 103}
 104EXPORT_SYMBOL_GPL(knav_queue_notify);
 105
 106static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
 107{
 108	struct knav_queue_inst *inst = _instdata;
 109
 110	knav_queue_notify(inst);
 111	return IRQ_HANDLED;
 112}
 113
 114static int knav_queue_setup_irq(struct knav_range_info *range,
 115			  struct knav_queue_inst *inst)
 116{
 117	unsigned queue = inst->id - range->queue_base;
 118	int ret = 0, irq;
 119
 120	if (range->flags & RANGE_HAS_IRQ) {
 121		irq = range->irqs[queue].irq;
 122		ret = request_irq(irq, knav_queue_int_handler, IRQF_NO_AUTOEN,
 123				  inst->irq_name, inst);
 124		if (ret)
 125			return ret;
 126		if (range->irqs[queue].cpu_mask) {
 127			ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
 128			if (ret) {
 129				dev_warn(range->kdev->dev,
 130					 "Failed to set IRQ affinity\n");
 131				return ret;
 132			}
 133		}
 134	}
 135	return ret;
 136}
 137
 138static void knav_queue_free_irq(struct knav_queue_inst *inst)
 139{
 140	struct knav_range_info *range = inst->range;
 141	unsigned queue = inst->id - inst->range->queue_base;
 142	int irq;
 143
 144	if (range->flags & RANGE_HAS_IRQ) {
 145		irq = range->irqs[queue].irq;
 146		irq_set_affinity_hint(irq, NULL);
 147		free_irq(irq, inst);
 148	}
 149}
 150
 151static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
 152{
 153	return !list_empty(&inst->handles);
 154}
 155
 156static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
 157{
 158	return inst->range->flags & RANGE_RESERVED;
 159}
 160
 161static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
 162{
 163	struct knav_queue *tmp;
 164
 165	rcu_read_lock();
 166	for_each_handle_rcu(tmp, inst) {
 167		if (tmp->flags & KNAV_QUEUE_SHARED) {
 168			rcu_read_unlock();
 169			return true;
 170		}
 171	}
 172	rcu_read_unlock();
 173	return false;
 174}
 175
 176static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
 177						unsigned type)
 178{
 179	if ((type == KNAV_QUEUE_QPEND) &&
 180	    (inst->range->flags & RANGE_HAS_IRQ)) {
 181		return true;
 182	} else if ((type == KNAV_QUEUE_ACC) &&
 183		(inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
 184		return true;
 185	} else if ((type == KNAV_QUEUE_GP) &&
 186		!(inst->range->flags &
 187			(RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
 188		return true;
 189	}
 190	return false;
 191}
 192
 193static inline struct knav_queue_inst *
 194knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
 195{
 196	struct knav_queue_inst *inst;
 197	int idx;
 198
 199	for_each_instance(idx, inst, kdev) {
 200		if (inst->id == id)
 201			return inst;
 202	}
 203	return NULL;
 204}
 205
 206static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
 207{
 208	if (kdev->base_id <= id &&
 209	    kdev->base_id + kdev->num_queues > id) {
 210		id -= kdev->base_id;
 211		return knav_queue_match_id_to_inst(kdev, id);
 212	}
 213	return NULL;
 214}
 215
 216static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
 217				      const char *name, unsigned flags)
 218{
 219	struct knav_queue *qh;
 220	unsigned id;
 221	int ret = 0;
 222
 223	qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
 224	if (!qh)
 225		return ERR_PTR(-ENOMEM);
 226
 227	qh->stats = alloc_percpu(struct knav_queue_stats);
 228	if (!qh->stats) {
 229		ret = -ENOMEM;
 230		goto err;
 231	}
 232
 233	qh->flags = flags;
 234	qh->inst = inst;
 235	id = inst->id - inst->qmgr->start_queue;
 236	qh->reg_push = &inst->qmgr->reg_push[id];
 237	qh->reg_pop = &inst->qmgr->reg_pop[id];
 238	qh->reg_peek = &inst->qmgr->reg_peek[id];
 239
 240	/* first opener? */
 241	if (!knav_queue_is_busy(inst)) {
 242		struct knav_range_info *range = inst->range;
 243
 244		inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
 245		if (range->ops && range->ops->open_queue)
 246			ret = range->ops->open_queue(range, inst, flags);
 247
 248		if (ret)
 249			goto err;
 250	}
 251	list_add_tail_rcu(&qh->list, &inst->handles);
 252	return qh;
 253
 254err:
 255	if (qh->stats)
 256		free_percpu(qh->stats);
 257	devm_kfree(inst->kdev->dev, qh);
 258	return ERR_PTR(ret);
 259}
 260
 261static struct knav_queue *
 262knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
 263{
 264	struct knav_queue_inst *inst;
 265	struct knav_queue *qh;
 266
 267	mutex_lock(&knav_dev_lock);
 268
 269	qh = ERR_PTR(-ENODEV);
 270	inst = knav_queue_find_by_id(id);
 271	if (!inst)
 272		goto unlock_ret;
 273
 274	qh = ERR_PTR(-EEXIST);
 275	if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
 276		goto unlock_ret;
 277
 278	qh = ERR_PTR(-EBUSY);
 279	if ((flags & KNAV_QUEUE_SHARED) &&
 280	    (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
 281		goto unlock_ret;
 282
 283	qh = __knav_queue_open(inst, name, flags);
 284
 285unlock_ret:
 286	mutex_unlock(&knav_dev_lock);
 287
 288	return qh;
 289}
 290
 291static struct knav_queue *knav_queue_open_by_type(const char *name,
 292						unsigned type, unsigned flags)
 293{
 294	struct knav_queue_inst *inst;
 295	struct knav_queue *qh = ERR_PTR(-EINVAL);
 296	int idx;
 297
 298	mutex_lock(&knav_dev_lock);
 299
 300	for_each_instance(idx, inst, kdev) {
 301		if (knav_queue_is_reserved(inst))
 302			continue;
 303		if (!knav_queue_match_type(inst, type))
 304			continue;
 305		if (knav_queue_is_busy(inst))
 306			continue;
 307		qh = __knav_queue_open(inst, name, flags);
 308		goto unlock_ret;
 309	}
 310
 311unlock_ret:
 312	mutex_unlock(&knav_dev_lock);
 313	return qh;
 314}
 315
 316static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
 317{
 318	struct knav_range_info *range = inst->range;
 319
 320	if (range->ops && range->ops->set_notify)
 321		range->ops->set_notify(range, inst, enabled);
 322}
 323
 324static int knav_queue_enable_notifier(struct knav_queue *qh)
 325{
 326	struct knav_queue_inst *inst = qh->inst;
 327	bool first;
 328
 329	if (WARN_ON(!qh->notifier_fn))
 330		return -EINVAL;
 331
 332	/* Adjust the per handle notifier count */
 333	first = (atomic_inc_return(&qh->notifier_enabled) == 1);
 334	if (!first)
 335		return 0; /* nothing to do */
 336
 337	/* Now adjust the per instance notifier count */
 338	first = (atomic_inc_return(&inst->num_notifiers) == 1);
 339	if (first)
 340		knav_queue_set_notify(inst, true);
 341
 342	return 0;
 343}
 344
 345static int knav_queue_disable_notifier(struct knav_queue *qh)
 346{
 347	struct knav_queue_inst *inst = qh->inst;
 348	bool last;
 349
 350	last = (atomic_dec_return(&qh->notifier_enabled) == 0);
 351	if (!last)
 352		return 0; /* nothing to do */
 353
 354	last = (atomic_dec_return(&inst->num_notifiers) == 0);
 355	if (last)
 356		knav_queue_set_notify(inst, false);
 357
 358	return 0;
 359}
 360
 361static int knav_queue_set_notifier(struct knav_queue *qh,
 362				struct knav_queue_notify_config *cfg)
 363{
 364	knav_queue_notify_fn old_fn = qh->notifier_fn;
 365
 366	if (!cfg)
 367		return -EINVAL;
 368
 369	if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
 370		return -ENOTSUPP;
 371
 372	if (!cfg->fn && old_fn)
 373		knav_queue_disable_notifier(qh);
 374
 375	qh->notifier_fn = cfg->fn;
 376	qh->notifier_fn_arg = cfg->fn_arg;
 377
 378	if (cfg->fn && !old_fn)
 379		knav_queue_enable_notifier(qh);
 380
 381	return 0;
 382}
 383
 384static int knav_gp_set_notify(struct knav_range_info *range,
 385			       struct knav_queue_inst *inst,
 386			       bool enabled)
 387{
 388	unsigned queue;
 389
 390	if (range->flags & RANGE_HAS_IRQ) {
 391		queue = inst->id - range->queue_base;
 392		if (enabled)
 393			enable_irq(range->irqs[queue].irq);
 394		else
 395			disable_irq_nosync(range->irqs[queue].irq);
 396	}
 397	return 0;
 398}
 399
 400static int knav_gp_open_queue(struct knav_range_info *range,
 401				struct knav_queue_inst *inst, unsigned flags)
 402{
 403	return knav_queue_setup_irq(range, inst);
 404}
 405
 406static int knav_gp_close_queue(struct knav_range_info *range,
 407				struct knav_queue_inst *inst)
 408{
 409	knav_queue_free_irq(inst);
 410	return 0;
 411}
 412
 413static const struct knav_range_ops knav_gp_range_ops = {
 414	.set_notify	= knav_gp_set_notify,
 415	.open_queue	= knav_gp_open_queue,
 416	.close_queue	= knav_gp_close_queue,
 417};
 418
 419
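/*
 * A queue's occupancy is the entry count reported by the peek
 * registers plus any descriptors still buffered by the accumulator
 * for this instance (inst->desc_count).
 */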
 420static int knav_queue_get_count(void *qhandle)
 421{
 422	struct knav_queue *qh = qhandle;
 423	struct knav_queue_inst *inst = qh->inst;
 424
 425	return readl_relaxed(&qh->reg_peek[0].entry_count) +
 426		atomic_read(&inst->desc_count);
 427}
 428
 429static void knav_queue_debug_show_instance(struct seq_file *s,
 430					struct knav_queue_inst *inst)
 431{
 432	struct knav_device *kdev = inst->kdev;
 433	struct knav_queue *qh;
 434	int cpu = 0;
 435	int pushes = 0;
 436	int pops = 0;
 437	int push_errors = 0;
 438	int pop_errors = 0;
 439	int notifies = 0;
 440
 441	if (!knav_queue_is_busy(inst))
 442		return;
 443
 444	seq_printf(s, "\tqueue id %d (%s)\n",
 445		   kdev->base_id + inst->id, inst->name);
 446	for_each_handle_rcu(qh, inst) {
 447		for_each_possible_cpu(cpu) {
 448			pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
 449			pops += per_cpu_ptr(qh->stats, cpu)->pops;
 450			push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
 451			pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
 452			notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
 453		}
 454
 455		seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n",
 456				qh,
 457				pushes,
 458				pops,
 459				knav_queue_get_count(qh),
 460				notifies,
 461				push_errors,
 462				pop_errors);
 463	}
 464}
 465
 466static int knav_queue_debug_show(struct seq_file *s, void *v)
 467{
 468	struct knav_queue_inst *inst;
 469	int idx;
 470
 471	mutex_lock(&knav_dev_lock);
 472	seq_printf(s, "%s: %u-%u\n",
 473		   dev_name(kdev->dev), kdev->base_id,
 474		   kdev->base_id + kdev->num_queues - 1);
 475	for_each_instance(idx, inst, kdev)
 476		knav_queue_debug_show_instance(s, inst);
 477	mutex_unlock(&knav_dev_lock);
 478
 479	return 0;
 480}
 481
 482DEFINE_SHOW_ATTRIBUTE(knav_queue_debug);
 483
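/*
 * Poll @addr for up to @timeout ms. With a non-zero @flags mask, wait
 * for the masked bits to clear; with @flags == 0, wait for the whole
 * register to read zero. Returns 0 on success, -ETIMEDOUT otherwise.
 */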
 484static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
 485					u32 flags)
 486{
 487	unsigned long end;
 488	u32 val = 0;
 489
 490	end = jiffies + msecs_to_jiffies(timeout);
 491	while (time_after(end, jiffies)) {
 492		val = readl_relaxed(addr);
 493		if (flags)
 494			val &= flags;
 495		if (!val)
 496			break;
 497		cpu_relax();
 498	}
 499	return val ? -ETIMEDOUT : 0;
 500}
 501
 502
 503static int knav_queue_flush(struct knav_queue *qh)
 504{
 505	struct knav_queue_inst *inst = qh->inst;
 506	unsigned id = inst->id - inst->qmgr->start_queue;
 507
 508	atomic_set(&inst->desc_count, 0);
 509	writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
 510	return 0;
 511}
 512
 513/**
 514 * knav_queue_open()	- open a hardware queue
 515 * @name:		- name to give the queue handle
  516 * @id:			- desired queue number, if any, or specifies
  517 *			  the type of queue
 518 * @flags:		- the following flags are applicable to queues:
 519 *	KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
 520 *			     exclusive by default.
 521 *			     Subsequent attempts to open a shared queue should
 522 *			     also have this flag.
 523 *
 524 * Returns a handle to the open hardware queue if successful. Use IS_ERR()
 525 * to check the returned value for error codes.
 526 */
 527void *knav_queue_open(const char *name, unsigned id,
 528					unsigned flags)
 529{
 530	struct knav_queue *qh = ERR_PTR(-EINVAL);
 531
 532	switch (id) {
 533	case KNAV_QUEUE_QPEND:
 534	case KNAV_QUEUE_ACC:
 535	case KNAV_QUEUE_GP:
 536		qh = knav_queue_open_by_type(name, id, flags);
 537		break;
 538
 539	default:
 540		qh = knav_queue_open_by_id(name, id, flags);
 541		break;
 542	}
 543	return qh;
 544}
 545EXPORT_SYMBOL_GPL(knav_queue_open);
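/*
 * Example (illustrative, not part of the driver): opening and closing
 * a general-purpose queue. The handle name is arbitrary.
 *
 *	void *qh;
 *
 *	qh = knav_queue_open("my-gp-queue", KNAV_QUEUE_GP, 0);
 *	if (IS_ERR(qh))
 *		return PTR_ERR(qh);
 *	...
 *	knav_queue_close(qh);
 */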
 546
 547/**
 548 * knav_queue_close()	- close a hardware queue handle
 549 * @qhandle:		- handle to close
 550 */
 551void knav_queue_close(void *qhandle)
 552{
 553	struct knav_queue *qh = qhandle;
 554	struct knav_queue_inst *inst = qh->inst;
 555
 556	while (atomic_read(&qh->notifier_enabled) > 0)
 557		knav_queue_disable_notifier(qh);
 558
 559	mutex_lock(&knav_dev_lock);
 560	list_del_rcu(&qh->list);
 561	mutex_unlock(&knav_dev_lock);
 562	synchronize_rcu();
 563	if (!knav_queue_is_busy(inst)) {
 564		struct knav_range_info *range = inst->range;
 565
 566		if (range->ops && range->ops->close_queue)
 567			range->ops->close_queue(range, inst);
 568	}
 569	free_percpu(qh->stats);
 570	devm_kfree(inst->kdev->dev, qh);
 571}
 572EXPORT_SYMBOL_GPL(knav_queue_close);
 573
 574/**
 575 * knav_queue_device_control()	- Perform control operations on a queue
 576 * @qhandle:			- queue handle
 577 * @cmd:			- control commands
 578 * @arg:			- command argument
 579 *
 580 * Returns 0 on success, errno otherwise.
 581 */
 582int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
 583				unsigned long arg)
 584{
 585	struct knav_queue *qh = qhandle;
 586	struct knav_queue_notify_config *cfg;
 587	int ret;
 588
 589	switch ((int)cmd) {
 590	case KNAV_QUEUE_GET_ID:
 591		ret = qh->inst->kdev->base_id + qh->inst->id;
 592		break;
 593
 594	case KNAV_QUEUE_FLUSH:
 595		ret = knav_queue_flush(qh);
 596		break;
 597
 598	case KNAV_QUEUE_SET_NOTIFIER:
 599		cfg = (void *)arg;
 600		ret = knav_queue_set_notifier(qh, cfg);
 601		break;
 602
 603	case KNAV_QUEUE_ENABLE_NOTIFY:
 604		ret = knav_queue_enable_notifier(qh);
 605		break;
 606
 607	case KNAV_QUEUE_DISABLE_NOTIFY:
 608		ret = knav_queue_disable_notifier(qh);
 609		break;
 610
 611	case KNAV_QUEUE_GET_COUNT:
 612		ret = knav_queue_get_count(qh);
 613		break;
 614
 615	default:
 616		ret = -ENOTSUPP;
 617		break;
 618	}
 619	return ret;
 620}
 621EXPORT_SYMBOL_GPL(knav_queue_device_control);
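/*
 * Example (illustrative): installing a notification callback through
 * the control interface. my_notify() and my_data are hypothetical.
 *
 *	struct knav_queue_notify_config cfg = {
 *		.fn	= my_notify,
 *		.fn_arg	= my_data,
 *	};
 *
 *	ret = knav_queue_device_control(qh, KNAV_QUEUE_SET_NOTIFIER,
 *					(unsigned long)&cfg);
 */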
 622
 623
 624
 625/**
 626 * knav_queue_push()	- push data (or descriptor) to the tail of a queue
 627 * @qhandle:		- hardware queue handle
 628 * @dma:		- DMA data to push
 629 * @size:		- size of data to push
 630 * @flags:		- can be used to pass additional information
 631 *
 632 * Returns 0 on success, errno otherwise.
 633 */
 634int knav_queue_push(void *qhandle, dma_addr_t dma,
 635					unsigned size, unsigned flags)
 636{
 637	struct knav_queue *qh = qhandle;
 638	u32 val;
 639
 640	val = (u32)dma | ((size / 16) - 1);
 641	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);
 642
 643	this_cpu_inc(qh->stats->pushes);
 644	return 0;
 645}
 646EXPORT_SYMBOL_GPL(knav_queue_push);
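/*
 * Example (illustrative): the pushed word packs the DMA address with
 * the descriptor size in 16-byte units minus one, so @size should be
 * a multiple of 16. Pushing a 64-byte descriptor:
 *
 *	knav_queue_push(qh, desc_dma, 64, 0);
 */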
 647
 648/**
 649 * knav_queue_pop()	- pop data (or descriptor) from the head of a queue
 650 * @qhandle:		- hardware queue handle
  651 * @size:		- (optional) size of the data popped.
 652 *
 653 * Returns a DMA address on success, 0 on failure.
 654 */
 655dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
 656{
 657	struct knav_queue *qh = qhandle;
 658	struct knav_queue_inst *inst = qh->inst;
 659	dma_addr_t dma;
 660	u32 val, idx;
 661
 662	/* are we accumulated? */
 663	if (inst->descs) {
 664		if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
 665			atomic_inc(&inst->desc_count);
 666			return 0;
 667		}
 668		idx  = atomic_inc_return(&inst->desc_head);
 669		idx &= ACC_DESCS_MASK;
 670		val = inst->descs[idx];
 671	} else {
 672		val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
 673		if (unlikely(!val))
 674			return 0;
 675	}
 676
 677	dma = val & DESC_PTR_MASK;
 678	if (size)
 679		*size = ((val & DESC_SIZE_MASK) + 1) * 16;
 680
 681	this_cpu_inc(qh->stats->pops);
 682	return dma;
 683}
 684EXPORT_SYMBOL_GPL(knav_queue_pop);
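/*
 * Example (illustrative): draining a queue. A zero return means the
 * queue is empty; process_desc() is hypothetical.
 *
 *	dma_addr_t dma;
 *	unsigned size;
 *
 *	while ((dma = knav_queue_pop(qh, &size)))
 *		process_desc(dma, size);
 */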
 685
 686/* carve out descriptors and push into queue */
 687static void kdesc_fill_pool(struct knav_pool *pool)
 688{
 689	struct knav_region *region;
 690	int i;
 691
 692	region = pool->region;
 693	pool->desc_size = region->desc_size;
 694	for (i = 0; i < pool->num_desc; i++) {
 695		int index = pool->region_offset + i;
 696		dma_addr_t dma_addr;
 697		unsigned dma_size;
 698		dma_addr = region->dma_start + (region->desc_size * index);
 699		dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
 700		dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
 701					   DMA_TO_DEVICE);
 702		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
 703	}
 704}
 705
 706/* pop out descriptors and close the queue */
 707static void kdesc_empty_pool(struct knav_pool *pool)
 708{
 709	dma_addr_t dma;
 710	unsigned size;
 711	void *desc;
 712	int i;
 713
 714	if (!pool->queue)
 715		return;
 716
 717	for (i = 0;; i++) {
 718		dma = knav_queue_pop(pool->queue, &size);
 719		if (!dma)
 720			break;
 721		desc = knav_pool_desc_dma_to_virt(pool, dma);
 722		if (!desc) {
 723			dev_dbg(pool->kdev->dev,
 724				"couldn't unmap desc, continuing\n");
 725		}
 726	}
 727	WARN_ON(i != pool->num_desc);
 728	knav_queue_close(pool->queue);
 729}
 730
 731
 732/* Get the DMA address of a descriptor */
 733dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
 734{
 735	struct knav_pool *pool = ph;
 736	return pool->region->dma_start + (virt - pool->region->virt_start);
 737}
 738EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);
 739
 740void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
 741{
 742	struct knav_pool *pool = ph;
 743	return pool->region->virt_start + (dma - pool->region->dma_start);
 744}
 745EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
 746
 747/**
 748 * knav_pool_create()	- Create a pool of descriptors
 749 * @name:		- name to give the pool handle
 750 * @num_desc:		- numbers of descriptors in the pool
 751 * @region_id:		- QMSS region id from which the descriptors are to be
 752 *			  allocated.
 753 *
 754 * Returns a pool handle on success.
 755 * Use IS_ERR_OR_NULL() to identify error values on return.
 756 */
 757void *knav_pool_create(const char *name,
 758					int num_desc, int region_id)
 759{
 760	struct knav_region *reg_itr, *region = NULL;
 761	struct knav_pool *pool, *pi = NULL, *iter;
 762	struct list_head *node;
 763	unsigned last_offset;
 764	int ret;
 765
 766	if (!kdev)
 767		return ERR_PTR(-EPROBE_DEFER);
 768
 769	if (!kdev->dev)
 770		return ERR_PTR(-ENODEV);
 771
 772	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
 773	if (!pool) {
 774		dev_err(kdev->dev, "out of memory allocating pool\n");
 775		return ERR_PTR(-ENOMEM);
 776	}
 777
 778	for_each_region(kdev, reg_itr) {
 779		if (reg_itr->id != region_id)
 780			continue;
 781		region = reg_itr;
 782		break;
 783	}
 784
 785	if (!region) {
 786		dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
 787		ret = -EINVAL;
 788		goto err;
 789	}
 790
 791	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
 792	if (IS_ERR(pool->queue)) {
 793		dev_err(kdev->dev,
 794			"failed to open queue for pool(%s), error %ld\n",
 795			name, PTR_ERR(pool->queue));
 796		ret = PTR_ERR(pool->queue);
 797		goto err;
 798	}
 799
 800	pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
 801	pool->kdev = kdev;
 802	pool->dev = kdev->dev;
 803
 804	mutex_lock(&knav_dev_lock);
 805
 806	if (num_desc > (region->num_desc - region->used_desc)) {
 807		dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
 808			region_id, name);
 809		ret = -ENOMEM;
 810		goto err_unlock;
 811	}
 812
  813	/* Region maintains a sorted (by region offset) list of pools;
  814	 * use the first free slot which is large enough to accommodate
  815	 * the request.
  816	 */
 817	last_offset = 0;
 818	node = &region->pools;
 819	list_for_each_entry(iter, &region->pools, region_inst) {
 820		if ((iter->region_offset - last_offset) >= num_desc) {
 821			pi = iter;
 822			break;
 823		}
 824		last_offset = iter->region_offset + iter->num_desc;
 825	}
 826
 827	if (pi) {
 828		node = &pi->region_inst;
 829		pool->region = region;
 830		pool->num_desc = num_desc;
 831		pool->region_offset = last_offset;
 832		region->used_desc += num_desc;
 833		list_add_tail(&pool->list, &kdev->pools);
 834		list_add_tail(&pool->region_inst, node);
 835	} else {
 836		dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
 837			name, region_id);
 838		ret = -ENOMEM;
 839		goto err_unlock;
 840	}
 841
 842	mutex_unlock(&knav_dev_lock);
 843	kdesc_fill_pool(pool);
 844	return pool;
 845
 846err_unlock:
 847	mutex_unlock(&knav_dev_lock);
 848err:
 849	kfree(pool->name);
 850	devm_kfree(kdev->dev, pool);
 851	return ERR_PTR(ret);
 852}
 853EXPORT_SYMBOL_GPL(knav_pool_create);
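/*
 * Example (illustrative): carving 512 descriptors out of region 12;
 * the name and numbers are arbitrary.
 *
 *	void *pool;
 *
 *	pool = knav_pool_create("my-pool", 512, 12);
 *	if (IS_ERR_OR_NULL(pool))
 *		return -ENODEV;
 *	...
 *	knav_pool_destroy(pool);
 */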
 854
 855/**
 856 * knav_pool_destroy()	- Free a pool of descriptors
 857 * @ph:		- pool handle
 858 */
 859void knav_pool_destroy(void *ph)
 860{
 861	struct knav_pool *pool = ph;
 862
 863	if (!pool)
 864		return;
 865
 866	if (!pool->region)
 867		return;
 868
 869	kdesc_empty_pool(pool);
 870	mutex_lock(&knav_dev_lock);
 871
 872	pool->region->used_desc -= pool->num_desc;
 873	list_del(&pool->region_inst);
 874	list_del(&pool->list);
 875
 876	mutex_unlock(&knav_dev_lock);
 877	kfree(pool->name);
 878	devm_kfree(kdev->dev, pool);
 879}
 880EXPORT_SYMBOL_GPL(knav_pool_destroy);
 881
 882
 883/**
 884 * knav_pool_desc_get()	- Get a descriptor from the pool
 885 * @ph:		- pool handle
 886 *
 887 * Returns descriptor from the pool.
 888 */
 889void *knav_pool_desc_get(void *ph)
 890{
 891	struct knav_pool *pool = ph;
 892	dma_addr_t dma;
 893	unsigned size;
 894	void *data;
 895
 896	dma = knav_queue_pop(pool->queue, &size);
 897	if (unlikely(!dma))
 898		return ERR_PTR(-ENOMEM);
 899	data = knav_pool_desc_dma_to_virt(pool, dma);
 900	return data;
 901}
 902EXPORT_SYMBOL_GPL(knav_pool_desc_get);
 903
 904/**
 905 * knav_pool_desc_put()	- return a descriptor to the pool
 906 * @ph:		- pool handle
 907 * @desc:	- virtual address
 908 */
 909void knav_pool_desc_put(void *ph, void *desc)
 910{
 911	struct knav_pool *pool = ph;
 912	dma_addr_t dma;
 913	dma = knav_pool_desc_virt_to_dma(pool, desc);
 914	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
 915}
 916EXPORT_SYMBOL_GPL(knav_pool_desc_put);
 917
 918/**
 919 * knav_pool_desc_map()	- Map descriptor for DMA transfer
 920 * @ph:				- pool handle
 921 * @desc:			- address of descriptor to map
 922 * @size:			- size of descriptor to map
 923 * @dma:			- DMA address return pointer
  924 * @dma_sz:			- adjusted size return pointer
 925 *
 926 * Returns 0 on success, errno otherwise.
 927 */
 928int knav_pool_desc_map(void *ph, void *desc, unsigned size,
 929					dma_addr_t *dma, unsigned *dma_sz)
 930{
 931	struct knav_pool *pool = ph;
 932	*dma = knav_pool_desc_virt_to_dma(pool, desc);
 933	size = min(size, pool->region->desc_size);
 934	size = ALIGN(size, SMP_CACHE_BYTES);
 935	*dma_sz = size;
 936	dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);
 937
  938	/* Ensure the descriptor reaches memory */
 939	__iowmb();
 940
 941	return 0;
 942}
 943EXPORT_SYMBOL_GPL(knav_pool_desc_map);
 944
 945/**
 946 * knav_pool_desc_unmap()	- Unmap descriptor after DMA transfer
 947 * @ph:				- pool handle
 948 * @dma:			- DMA address of descriptor to unmap
 949 * @dma_sz:			- size of descriptor to unmap
 950 *
 951 * Returns descriptor address on success, Use IS_ERR_OR_NULL() to identify
 952 * error values on return.
 953 */
 954void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
 955{
 956	struct knav_pool *pool = ph;
 957	unsigned desc_sz;
 958	void *desc;
 959
 960	desc_sz = min(dma_sz, pool->region->desc_size);
 961	desc = knav_pool_desc_dma_to_virt(pool, dma);
 962	dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
 963	prefetch(desc);
 964	return desc;
 965}
 966EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
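/*
 * Example (illustrative): the usual round trip of a descriptor
 * through a transmit queue. fill_desc(), txq and compq are
 * hypothetical; error checks omitted for brevity.
 *
 *	desc = knav_pool_desc_get(pool);
 *	fill_desc(desc);
 *	knav_pool_desc_map(pool, desc, size, &dma, &dma_sz);
 *	knav_queue_push(txq, dma, dma_sz, 0);
 *	...
 *	dma = knav_queue_pop(compq, &dma_sz);
 *	desc = knav_pool_desc_unmap(pool, dma, dma_sz);
 *	knav_pool_desc_put(pool, desc);
 */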
 967
 968/**
 969 * knav_pool_count()	- Get the number of descriptors in pool.
 970 * @ph:			- pool handle
 971 * Returns number of elements in the pool.
 972 */
 973int knav_pool_count(void *ph)
 974{
 975	struct knav_pool *pool = ph;
 976	return knav_queue_get_count(pool->queue);
 977}
 978EXPORT_SYMBOL_GPL(knav_pool_count);
 979
 980static void knav_queue_setup_region(struct knav_device *kdev,
 981					struct knav_region *region)
 982{
 983	unsigned hw_num_desc, hw_desc_size, size;
 984	struct knav_reg_region __iomem  *regs;
 985	struct knav_qmgr_info *qmgr;
 986	struct knav_pool *pool;
 987	int id = region->id;
 988	struct page *page;
 989
 990	/* unused region? */
 991	if (!region->num_desc) {
 992		dev_warn(kdev->dev, "unused region %s\n", region->name);
 993		return;
 994	}
 995
 996	/* get hardware descriptor value */
 997	hw_num_desc = ilog2(region->num_desc - 1) + 1;
 998
 999	/* did we force fit ourselves into nothingness? */
1000	if (region->num_desc < 32) {
1001		region->num_desc = 0;
1002		dev_warn(kdev->dev, "too few descriptors in region %s\n",
1003			 region->name);
1004		return;
1005	}
1006
1007	size = region->num_desc * region->desc_size;
1008	region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
1009						GFP_DMA32);
1010	if (!region->virt_start) {
1011		region->num_desc = 0;
1012		dev_err(kdev->dev, "memory alloc failed for region %s\n",
1013			region->name);
1014		return;
1015	}
1016	region->virt_end = region->virt_start + size;
1017	page = virt_to_page(region->virt_start);
1018
1019	region->dma_start = dma_map_page(kdev->dev, page, 0, size,
1020					 DMA_BIDIRECTIONAL);
1021	if (dma_mapping_error(kdev->dev, region->dma_start)) {
1022		dev_err(kdev->dev, "dma map failed for region %s\n",
1023			region->name);
1024		goto fail;
1025	}
1026	region->dma_end = region->dma_start + size;
1027
1028	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
1029	if (!pool) {
1030		dev_err(kdev->dev, "out of memory allocating dummy pool\n");
1031		goto fail;
1032	}
1033	pool->num_desc = 0;
1034	pool->region_offset = region->num_desc;
1035	list_add(&pool->region_inst, &region->pools);
1036
1037	dev_dbg(kdev->dev,
1038		"region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
1039		region->name, id, region->desc_size, region->num_desc,
1040		region->link_index, &region->dma_start, &region->dma_end,
1041		region->virt_start, region->virt_end);
1042
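	/*
	 * Hardware encoding: the descriptor size is programmed in
	 * 16-byte units minus one, and the number of descriptors as
	 * log2(num_desc) - 5, hence the 32-descriptor minimum enforced
	 * above.
	 */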
1043	hw_desc_size = (region->desc_size / 16) - 1;
1044	hw_num_desc -= 5;
1045
1046	for_each_qmgr(kdev, qmgr) {
1047		regs = qmgr->reg_region + id;
1048		writel_relaxed((u32)region->dma_start, &regs->base);
1049		writel_relaxed(region->link_index, &regs->start_index);
1050		writel_relaxed(hw_desc_size << 16 | hw_num_desc,
1051			       &regs->size_count);
1052	}
1053	return;
1054
1055fail:
1056	if (region->dma_start)
1057		dma_unmap_page(kdev->dev, region->dma_start, size,
1058				DMA_BIDIRECTIONAL);
1059	if (region->virt_start)
1060		free_pages_exact(region->virt_start, size);
1061	region->num_desc = 0;
1062	return;
1063}
1064
1065static const char *knav_queue_find_name(struct device_node *node)
1066{
1067	const char *name;
1068
1069	if (of_property_read_string(node, "label", &name) < 0)
1070		name = node->name;
1071	if (!name)
1072		name = "unknown";
1073	return name;
1074}
1075
1076static int knav_queue_setup_regions(struct knav_device *kdev,
1077				    struct device_node *node)
1078{
1079	struct device *dev = kdev->dev;
1080	struct device_node *regions __free(device_node) =
1081			of_get_child_by_name(node, "descriptor-regions");
1082	struct knav_region *region;
1083	struct device_node *child;
1084	u32 temp[2];
1085	int ret;
1086
1087	if (!regions)
1088		return dev_err_probe(dev, -ENODEV,
1089				     "descriptor-regions not specified\n");
1090
1091	for_each_child_of_node(regions, child) {
1092		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
1093		if (!region) {
1094			of_node_put(child);
1095			dev_err(dev, "out of memory allocating region\n");
1096			return -ENOMEM;
1097		}
1098
1099		region->name = knav_queue_find_name(child);
1100		of_property_read_u32(child, "id", &region->id);
1101		ret = of_property_read_u32_array(child, "region-spec", temp, 2);
1102		if (!ret) {
1103			region->num_desc  = temp[0];
1104			region->desc_size = temp[1];
1105		} else {
1106			dev_err(dev, "invalid region info %s\n", region->name);
1107			devm_kfree(dev, region);
1108			continue;
1109		}
1110
1111		ret = of_property_read_u32(child, "link-index",
1112					   &region->link_index);
1113		if (ret) {
1114			dev_err(dev, "link index not found for %s\n",
1115				region->name);
1116			devm_kfree(dev, region);
1117			continue;
1118		}
1119
1120		INIT_LIST_HEAD(&region->pools);
1121		list_add_tail(&region->list, &kdev->regions);
1122	}
1123	if (list_empty(&kdev->regions))
1124		return dev_err_probe(dev, -ENODEV,
1125				     "no valid region information found\n");
1126
1127	/* Next, we run through the regions and set things up */
1128	for_each_region(kdev, region)
1129		knav_queue_setup_region(kdev, region);
1130
1131	return 0;
1132}
1133
1134static int knav_get_link_ram(struct knav_device *kdev,
1135				       const char *name,
1136				       struct knav_link_ram_block *block)
1137{
1138	struct platform_device *pdev = to_platform_device(kdev->dev);
1139	struct device_node *node = pdev->dev.of_node;
1140	u32 temp[2];
1141
1142	/*
1143	 * Note: link ram resources are specified in "entry" sized units. In
 1144	 * reality, although entries are ~40 bits in hardware, we treat them as
1145	 * 64-bit entities here.
1146	 *
1147	 * For example, to specify the internal link ram for Keystone-I class
1148	 * devices, we would set the linkram0 resource to 0x80000-0x83fff.
1149	 *
1150	 * This gets a bit weird when other link rams are used.  For example,
1151	 * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
1152	 * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
1153	 * which accounts for 64-bits per entry, for 16K entries.
1154	 */
 1155	if (!of_property_read_u32_array(node, name, temp, 2)) {
1156		if (temp[0]) {
 1157			/*
 1158			 * queue_base specified => using internal or on-chip
 1159			 * link ram. WARNING: we do not "reserve" this block.
 1160			 */
1161			block->dma = (dma_addr_t)temp[0];
1162			block->virt = NULL;
1163			block->size = temp[1];
1164		} else {
1165			block->size = temp[1];
 1166			/* queue_base not specified => allocate requested size */
1167			block->virt = dmam_alloc_coherent(kdev->dev,
1168						  8 * block->size, &block->dma,
1169						  GFP_KERNEL);
1170			if (!block->virt) {
1171				dev_err(kdev->dev, "failed to alloc linkram\n");
1172				return -ENOMEM;
1173			}
1174		}
1175	} else {
1176		return -ENODEV;
1177	}
1178	return 0;
1179}
1180
1181static int knav_queue_setup_link_ram(struct knav_device *kdev)
1182{
1183	struct knav_link_ram_block *block;
1184	struct knav_qmgr_info *qmgr;
1185
1186	for_each_qmgr(kdev, qmgr) {
1187		block = &kdev->link_rams[0];
1188		dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
1189			&block->dma, block->virt, block->size);
1190		writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
1191		if (kdev->version == QMSS_66AK2G)
1192			writel_relaxed(block->size,
1193				       &qmgr->reg_config->link_ram_size0);
1194		else
1195			writel_relaxed(block->size - 1,
1196				       &qmgr->reg_config->link_ram_size0);
1197		block++;
1198		if (!block->size)
1199			continue;
1200
1201		dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
1202			&block->dma, block->virt, block->size);
1203		writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
1204	}
1205
1206	return 0;
1207}
1208
1209static int knav_setup_queue_range(struct knav_device *kdev,
1210					struct device_node *node)
1211{
1212	struct device *dev = kdev->dev;
1213	struct knav_range_info *range;
1214	struct knav_qmgr_info *qmgr;
1215	u32 temp[2], start, end, id, index;
1216	int ret, i;
1217
1218	range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
1219	if (!range) {
1220		dev_err(dev, "out of memory allocating range\n");
1221		return -ENOMEM;
1222	}
1223
1224	range->kdev = kdev;
1225	range->name = knav_queue_find_name(node);
1226	ret = of_property_read_u32_array(node, "qrange", temp, 2);
1227	if (!ret) {
1228		range->queue_base = temp[0] - kdev->base_id;
1229		range->num_queues = temp[1];
1230	} else {
1231		dev_err(dev, "invalid queue range %s\n", range->name);
1232		devm_kfree(dev, range);
1233		return -EINVAL;
1234	}
1235
1236	for (i = 0; i < RANGE_MAX_IRQS; i++) {
1237		struct of_phandle_args oirq;
1238
1239		if (of_irq_parse_one(node, i, &oirq))
1240			break;
1241
1242		range->irqs[i].irq = irq_create_of_mapping(&oirq);
1243		if (range->irqs[i].irq == IRQ_NONE)
1244			break;
1245
1246		range->num_irqs++;
1247
1248		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
1249			unsigned long mask;
1250			int bit;
1251
1252			range->irqs[i].cpu_mask = devm_kzalloc(dev,
1253							       cpumask_size(), GFP_KERNEL);
1254			if (!range->irqs[i].cpu_mask)
1255				return -ENOMEM;
1256
1257			mask = (oirq.args[2] & 0x0000ff00) >> 8;
1258			for_each_set_bit(bit, &mask, BITS_PER_LONG)
1259				cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
1260		}
1261	}
1262
1263	range->num_irqs = min(range->num_irqs, range->num_queues);
1264	if (range->num_irqs)
1265		range->flags |= RANGE_HAS_IRQ;
1266
1267	if (of_property_read_bool(node, "qalloc-by-id"))
1268		range->flags |= RANGE_RESERVED;
1269
1270	if (of_property_present(node, "accumulator")) {
1271		ret = knav_init_acc_range(kdev, node, range);
1272		if (ret < 0) {
1273			devm_kfree(dev, range);
1274			return ret;
1275		}
1276	} else {
1277		range->ops = &knav_gp_range_ops;
1278	}
1279
1280	/* set threshold to 1, and flush out the queues */
1281	for_each_qmgr(kdev, qmgr) {
1282		start = max(qmgr->start_queue, range->queue_base);
1283		end   = min(qmgr->start_queue + qmgr->num_queues,
1284			    range->queue_base + range->num_queues);
1285		for (id = start; id < end; id++) {
1286			index = id - qmgr->start_queue;
1287			writel_relaxed(THRESH_GTE | 1,
1288				       &qmgr->reg_peek[index].ptr_size_thresh);
1289			writel_relaxed(0,
1290				       &qmgr->reg_push[index].ptr_size_thresh);
1291		}
1292	}
1293
1294	list_add_tail(&range->list, &kdev->queue_ranges);
1295	dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
1296		range->name, range->queue_base,
1297		range->queue_base + range->num_queues - 1,
1298		range->num_irqs,
1299		(range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
1300		(range->flags & RANGE_RESERVED) ? ", reserved" : "",
1301		(range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
1302	kdev->num_queues_in_use += range->num_queues;
1303	return 0;
1304}
1305
1306static int knav_setup_queue_pools(struct knav_device *kdev,
1307				  struct device_node *node)
1308{
1309	struct device_node *queue_pools __free(device_node) =
1310			of_get_child_by_name(node, "queue-pools");
1311	struct device_node *type, *range;
1312
1313	if (!queue_pools)
1314		return dev_err_probe(kdev->dev, -ENODEV,
1315				     "queue-pools not specified\n");
1316
1317	for_each_child_of_node(queue_pools, type) {
1318		for_each_child_of_node(type, range) {
1319			/* return value ignored, we init the rest... */
1320			knav_setup_queue_range(kdev, range);
1321		}
1322	}
1323
1324	/* ... and barf if they all failed! */
1325	if (list_empty(&kdev->queue_ranges))
1326		return dev_err_probe(kdev->dev, -ENODEV,
1327				     "no valid queue range found\n");
1328	return 0;
1329}
1330
1331static void knav_free_queue_range(struct knav_device *kdev,
1332				  struct knav_range_info *range)
1333{
1334	if (range->ops && range->ops->free_range)
1335		range->ops->free_range(range);
1336	list_del(&range->list);
1337	devm_kfree(kdev->dev, range);
1338}
1339
1340static void knav_free_queue_ranges(struct knav_device *kdev)
1341{
1342	struct knav_range_info *range;
1343
1344	for (;;) {
1345		range = first_queue_range(kdev);
1346		if (!range)
1347			break;
1348		knav_free_queue_range(kdev, range);
1349	}
1350}
1351
1352static void knav_queue_free_regions(struct knav_device *kdev)
1353{
1354	struct knav_region *region;
1355	struct knav_pool *pool, *tmp;
1356	unsigned size;
1357
1358	for (;;) {
1359		region = first_region(kdev);
1360		if (!region)
1361			break;
1362		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
1363			knav_pool_destroy(pool);
1364
1365		size = region->virt_end - region->virt_start;
1366		if (size)
1367			free_pages_exact(region->virt_start, size);
1368		list_del(&region->list);
1369		devm_kfree(kdev->dev, region);
1370	}
1371}
1372
1373static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
1374					struct device_node *node, int index)
1375{
1376	struct resource res;
1377	void __iomem *regs;
1378	int ret;
1379
1380	ret = of_address_to_resource(node, index, &res);
1381	if (ret) {
1382		dev_err(kdev->dev, "Can't translate of node(%pOFn) address for index(%d)\n",
1383			node, index);
1384		return ERR_PTR(ret);
1385	}
1386
1387	regs = devm_ioremap_resource(kdev->dev, &res);
1388	if (IS_ERR(regs))
1389		dev_err(kdev->dev, "Failed to map register base for index(%d) node(%pOFn)\n",
1390			index, node);
1391	return regs;
1392}
1393
1394static int knav_queue_init_qmgrs(struct knav_device *kdev,
1395				 struct device_node *node)
1396{
1397	struct device *dev = kdev->dev;
1398	struct device_node *qmgrs __free(device_node) =
1399			of_get_child_by_name(node, "qmgrs");
1400	struct knav_qmgr_info *qmgr;
1401	struct device_node *child;
1402	u32 temp[2];
1403	int ret;
1404
1405	if (!qmgrs)
1406		return dev_err_probe(dev, -ENODEV,
1407				     "queue manager info not specified\n");
1408
1409	for_each_child_of_node(qmgrs, child) {
1410		qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
1411		if (!qmgr) {
1412			of_node_put(child);
1413			dev_err(dev, "out of memory allocating qmgr\n");
1414			return -ENOMEM;
1415		}
1416
1417		ret = of_property_read_u32_array(child, "managed-queues",
1418						 temp, 2);
1419		if (!ret) {
1420			qmgr->start_queue = temp[0];
1421			qmgr->num_queues = temp[1];
1422		} else {
1423			dev_err(dev, "invalid qmgr queue range\n");
1424			devm_kfree(dev, qmgr);
1425			continue;
1426		}
1427
1428		dev_info(dev, "qmgr start queue %d, number of queues %d\n",
1429			 qmgr->start_queue, qmgr->num_queues);
1430
1431		qmgr->reg_peek =
1432			knav_queue_map_reg(kdev, child,
1433					   KNAV_QUEUE_PEEK_REG_INDEX);
1434
1435		if (kdev->version == QMSS) {
1436			qmgr->reg_status =
1437				knav_queue_map_reg(kdev, child,
1438						   KNAV_QUEUE_STATUS_REG_INDEX);
1439		}
1440
1441		qmgr->reg_config =
1442			knav_queue_map_reg(kdev, child,
1443					   (kdev->version == QMSS_66AK2G) ?
1444					   KNAV_L_QUEUE_CONFIG_REG_INDEX :
1445					   KNAV_QUEUE_CONFIG_REG_INDEX);
1446		qmgr->reg_region =
1447			knav_queue_map_reg(kdev, child,
1448					   (kdev->version == QMSS_66AK2G) ?
1449					   KNAV_L_QUEUE_REGION_REG_INDEX :
1450					   KNAV_QUEUE_REGION_REG_INDEX);
1451
1452		qmgr->reg_push =
1453			knav_queue_map_reg(kdev, child,
1454					   (kdev->version == QMSS_66AK2G) ?
1455					    KNAV_L_QUEUE_PUSH_REG_INDEX :
1456					    KNAV_QUEUE_PUSH_REG_INDEX);
1457
1458		if (kdev->version == QMSS) {
1459			qmgr->reg_pop =
1460				knav_queue_map_reg(kdev, child,
1461						   KNAV_QUEUE_POP_REG_INDEX);
1462		}
1463
1464		if (IS_ERR(qmgr->reg_peek) ||
1465		    ((kdev->version == QMSS) &&
1466		    (IS_ERR(qmgr->reg_status) || IS_ERR(qmgr->reg_pop))) ||
1467		    IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
1468		    IS_ERR(qmgr->reg_push)) {
1469			dev_err(dev, "failed to map qmgr regs\n");
1470			if (kdev->version == QMSS) {
1471				if (!IS_ERR(qmgr->reg_status))
1472					devm_iounmap(dev, qmgr->reg_status);
1473				if (!IS_ERR(qmgr->reg_pop))
1474					devm_iounmap(dev, qmgr->reg_pop);
1475			}
1476			if (!IS_ERR(qmgr->reg_peek))
1477				devm_iounmap(dev, qmgr->reg_peek);
1478			if (!IS_ERR(qmgr->reg_config))
1479				devm_iounmap(dev, qmgr->reg_config);
1480			if (!IS_ERR(qmgr->reg_region))
1481				devm_iounmap(dev, qmgr->reg_region);
1482			if (!IS_ERR(qmgr->reg_push))
1483				devm_iounmap(dev, qmgr->reg_push);
1484			devm_kfree(dev, qmgr);
1485			continue;
1486		}
1487
 1488		/* Use the same push register for pop as well */
1489		if (kdev->version == QMSS_66AK2G)
1490			qmgr->reg_pop = qmgr->reg_push;
1491
1492		list_add_tail(&qmgr->list, &kdev->qmgrs);
1493		dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
1494			 qmgr->start_queue, qmgr->num_queues,
1495			 qmgr->reg_peek, qmgr->reg_status,
1496			 qmgr->reg_config, qmgr->reg_region,
1497			 qmgr->reg_push, qmgr->reg_pop);
1498	}
1499	return 0;
1500}
1501
1502static int knav_queue_init_pdsps(struct knav_device *kdev,
1503					struct device_node *pdsps)
1504{
1505	struct device *dev = kdev->dev;
1506	struct knav_pdsp_info *pdsp;
1507	struct device_node *child;
1508
1509	for_each_child_of_node(pdsps, child) {
1510		pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
1511		if (!pdsp) {
1512			of_node_put(child);
1513			dev_err(dev, "out of memory allocating pdsp\n");
1514			return -ENOMEM;
1515		}
1516		pdsp->name = knav_queue_find_name(child);
1517		pdsp->iram =
1518			knav_queue_map_reg(kdev, child,
1519					   KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
1520		pdsp->regs =
1521			knav_queue_map_reg(kdev, child,
1522					   KNAV_QUEUE_PDSP_REGS_REG_INDEX);
1523		pdsp->intd =
1524			knav_queue_map_reg(kdev, child,
1525					   KNAV_QUEUE_PDSP_INTD_REG_INDEX);
1526		pdsp->command =
1527			knav_queue_map_reg(kdev, child,
1528					   KNAV_QUEUE_PDSP_CMD_REG_INDEX);
1529
1530		if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
1531		    IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
1532			dev_err(dev, "failed to map pdsp %s regs\n",
1533				pdsp->name);
1534			if (!IS_ERR(pdsp->command))
1535				devm_iounmap(dev, pdsp->command);
1536			if (!IS_ERR(pdsp->iram))
1537				devm_iounmap(dev, pdsp->iram);
1538			if (!IS_ERR(pdsp->regs))
1539				devm_iounmap(dev, pdsp->regs);
1540			if (!IS_ERR(pdsp->intd))
1541				devm_iounmap(dev, pdsp->intd);
1542			devm_kfree(dev, pdsp);
1543			continue;
1544		}
1545		of_property_read_u32(child, "id", &pdsp->id);
1546		list_add_tail(&pdsp->list, &kdev->pdsps);
1547		dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
1548			pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
1549			pdsp->intd);
1550	}
1551	return 0;
1552}
1553
1554static int knav_queue_stop_pdsp(struct knav_device *kdev,
1555			  struct knav_pdsp_info *pdsp)
1556{
1557	u32 val, timeout = 1000;
1558	int ret;
1559
1560	val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
1561	writel_relaxed(val, &pdsp->regs->control);
1562	ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
1563					PDSP_CTRL_RUNNING);
1564	if (ret < 0) {
1565		dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
1566		return ret;
1567	}
1568	pdsp->loaded = false;
1569	pdsp->started = false;
1570	return 0;
1571}
1572
1573static int knav_queue_load_pdsp(struct knav_device *kdev,
1574			  struct knav_pdsp_info *pdsp)
1575{
1576	int i, ret, fwlen;
1577	const struct firmware *fw;
1578	bool found = false;
1579	u32 *fwdata;
1580
1581	for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
1582		if (knav_acc_firmwares[i]) {
1583			ret = request_firmware_direct(&fw,
1584						      knav_acc_firmwares[i],
1585						      kdev->dev);
1586			if (!ret) {
1587				found = true;
1588				break;
1589			}
1590		}
1591	}
1592
1593	if (!found) {
1594		dev_err(kdev->dev, "failed to get firmware for pdsp\n");
1595		return -ENODEV;
1596	}
1597
1598	dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
1599		 knav_acc_firmwares[i]);
1600
1601	writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
1602	/* download the firmware */
1603	fwdata = (u32 *)fw->data;
1604	fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
1605	for (i = 0; i < fwlen; i++)
1606		writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);
1607
1608	release_firmware(fw);
1609	return 0;
1610}
1611
1612static int knav_queue_start_pdsp(struct knav_device *kdev,
1613			   struct knav_pdsp_info *pdsp)
1614{
1615	u32 val, timeout = 1000;
1616	int ret;
1617
1618	/* write a command for sync */
1619	writel_relaxed(0xffffffff, pdsp->command);
1620	while (readl_relaxed(pdsp->command) != 0xffffffff)
1621		cpu_relax();
1622
1623	/* soft reset the PDSP */
1624	val  = readl_relaxed(&pdsp->regs->control);
1625	val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
1626	writel_relaxed(val, &pdsp->regs->control);
1627
1628	/* enable pdsp */
1629	val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
1630	writel_relaxed(val, &pdsp->regs->control);
1631
1632	/* wait for command register to clear */
1633	ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
1634	if (ret < 0) {
1635		dev_err(kdev->dev,
1636			"timed out on pdsp %s command register wait\n",
1637			pdsp->name);
1638		return ret;
1639	}
1640	return 0;
1641}
1642
1643static void knav_queue_stop_pdsps(struct knav_device *kdev)
1644{
1645	struct knav_pdsp_info *pdsp;
1646
1647	/* disable all pdsps */
1648	for_each_pdsp(kdev, pdsp)
1649		knav_queue_stop_pdsp(kdev, pdsp);
1650}
1651
1652static int knav_queue_start_pdsps(struct knav_device *kdev)
1653{
1654	struct knav_pdsp_info *pdsp;
1655	int ret;
1656
1657	knav_queue_stop_pdsps(kdev);
 1658	/* Now load them all. We return success even if a pdsp is not
 1659	 * loaded, as the acc channels are optional and depend on firmware
 1660	 * availability in the system. We set the loaded and started flags
 1661	 * and, when initializing the acc range, check them and init the
 1662	 * range only if the pdsp is started.
 1663	 */
1664	for_each_pdsp(kdev, pdsp) {
1665		ret = knav_queue_load_pdsp(kdev, pdsp);
1666		if (!ret)
1667			pdsp->loaded = true;
1668	}
1669
1670	for_each_pdsp(kdev, pdsp) {
1671		if (pdsp->loaded) {
1672			ret = knav_queue_start_pdsp(kdev, pdsp);
1673			if (!ret)
1674				pdsp->started = true;
1675		}
1676	}
1677	return 0;
1678}
1679
1680static int knav_queue_setup_pdsps(struct knav_device *kdev,
1681				  struct device_node *node)
1682{
1683	struct device_node *pdsps __free(device_node) =
1684			of_get_child_by_name(node, "pdsps");
1685
1686	if (pdsps) {
1687		int ret;
1688
1689		ret = knav_queue_init_pdsps(kdev, pdsps);
1690		if (ret)
1691			return ret;
1692
1693		ret = knav_queue_start_pdsps(kdev);
1694		if (ret)
1695			return ret;
1696	}
1697	return 0;
1698}
1699
1700static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
1701{
1702	struct knav_qmgr_info *qmgr;
1703
1704	for_each_qmgr(kdev, qmgr) {
1705		if ((id >= qmgr->start_queue) &&
1706		    (id < qmgr->start_queue + qmgr->num_queues))
1707			return qmgr;
1708	}
1709	return NULL;
1710}
1711
1712static int knav_queue_init_queue(struct knav_device *kdev,
1713					struct knav_range_info *range,
1714					struct knav_queue_inst *inst,
1715					unsigned id)
1716{
1717	char irq_name[KNAV_NAME_SIZE];
1718	inst->qmgr = knav_find_qmgr(id);
1719	if (!inst->qmgr)
1720		return -1;
1721
1722	INIT_LIST_HEAD(&inst->handles);
1723	inst->kdev = kdev;
1724	inst->range = range;
1725	inst->irq_num = -1;
1726	inst->id = id;
1727	scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
1728	inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);
1729
1730	if (range->ops && range->ops->init_queue)
1731		return range->ops->init_queue(range, inst);
1732	else
1733		return 0;
1734}
1735
1736static int knav_queue_init_queues(struct knav_device *kdev)
1737{
1738	struct knav_range_info *range;
1739	int size, id, base_idx;
1740	int idx = 0, ret = 0;
1741
1742	/* how much do we need for instance data? */
1743	size = sizeof(struct knav_queue_inst);
1744
 1745	/* round this up to a power of 2, to keep the index-to-instance
 1746	 * arithmetic fast
 1747	 */
1748	kdev->inst_shift = order_base_2(size);
1749	size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
1750	kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
1751	if (!kdev->instances)
1752		return -ENOMEM;
1753
1754	for_each_queue_range(kdev, range) {
1755		if (range->ops && range->ops->init_range)
1756			range->ops->init_range(range);
1757		base_idx = idx;
1758		for (id = range->queue_base;
1759		     id < range->queue_base + range->num_queues; id++, idx++) {
1760			ret = knav_queue_init_queue(kdev, range,
1761					knav_queue_idx_to_inst(kdev, idx), id);
1762			if (ret < 0)
1763				return ret;
1764		}
1765		range->queue_base_inst =
1766			knav_queue_idx_to_inst(kdev, base_idx);
1767	}
1768	return 0;
1769}
1770
1771/* Match table for of_platform binding */
1772static const struct of_device_id keystone_qmss_of_match[] = {
1773	{
1774		.compatible = "ti,keystone-navigator-qmss",
1775	},
1776	{
1777		.compatible = "ti,66ak2g-navss-qm",
1778		.data	= (void *)QMSS_66AK2G,
1779	},
1780	{},
1781};
1782MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);
1783
1784static int knav_queue_probe(struct platform_device *pdev)
1785{
1786	struct device_node *node = pdev->dev.of_node;
1787	struct device *dev = &pdev->dev;
1788	u32 temp[2];
1789	int ret;
1790
1791	if (!node) {
1792		dev_err(dev, "device tree info unavailable\n");
1793		return -ENODEV;
1794	}
1795
1796	kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
1797	if (!kdev) {
1798		dev_err(dev, "memory allocation failed\n");
1799		return -ENOMEM;
1800	}
1801
1802	if (device_get_match_data(dev))
1803		kdev->version = QMSS_66AK2G;
1804
1805	platform_set_drvdata(pdev, kdev);
1806	kdev->dev = dev;
1807	INIT_LIST_HEAD(&kdev->queue_ranges);
1808	INIT_LIST_HEAD(&kdev->qmgrs);
1809	INIT_LIST_HEAD(&kdev->pools);
1810	INIT_LIST_HEAD(&kdev->regions);
1811	INIT_LIST_HEAD(&kdev->pdsps);
1812
1813	pm_runtime_enable(&pdev->dev);
1814	ret = pm_runtime_resume_and_get(&pdev->dev);
1815	if (ret < 0) {
1816		pm_runtime_disable(&pdev->dev);
1817		dev_err(dev, "Failed to enable QMSS\n");
1818		return ret;
1819	}
1820
1821	if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
1822		dev_err(dev, "queue-range not specified\n");
1823		ret = -ENODEV;
1824		goto err;
1825	}
1826	kdev->base_id    = temp[0];
1827	kdev->num_queues = temp[1];
1828
1829	/* Initialize queue managers using device tree configuration */
1830	ret = knav_queue_init_qmgrs(kdev, node);
1831	if (ret)
1832		goto err;
1833
1834	/* get pdsp configuration values from device tree */
1835	ret = knav_queue_setup_pdsps(kdev, node);
1836	if (ret)
1837		goto err;
1838
1839	/* get usable queue range values from device tree */
1840	ret = knav_setup_queue_pools(kdev, node);
1841	if (ret)
1842		goto err;
1843
1844	ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
1845	if (ret) {
1846		dev_err(kdev->dev, "could not setup linking ram\n");
1847		goto err;
1848	}
1849
1850	ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
1851	if (ret) {
1852		/*
1853		 * nothing really, we have one linking ram already, so we just
1854		 * live within our means
1855		 */
1856	}
1857
1858	ret = knav_queue_setup_link_ram(kdev);
1859	if (ret)
1860		goto err;
1861
1862	ret = knav_queue_setup_regions(kdev, node);
1863	if (ret)
1864		goto err;
1865
1866	ret = knav_queue_init_queues(kdev);
1867	if (ret < 0) {
1868		dev_err(dev, "hwqueue initialization failed\n");
1869		goto err;
1870	}
1871
1872	debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
1873			    &knav_queue_debug_fops);
1874	device_ready = true;
1875	return 0;
1876
1877err:
1878	knav_queue_stop_pdsps(kdev);
1879	knav_queue_free_regions(kdev);
1880	knav_free_queue_ranges(kdev);
1881	pm_runtime_put_sync(&pdev->dev);
1882	pm_runtime_disable(&pdev->dev);
1883	return ret;
1884}
1885
1886static void knav_queue_remove(struct platform_device *pdev)
1887{
1888	/* TODO: Free resources */
1889	pm_runtime_put_sync(&pdev->dev);
1890	pm_runtime_disable(&pdev->dev);
1891}
1892
1893static struct platform_driver keystone_qmss_driver = {
1894	.probe		= knav_queue_probe,
1895	.remove		= knav_queue_remove,
1896	.driver		= {
1897		.name	= "keystone-navigator-qmss",
1898		.of_match_table = keystone_qmss_of_match,
1899	},
1900};
1901module_platform_driver(keystone_qmss_driver);
1902
1903MODULE_LICENSE("GPL v2");
1904MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
1905MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
1906MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");