   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Keystone Queue Manager subsystem driver
   4 *
   5 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
   6 * Authors:	Sandeep Nair <sandeep_n@ti.com>
   7 *		Cyril Chemparathy <cyril@ti.com>
   8 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
   9 */
  10
  11#include <linux/debugfs.h>
  12#include <linux/dma-mapping.h>
  13#include <linux/firmware.h>
  14#include <linux/interrupt.h>
  15#include <linux/io.h>
  16#include <linux/module.h>
  17#include <linux/of_address.h>
  18#include <linux/of_device.h>
  19#include <linux/of_irq.h>
  20#include <linux/pm_runtime.h>
  21#include <linux/slab.h>
  22#include <linux/soc/ti/knav_qmss.h>
  23
  24#include "knav_qmss.h"
  25
  26static struct knav_device *kdev;
  27static DEFINE_MUTEX(knav_dev_lock);
  28#define knav_dev_lock_held() \
  29	lockdep_is_held(&knav_dev_lock)
  30
  31/* Queue manager register indices in DTS */
  32#define KNAV_QUEUE_PEEK_REG_INDEX	0
  33#define KNAV_QUEUE_STATUS_REG_INDEX	1
  34#define KNAV_QUEUE_CONFIG_REG_INDEX	2
  35#define KNAV_QUEUE_REGION_REG_INDEX	3
  36#define KNAV_QUEUE_PUSH_REG_INDEX	4
  37#define KNAV_QUEUE_POP_REG_INDEX	5
  38
  39/* Queue manager register indices in DTS for QMSS in K2G NAVSS.
  40 * There are no status and vbusm push registers on this version
   41 * of QMSS. Push registers are the same as pop, so all indices
   42 * above 1 are redefined.
  43 */
  44#define KNAV_L_QUEUE_CONFIG_REG_INDEX	1
  45#define KNAV_L_QUEUE_REGION_REG_INDEX	2
  46#define KNAV_L_QUEUE_PUSH_REG_INDEX	3
  47
  48/* PDSP register indices in DTS */
  49#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX	0
  50#define KNAV_QUEUE_PDSP_REGS_REG_INDEX	1
  51#define KNAV_QUEUE_PDSP_INTD_REG_INDEX	2
  52#define KNAV_QUEUE_PDSP_CMD_REG_INDEX	3
  53
  54#define knav_queue_idx_to_inst(kdev, idx)			\
  55	(kdev->instances + (idx << kdev->inst_shift))
  56
  57#define for_each_handle_rcu(qh, inst)				\
  58	list_for_each_entry_rcu(qh, &inst->handles, list,	\
  59				knav_dev_lock_held())
  60
  61#define for_each_instance(idx, inst, kdev)		\
  62	for (idx = 0, inst = kdev->instances;		\
  63	     idx < (kdev)->num_queues_in_use;			\
  64	     idx++, inst = knav_queue_idx_to_inst(kdev, idx))
  65
   66/* All firmware file names end up here. List the firmware file names
   67 * below, newest first, followed by older ones. The search proceeds from
   68 * the start of the array until a firmware file is found.
  69 */
   70static const char * const knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};
  71
  72static bool device_ready;
  73bool knav_qmss_device_ready(void)
  74{
  75	return device_ready;
  76}
  77EXPORT_SYMBOL_GPL(knav_qmss_device_ready);
  78
  79/**
   80 * knav_queue_notify: qmss queue notifier call
  81 *
  82 * @inst:		qmss queue instance like accumulator
  83 */
  84void knav_queue_notify(struct knav_queue_inst *inst)
  85{
  86	struct knav_queue *qh;
  87
  88	if (!inst)
  89		return;
  90
  91	rcu_read_lock();
  92	for_each_handle_rcu(qh, inst) {
  93		if (atomic_read(&qh->notifier_enabled) <= 0)
  94			continue;
  95		if (WARN_ON(!qh->notifier_fn))
  96			continue;
  97		this_cpu_inc(qh->stats->notifies);
  98		qh->notifier_fn(qh->notifier_fn_arg);
  99	}
 100	rcu_read_unlock();
 101}
 102EXPORT_SYMBOL_GPL(knav_queue_notify);
 103
 104static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
 105{
 106	struct knav_queue_inst *inst = _instdata;
 107
 108	knav_queue_notify(inst);
 109	return IRQ_HANDLED;
 110}
 111
 112static int knav_queue_setup_irq(struct knav_range_info *range,
 113			  struct knav_queue_inst *inst)
 114{
 115	unsigned queue = inst->id - range->queue_base;
 116	int ret = 0, irq;
 117
 118	if (range->flags & RANGE_HAS_IRQ) {
 119		irq = range->irqs[queue].irq;
 120		ret = request_irq(irq, knav_queue_int_handler, 0,
 121					inst->irq_name, inst);
 122		if (ret)
 123			return ret;
 124		disable_irq(irq);
 125		if (range->irqs[queue].cpu_mask) {
 126			ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
 127			if (ret) {
 128				dev_warn(range->kdev->dev,
 129					 "Failed to set IRQ affinity\n");
 130				return ret;
 131			}
 132		}
 133	}
 134	return ret;
 135}
 136
 137static void knav_queue_free_irq(struct knav_queue_inst *inst)
 138{
 139	struct knav_range_info *range = inst->range;
 140	unsigned queue = inst->id - inst->range->queue_base;
 141	int irq;
 142
 143	if (range->flags & RANGE_HAS_IRQ) {
 144		irq = range->irqs[queue].irq;
 145		irq_set_affinity_hint(irq, NULL);
 146		free_irq(irq, inst);
 147	}
 148}
 149
 150static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
 151{
 152	return !list_empty(&inst->handles);
 153}
 154
 155static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
 156{
 157	return inst->range->flags & RANGE_RESERVED;
 158}
 159
 160static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
 161{
 162	struct knav_queue *tmp;
 163
 164	rcu_read_lock();
 165	for_each_handle_rcu(tmp, inst) {
 166		if (tmp->flags & KNAV_QUEUE_SHARED) {
 167			rcu_read_unlock();
 168			return true;
 169		}
 170	}
 171	rcu_read_unlock();
 172	return false;
 173}
 174
 175static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
 176						unsigned type)
 177{
 178	if ((type == KNAV_QUEUE_QPEND) &&
 179	    (inst->range->flags & RANGE_HAS_IRQ)) {
 180		return true;
 181	} else if ((type == KNAV_QUEUE_ACC) &&
 182		(inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
 183		return true;
 184	} else if ((type == KNAV_QUEUE_GP) &&
 185		!(inst->range->flags &
 186			(RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
 187		return true;
 188	}
 189	return false;
 190}
 191
 192static inline struct knav_queue_inst *
 193knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
 194{
 195	struct knav_queue_inst *inst;
 196	int idx;
 197
 198	for_each_instance(idx, inst, kdev) {
 199		if (inst->id == id)
 200			return inst;
 201	}
 202	return NULL;
 203}
 204
 205static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
 206{
 207	if (kdev->base_id <= id &&
 208	    kdev->base_id + kdev->num_queues > id) {
 209		id -= kdev->base_id;
 210		return knav_queue_match_id_to_inst(kdev, id);
 211	}
 212	return NULL;
 213}
 214
 215static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
 216				      const char *name, unsigned flags)
 217{
 218	struct knav_queue *qh;
 219	unsigned id;
 220	int ret = 0;
 221
 222	qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
 223	if (!qh)
 224		return ERR_PTR(-ENOMEM);
 225
 226	qh->stats = alloc_percpu(struct knav_queue_stats);
 227	if (!qh->stats) {
 228		ret = -ENOMEM;
 229		goto err;
 230	}
 231
 232	qh->flags = flags;
 233	qh->inst = inst;
 234	id = inst->id - inst->qmgr->start_queue;
 235	qh->reg_push = &inst->qmgr->reg_push[id];
 236	qh->reg_pop = &inst->qmgr->reg_pop[id];
 237	qh->reg_peek = &inst->qmgr->reg_peek[id];
 238
 239	/* first opener? */
 240	if (!knav_queue_is_busy(inst)) {
 241		struct knav_range_info *range = inst->range;
 242
 243		inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
 244		if (range->ops && range->ops->open_queue)
 245			ret = range->ops->open_queue(range, inst, flags);
 246
 247		if (ret)
 248			goto err;
 249	}
 250	list_add_tail_rcu(&qh->list, &inst->handles);
 251	return qh;
 252
 253err:
 254	if (qh->stats)
 255		free_percpu(qh->stats);
 256	devm_kfree(inst->kdev->dev, qh);
 257	return ERR_PTR(ret);
 258}
 259
 260static struct knav_queue *
 261knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
 262{
 263	struct knav_queue_inst *inst;
 264	struct knav_queue *qh;
 265
 266	mutex_lock(&knav_dev_lock);
 267
 268	qh = ERR_PTR(-ENODEV);
 269	inst = knav_queue_find_by_id(id);
 270	if (!inst)
 271		goto unlock_ret;
 272
 273	qh = ERR_PTR(-EEXIST);
 274	if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
 275		goto unlock_ret;
 276
 277	qh = ERR_PTR(-EBUSY);
 278	if ((flags & KNAV_QUEUE_SHARED) &&
 279	    (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
 280		goto unlock_ret;
 281
 282	qh = __knav_queue_open(inst, name, flags);
 283
 284unlock_ret:
 285	mutex_unlock(&knav_dev_lock);
 286
 287	return qh;
 288}
 289
 290static struct knav_queue *knav_queue_open_by_type(const char *name,
 291						unsigned type, unsigned flags)
 292{
 293	struct knav_queue_inst *inst;
 294	struct knav_queue *qh = ERR_PTR(-EINVAL);
 295	int idx;
 296
 297	mutex_lock(&knav_dev_lock);
 298
 299	for_each_instance(idx, inst, kdev) {
 300		if (knav_queue_is_reserved(inst))
 301			continue;
 302		if (!knav_queue_match_type(inst, type))
 303			continue;
 304		if (knav_queue_is_busy(inst))
 305			continue;
 306		qh = __knav_queue_open(inst, name, flags);
 307		goto unlock_ret;
 308	}
 309
 310unlock_ret:
 311	mutex_unlock(&knav_dev_lock);
 312	return qh;
 313}
 314
 315static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
 316{
 317	struct knav_range_info *range = inst->range;
 318
 319	if (range->ops && range->ops->set_notify)
 320		range->ops->set_notify(range, inst, enabled);
 321}
 322
 323static int knav_queue_enable_notifier(struct knav_queue *qh)
 324{
 325	struct knav_queue_inst *inst = qh->inst;
 326	bool first;
 327
 328	if (WARN_ON(!qh->notifier_fn))
 329		return -EINVAL;
 330
 331	/* Adjust the per handle notifier count */
 332	first = (atomic_inc_return(&qh->notifier_enabled) == 1);
 333	if (!first)
 334		return 0; /* nothing to do */
 335
 336	/* Now adjust the per instance notifier count */
 337	first = (atomic_inc_return(&inst->num_notifiers) == 1);
 338	if (first)
 339		knav_queue_set_notify(inst, true);
 340
 341	return 0;
 342}
 343
 344static int knav_queue_disable_notifier(struct knav_queue *qh)
 345{
 346	struct knav_queue_inst *inst = qh->inst;
 347	bool last;
 348
 349	last = (atomic_dec_return(&qh->notifier_enabled) == 0);
 350	if (!last)
 351		return 0; /* nothing to do */
 352
 353	last = (atomic_dec_return(&inst->num_notifiers) == 0);
 354	if (last)
 355		knav_queue_set_notify(inst, false);
 356
 357	return 0;
 358}
 359
 360static int knav_queue_set_notifier(struct knav_queue *qh,
 361				struct knav_queue_notify_config *cfg)
 362{
 363	knav_queue_notify_fn old_fn = qh->notifier_fn;
 364
 365	if (!cfg)
 366		return -EINVAL;
 367
 368	if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
 369		return -ENOTSUPP;
 370
 371	if (!cfg->fn && old_fn)
 372		knav_queue_disable_notifier(qh);
 373
 374	qh->notifier_fn = cfg->fn;
 375	qh->notifier_fn_arg = cfg->fn_arg;
 376
 377	if (cfg->fn && !old_fn)
 378		knav_queue_enable_notifier(qh);
 379
 380	return 0;
 381}
 382
 383static int knav_gp_set_notify(struct knav_range_info *range,
 384			       struct knav_queue_inst *inst,
 385			       bool enabled)
 386{
 387	unsigned queue;
 388
 389	if (range->flags & RANGE_HAS_IRQ) {
 390		queue = inst->id - range->queue_base;
 391		if (enabled)
 392			enable_irq(range->irqs[queue].irq);
 393		else
 394			disable_irq_nosync(range->irqs[queue].irq);
 395	}
 396	return 0;
 397}
 398
 399static int knav_gp_open_queue(struct knav_range_info *range,
 400				struct knav_queue_inst *inst, unsigned flags)
 401{
 402	return knav_queue_setup_irq(range, inst);
 403}
 404
 405static int knav_gp_close_queue(struct knav_range_info *range,
 406				struct knav_queue_inst *inst)
 407{
 408	knav_queue_free_irq(inst);
 409	return 0;
 410}
 411
 412static struct knav_range_ops knav_gp_range_ops = {
 413	.set_notify	= knav_gp_set_notify,
 414	.open_queue	= knav_gp_open_queue,
 415	.close_queue	= knav_gp_close_queue,
 416};
 417
 418
 419static int knav_queue_get_count(void *qhandle)
 420{
 421	struct knav_queue *qh = qhandle;
 422	struct knav_queue_inst *inst = qh->inst;
 423
 424	return readl_relaxed(&qh->reg_peek[0].entry_count) +
 425		atomic_read(&inst->desc_count);
 426}
 427
 428static void knav_queue_debug_show_instance(struct seq_file *s,
 429					struct knav_queue_inst *inst)
 430{
 431	struct knav_device *kdev = inst->kdev;
 432	struct knav_queue *qh;
 433	int cpu = 0;
 434	int pushes = 0;
 435	int pops = 0;
 436	int push_errors = 0;
 437	int pop_errors = 0;
 438	int notifies = 0;
 439
 440	if (!knav_queue_is_busy(inst))
 441		return;
 442
 443	seq_printf(s, "\tqueue id %d (%s)\n",
 444		   kdev->base_id + inst->id, inst->name);
 445	for_each_handle_rcu(qh, inst) {
 446		for_each_possible_cpu(cpu) {
 447			pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
 448			pops += per_cpu_ptr(qh->stats, cpu)->pops;
 449			push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
 450			pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
 451			notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
 452		}
 453
 454		seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n",
 455				qh,
 456				pushes,
 457				pops,
 458				knav_queue_get_count(qh),
 459				notifies,
 460				push_errors,
 461				pop_errors);
 462	}
 463}
 464
 465static int knav_queue_debug_show(struct seq_file *s, void *v)
 466{
 467	struct knav_queue_inst *inst;
 468	int idx;
 469
 470	mutex_lock(&knav_dev_lock);
 471	seq_printf(s, "%s: %u-%u\n",
 472		   dev_name(kdev->dev), kdev->base_id,
 473		   kdev->base_id + kdev->num_queues - 1);
 474	for_each_instance(idx, inst, kdev)
 475		knav_queue_debug_show_instance(s, inst);
 476	mutex_unlock(&knav_dev_lock);
 477
 478	return 0;
 479}
 480
 481static int knav_queue_debug_open(struct inode *inode, struct file *file)
 482{
 483	return single_open(file, knav_queue_debug_show, NULL);
 484}
 485
 486static const struct file_operations knav_queue_debug_ops = {
 487	.open		= knav_queue_debug_open,
 488	.read		= seq_read,
 489	.llseek		= seq_lseek,
 490	.release	= single_release,
 491};
 492
 493static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
 494					u32 flags)
 495{
 496	unsigned long end;
 497	u32 val = 0;
 498
 499	end = jiffies + msecs_to_jiffies(timeout);
 500	while (time_after(end, jiffies)) {
 501		val = readl_relaxed(addr);
 502		if (flags)
 503			val &= flags;
 504		if (!val)
 505			break;
 506		cpu_relax();
 507	}
 508	return val ? -ETIMEDOUT : 0;
 509}
 510
 511
 512static int knav_queue_flush(struct knav_queue *qh)
 513{
 514	struct knav_queue_inst *inst = qh->inst;
 515	unsigned id = inst->id - inst->qmgr->start_queue;
 516
 517	atomic_set(&inst->desc_count, 0);
 518	writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
 519	return 0;
 520}
 521
 522/**
 523 * knav_queue_open()	- open a hardware queue
 524 * @name		- name to give the queue handle
  525 * @id			- desired queue number, if any, or specifies the type
 526 *			  of queue
 527 * @flags		- the following flags are applicable to queues:
 528 *	KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
 529 *			     exclusive by default.
 530 *			     Subsequent attempts to open a shared queue should
 531 *			     also have this flag.
 532 *
 533 * Returns a handle to the open hardware queue if successful. Use IS_ERR()
 534 * to check the returned value for error codes.
 535 */
 536void *knav_queue_open(const char *name, unsigned id,
 537					unsigned flags)
 538{
 539	struct knav_queue *qh = ERR_PTR(-EINVAL);
 540
 541	switch (id) {
 542	case KNAV_QUEUE_QPEND:
 543	case KNAV_QUEUE_ACC:
 544	case KNAV_QUEUE_GP:
 545		qh = knav_queue_open_by_type(name, id, flags);
 546		break;
 547
 548	default:
 549		qh = knav_queue_open_by_id(name, id, flags);
 550		break;
 551	}
 552	return qh;
 553}
 554EXPORT_SYMBOL_GPL(knav_queue_open);
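
/*
 * Editor's note: a minimal usage sketch of knav_queue_open() and
 * knav_queue_close(), added for illustration only (not part of the
 * original driver). The handle names and queue id 1000 are hypothetical.
 */
static void __maybe_unused knav_queue_open_example(void)
{
	void *gp_qh, *shared_qh;

	/* grab any free general purpose queue, exclusively */
	gp_qh = knav_queue_open("example-gp", KNAV_QUEUE_GP, 0);
	if (IS_ERR(gp_qh))
		return;

	/* open a specific queue by id; every opener must pass SHARED */
	shared_qh = knav_queue_open("example-shared", 1000, KNAV_QUEUE_SHARED);
	if (!IS_ERR(shared_qh))
		knav_queue_close(shared_qh);

	knav_queue_close(gp_qh);
}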
 555
 556/**
 557 * knav_queue_close()	- close a hardware queue handle
 558 * @qh			- handle to close
 559 */
 560void knav_queue_close(void *qhandle)
 561{
 562	struct knav_queue *qh = qhandle;
 563	struct knav_queue_inst *inst = qh->inst;
 564
 565	while (atomic_read(&qh->notifier_enabled) > 0)
 566		knav_queue_disable_notifier(qh);
 567
 568	mutex_lock(&knav_dev_lock);
 569	list_del_rcu(&qh->list);
 570	mutex_unlock(&knav_dev_lock);
 571	synchronize_rcu();
 572	if (!knav_queue_is_busy(inst)) {
 573		struct knav_range_info *range = inst->range;
 574
 575		if (range->ops && range->ops->close_queue)
 576			range->ops->close_queue(range, inst);
 577	}
 578	free_percpu(qh->stats);
 579	devm_kfree(inst->kdev->dev, qh);
 580}
 581EXPORT_SYMBOL_GPL(knav_queue_close);
 582
 583/**
 584 * knav_queue_device_control()	- Perform control operations on a queue
 585 * @qh				- queue handle
 586 * @cmd				- control commands
 587 * @arg				- command argument
 588 *
 589 * Returns 0 on success, errno otherwise.
 590 */
 591int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
 592				unsigned long arg)
 593{
 594	struct knav_queue *qh = qhandle;
 595	struct knav_queue_notify_config *cfg;
 596	int ret;
 597
 598	switch ((int)cmd) {
 599	case KNAV_QUEUE_GET_ID:
 600		ret = qh->inst->kdev->base_id + qh->inst->id;
 601		break;
 602
 603	case KNAV_QUEUE_FLUSH:
 604		ret = knav_queue_flush(qh);
 605		break;
 606
 607	case KNAV_QUEUE_SET_NOTIFIER:
 608		cfg = (void *)arg;
 609		ret = knav_queue_set_notifier(qh, cfg);
 610		break;
 611
 612	case KNAV_QUEUE_ENABLE_NOTIFY:
 613		ret = knav_queue_enable_notifier(qh);
 614		break;
 615
 616	case KNAV_QUEUE_DISABLE_NOTIFY:
 617		ret = knav_queue_disable_notifier(qh);
 618		break;
 619
 620	case KNAV_QUEUE_GET_COUNT:
 621		ret = knav_queue_get_count(qh);
 622		break;
 623
 624	default:
 625		ret = -ENOTSUPP;
 626		break;
 627	}
 628	return ret;
 629}
 630EXPORT_SYMBOL_GPL(knav_queue_device_control);
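
/*
 * Editor's note: a hypothetical sketch showing how a client installs a
 * notification callback through knav_queue_device_control(); the callback
 * and its argument are illustrative, not part of the original driver.
 */
static void __maybe_unused knav_queue_example_notify_cb(void *arg)
{
	/* typically schedules NAPI or a work item for the queue */
}

static int __maybe_unused knav_queue_set_notifier_example(void *qh)
{
	struct knav_queue_notify_config cfg = {
		.fn	= knav_queue_example_notify_cb,
		.fn_arg	= NULL,
	};

	/* installing a valid fn also enables notification (see above) */
	return knav_queue_device_control(qh, KNAV_QUEUE_SET_NOTIFIER,
					 (unsigned long)&cfg);
}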
 631
 632
 633
 634/**
 635 * knav_queue_push()	- push data (or descriptor) to the tail of a queue
 636 * @qh			- hardware queue handle
  637 * @dma		- DMA address of the data to push
 638 * @size		- size of data to push
 639 * @flags		- can be used to pass additional information
 640 *
 641 * Returns 0 on success, errno otherwise.
 642 */
 643int knav_queue_push(void *qhandle, dma_addr_t dma,
 644					unsigned size, unsigned flags)
 645{
 646	struct knav_queue *qh = qhandle;
 647	u32 val;
 648
 649	val = (u32)dma | ((size / 16) - 1);
 650	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);
 651
 652	this_cpu_inc(qh->stats->pushes);
 653	return 0;
 654}
 655EXPORT_SYMBOL_GPL(knav_queue_push);
 656
 657/**
 658 * knav_queue_pop()	- pop data (or descriptor) from the head of a queue
 659 * @qh			- hardware queue handle
  660 * @size		- (optional) size of the data popped.
 661 *
 662 * Returns a DMA address on success, 0 on failure.
 663 */
 664dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
 665{
 666	struct knav_queue *qh = qhandle;
 667	struct knav_queue_inst *inst = qh->inst;
 668	dma_addr_t dma;
 669	u32 val, idx;
 670
 671	/* are we accumulated? */
 672	if (inst->descs) {
 673		if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
 674			atomic_inc(&inst->desc_count);
 675			return 0;
 676		}
 677		idx  = atomic_inc_return(&inst->desc_head);
 678		idx &= ACC_DESCS_MASK;
 679		val = inst->descs[idx];
 680	} else {
 681		val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
 682		if (unlikely(!val))
 683			return 0;
 684	}
 685
 686	dma = val & DESC_PTR_MASK;
 687	if (size)
 688		*size = ((val & DESC_SIZE_MASK) + 1) * 16;
 689
 690	this_cpu_inc(qh->stats->pops);
 691	return dma;
 692}
 693EXPORT_SYMBOL_GPL(knav_queue_pop);
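
/*
 * Editor's note: a hypothetical push/pop round trip, assuming 'dma' points
 * at a 64-byte descriptor that is already device-visible. The push register
 * word encodes the size as (size / 16) - 1, so 64 bytes is stored as 3 and
 * decoded back by knav_queue_pop() as (3 + 1) * 16. Not part of the
 * original driver.
 */
static void __maybe_unused knav_queue_push_pop_example(void *qh,
						       dma_addr_t dma)
{
	dma_addr_t popped;
	unsigned size;

	knav_queue_push(qh, dma, 64, 0);

	popped = knav_queue_pop(qh, &size);	/* size comes back as 64 */
	if (!popped)
		return;				/* queue was empty */
}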
 694
 695/* carve out descriptors and push into queue */
 696static void kdesc_fill_pool(struct knav_pool *pool)
 697{
 698	struct knav_region *region;
 699	int i;
 700
 701	region = pool->region;
 702	pool->desc_size = region->desc_size;
 703	for (i = 0; i < pool->num_desc; i++) {
 704		int index = pool->region_offset + i;
 705		dma_addr_t dma_addr;
 706		unsigned dma_size;
 707		dma_addr = region->dma_start + (region->desc_size * index);
 708		dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
 709		dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
 710					   DMA_TO_DEVICE);
 711		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
 712	}
 713}
 714
 715/* pop out descriptors and close the queue */
 716static void kdesc_empty_pool(struct knav_pool *pool)
 717{
 718	dma_addr_t dma;
 719	unsigned size;
 720	void *desc;
 721	int i;
 722
 723	if (!pool->queue)
 724		return;
 725
 726	for (i = 0;; i++) {
 727		dma = knav_queue_pop(pool->queue, &size);
 728		if (!dma)
 729			break;
 730		desc = knav_pool_desc_dma_to_virt(pool, dma);
 731		if (!desc) {
 732			dev_dbg(pool->kdev->dev,
 733				"couldn't unmap desc, continuing\n");
 734			continue;
 735		}
 736	}
 737	WARN_ON(i != pool->num_desc);
 738	knav_queue_close(pool->queue);
 739}
 740
 741
 742/* Get the DMA address of a descriptor */
 743dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
 744{
 745	struct knav_pool *pool = ph;
 746	return pool->region->dma_start + (virt - pool->region->virt_start);
 747}
 748EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);
 749
 750void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
 751{
 752	struct knav_pool *pool = ph;
 753	return pool->region->virt_start + (dma - pool->region->dma_start);
 754}
 755EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
 756
 757/**
 758 * knav_pool_create()	- Create a pool of descriptors
 759 * @name		- name to give the pool handle
  760 * @num_desc		- number of descriptors in the pool
 761 * @region_id		- QMSS region id from which the descriptors are to be
 762 *			  allocated.
 763 *
 764 * Returns a pool handle on success.
 765 * Use IS_ERR_OR_NULL() to identify error values on return.
 766 */
 767void *knav_pool_create(const char *name,
 768					int num_desc, int region_id)
 769{
 770	struct knav_region *reg_itr, *region = NULL;
 771	struct knav_pool *pool, *pi;
 772	struct list_head *node;
 773	unsigned last_offset;
 774	bool slot_found;
 775	int ret;
 776
 777	if (!kdev)
 778		return ERR_PTR(-EPROBE_DEFER);
 779
 780	if (!kdev->dev)
 781		return ERR_PTR(-ENODEV);
 782
 783	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
 784	if (!pool) {
 785		dev_err(kdev->dev, "out of memory allocating pool\n");
 786		return ERR_PTR(-ENOMEM);
 787	}
 788
 789	for_each_region(kdev, reg_itr) {
 790		if (reg_itr->id != region_id)
 791			continue;
 792		region = reg_itr;
 793		break;
 794	}
 795
 796	if (!region) {
 797		dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
 798		ret = -EINVAL;
 799		goto err;
 800	}
 801
 802	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
 803	if (IS_ERR_OR_NULL(pool->queue)) {
 804		dev_err(kdev->dev,
 805			"failed to open queue for pool(%s), error %ld\n",
 806			name, PTR_ERR(pool->queue));
 807		ret = PTR_ERR(pool->queue);
 808		goto err;
 809	}
 810
 811	pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
 812	pool->kdev = kdev;
 813	pool->dev = kdev->dev;
 814
 815	mutex_lock(&knav_dev_lock);
 816
 817	if (num_desc > (region->num_desc - region->used_desc)) {
 818		dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
 819			region_id, name);
 820		ret = -ENOMEM;
 821		goto err_unlock;
 822	}
 823
  824	/* The region maintains a list of pools sorted by region offset;
  825	 * use the first free slot that is large enough to accommodate
  826	 * the request.
 827	 */
 828	last_offset = 0;
 829	slot_found = false;
 830	node = &region->pools;
 831	list_for_each_entry(pi, &region->pools, region_inst) {
 832		if ((pi->region_offset - last_offset) >= num_desc) {
 833			slot_found = true;
 834			break;
 835		}
 836		last_offset = pi->region_offset + pi->num_desc;
 837	}
 838	node = &pi->region_inst;
 839
 840	if (slot_found) {
 841		pool->region = region;
 842		pool->num_desc = num_desc;
 843		pool->region_offset = last_offset;
 844		region->used_desc += num_desc;
 845		list_add_tail(&pool->list, &kdev->pools);
 846		list_add_tail(&pool->region_inst, node);
 847	} else {
 848		dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
 849			name, region_id);
 850		ret = -ENOMEM;
 851		goto err_unlock;
 852	}
 853
 854	mutex_unlock(&knav_dev_lock);
 855	kdesc_fill_pool(pool);
 856	return pool;
 857
 858err_unlock:
 859	mutex_unlock(&knav_dev_lock);
 860err:
 861	kfree(pool->name);
 862	devm_kfree(kdev->dev, pool);
 863	return ERR_PTR(ret);
 864}
 865EXPORT_SYMBOL_GPL(knav_pool_create);
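
/*
 * Editor's note: a hypothetical create/destroy cycle for a descriptor pool.
 * The pool name, descriptor count and region id are illustrative only; this
 * is not part of the original driver.
 */
static int __maybe_unused knav_pool_create_example(void)
{
	void *pool;

	/* carve 512 descriptors out of QMSS region 0 */
	pool = knav_pool_create("example-pool", 512, 0);
	if (IS_ERR_OR_NULL(pool))
		return pool ? PTR_ERR(pool) : -ENOMEM;

	knav_pool_destroy(pool);
	return 0;
}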
 866
 867/**
 868 * knav_pool_destroy()	- Free a pool of descriptors
 869 * @pool		- pool handle
 870 */
 871void knav_pool_destroy(void *ph)
 872{
 873	struct knav_pool *pool = ph;
 874
 875	if (!pool)
 876		return;
 877
 878	if (!pool->region)
 879		return;
 880
 881	kdesc_empty_pool(pool);
 882	mutex_lock(&knav_dev_lock);
 883
 884	pool->region->used_desc -= pool->num_desc;
 885	list_del(&pool->region_inst);
 886	list_del(&pool->list);
 887
 888	mutex_unlock(&knav_dev_lock);
 889	kfree(pool->name);
 890	devm_kfree(kdev->dev, pool);
 891}
 892EXPORT_SYMBOL_GPL(knav_pool_destroy);
 893
 894
 895/**
 896 * knav_pool_desc_get()	- Get a descriptor from the pool
 897 * @pool			- pool handle
 898 *
 899 * Returns descriptor from the pool.
 900 */
 901void *knav_pool_desc_get(void *ph)
 902{
 903	struct knav_pool *pool = ph;
 904	dma_addr_t dma;
 905	unsigned size;
 906	void *data;
 907
 908	dma = knav_queue_pop(pool->queue, &size);
 909	if (unlikely(!dma))
 910		return ERR_PTR(-ENOMEM);
 911	data = knav_pool_desc_dma_to_virt(pool, dma);
 912	return data;
 913}
 914EXPORT_SYMBOL_GPL(knav_pool_desc_get);
 915
 916/**
 917 * knav_pool_desc_put()	- return a descriptor to the pool
 918 * @pool			- pool handle
 919 */
 920void knav_pool_desc_put(void *ph, void *desc)
 921{
 922	struct knav_pool *pool = ph;
 923	dma_addr_t dma;
 924	dma = knav_pool_desc_virt_to_dma(pool, desc);
 925	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
 926}
 927EXPORT_SYMBOL_GPL(knav_pool_desc_put);
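
/*
 * Editor's note: a hypothetical get/put cycle on a pool handle. A pool is
 * backed by a hardware queue of free descriptors, so a get pops one and a
 * put pushes it back. Not part of the original driver.
 */
static int __maybe_unused knav_pool_desc_example(void *ph)
{
	void *desc;

	desc = knav_pool_desc_get(ph);
	if (IS_ERR(desc))
		return PTR_ERR(desc);	/* -ENOMEM: pool exhausted */

	/* ... fill in the descriptor here ... */

	knav_pool_desc_put(ph, desc);
	return 0;
}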
 928
 929/**
 930 * knav_pool_desc_map()	- Map descriptor for DMA transfer
 931 * @pool			- pool handle
 932 * @desc			- address of descriptor to map
 933 * @size			- size of descriptor to map
 934 * @dma				- DMA address return pointer
  935 * @dma_sz			- adjusted (aligned) size return pointer
 936 *
 937 * Returns 0 on success, errno otherwise.
 938 */
 939int knav_pool_desc_map(void *ph, void *desc, unsigned size,
 940					dma_addr_t *dma, unsigned *dma_sz)
 941{
 942	struct knav_pool *pool = ph;
 943	*dma = knav_pool_desc_virt_to_dma(pool, desc);
 944	size = min(size, pool->region->desc_size);
 945	size = ALIGN(size, SMP_CACHE_BYTES);
 946	*dma_sz = size;
 947	dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);
 948
  949	/* Ensure the descriptor reaches memory */
 950	__iowmb();
 951
 952	return 0;
 953}
 954EXPORT_SYMBOL_GPL(knav_pool_desc_map);
 955
 956/**
 957 * knav_pool_desc_unmap()	- Unmap descriptor after DMA transfer
 958 * @pool			- pool handle
 959 * @dma				- DMA address of descriptor to unmap
 960 * @dma_sz			- size of descriptor to unmap
 961 *
 962 * Returns descriptor address on success, Use IS_ERR_OR_NULL() to identify
 963 * error values on return.
 964 */
 965void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
 966{
 967	struct knav_pool *pool = ph;
 968	unsigned desc_sz;
 969	void *desc;
 970
 971	desc_sz = min(dma_sz, pool->region->desc_size);
 972	desc = knav_pool_desc_dma_to_virt(pool, dma);
 973	dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
 974	prefetch(desc);
 975	return desc;
 976}
 977EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
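
/*
 * Editor's note: a hypothetical map/unmap round trip for handing a
 * descriptor to hardware and reading it back afterwards. The 32-byte
 * payload size is illustrative; this is not part of the original driver.
 */
static void __maybe_unused knav_pool_desc_map_example(void *ph, void *desc)
{
	dma_addr_t dma;
	unsigned dma_sz;

	/* sync for the device; dma/dma_sz are what hardware consumes */
	knav_pool_desc_map(ph, desc, 32, &dma, &dma_sz);

	/* ... hardware processes and returns the descriptor ... */

	/* sync back for the CPU before touching the descriptor again */
	desc = knav_pool_desc_unmap(ph, dma, dma_sz);
}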
 978
 979/**
 980 * knav_pool_count()	- Get the number of descriptors in pool.
 981 * @pool		- pool handle
 982 * Returns number of elements in the pool.
 983 */
 984int knav_pool_count(void *ph)
 985{
 986	struct knav_pool *pool = ph;
 987	return knav_queue_get_count(pool->queue);
 988}
 989EXPORT_SYMBOL_GPL(knav_pool_count);
 990
 991static void knav_queue_setup_region(struct knav_device *kdev,
 992					struct knav_region *region)
 993{
 994	unsigned hw_num_desc, hw_desc_size, size;
 995	struct knav_reg_region __iomem  *regs;
 996	struct knav_qmgr_info *qmgr;
 997	struct knav_pool *pool;
 998	int id = region->id;
 999	struct page *page;
1000
1001	/* unused region? */
1002	if (!region->num_desc) {
1003		dev_warn(kdev->dev, "unused region %s\n", region->name);
1004		return;
1005	}
1006
1007	/* get hardware descriptor value */
1008	hw_num_desc = ilog2(region->num_desc - 1) + 1;
1009
1010	/* did we force fit ourselves into nothingness? */
1011	if (region->num_desc < 32) {
1012		region->num_desc = 0;
1013		dev_warn(kdev->dev, "too few descriptors in region %s\n",
1014			 region->name);
1015		return;
1016	}
1017
1018	size = region->num_desc * region->desc_size;
1019	region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
1020						GFP_DMA32);
1021	if (!region->virt_start) {
1022		region->num_desc = 0;
1023		dev_err(kdev->dev, "memory alloc failed for region %s\n",
1024			region->name);
1025		return;
1026	}
1027	region->virt_end = region->virt_start + size;
1028	page = virt_to_page(region->virt_start);
1029
1030	region->dma_start = dma_map_page(kdev->dev, page, 0, size,
1031					 DMA_BIDIRECTIONAL);
1032	if (dma_mapping_error(kdev->dev, region->dma_start)) {
1033		dev_err(kdev->dev, "dma map failed for region %s\n",
1034			region->name);
1035		goto fail;
1036	}
1037	region->dma_end = region->dma_start + size;
1038
1039	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
1040	if (!pool) {
1041		dev_err(kdev->dev, "out of memory allocating dummy pool\n");
1042		goto fail;
1043	}
1044	pool->num_desc = 0;
1045	pool->region_offset = region->num_desc;
1046	list_add(&pool->region_inst, &region->pools);
1047
1048	dev_dbg(kdev->dev,
1049		"region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
1050		region->name, id, region->desc_size, region->num_desc,
1051		region->link_index, &region->dma_start, &region->dma_end,
1052		region->virt_start, region->virt_end);
1053
1054	hw_desc_size = (region->desc_size / 16) - 1;
1055	hw_num_desc -= 5;
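	/*
	 * Editor's note: worked example with illustrative numbers. For
	 * num_desc = 4096 and desc_size = 128:
	 *   hw_num_desc  = ilog2(4095) + 1 - 5 = 12 - 5 = 7
	 *                  (the hardware encodes 2^(5 + 7) = 4096 descriptors)
	 *   hw_desc_size = 128 / 16 - 1 = 7
	 * so size_count is written as (7 << 16) | 7.
	 */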
1056
1057	for_each_qmgr(kdev, qmgr) {
1058		regs = qmgr->reg_region + id;
1059		writel_relaxed((u32)region->dma_start, &regs->base);
1060		writel_relaxed(region->link_index, &regs->start_index);
1061		writel_relaxed(hw_desc_size << 16 | hw_num_desc,
1062			       &regs->size_count);
1063	}
1064	return;
1065
1066fail:
1067	if (region->dma_start)
1068		dma_unmap_page(kdev->dev, region->dma_start, size,
1069				DMA_BIDIRECTIONAL);
1070	if (region->virt_start)
1071		free_pages_exact(region->virt_start, size);
1072	region->num_desc = 0;
1073	return;
1074}
1075
1076static const char *knav_queue_find_name(struct device_node *node)
1077{
1078	const char *name;
1079
1080	if (of_property_read_string(node, "label", &name) < 0)
1081		name = node->name;
1082	if (!name)
1083		name = "unknown";
1084	return name;
1085}
1086
1087static int knav_queue_setup_regions(struct knav_device *kdev,
1088					struct device_node *regions)
1089{
1090	struct device *dev = kdev->dev;
1091	struct knav_region *region;
1092	struct device_node *child;
1093	u32 temp[2];
1094	int ret;
1095
1096	for_each_child_of_node(regions, child) {
1097		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
1098		if (!region) {
1099			dev_err(dev, "out of memory allocating region\n");
1100			return -ENOMEM;
1101		}
1102
1103		region->name = knav_queue_find_name(child);
1104		of_property_read_u32(child, "id", &region->id);
1105		ret = of_property_read_u32_array(child, "region-spec", temp, 2);
1106		if (!ret) {
1107			region->num_desc  = temp[0];
1108			region->desc_size = temp[1];
1109		} else {
1110			dev_err(dev, "invalid region info %s\n", region->name);
1111			devm_kfree(dev, region);
1112			continue;
1113		}
1114
1115		if (!of_get_property(child, "link-index", NULL)) {
1116			dev_err(dev, "No link info for %s\n", region->name);
1117			devm_kfree(dev, region);
1118			continue;
1119		}
1120		ret = of_property_read_u32(child, "link-index",
1121					   &region->link_index);
1122		if (ret) {
1123			dev_err(dev, "link index not found for %s\n",
1124				region->name);
1125			devm_kfree(dev, region);
1126			continue;
1127		}
1128
1129		INIT_LIST_HEAD(&region->pools);
1130		list_add_tail(&region->list, &kdev->regions);
1131	}
1132	if (list_empty(&kdev->regions)) {
1133		dev_err(dev, "no valid region information found\n");
1134		return -ENODEV;
1135	}
1136
1137	/* Next, we run through the regions and set things up */
1138	for_each_region(kdev, region)
1139		knav_queue_setup_region(kdev, region);
1140
1141	return 0;
1142}
1143
1144static int knav_get_link_ram(struct knav_device *kdev,
1145				       const char *name,
1146				       struct knav_link_ram_block *block)
1147{
1148	struct platform_device *pdev = to_platform_device(kdev->dev);
1149	struct device_node *node = pdev->dev.of_node;
1150	u32 temp[2];
1151
1152	/*
1153	 * Note: link ram resources are specified in "entry" sized units. In
 1154 * reality, although entries are ~40 bits in hardware, we treat them as
1155	 * 64-bit entities here.
1156	 *
1157	 * For example, to specify the internal link ram for Keystone-I class
1158	 * devices, we would set the linkram0 resource to 0x80000-0x83fff.
1159	 *
1160	 * This gets a bit weird when other link rams are used.  For example,
1161	 * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
1162	 * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
1163	 * which accounts for 64-bits per entry, for 16K entries.
1164	 */
 1165	if (!of_property_read_u32_array(node, name, temp, 2)) {
1166		if (temp[0]) {
1167			/*
1168			 * queue_base specified => using internal or onchip
1169			 * link ram WARNING - we do not "reserve" this block
1170			 */
1171			block->dma = (dma_addr_t)temp[0];
1172			block->virt = NULL;
1173			block->size = temp[1];
1174		} else {
1175			block->size = temp[1];
 1176			/* queue_base not specified => allocate requested size */
1177			block->virt = dmam_alloc_coherent(kdev->dev,
1178						  8 * block->size, &block->dma,
1179						  GFP_KERNEL);
1180			if (!block->virt) {
1181				dev_err(kdev->dev, "failed to alloc linkram\n");
1182				return -ENOMEM;
1183			}
1184		}
1185	} else {
1186		return -ENODEV;
1187	}
1188	return 0;
1189}
1190
1191static int knav_queue_setup_link_ram(struct knav_device *kdev)
1192{
1193	struct knav_link_ram_block *block;
1194	struct knav_qmgr_info *qmgr;
1195
1196	for_each_qmgr(kdev, qmgr) {
1197		block = &kdev->link_rams[0];
1198		dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
1199			&block->dma, block->virt, block->size);
1200		writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
1201		if (kdev->version == QMSS_66AK2G)
1202			writel_relaxed(block->size,
1203				       &qmgr->reg_config->link_ram_size0);
1204		else
1205			writel_relaxed(block->size - 1,
1206				       &qmgr->reg_config->link_ram_size0);
1207		block++;
1208		if (!block->size)
1209			continue;
1210
1211		dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
1212			&block->dma, block->virt, block->size);
1213		writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
1214	}
1215
1216	return 0;
1217}
1218
1219static int knav_setup_queue_range(struct knav_device *kdev,
1220					struct device_node *node)
1221{
1222	struct device *dev = kdev->dev;
1223	struct knav_range_info *range;
1224	struct knav_qmgr_info *qmgr;
1225	u32 temp[2], start, end, id, index;
1226	int ret, i;
1227
1228	range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
1229	if (!range) {
1230		dev_err(dev, "out of memory allocating range\n");
1231		return -ENOMEM;
1232	}
1233
1234	range->kdev = kdev;
1235	range->name = knav_queue_find_name(node);
1236	ret = of_property_read_u32_array(node, "qrange", temp, 2);
1237	if (!ret) {
1238		range->queue_base = temp[0] - kdev->base_id;
1239		range->num_queues = temp[1];
1240	} else {
1241		dev_err(dev, "invalid queue range %s\n", range->name);
1242		devm_kfree(dev, range);
1243		return -EINVAL;
1244	}
1245
1246	for (i = 0; i < RANGE_MAX_IRQS; i++) {
1247		struct of_phandle_args oirq;
1248
1249		if (of_irq_parse_one(node, i, &oirq))
1250			break;
1251
1252		range->irqs[i].irq = irq_create_of_mapping(&oirq);
1253		if (range->irqs[i].irq == IRQ_NONE)
1254			break;
1255
1256		range->num_irqs++;
1257
1258		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
1259			unsigned long mask;
1260			int bit;
1261
1262			range->irqs[i].cpu_mask = devm_kzalloc(dev,
1263							       cpumask_size(), GFP_KERNEL);
1264			if (!range->irqs[i].cpu_mask)
1265				return -ENOMEM;
1266
1267			mask = (oirq.args[2] & 0x0000ff00) >> 8;
1268			for_each_set_bit(bit, &mask, BITS_PER_LONG)
1269				cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
1270		}
1271	}
1272
1273	range->num_irqs = min(range->num_irqs, range->num_queues);
1274	if (range->num_irqs)
1275		range->flags |= RANGE_HAS_IRQ;
1276
1277	if (of_get_property(node, "qalloc-by-id", NULL))
1278		range->flags |= RANGE_RESERVED;
1279
1280	if (of_get_property(node, "accumulator", NULL)) {
1281		ret = knav_init_acc_range(kdev, node, range);
1282		if (ret < 0) {
1283			devm_kfree(dev, range);
1284			return ret;
1285		}
1286	} else {
1287		range->ops = &knav_gp_range_ops;
1288	}
1289
1290	/* set threshold to 1, and flush out the queues */
1291	for_each_qmgr(kdev, qmgr) {
1292		start = max(qmgr->start_queue, range->queue_base);
1293		end   = min(qmgr->start_queue + qmgr->num_queues,
1294			    range->queue_base + range->num_queues);
1295		for (id = start; id < end; id++) {
1296			index = id - qmgr->start_queue;
1297			writel_relaxed(THRESH_GTE | 1,
1298				       &qmgr->reg_peek[index].ptr_size_thresh);
1299			writel_relaxed(0,
1300				       &qmgr->reg_push[index].ptr_size_thresh);
1301		}
1302	}
1303
1304	list_add_tail(&range->list, &kdev->queue_ranges);
1305	dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
1306		range->name, range->queue_base,
1307		range->queue_base + range->num_queues - 1,
1308		range->num_irqs,
1309		(range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
1310		(range->flags & RANGE_RESERVED) ? ", reserved" : "",
1311		(range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
1312	kdev->num_queues_in_use += range->num_queues;
1313	return 0;
1314}
1315
1316static int knav_setup_queue_pools(struct knav_device *kdev,
1317				   struct device_node *queue_pools)
1318{
1319	struct device_node *type, *range;
1320	int ret;
1321
1322	for_each_child_of_node(queue_pools, type) {
1323		for_each_child_of_node(type, range) {
1324			ret = knav_setup_queue_range(kdev, range);
1325			/* return value ignored, we init the rest... */
1326		}
1327	}
1328
1329	/* ... and barf if they all failed! */
1330	if (list_empty(&kdev->queue_ranges)) {
1331		dev_err(kdev->dev, "no valid queue range found\n");
1332		return -ENODEV;
1333	}
1334	return 0;
1335}
1336
1337static void knav_free_queue_range(struct knav_device *kdev,
1338				  struct knav_range_info *range)
1339{
1340	if (range->ops && range->ops->free_range)
1341		range->ops->free_range(range);
1342	list_del(&range->list);
1343	devm_kfree(kdev->dev, range);
1344}
1345
1346static void knav_free_queue_ranges(struct knav_device *kdev)
1347{
1348	struct knav_range_info *range;
1349
1350	for (;;) {
1351		range = first_queue_range(kdev);
1352		if (!range)
1353			break;
1354		knav_free_queue_range(kdev, range);
1355	}
1356}
1357
1358static void knav_queue_free_regions(struct knav_device *kdev)
1359{
1360	struct knav_region *region;
1361	struct knav_pool *pool, *tmp;
1362	unsigned size;
1363
1364	for (;;) {
1365		region = first_region(kdev);
1366		if (!region)
1367			break;
1368		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
1369			knav_pool_destroy(pool);
1370
1371		size = region->virt_end - region->virt_start;
1372		if (size)
1373			free_pages_exact(region->virt_start, size);
1374		list_del(&region->list);
1375		devm_kfree(kdev->dev, region);
1376	}
1377}
1378
1379static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
1380					struct device_node *node, int index)
1381{
1382	struct resource res;
1383	void __iomem *regs;
1384	int ret;
1385
1386	ret = of_address_to_resource(node, index, &res);
1387	if (ret) {
1388		dev_err(kdev->dev, "Can't translate of node(%pOFn) address for index(%d)\n",
1389			node, index);
1390		return ERR_PTR(ret);
1391	}
1392
1393	regs = devm_ioremap_resource(kdev->dev, &res);
1394	if (IS_ERR(regs))
1395		dev_err(kdev->dev, "Failed to map register base for index(%d) node(%pOFn)\n",
1396			index, node);
1397	return regs;
1398}
1399
1400static int knav_queue_init_qmgrs(struct knav_device *kdev,
1401					struct device_node *qmgrs)
1402{
1403	struct device *dev = kdev->dev;
1404	struct knav_qmgr_info *qmgr;
1405	struct device_node *child;
1406	u32 temp[2];
1407	int ret;
1408
1409	for_each_child_of_node(qmgrs, child) {
1410		qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
1411		if (!qmgr) {
1412			dev_err(dev, "out of memory allocating qmgr\n");
1413			return -ENOMEM;
1414		}
1415
1416		ret = of_property_read_u32_array(child, "managed-queues",
1417						 temp, 2);
1418		if (!ret) {
1419			qmgr->start_queue = temp[0];
1420			qmgr->num_queues = temp[1];
1421		} else {
1422			dev_err(dev, "invalid qmgr queue range\n");
1423			devm_kfree(dev, qmgr);
1424			continue;
1425		}
1426
1427		dev_info(dev, "qmgr start queue %d, number of queues %d\n",
1428			 qmgr->start_queue, qmgr->num_queues);
1429
1430		qmgr->reg_peek =
1431			knav_queue_map_reg(kdev, child,
1432					   KNAV_QUEUE_PEEK_REG_INDEX);
1433
1434		if (kdev->version == QMSS) {
1435			qmgr->reg_status =
1436				knav_queue_map_reg(kdev, child,
1437						   KNAV_QUEUE_STATUS_REG_INDEX);
1438		}
1439
1440		qmgr->reg_config =
1441			knav_queue_map_reg(kdev, child,
1442					   (kdev->version == QMSS_66AK2G) ?
1443					   KNAV_L_QUEUE_CONFIG_REG_INDEX :
1444					   KNAV_QUEUE_CONFIG_REG_INDEX);
1445		qmgr->reg_region =
1446			knav_queue_map_reg(kdev, child,
1447					   (kdev->version == QMSS_66AK2G) ?
1448					   KNAV_L_QUEUE_REGION_REG_INDEX :
1449					   KNAV_QUEUE_REGION_REG_INDEX);
1450
1451		qmgr->reg_push =
1452			knav_queue_map_reg(kdev, child,
1453					   (kdev->version == QMSS_66AK2G) ?
1454					    KNAV_L_QUEUE_PUSH_REG_INDEX :
1455					    KNAV_QUEUE_PUSH_REG_INDEX);
1456
1457		if (kdev->version == QMSS) {
1458			qmgr->reg_pop =
1459				knav_queue_map_reg(kdev, child,
1460						   KNAV_QUEUE_POP_REG_INDEX);
1461		}
1462
1463		if (IS_ERR(qmgr->reg_peek) ||
1464		    ((kdev->version == QMSS) &&
1465		    (IS_ERR(qmgr->reg_status) || IS_ERR(qmgr->reg_pop))) ||
1466		    IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
1467		    IS_ERR(qmgr->reg_push)) {
1468			dev_err(dev, "failed to map qmgr regs\n");
1469			if (kdev->version == QMSS) {
1470				if (!IS_ERR(qmgr->reg_status))
1471					devm_iounmap(dev, qmgr->reg_status);
1472				if (!IS_ERR(qmgr->reg_pop))
1473					devm_iounmap(dev, qmgr->reg_pop);
1474			}
1475			if (!IS_ERR(qmgr->reg_peek))
1476				devm_iounmap(dev, qmgr->reg_peek);
1477			if (!IS_ERR(qmgr->reg_config))
1478				devm_iounmap(dev, qmgr->reg_config);
1479			if (!IS_ERR(qmgr->reg_region))
1480				devm_iounmap(dev, qmgr->reg_region);
1481			if (!IS_ERR(qmgr->reg_push))
1482				devm_iounmap(dev, qmgr->reg_push);
1483			devm_kfree(dev, qmgr);
1484			continue;
1485		}
1486
1487		/* Use same push register for pop as well */
1488		if (kdev->version == QMSS_66AK2G)
1489			qmgr->reg_pop = qmgr->reg_push;
1490
1491		list_add_tail(&qmgr->list, &kdev->qmgrs);
1492		dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
1493			 qmgr->start_queue, qmgr->num_queues,
1494			 qmgr->reg_peek, qmgr->reg_status,
1495			 qmgr->reg_config, qmgr->reg_region,
1496			 qmgr->reg_push, qmgr->reg_pop);
1497	}
1498	return 0;
1499}
1500
1501static int knav_queue_init_pdsps(struct knav_device *kdev,
1502					struct device_node *pdsps)
1503{
1504	struct device *dev = kdev->dev;
1505	struct knav_pdsp_info *pdsp;
1506	struct device_node *child;
1507
1508	for_each_child_of_node(pdsps, child) {
1509		pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
1510		if (!pdsp) {
1511			dev_err(dev, "out of memory allocating pdsp\n");
1512			return -ENOMEM;
1513		}
1514		pdsp->name = knav_queue_find_name(child);
1515		pdsp->iram =
1516			knav_queue_map_reg(kdev, child,
1517					   KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
1518		pdsp->regs =
1519			knav_queue_map_reg(kdev, child,
1520					   KNAV_QUEUE_PDSP_REGS_REG_INDEX);
1521		pdsp->intd =
1522			knav_queue_map_reg(kdev, child,
1523					   KNAV_QUEUE_PDSP_INTD_REG_INDEX);
1524		pdsp->command =
1525			knav_queue_map_reg(kdev, child,
1526					   KNAV_QUEUE_PDSP_CMD_REG_INDEX);
1527
1528		if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
1529		    IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
1530			dev_err(dev, "failed to map pdsp %s regs\n",
1531				pdsp->name);
1532			if (!IS_ERR(pdsp->command))
1533				devm_iounmap(dev, pdsp->command);
1534			if (!IS_ERR(pdsp->iram))
1535				devm_iounmap(dev, pdsp->iram);
1536			if (!IS_ERR(pdsp->regs))
1537				devm_iounmap(dev, pdsp->regs);
1538			if (!IS_ERR(pdsp->intd))
1539				devm_iounmap(dev, pdsp->intd);
1540			devm_kfree(dev, pdsp);
1541			continue;
1542		}
1543		of_property_read_u32(child, "id", &pdsp->id);
1544		list_add_tail(&pdsp->list, &kdev->pdsps);
1545		dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
1546			pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
1547			pdsp->intd);
1548	}
1549	return 0;
1550}
1551
1552static int knav_queue_stop_pdsp(struct knav_device *kdev,
1553			  struct knav_pdsp_info *pdsp)
1554{
1555	u32 val, timeout = 1000;
1556	int ret;
1557
1558	val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
1559	writel_relaxed(val, &pdsp->regs->control);
1560	ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
1561					PDSP_CTRL_RUNNING);
1562	if (ret < 0) {
1563		dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
1564		return ret;
1565	}
1566	pdsp->loaded = false;
1567	pdsp->started = false;
1568	return 0;
1569}
1570
1571static int knav_queue_load_pdsp(struct knav_device *kdev,
1572			  struct knav_pdsp_info *pdsp)
1573{
1574	int i, ret, fwlen;
1575	const struct firmware *fw;
1576	bool found = false;
1577	u32 *fwdata;
1578
1579	for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
1580		if (knav_acc_firmwares[i]) {
1581			ret = request_firmware_direct(&fw,
1582						      knav_acc_firmwares[i],
1583						      kdev->dev);
1584			if (!ret) {
1585				found = true;
1586				break;
1587			}
1588		}
1589	}
1590
1591	if (!found) {
1592		dev_err(kdev->dev, "failed to get firmware for pdsp\n");
1593		return -ENODEV;
1594	}
1595
1596	dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
1597		 knav_acc_firmwares[i]);
1598
1599	writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
1600	/* download the firmware */
1601	fwdata = (u32 *)fw->data;
1602	fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
1603	for (i = 0; i < fwlen; i++)
1604		writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);
1605
1606	release_firmware(fw);
1607	return 0;
1608}
1609
1610static int knav_queue_start_pdsp(struct knav_device *kdev,
1611			   struct knav_pdsp_info *pdsp)
1612{
1613	u32 val, timeout = 1000;
1614	int ret;
1615
1616	/* write a command for sync */
1617	writel_relaxed(0xffffffff, pdsp->command);
1618	while (readl_relaxed(pdsp->command) != 0xffffffff)
1619		cpu_relax();
1620
1621	/* soft reset the PDSP */
1622	val  = readl_relaxed(&pdsp->regs->control);
1623	val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
1624	writel_relaxed(val, &pdsp->regs->control);
1625
1626	/* enable pdsp */
1627	val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
1628	writel_relaxed(val, &pdsp->regs->control);
1629
1630	/* wait for command register to clear */
1631	ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
1632	if (ret < 0) {
1633		dev_err(kdev->dev,
1634			"timed out on pdsp %s command register wait\n",
1635			pdsp->name);
1636		return ret;
1637	}
1638	return 0;
1639}
1640
1641static void knav_queue_stop_pdsps(struct knav_device *kdev)
1642{
1643	struct knav_pdsp_info *pdsp;
1644
1645	/* disable all pdsps */
1646	for_each_pdsp(kdev, pdsp)
1647		knav_queue_stop_pdsp(kdev, pdsp);
1648}
1649
1650static int knav_queue_start_pdsps(struct knav_device *kdev)
1651{
1652	struct knav_pdsp_info *pdsp;
1653	int ret;
1654
1655	knav_queue_stop_pdsps(kdev);
 1656	/* Now load them all. We return success even if a pdsp is not
 1657	 * loaded, as acc channels are optional and depend on firmware
 1658	 * availability in the system. We set the loaded and started
 1659	 * flags here, and when initializing the acc range we check them
 1660	 * and init the range only if its pdsp is started.
1661	 */
1662	for_each_pdsp(kdev, pdsp) {
1663		ret = knav_queue_load_pdsp(kdev, pdsp);
1664		if (!ret)
1665			pdsp->loaded = true;
1666	}
1667
1668	for_each_pdsp(kdev, pdsp) {
1669		if (pdsp->loaded) {
1670			ret = knav_queue_start_pdsp(kdev, pdsp);
1671			if (!ret)
1672				pdsp->started = true;
1673		}
1674	}
1675	return 0;
1676}
1677
1678static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
1679{
1680	struct knav_qmgr_info *qmgr;
1681
1682	for_each_qmgr(kdev, qmgr) {
1683		if ((id >= qmgr->start_queue) &&
1684		    (id < qmgr->start_queue + qmgr->num_queues))
1685			return qmgr;
1686	}
1687	return NULL;
1688}
1689
1690static int knav_queue_init_queue(struct knav_device *kdev,
1691					struct knav_range_info *range,
1692					struct knav_queue_inst *inst,
1693					unsigned id)
1694{
1695	char irq_name[KNAV_NAME_SIZE];
1696	inst->qmgr = knav_find_qmgr(id);
1697	if (!inst->qmgr)
1698		return -1;
1699
1700	INIT_LIST_HEAD(&inst->handles);
1701	inst->kdev = kdev;
1702	inst->range = range;
1703	inst->irq_num = -1;
1704	inst->id = id;
1705	scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
1706	inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);
1707
1708	if (range->ops && range->ops->init_queue)
1709		return range->ops->init_queue(range, inst);
1710	else
1711		return 0;
1712}
1713
1714static int knav_queue_init_queues(struct knav_device *kdev)
1715{
1716	struct knav_range_info *range;
1717	int size, id, base_idx;
1718	int idx = 0, ret = 0;
1719
1720	/* how much do we need for instance data? */
1721	size = sizeof(struct knav_queue_inst);
1722
 1723	/* round this up to a power of 2 to keep the index-to-instance
 1724	 * arithmetic fast
 1725	 */
1726	kdev->inst_shift = order_base_2(size);
1727	size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
1728	kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
1729	if (!kdev->instances)
1730		return -ENOMEM;
1731
1732	for_each_queue_range(kdev, range) {
1733		if (range->ops && range->ops->init_range)
1734			range->ops->init_range(range);
1735		base_idx = idx;
1736		for (id = range->queue_base;
1737		     id < range->queue_base + range->num_queues; id++, idx++) {
1738			ret = knav_queue_init_queue(kdev, range,
1739					knav_queue_idx_to_inst(kdev, idx), id);
1740			if (ret < 0)
1741				return ret;
1742		}
1743		range->queue_base_inst =
1744			knav_queue_idx_to_inst(kdev, base_idx);
1745	}
1746	return 0;
1747}
1748
1749/* Match table for of_platform binding */
1750static const struct of_device_id keystone_qmss_of_match[] = {
1751	{
1752		.compatible = "ti,keystone-navigator-qmss",
1753	},
1754	{
1755		.compatible = "ti,66ak2g-navss-qm",
1756		.data	= (void *)QMSS_66AK2G,
1757	},
1758	{},
1759};
1760MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);
1761
1762static int knav_queue_probe(struct platform_device *pdev)
1763{
1764	struct device_node *node = pdev->dev.of_node;
1765	struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
1766	const struct of_device_id *match;
1767	struct device *dev = &pdev->dev;
1768	u32 temp[2];
1769	int ret;
1770
1771	if (!node) {
1772		dev_err(dev, "device tree info unavailable\n");
1773		return -ENODEV;
1774	}
1775
1776	kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
1777	if (!kdev) {
1778		dev_err(dev, "memory allocation failed\n");
1779		return -ENOMEM;
1780	}
1781
1782	match = of_match_device(of_match_ptr(keystone_qmss_of_match), dev);
1783	if (match && match->data)
1784		kdev->version = QMSS_66AK2G;
1785
1786	platform_set_drvdata(pdev, kdev);
1787	kdev->dev = dev;
1788	INIT_LIST_HEAD(&kdev->queue_ranges);
1789	INIT_LIST_HEAD(&kdev->qmgrs);
1790	INIT_LIST_HEAD(&kdev->pools);
1791	INIT_LIST_HEAD(&kdev->regions);
1792	INIT_LIST_HEAD(&kdev->pdsps);
1793
1794	pm_runtime_enable(&pdev->dev);
1795	ret = pm_runtime_get_sync(&pdev->dev);
1796	if (ret < 0) {
1797		dev_err(dev, "Failed to enable QMSS\n");
1798		return ret;
1799	}
1800
1801	if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
1802		dev_err(dev, "queue-range not specified\n");
1803		ret = -ENODEV;
1804		goto err;
1805	}
1806	kdev->base_id    = temp[0];
1807	kdev->num_queues = temp[1];
1808
1809	/* Initialize queue managers using device tree configuration */
1810	qmgrs =  of_get_child_by_name(node, "qmgrs");
1811	if (!qmgrs) {
1812		dev_err(dev, "queue manager info not specified\n");
1813		ret = -ENODEV;
1814		goto err;
1815	}
1816	ret = knav_queue_init_qmgrs(kdev, qmgrs);
1817	of_node_put(qmgrs);
1818	if (ret)
1819		goto err;
1820
1821	/* get pdsp configuration values from device tree */
1822	pdsps =  of_get_child_by_name(node, "pdsps");
1823	if (pdsps) {
1824		ret = knav_queue_init_pdsps(kdev, pdsps);
1825		if (ret)
1826			goto err;
1827
1828		ret = knav_queue_start_pdsps(kdev);
1829		if (ret)
1830			goto err;
1831	}
1832	of_node_put(pdsps);
1833
1834	/* get usable queue range values from device tree */
1835	queue_pools = of_get_child_by_name(node, "queue-pools");
1836	if (!queue_pools) {
1837		dev_err(dev, "queue-pools not specified\n");
1838		ret = -ENODEV;
1839		goto err;
1840	}
1841	ret = knav_setup_queue_pools(kdev, queue_pools);
1842	of_node_put(queue_pools);
1843	if (ret)
1844		goto err;
1845
1846	ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
1847	if (ret) {
1848		dev_err(kdev->dev, "could not setup linking ram\n");
1849		goto err;
1850	}
1851
1852	ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
1853	if (ret) {
1854		/*
1855		 * nothing really, we have one linking ram already, so we just
1856		 * live within our means
1857		 */
1858	}
1859
1860	ret = knav_queue_setup_link_ram(kdev);
1861	if (ret)
1862		goto err;
1863
1864	regions =  of_get_child_by_name(node, "descriptor-regions");
1865	if (!regions) {
1866		dev_err(dev, "descriptor-regions not specified\n");
 
1867		goto err;
1868	}
1869	ret = knav_queue_setup_regions(kdev, regions);
1870	of_node_put(regions);
1871	if (ret)
1872		goto err;
1873
1874	ret = knav_queue_init_queues(kdev);
1875	if (ret < 0) {
1876		dev_err(dev, "hwqueue initialization failed\n");
1877		goto err;
1878	}
1879
1880	debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
1881			    &knav_queue_debug_ops);
1882	device_ready = true;
1883	return 0;
1884
1885err:
1886	knav_queue_stop_pdsps(kdev);
1887	knav_queue_free_regions(kdev);
1888	knav_free_queue_ranges(kdev);
1889	pm_runtime_put_sync(&pdev->dev);
1890	pm_runtime_disable(&pdev->dev);
1891	return ret;
1892}
1893
1894static int knav_queue_remove(struct platform_device *pdev)
1895{
1896	/* TODO: Free resources */
1897	pm_runtime_put_sync(&pdev->dev);
1898	pm_runtime_disable(&pdev->dev);
1899	return 0;
1900}
1901
1902static struct platform_driver keystone_qmss_driver = {
1903	.probe		= knav_queue_probe,
1904	.remove		= knav_queue_remove,
1905	.driver		= {
1906		.name	= "keystone-navigator-qmss",
1907		.of_match_table = keystone_qmss_of_match,
1908	},
1909};
1910module_platform_driver(keystone_qmss_driver);
1911
1912MODULE_LICENSE("GPL v2");
1913MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
1914MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
1915MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");
 156{
 157	return inst->range->flags & RANGE_RESERVED;
 158}
 159
 160static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
 161{
 162	struct knav_queue *tmp;
 163
 164	rcu_read_lock();
 165	for_each_handle_rcu(tmp, inst) {
 166		if (tmp->flags & KNAV_QUEUE_SHARED) {
 167			rcu_read_unlock();
 168			return true;
 169		}
 170	}
 171	rcu_read_unlock();
 172	return false;
 173}
 174
 175static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
 176						unsigned type)
 177{
 178	if ((type == KNAV_QUEUE_QPEND) &&
 179	    (inst->range->flags & RANGE_HAS_IRQ)) {
 180		return true;
 181	} else if ((type == KNAV_QUEUE_ACC) &&
 182		(inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
 183		return true;
 184	} else if ((type == KNAV_QUEUE_GP) &&
 185		!(inst->range->flags &
 186			(RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
 187		return true;
 188	}
 189	return false;
 190}
 191
 192static inline struct knav_queue_inst *
 193knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
 194{
 195	struct knav_queue_inst *inst;
 196	int idx;
 197
 198	for_each_instance(idx, inst, kdev) {
 199		if (inst->id == id)
 200			return inst;
 201	}
 202	return NULL;
 203}
 204
 205static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
 206{
 207	if (kdev->base_id <= id &&
 208	    kdev->base_id + kdev->num_queues > id) {
 209		id -= kdev->base_id;
 210		return knav_queue_match_id_to_inst(kdev, id);
 211	}
 212	return NULL;
 213}
 214
 215static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
 216				      const char *name, unsigned flags)
 217{
 218	struct knav_queue *qh;
 219	unsigned id;
 220	int ret = 0;
 221
 222	qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
 223	if (!qh)
 224		return ERR_PTR(-ENOMEM);
 225
 226	qh->stats = alloc_percpu(struct knav_queue_stats);
 227	if (!qh->stats) {
 228		ret = -ENOMEM;
 229		goto err;
 230	}
 231
 232	qh->flags = flags;
 233	qh->inst = inst;
 234	id = inst->id - inst->qmgr->start_queue;
 235	qh->reg_push = &inst->qmgr->reg_push[id];
 236	qh->reg_pop = &inst->qmgr->reg_pop[id];
 237	qh->reg_peek = &inst->qmgr->reg_peek[id];
 238
 239	/* first opener? */
 240	if (!knav_queue_is_busy(inst)) {
 241		struct knav_range_info *range = inst->range;
 242
 243		inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
 244		if (range->ops && range->ops->open_queue)
 245			ret = range->ops->open_queue(range, inst, flags);
 246
 247		if (ret)
 248			goto err;
 249	}
 250	list_add_tail_rcu(&qh->list, &inst->handles);
 251	return qh;
 252
 253err:
 254	if (qh->stats)
 255		free_percpu(qh->stats);
 256	devm_kfree(inst->kdev->dev, qh);
 257	return ERR_PTR(ret);
 258}
 259
 260static struct knav_queue *
 261knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
 262{
 263	struct knav_queue_inst *inst;
 264	struct knav_queue *qh;
 265
 266	mutex_lock(&knav_dev_lock);
 267
 268	qh = ERR_PTR(-ENODEV);
 269	inst = knav_queue_find_by_id(id);
 270	if (!inst)
 271		goto unlock_ret;
 272
 273	qh = ERR_PTR(-EEXIST);
 274	if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
 275		goto unlock_ret;
 276
 277	qh = ERR_PTR(-EBUSY);
 278	if ((flags & KNAV_QUEUE_SHARED) &&
 279	    (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
 280		goto unlock_ret;
 281
 282	qh = __knav_queue_open(inst, name, flags);
 283
 284unlock_ret:
 285	mutex_unlock(&knav_dev_lock);
 286
 287	return qh;
 288}
 289
 290static struct knav_queue *knav_queue_open_by_type(const char *name,
 291						unsigned type, unsigned flags)
 292{
 293	struct knav_queue_inst *inst;
 294	struct knav_queue *qh = ERR_PTR(-EINVAL);
 295	int idx;
 296
 297	mutex_lock(&knav_dev_lock);
 298
 299	for_each_instance(idx, inst, kdev) {
 300		if (knav_queue_is_reserved(inst))
 301			continue;
 302		if (!knav_queue_match_type(inst, type))
 303			continue;
 304		if (knav_queue_is_busy(inst))
 305			continue;
 306		qh = __knav_queue_open(inst, name, flags);
 307		goto unlock_ret;
 308	}
 309
 310unlock_ret:
 311	mutex_unlock(&knav_dev_lock);
 312	return qh;
 313}
 314
 315static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
 316{
 317	struct knav_range_info *range = inst->range;
 318
 319	if (range->ops && range->ops->set_notify)
 320		range->ops->set_notify(range, inst, enabled);
 321}
 322
 323static int knav_queue_enable_notifier(struct knav_queue *qh)
 324{
 325	struct knav_queue_inst *inst = qh->inst;
 326	bool first;
 327
 328	if (WARN_ON(!qh->notifier_fn))
 329		return -EINVAL;
 330
 331	/* Adjust the per handle notifier count */
 332	first = (atomic_inc_return(&qh->notifier_enabled) == 1);
 333	if (!first)
 334		return 0; /* nothing to do */
 335
 336	/* Now adjust the per instance notifier count */
 337	first = (atomic_inc_return(&inst->num_notifiers) == 1);
 338	if (first)
 339		knav_queue_set_notify(inst, true);
 340
 341	return 0;
 342}
 343
 344static int knav_queue_disable_notifier(struct knav_queue *qh)
 345{
 346	struct knav_queue_inst *inst = qh->inst;
 347	bool last;
 348
 349	last = (atomic_dec_return(&qh->notifier_enabled) == 0);
 350	if (!last)
 351		return 0; /* nothing to do */
 352
 353	last = (atomic_dec_return(&inst->num_notifiers) == 0);
 354	if (last)
 355		knav_queue_set_notify(inst, false);
 356
 357	return 0;
 358}
 359
 360static int knav_queue_set_notifier(struct knav_queue *qh,
 361				struct knav_queue_notify_config *cfg)
 362{
 363	knav_queue_notify_fn old_fn = qh->notifier_fn;
 364
 365	if (!cfg)
 366		return -EINVAL;
 367
 368	if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
 369		return -ENOTSUPP;
 370
 371	if (!cfg->fn && old_fn)
 372		knav_queue_disable_notifier(qh);
 373
 374	qh->notifier_fn = cfg->fn;
 375	qh->notifier_fn_arg = cfg->fn_arg;
 376
 377	if (cfg->fn && !old_fn)
 378		knav_queue_enable_notifier(qh);
 379
 380	return 0;
 381}
 382
 383static int knav_gp_set_notify(struct knav_range_info *range,
 384			       struct knav_queue_inst *inst,
 385			       bool enabled)
 386{
 387	unsigned queue;
 388
 389	if (range->flags & RANGE_HAS_IRQ) {
 390		queue = inst->id - range->queue_base;
 391		if (enabled)
 392			enable_irq(range->irqs[queue].irq);
 393		else
 394			disable_irq_nosync(range->irqs[queue].irq);
 395	}
 396	return 0;
 397}
 398
 399static int knav_gp_open_queue(struct knav_range_info *range,
 400				struct knav_queue_inst *inst, unsigned flags)
 401{
 402	return knav_queue_setup_irq(range, inst);
 403}
 404
 405static int knav_gp_close_queue(struct knav_range_info *range,
 406				struct knav_queue_inst *inst)
 407{
 408	knav_queue_free_irq(inst);
 409	return 0;
 410}
 411
 412static struct knav_range_ops knav_gp_range_ops = {
 413	.set_notify	= knav_gp_set_notify,
 414	.open_queue	= knav_gp_open_queue,
 415	.close_queue	= knav_gp_close_queue,
 416};
 417
 418
 419static int knav_queue_get_count(void *qhandle)
 420{
 421	struct knav_queue *qh = qhandle;
 422	struct knav_queue_inst *inst = qh->inst;
 423
 424	return readl_relaxed(&qh->reg_peek[0].entry_count) +
 425		atomic_read(&inst->desc_count);
 426}
 427
 428static void knav_queue_debug_show_instance(struct seq_file *s,
 429					struct knav_queue_inst *inst)
 430{
 431	struct knav_device *kdev = inst->kdev;
 432	struct knav_queue *qh;
 433	int cpu = 0;
 434	int pushes = 0;
 435	int pops = 0;
 436	int push_errors = 0;
 437	int pop_errors = 0;
 438	int notifies = 0;
 439
 440	if (!knav_queue_is_busy(inst))
 441		return;
 442
 443	seq_printf(s, "\tqueue id %d (%s)\n",
 444		   kdev->base_id + inst->id, inst->name);
 445	for_each_handle_rcu(qh, inst) {
 446		for_each_possible_cpu(cpu) {
 447			pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
 448			pops += per_cpu_ptr(qh->stats, cpu)->pops;
 449			push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
 450			pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
 451			notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
 452		}
 453
 454		seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n",
 455				qh,
 456				pushes,
 457				pops,
 458				knav_queue_get_count(qh),
 459				notifies,
 460				push_errors,
 461				pop_errors);
 462	}
 463}
 464
 465static int knav_queue_debug_show(struct seq_file *s, void *v)
 466{
 467	struct knav_queue_inst *inst;
 468	int idx;
 469
 470	mutex_lock(&knav_dev_lock);
 471	seq_printf(s, "%s: %u-%u\n",
 472		   dev_name(kdev->dev), kdev->base_id,
 473		   kdev->base_id + kdev->num_queues - 1);
 474	for_each_instance(idx, inst, kdev)
 475		knav_queue_debug_show_instance(s, inst);
 476	mutex_unlock(&knav_dev_lock);
 477
 478	return 0;
 479}
 480
 481DEFINE_SHOW_ATTRIBUTE(knav_queue_debug);
 
 
 
 
 
 
 
 
 
 
 482
 483static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
 484					u32 flags)
 485{
 486	unsigned long end;
 487	u32 val = 0;
 488
 489	end = jiffies + msecs_to_jiffies(timeout);
 490	while (time_after(end, jiffies)) {
 491		val = readl_relaxed(addr);
 492		if (flags)
 493			val &= flags;
 494		if (!val)
 495			break;
 496		cpu_relax();
 497	}
 498	return val ? -ETIMEDOUT : 0;
 499}
 500
 501
 502static int knav_queue_flush(struct knav_queue *qh)
 503{
 504	struct knav_queue_inst *inst = qh->inst;
 505	unsigned id = inst->id - inst->qmgr->start_queue;
 506
 507	atomic_set(&inst->desc_count, 0);
 508	writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
 509	return 0;
 510}
 511
 512/**
 513 * knav_queue_open()	- open a hardware queue
 514 * @name:		- name to give the queue handle
 515 * @id:			- desired queue number if any or specifes the type
 516 *			  of queue
 517 * @flags:		- the following flags are applicable to queues:
 518 *	KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
 519 *			     exclusive by default.
 520 *			     Subsequent attempts to open a shared queue should
 521 *			     also have this flag.
 522 *
 523 * Returns a handle to the open hardware queue if successful. Use IS_ERR()
 524 * to check the returned value for error codes.
 525 */
 526void *knav_queue_open(const char *name, unsigned id,
 527					unsigned flags)
 528{
 529	struct knav_queue *qh = ERR_PTR(-EINVAL);
 530
 531	switch (id) {
 532	case KNAV_QUEUE_QPEND:
 533	case KNAV_QUEUE_ACC:
 534	case KNAV_QUEUE_GP:
 535		qh = knav_queue_open_by_type(name, id, flags);
 536		break;
 537
 538	default:
 539		qh = knav_queue_open_by_id(name, id, flags);
 540		break;
 541	}
 542	return qh;
 543}
 544EXPORT_SYMBOL_GPL(knav_queue_open);
 545
 546/**
 547 * knav_queue_close()	- close a hardware queue handle
 548 * @qhandle:		- handle to close
 549 */
 550void knav_queue_close(void *qhandle)
 551{
 552	struct knav_queue *qh = qhandle;
 553	struct knav_queue_inst *inst = qh->inst;
 554
 555	while (atomic_read(&qh->notifier_enabled) > 0)
 556		knav_queue_disable_notifier(qh);
 557
 558	mutex_lock(&knav_dev_lock);
 559	list_del_rcu(&qh->list);
 560	mutex_unlock(&knav_dev_lock);
 561	synchronize_rcu();
 562	if (!knav_queue_is_busy(inst)) {
 563		struct knav_range_info *range = inst->range;
 564
 565		if (range->ops && range->ops->close_queue)
 566			range->ops->close_queue(range, inst);
 567	}
 568	free_percpu(qh->stats);
 569	devm_kfree(inst->kdev->dev, qh);
 570}
 571EXPORT_SYMBOL_GPL(knav_queue_close);
 572
 573/**
 574 * knav_queue_device_control()	- Perform control operations on a queue
 575 * @qhandle:			- queue handle
 576 * @cmd:			- control commands
 577 * @arg:			- command argument
 578 *
 579 * Returns 0 on success, errno otherwise.
 580 */
 581int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
 582				unsigned long arg)
 583{
 584	struct knav_queue *qh = qhandle;
 585	struct knav_queue_notify_config *cfg;
 586	int ret;
 587
 588	switch ((int)cmd) {
 589	case KNAV_QUEUE_GET_ID:
 590		ret = qh->inst->kdev->base_id + qh->inst->id;
 591		break;
 592
 593	case KNAV_QUEUE_FLUSH:
 594		ret = knav_queue_flush(qh);
 595		break;
 596
 597	case KNAV_QUEUE_SET_NOTIFIER:
 598		cfg = (void *)arg;
 599		ret = knav_queue_set_notifier(qh, cfg);
 600		break;
 601
 602	case KNAV_QUEUE_ENABLE_NOTIFY:
 603		ret = knav_queue_enable_notifier(qh);
 604		break;
 605
 606	case KNAV_QUEUE_DISABLE_NOTIFY:
 607		ret = knav_queue_disable_notifier(qh);
 608		break;
 609
 610	case KNAV_QUEUE_GET_COUNT:
 611		ret = knav_queue_get_count(qh);
 612		break;
 613
 614	default:
 615		ret = -ENOTSUPP;
 616		break;
 617	}
 618	return ret;
 619}
 620EXPORT_SYMBOL_GPL(knav_queue_device_control);
 621
 622
 623
 624/**
 625 * knav_queue_push()	- push data (or descriptor) to the tail of a queue
 626 * @qhandle:		- hardware queue handle
 627 * @dma:		- DMA data to push
 628 * @size:		- size of data to push
 629 * @flags:		- can be used to pass additional information
 630 *
 631 * Returns 0 on success, errno otherwise.
 632 */
 633int knav_queue_push(void *qhandle, dma_addr_t dma,
 634					unsigned size, unsigned flags)
 635{
 636	struct knav_queue *qh = qhandle;
 637	u32 val;
 638
 639	val = (u32)dma | ((size / 16) - 1);
 640	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);
 641
 642	this_cpu_inc(qh->stats->pushes);
 643	return 0;
 644}
 645EXPORT_SYMBOL_GPL(knav_queue_push);
 646
 647/**
 648 * knav_queue_pop()	- pop data (or descriptor) from the head of a queue
 649 * @qhandle:		- hardware queue handle
 650 * @size:		- (optional) size of the data pop'ed.
 651 *
 652 * Returns a DMA address on success, 0 on failure.
 653 */
 654dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
 655{
 656	struct knav_queue *qh = qhandle;
 657	struct knav_queue_inst *inst = qh->inst;
 658	dma_addr_t dma;
 659	u32 val, idx;
 660
 661	/* are we accumulated? */
 662	if (inst->descs) {
 663		if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
 664			atomic_inc(&inst->desc_count);
 665			return 0;
 666		}
 667		idx  = atomic_inc_return(&inst->desc_head);
 668		idx &= ACC_DESCS_MASK;
 669		val = inst->descs[idx];
 670	} else {
 671		val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
 672		if (unlikely(!val))
 673			return 0;
 674	}
 675
 676	dma = val & DESC_PTR_MASK;
 677	if (size)
 678		*size = ((val & DESC_SIZE_MASK) + 1) * 16;
 679
 680	this_cpu_inc(qh->stats->pops);
 681	return dma;
 682}
 683EXPORT_SYMBOL_GPL(knav_queue_pop);
 684
 685/* carve out descriptors and push into queue */
 686static void kdesc_fill_pool(struct knav_pool *pool)
 687{
 688	struct knav_region *region;
 689	int i;
 690
 691	region = pool->region;
 692	pool->desc_size = region->desc_size;
 693	for (i = 0; i < pool->num_desc; i++) {
 694		int index = pool->region_offset + i;
 695		dma_addr_t dma_addr;
 696		unsigned dma_size;
 697		dma_addr = region->dma_start + (region->desc_size * index);
 698		dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
 699		dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
 700					   DMA_TO_DEVICE);
 701		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
 702	}
 703}
 704
 705/* pop out descriptors and close the queue */
 706static void kdesc_empty_pool(struct knav_pool *pool)
 707{
 708	dma_addr_t dma;
 709	unsigned size;
 710	void *desc;
 711	int i;
 712
 713	if (!pool->queue)
 714		return;
 715
 716	for (i = 0;; i++) {
 717		dma = knav_queue_pop(pool->queue, &size);
 718		if (!dma)
 719			break;
 720		desc = knav_pool_desc_dma_to_virt(pool, dma);
 721		if (!desc) {
 722			dev_dbg(pool->kdev->dev,
 723				"couldn't unmap desc, continuing\n");
 724			continue;
 725		}
 726	}
 727	WARN_ON(i != pool->num_desc);
 728	knav_queue_close(pool->queue);
 729}
 730
 731
 732/* Get the DMA address of a descriptor */
 733dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
 734{
 735	struct knav_pool *pool = ph;
 736	return pool->region->dma_start + (virt - pool->region->virt_start);
 737}
 738EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);
 739
 740void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
 741{
 742	struct knav_pool *pool = ph;
 743	return pool->region->virt_start + (dma - pool->region->dma_start);
 744}
 745EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
 746
 747/**
 748 * knav_pool_create()	- Create a pool of descriptors
 749 * @name:		- name to give the pool handle
 750 * @num_desc:		- numbers of descriptors in the pool
 751 * @region_id:		- QMSS region id from which the descriptors are to be
 752 *			  allocated.
 753 *
 754 * Returns a pool handle on success.
 755 * Use IS_ERR_OR_NULL() to identify error values on return.
 756 */
 757void *knav_pool_create(const char *name,
 758					int num_desc, int region_id)
 759{
 760	struct knav_region *reg_itr, *region = NULL;
 761	struct knav_pool *pool, *pi = NULL, *iter;
 762	struct list_head *node;
 763	unsigned last_offset;
 
 764	int ret;
 765
 766	if (!kdev)
 767		return ERR_PTR(-EPROBE_DEFER);
 768
 769	if (!kdev->dev)
 770		return ERR_PTR(-ENODEV);
 771
 772	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
 773	if (!pool) {
 774		dev_err(kdev->dev, "out of memory allocating pool\n");
 775		return ERR_PTR(-ENOMEM);
 776	}
 777
 778	for_each_region(kdev, reg_itr) {
 779		if (reg_itr->id != region_id)
 780			continue;
 781		region = reg_itr;
 782		break;
 783	}
 784
 785	if (!region) {
 786		dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
 787		ret = -EINVAL;
 788		goto err;
 789	}
 790
 791	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
 792	if (IS_ERR(pool->queue)) {
 793		dev_err(kdev->dev,
 794			"failed to open queue for pool(%s), error %ld\n",
 795			name, PTR_ERR(pool->queue));
 796		ret = PTR_ERR(pool->queue);
 797		goto err;
 798	}
 799
 800	pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
 801	pool->kdev = kdev;
 802	pool->dev = kdev->dev;
 803
 804	mutex_lock(&knav_dev_lock);
 805
 806	if (num_desc > (region->num_desc - region->used_desc)) {
 807		dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
 808			region_id, name);
 809		ret = -ENOMEM;
 810		goto err_unlock;
 811	}
 812
 813	/* Region maintains a sorted (by region offset) list of pools
 814	 * use the first free slot which is large enough to accomodate
 815	 * the request
 816	 */
 817	last_offset = 0;
 
 818	node = &region->pools;
 819	list_for_each_entry(iter, &region->pools, region_inst) {
 820		if ((iter->region_offset - last_offset) >= num_desc) {
 821			pi = iter;
 822			break;
 823		}
 824		last_offset = iter->region_offset + iter->num_desc;
 825	}
 
 826
 827	if (pi) {
 828		node = &pi->region_inst;
 829		pool->region = region;
 830		pool->num_desc = num_desc;
 831		pool->region_offset = last_offset;
 832		region->used_desc += num_desc;
 833		list_add_tail(&pool->list, &kdev->pools);
 834		list_add_tail(&pool->region_inst, node);
 835	} else {
 836		dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
 837			name, region_id);
 838		ret = -ENOMEM;
 839		goto err_unlock;
 840	}
 841
 842	mutex_unlock(&knav_dev_lock);
 843	kdesc_fill_pool(pool);
 844	return pool;
 845
 846err_unlock:
 847	mutex_unlock(&knav_dev_lock);
 848err:
 849	kfree(pool->name);
 850	devm_kfree(kdev->dev, pool);
 851	return ERR_PTR(ret);
 852}
 853EXPORT_SYMBOL_GPL(knav_pool_create);
 854
 855/**
 856 * knav_pool_destroy()	- Free a pool of descriptors
 857 * @ph:		- pool handle
 858 */
 859void knav_pool_destroy(void *ph)
 860{
 861	struct knav_pool *pool = ph;
 862
 863	if (!pool)
 864		return;
 865
 866	if (!pool->region)
 867		return;
 868
 869	kdesc_empty_pool(pool);
 870	mutex_lock(&knav_dev_lock);
 871
 872	pool->region->used_desc -= pool->num_desc;
 873	list_del(&pool->region_inst);
 874	list_del(&pool->list);
 875
 876	mutex_unlock(&knav_dev_lock);
 877	kfree(pool->name);
 878	devm_kfree(kdev->dev, pool);
 879}
 880EXPORT_SYMBOL_GPL(knav_pool_destroy);
 881
 882
 883/**
 884 * knav_pool_desc_get()	- Get a descriptor from the pool
 885 * @ph:		- pool handle
 886 *
 887 * Returns descriptor from the pool.
 888 */
 889void *knav_pool_desc_get(void *ph)
 890{
 891	struct knav_pool *pool = ph;
 892	dma_addr_t dma;
 893	unsigned size;
 894	void *data;
 895
 896	dma = knav_queue_pop(pool->queue, &size);
 897	if (unlikely(!dma))
 898		return ERR_PTR(-ENOMEM);
 899	data = knav_pool_desc_dma_to_virt(pool, dma);
 900	return data;
 901}
 902EXPORT_SYMBOL_GPL(knav_pool_desc_get);
 903
 904/**
 905 * knav_pool_desc_put()	- return a descriptor to the pool
 906 * @ph:		- pool handle
 907 * @desc:	- virtual address
 908 */
 909void knav_pool_desc_put(void *ph, void *desc)
 910{
 911	struct knav_pool *pool = ph;
 912	dma_addr_t dma;
 913	dma = knav_pool_desc_virt_to_dma(pool, desc);
 914	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
 915}
 916EXPORT_SYMBOL_GPL(knav_pool_desc_put);
 917
 918/**
 919 * knav_pool_desc_map()	- Map descriptor for DMA transfer
 920 * @ph:				- pool handle
 921 * @desc:			- address of descriptor to map
 922 * @size:			- size of descriptor to map
 923 * @dma:			- DMA address return pointer
 924 * @dma_sz:			- adjusted return pointer
 925 *
 926 * Returns 0 on success, errno otherwise.
 927 */
 928int knav_pool_desc_map(void *ph, void *desc, unsigned size,
 929					dma_addr_t *dma, unsigned *dma_sz)
 930{
 931	struct knav_pool *pool = ph;
 932	*dma = knav_pool_desc_virt_to_dma(pool, desc);
 933	size = min(size, pool->region->desc_size);
 934	size = ALIGN(size, SMP_CACHE_BYTES);
 935	*dma_sz = size;
 936	dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);
 937
 938	/* Ensure the descriptor reaches to the memory */
 939	__iowmb();
 940
 941	return 0;
 942}
 943EXPORT_SYMBOL_GPL(knav_pool_desc_map);
 944
 945/**
 946 * knav_pool_desc_unmap()	- Unmap descriptor after DMA transfer
 947 * @ph:				- pool handle
 948 * @dma:			- DMA address of descriptor to unmap
 949 * @dma_sz:			- size of descriptor to unmap
 950 *
 951 * Returns descriptor address on success, Use IS_ERR_OR_NULL() to identify
 952 * error values on return.
 953 */
 954void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
 955{
 956	struct knav_pool *pool = ph;
 957	unsigned desc_sz;
 958	void *desc;
 959
 960	desc_sz = min(dma_sz, pool->region->desc_size);
 961	desc = knav_pool_desc_dma_to_virt(pool, dma);
 962	dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
 963	prefetch(desc);
 964	return desc;
 965}
 966EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
 967
 968/**
 969 * knav_pool_count()	- Get the number of descriptors in pool.
 970 * @ph:			- pool handle
 971 * Returns number of elements in the pool.
 972 */
 973int knav_pool_count(void *ph)
 974{
 975	struct knav_pool *pool = ph;
 976	return knav_queue_get_count(pool->queue);
 977}
 978EXPORT_SYMBOL_GPL(knav_pool_count);
 979
 980static void knav_queue_setup_region(struct knav_device *kdev,
 981					struct knav_region *region)
 982{
 983	unsigned hw_num_desc, hw_desc_size, size;
 984	struct knav_reg_region __iomem  *regs;
 985	struct knav_qmgr_info *qmgr;
 986	struct knav_pool *pool;
 987	int id = region->id;
 988	struct page *page;
 989
 990	/* unused region? */
 991	if (!region->num_desc) {
 992		dev_warn(kdev->dev, "unused region %s\n", region->name);
 993		return;
 994	}
 995
 996	/* get hardware descriptor value */
 997	hw_num_desc = ilog2(region->num_desc - 1) + 1;
 998
 999	/* did we force fit ourselves into nothingness? */
1000	if (region->num_desc < 32) {
1001		region->num_desc = 0;
1002		dev_warn(kdev->dev, "too few descriptors in region %s\n",
1003			 region->name);
1004		return;
1005	}
1006
1007	size = region->num_desc * region->desc_size;
1008	region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
1009						GFP_DMA32);
1010	if (!region->virt_start) {
1011		region->num_desc = 0;
1012		dev_err(kdev->dev, "memory alloc failed for region %s\n",
1013			region->name);
1014		return;
1015	}
1016	region->virt_end = region->virt_start + size;
1017	page = virt_to_page(region->virt_start);
1018
1019	region->dma_start = dma_map_page(kdev->dev, page, 0, size,
1020					 DMA_BIDIRECTIONAL);
1021	if (dma_mapping_error(kdev->dev, region->dma_start)) {
1022		dev_err(kdev->dev, "dma map failed for region %s\n",
1023			region->name);
1024		goto fail;
1025	}
1026	region->dma_end = region->dma_start + size;
1027
1028	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
1029	if (!pool) {
1030		dev_err(kdev->dev, "out of memory allocating dummy pool\n");
1031		goto fail;
1032	}
1033	pool->num_desc = 0;
1034	pool->region_offset = region->num_desc;
1035	list_add(&pool->region_inst, &region->pools);
1036
1037	dev_dbg(kdev->dev,
1038		"region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
1039		region->name, id, region->desc_size, region->num_desc,
1040		region->link_index, &region->dma_start, &region->dma_end,
1041		region->virt_start, region->virt_end);
1042
1043	hw_desc_size = (region->desc_size / 16) - 1;
1044	hw_num_desc -= 5;
1045
1046	for_each_qmgr(kdev, qmgr) {
1047		regs = qmgr->reg_region + id;
1048		writel_relaxed((u32)region->dma_start, &regs->base);
1049		writel_relaxed(region->link_index, &regs->start_index);
1050		writel_relaxed(hw_desc_size << 16 | hw_num_desc,
1051			       &regs->size_count);
1052	}
1053	return;
1054
1055fail:
1056	if (region->dma_start)
1057		dma_unmap_page(kdev->dev, region->dma_start, size,
1058				DMA_BIDIRECTIONAL);
1059	if (region->virt_start)
1060		free_pages_exact(region->virt_start, size);
1061	region->num_desc = 0;
1062	return;
1063}
1064
1065static const char *knav_queue_find_name(struct device_node *node)
1066{
1067	const char *name;
1068
1069	if (of_property_read_string(node, "label", &name) < 0)
1070		name = node->name;
1071	if (!name)
1072		name = "unknown";
1073	return name;
1074}
1075
1076static int knav_queue_setup_regions(struct knav_device *kdev,
1077					struct device_node *regions)
1078{
1079	struct device *dev = kdev->dev;
1080	struct knav_region *region;
1081	struct device_node *child;
1082	u32 temp[2];
1083	int ret;
1084
1085	for_each_child_of_node(regions, child) {
1086		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
1087		if (!region) {
1088			of_node_put(child);
1089			dev_err(dev, "out of memory allocating region\n");
1090			return -ENOMEM;
1091		}
1092
1093		region->name = knav_queue_find_name(child);
1094		of_property_read_u32(child, "id", &region->id);
1095		ret = of_property_read_u32_array(child, "region-spec", temp, 2);
1096		if (!ret) {
1097			region->num_desc  = temp[0];
1098			region->desc_size = temp[1];
1099		} else {
1100			dev_err(dev, "invalid region info %s\n", region->name);
1101			devm_kfree(dev, region);
1102			continue;
1103		}
1104
1105		if (!of_get_property(child, "link-index", NULL)) {
1106			dev_err(dev, "No link info for %s\n", region->name);
1107			devm_kfree(dev, region);
1108			continue;
1109		}
1110		ret = of_property_read_u32(child, "link-index",
1111					   &region->link_index);
1112		if (ret) {
1113			dev_err(dev, "link index not found for %s\n",
1114				region->name);
1115			devm_kfree(dev, region);
1116			continue;
1117		}
1118
1119		INIT_LIST_HEAD(&region->pools);
1120		list_add_tail(&region->list, &kdev->regions);
1121	}
1122	if (list_empty(&kdev->regions)) {
1123		dev_err(dev, "no valid region information found\n");
1124		return -ENODEV;
1125	}
1126
1127	/* Next, we run through the regions and set things up */
1128	for_each_region(kdev, region)
1129		knav_queue_setup_region(kdev, region);
1130
1131	return 0;
1132}
1133
1134static int knav_get_link_ram(struct knav_device *kdev,
1135				       const char *name,
1136				       struct knav_link_ram_block *block)
1137{
1138	struct platform_device *pdev = to_platform_device(kdev->dev);
1139	struct device_node *node = pdev->dev.of_node;
1140	u32 temp[2];
1141
1142	/*
1143	 * Note: link ram resources are specified in "entry" sized units. In
1144	 * reality, although entries are ~40bits in hardware, we treat them as
1145	 * 64-bit entities here.
1146	 *
1147	 * For example, to specify the internal link ram for Keystone-I class
1148	 * devices, we would set the linkram0 resource to 0x80000-0x83fff.
1149	 *
1150	 * This gets a bit weird when other link rams are used.  For example,
1151	 * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
1152	 * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
1153	 * which accounts for 64-bits per entry, for 16K entries.
1154	 */
1155	if (!of_property_read_u32_array(node, name , temp, 2)) {
1156		if (temp[0]) {
1157			/*
1158			 * queue_base specified => using internal or onchip
1159			 * link ram WARNING - we do not "reserve" this block
1160			 */
1161			block->dma = (dma_addr_t)temp[0];
1162			block->virt = NULL;
1163			block->size = temp[1];
1164		} else {
1165			block->size = temp[1];
1166			/* queue_base not specific => allocate requested size */
1167			block->virt = dmam_alloc_coherent(kdev->dev,
1168						  8 * block->size, &block->dma,
1169						  GFP_KERNEL);
1170			if (!block->virt) {
1171				dev_err(kdev->dev, "failed to alloc linkram\n");
1172				return -ENOMEM;
1173			}
1174		}
1175	} else {
1176		return -ENODEV;
1177	}
1178	return 0;
1179}
1180
1181static int knav_queue_setup_link_ram(struct knav_device *kdev)
1182{
1183	struct knav_link_ram_block *block;
1184	struct knav_qmgr_info *qmgr;
1185
1186	for_each_qmgr(kdev, qmgr) {
1187		block = &kdev->link_rams[0];
1188		dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
1189			&block->dma, block->virt, block->size);
1190		writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
1191		if (kdev->version == QMSS_66AK2G)
1192			writel_relaxed(block->size,
1193				       &qmgr->reg_config->link_ram_size0);
1194		else
1195			writel_relaxed(block->size - 1,
1196				       &qmgr->reg_config->link_ram_size0);
1197		block++;
1198		if (!block->size)
1199			continue;
1200
1201		dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
1202			&block->dma, block->virt, block->size);
1203		writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
1204	}
1205
1206	return 0;
1207}
1208
1209static int knav_setup_queue_range(struct knav_device *kdev,
1210					struct device_node *node)
1211{
1212	struct device *dev = kdev->dev;
1213	struct knav_range_info *range;
1214	struct knav_qmgr_info *qmgr;
1215	u32 temp[2], start, end, id, index;
1216	int ret, i;
1217
1218	range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
1219	if (!range) {
1220		dev_err(dev, "out of memory allocating range\n");
1221		return -ENOMEM;
1222	}
1223
1224	range->kdev = kdev;
1225	range->name = knav_queue_find_name(node);
1226	ret = of_property_read_u32_array(node, "qrange", temp, 2);
1227	if (!ret) {
1228		range->queue_base = temp[0] - kdev->base_id;
1229		range->num_queues = temp[1];
1230	} else {
1231		dev_err(dev, "invalid queue range %s\n", range->name);
1232		devm_kfree(dev, range);
1233		return -EINVAL;
1234	}
1235
1236	for (i = 0; i < RANGE_MAX_IRQS; i++) {
1237		struct of_phandle_args oirq;
1238
1239		if (of_irq_parse_one(node, i, &oirq))
1240			break;
1241
1242		range->irqs[i].irq = irq_create_of_mapping(&oirq);
1243		if (range->irqs[i].irq == IRQ_NONE)
1244			break;
1245
1246		range->num_irqs++;
1247
1248		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
1249			unsigned long mask;
1250			int bit;
1251
1252			range->irqs[i].cpu_mask = devm_kzalloc(dev,
1253							       cpumask_size(), GFP_KERNEL);
1254			if (!range->irqs[i].cpu_mask)
1255				return -ENOMEM;
1256
1257			mask = (oirq.args[2] & 0x0000ff00) >> 8;
1258			for_each_set_bit(bit, &mask, BITS_PER_LONG)
1259				cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
1260		}
1261	}
1262
1263	range->num_irqs = min(range->num_irqs, range->num_queues);
1264	if (range->num_irqs)
1265		range->flags |= RANGE_HAS_IRQ;
1266
1267	if (of_get_property(node, "qalloc-by-id", NULL))
1268		range->flags |= RANGE_RESERVED;
1269
1270	if (of_get_property(node, "accumulator", NULL)) {
1271		ret = knav_init_acc_range(kdev, node, range);
1272		if (ret < 0) {
1273			devm_kfree(dev, range);
1274			return ret;
1275		}
1276	} else {
1277		range->ops = &knav_gp_range_ops;
1278	}
1279
1280	/* set threshold to 1, and flush out the queues */
1281	for_each_qmgr(kdev, qmgr) {
1282		start = max(qmgr->start_queue, range->queue_base);
1283		end   = min(qmgr->start_queue + qmgr->num_queues,
1284			    range->queue_base + range->num_queues);
1285		for (id = start; id < end; id++) {
1286			index = id - qmgr->start_queue;
1287			writel_relaxed(THRESH_GTE | 1,
1288				       &qmgr->reg_peek[index].ptr_size_thresh);
1289			writel_relaxed(0,
1290				       &qmgr->reg_push[index].ptr_size_thresh);
1291		}
1292	}
1293
1294	list_add_tail(&range->list, &kdev->queue_ranges);
1295	dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
1296		range->name, range->queue_base,
1297		range->queue_base + range->num_queues - 1,
1298		range->num_irqs,
1299		(range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
1300		(range->flags & RANGE_RESERVED) ? ", reserved" : "",
1301		(range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
1302	kdev->num_queues_in_use += range->num_queues;
1303	return 0;
1304}
1305
1306static int knav_setup_queue_pools(struct knav_device *kdev,
1307				   struct device_node *queue_pools)
1308{
1309	struct device_node *type, *range;
 
1310
1311	for_each_child_of_node(queue_pools, type) {
1312		for_each_child_of_node(type, range) {
 
1313			/* return value ignored, we init the rest... */
1314			knav_setup_queue_range(kdev, range);
1315		}
1316	}
1317
1318	/* ... and barf if they all failed! */
1319	if (list_empty(&kdev->queue_ranges)) {
1320		dev_err(kdev->dev, "no valid queue range found\n");
1321		return -ENODEV;
1322	}
1323	return 0;
1324}
1325
1326static void knav_free_queue_range(struct knav_device *kdev,
1327				  struct knav_range_info *range)
1328{
1329	if (range->ops && range->ops->free_range)
1330		range->ops->free_range(range);
1331	list_del(&range->list);
1332	devm_kfree(kdev->dev, range);
1333}
1334
1335static void knav_free_queue_ranges(struct knav_device *kdev)
1336{
1337	struct knav_range_info *range;
1338
1339	for (;;) {
1340		range = first_queue_range(kdev);
1341		if (!range)
1342			break;
1343		knav_free_queue_range(kdev, range);
1344	}
1345}
1346
1347static void knav_queue_free_regions(struct knav_device *kdev)
1348{
1349	struct knav_region *region;
1350	struct knav_pool *pool, *tmp;
1351	unsigned size;
1352
1353	for (;;) {
1354		region = first_region(kdev);
1355		if (!region)
1356			break;
1357		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
1358			knav_pool_destroy(pool);
1359
1360		size = region->virt_end - region->virt_start;
1361		if (size)
1362			free_pages_exact(region->virt_start, size);
1363		list_del(&region->list);
1364		devm_kfree(kdev->dev, region);
1365	}
1366}
1367
1368static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
1369					struct device_node *node, int index)
1370{
1371	struct resource res;
1372	void __iomem *regs;
1373	int ret;
1374
1375	ret = of_address_to_resource(node, index, &res);
1376	if (ret) {
1377		dev_err(kdev->dev, "Can't translate of node(%pOFn) address for index(%d)\n",
1378			node, index);
1379		return ERR_PTR(ret);
1380	}
1381
1382	regs = devm_ioremap_resource(kdev->dev, &res);
1383	if (IS_ERR(regs))
1384		dev_err(kdev->dev, "Failed to map register base for index(%d) node(%pOFn)\n",
1385			index, node);
1386	return regs;
1387}
1388
1389static int knav_queue_init_qmgrs(struct knav_device *kdev,
1390					struct device_node *qmgrs)
1391{
1392	struct device *dev = kdev->dev;
1393	struct knav_qmgr_info *qmgr;
1394	struct device_node *child;
1395	u32 temp[2];
1396	int ret;
1397
1398	for_each_child_of_node(qmgrs, child) {
1399		qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
1400		if (!qmgr) {
1401			of_node_put(child);
1402			dev_err(dev, "out of memory allocating qmgr\n");
1403			return -ENOMEM;
1404		}
1405
1406		ret = of_property_read_u32_array(child, "managed-queues",
1407						 temp, 2);
1408		if (!ret) {
1409			qmgr->start_queue = temp[0];
1410			qmgr->num_queues = temp[1];
1411		} else {
1412			dev_err(dev, "invalid qmgr queue range\n");
1413			devm_kfree(dev, qmgr);
1414			continue;
1415		}
1416
1417		dev_info(dev, "qmgr start queue %d, number of queues %d\n",
1418			 qmgr->start_queue, qmgr->num_queues);
1419
1420		qmgr->reg_peek =
1421			knav_queue_map_reg(kdev, child,
1422					   KNAV_QUEUE_PEEK_REG_INDEX);
1423
1424		if (kdev->version == QMSS) {
1425			qmgr->reg_status =
1426				knav_queue_map_reg(kdev, child,
1427						   KNAV_QUEUE_STATUS_REG_INDEX);
1428		}
1429
1430		qmgr->reg_config =
1431			knav_queue_map_reg(kdev, child,
1432					   (kdev->version == QMSS_66AK2G) ?
1433					   KNAV_L_QUEUE_CONFIG_REG_INDEX :
1434					   KNAV_QUEUE_CONFIG_REG_INDEX);
1435		qmgr->reg_region =
1436			knav_queue_map_reg(kdev, child,
1437					   (kdev->version == QMSS_66AK2G) ?
1438					   KNAV_L_QUEUE_REGION_REG_INDEX :
1439					   KNAV_QUEUE_REGION_REG_INDEX);
1440
1441		qmgr->reg_push =
1442			knav_queue_map_reg(kdev, child,
1443					   (kdev->version == QMSS_66AK2G) ?
1444					    KNAV_L_QUEUE_PUSH_REG_INDEX :
1445					    KNAV_QUEUE_PUSH_REG_INDEX);
1446
1447		if (kdev->version == QMSS) {
1448			qmgr->reg_pop =
1449				knav_queue_map_reg(kdev, child,
1450						   KNAV_QUEUE_POP_REG_INDEX);
1451		}
1452
1453		if (IS_ERR(qmgr->reg_peek) ||
1454		    ((kdev->version == QMSS) &&
1455		    (IS_ERR(qmgr->reg_status) || IS_ERR(qmgr->reg_pop))) ||
1456		    IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
1457		    IS_ERR(qmgr->reg_push)) {
1458			dev_err(dev, "failed to map qmgr regs\n");
1459			if (kdev->version == QMSS) {
1460				if (!IS_ERR(qmgr->reg_status))
1461					devm_iounmap(dev, qmgr->reg_status);
1462				if (!IS_ERR(qmgr->reg_pop))
1463					devm_iounmap(dev, qmgr->reg_pop);
1464			}
1465			if (!IS_ERR(qmgr->reg_peek))
1466				devm_iounmap(dev, qmgr->reg_peek);
1467			if (!IS_ERR(qmgr->reg_config))
1468				devm_iounmap(dev, qmgr->reg_config);
1469			if (!IS_ERR(qmgr->reg_region))
1470				devm_iounmap(dev, qmgr->reg_region);
1471			if (!IS_ERR(qmgr->reg_push))
1472				devm_iounmap(dev, qmgr->reg_push);
1473			devm_kfree(dev, qmgr);
1474			continue;
1475		}
1476
1477		/* Use same push register for pop as well */
1478		if (kdev->version == QMSS_66AK2G)
1479			qmgr->reg_pop = qmgr->reg_push;
1480
1481		list_add_tail(&qmgr->list, &kdev->qmgrs);
1482		dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
1483			 qmgr->start_queue, qmgr->num_queues,
1484			 qmgr->reg_peek, qmgr->reg_status,
1485			 qmgr->reg_config, qmgr->reg_region,
1486			 qmgr->reg_push, qmgr->reg_pop);
1487	}
1488	return 0;
1489}
1490
1491static int knav_queue_init_pdsps(struct knav_device *kdev,
1492					struct device_node *pdsps)
1493{
1494	struct device *dev = kdev->dev;
1495	struct knav_pdsp_info *pdsp;
1496	struct device_node *child;
1497
1498	for_each_child_of_node(pdsps, child) {
1499		pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
1500		if (!pdsp) {
1501			of_node_put(child);
1502			dev_err(dev, "out of memory allocating pdsp\n");
1503			return -ENOMEM;
1504		}
1505		pdsp->name = knav_queue_find_name(child);
1506		pdsp->iram =
1507			knav_queue_map_reg(kdev, child,
1508					   KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
1509		pdsp->regs =
1510			knav_queue_map_reg(kdev, child,
1511					   KNAV_QUEUE_PDSP_REGS_REG_INDEX);
1512		pdsp->intd =
1513			knav_queue_map_reg(kdev, child,
1514					   KNAV_QUEUE_PDSP_INTD_REG_INDEX);
1515		pdsp->command =
1516			knav_queue_map_reg(kdev, child,
1517					   KNAV_QUEUE_PDSP_CMD_REG_INDEX);
1518
1519		if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
1520		    IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
1521			dev_err(dev, "failed to map pdsp %s regs\n",
1522				pdsp->name);
1523			if (!IS_ERR(pdsp->command))
1524				devm_iounmap(dev, pdsp->command);
1525			if (!IS_ERR(pdsp->iram))
1526				devm_iounmap(dev, pdsp->iram);
1527			if (!IS_ERR(pdsp->regs))
1528				devm_iounmap(dev, pdsp->regs);
1529			if (!IS_ERR(pdsp->intd))
1530				devm_iounmap(dev, pdsp->intd);
1531			devm_kfree(dev, pdsp);
1532			continue;
1533		}
1534		of_property_read_u32(child, "id", &pdsp->id);
1535		list_add_tail(&pdsp->list, &kdev->pdsps);
1536		dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
1537			pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
1538			pdsp->intd);
1539	}
1540	return 0;
1541}
1542
1543static int knav_queue_stop_pdsp(struct knav_device *kdev,
1544			  struct knav_pdsp_info *pdsp)
1545{
1546	u32 val, timeout = 1000;
1547	int ret;
1548
1549	val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
1550	writel_relaxed(val, &pdsp->regs->control);
1551	ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
1552					PDSP_CTRL_RUNNING);
1553	if (ret < 0) {
1554		dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
1555		return ret;
1556	}
1557	pdsp->loaded = false;
1558	pdsp->started = false;
1559	return 0;
1560}
1561
1562static int knav_queue_load_pdsp(struct knav_device *kdev,
1563			  struct knav_pdsp_info *pdsp)
1564{
1565	int i, ret, fwlen;
1566	const struct firmware *fw;
1567	bool found = false;
1568	u32 *fwdata;
1569
1570	for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
1571		if (knav_acc_firmwares[i]) {
1572			ret = request_firmware_direct(&fw,
1573						      knav_acc_firmwares[i],
1574						      kdev->dev);
1575			if (!ret) {
1576				found = true;
1577				break;
1578			}
1579		}
1580	}
1581
1582	if (!found) {
1583		dev_err(kdev->dev, "failed to get firmware for pdsp\n");
1584		return -ENODEV;
1585	}
1586
1587	dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
1588		 knav_acc_firmwares[i]);
1589
1590	writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
1591	/* download the firmware */
1592	fwdata = (u32 *)fw->data;
1593	fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
1594	for (i = 0; i < fwlen; i++)
1595		writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);
1596
1597	release_firmware(fw);
1598	return 0;
1599}
1600
1601static int knav_queue_start_pdsp(struct knav_device *kdev,
1602			   struct knav_pdsp_info *pdsp)
1603{
1604	u32 val, timeout = 1000;
1605	int ret;
1606
1607	/* write a command for sync */
1608	writel_relaxed(0xffffffff, pdsp->command);
1609	while (readl_relaxed(pdsp->command) != 0xffffffff)
1610		cpu_relax();
1611
1612	/* soft reset the PDSP */
1613	val  = readl_relaxed(&pdsp->regs->control);
1614	val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
1615	writel_relaxed(val, &pdsp->regs->control);
1616
1617	/* enable pdsp */
1618	val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
1619	writel_relaxed(val, &pdsp->regs->control);
1620
1621	/* wait for command register to clear */
1622	ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
1623	if (ret < 0) {
1624		dev_err(kdev->dev,
1625			"timed out on pdsp %s command register wait\n",
1626			pdsp->name);
1627		return ret;
1628	}
1629	return 0;
1630}
1631
1632static void knav_queue_stop_pdsps(struct knav_device *kdev)
1633{
1634	struct knav_pdsp_info *pdsp;
1635
1636	/* disable all pdsps */
1637	for_each_pdsp(kdev, pdsp)
1638		knav_queue_stop_pdsp(kdev, pdsp);
1639}
1640
1641static int knav_queue_start_pdsps(struct knav_device *kdev)
1642{
1643	struct knav_pdsp_info *pdsp;
1644	int ret;
1645
1646	knav_queue_stop_pdsps(kdev);
1647	/* now load them all. We return success even if pdsp
1648	 * is not loaded as acc channels are optional on having
1649	 * firmware availability in the system. We set the loaded
1650	 * and stated flag and when initialize the acc range, check
1651	 * it and init the range only if pdsp is started.
1652	 */
1653	for_each_pdsp(kdev, pdsp) {
1654		ret = knav_queue_load_pdsp(kdev, pdsp);
1655		if (!ret)
1656			pdsp->loaded = true;
1657	}
1658
1659	for_each_pdsp(kdev, pdsp) {
1660		if (pdsp->loaded) {
1661			ret = knav_queue_start_pdsp(kdev, pdsp);
1662			if (!ret)
1663				pdsp->started = true;
1664		}
1665	}
1666	return 0;
1667}
1668
1669static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
1670{
1671	struct knav_qmgr_info *qmgr;
1672
1673	for_each_qmgr(kdev, qmgr) {
1674		if ((id >= qmgr->start_queue) &&
1675		    (id < qmgr->start_queue + qmgr->num_queues))
1676			return qmgr;
1677	}
1678	return NULL;
1679}
1680
1681static int knav_queue_init_queue(struct knav_device *kdev,
1682					struct knav_range_info *range,
1683					struct knav_queue_inst *inst,
1684					unsigned id)
1685{
1686	char irq_name[KNAV_NAME_SIZE];
1687	inst->qmgr = knav_find_qmgr(id);
1688	if (!inst->qmgr)
1689		return -1;
1690
1691	INIT_LIST_HEAD(&inst->handles);
1692	inst->kdev = kdev;
1693	inst->range = range;
1694	inst->irq_num = -1;
1695	inst->id = id;
1696	scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
1697	inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);
1698
1699	if (range->ops && range->ops->init_queue)
1700		return range->ops->init_queue(range, inst);
1701	else
1702		return 0;
1703}
1704
1705static int knav_queue_init_queues(struct knav_device *kdev)
1706{
1707	struct knav_range_info *range;
1708	int size, id, base_idx;
1709	int idx = 0, ret = 0;
1710
1711	/* how much do we need for instance data? */
1712	size = sizeof(struct knav_queue_inst);
1713
1714	/* round this up to a power of 2, keep the index to instance
1715	 * arithmetic fast.
1716	 * */
1717	kdev->inst_shift = order_base_2(size);
1718	size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
1719	kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
1720	if (!kdev->instances)
1721		return -ENOMEM;
1722
1723	for_each_queue_range(kdev, range) {
1724		if (range->ops && range->ops->init_range)
1725			range->ops->init_range(range);
1726		base_idx = idx;
1727		for (id = range->queue_base;
1728		     id < range->queue_base + range->num_queues; id++, idx++) {
1729			ret = knav_queue_init_queue(kdev, range,
1730					knav_queue_idx_to_inst(kdev, idx), id);
1731			if (ret < 0)
1732				return ret;
1733		}
1734		range->queue_base_inst =
1735			knav_queue_idx_to_inst(kdev, base_idx);
1736	}
1737	return 0;
1738}
1739
1740/* Match table for of_platform binding */
1741static const struct of_device_id keystone_qmss_of_match[] = {
1742	{
1743		.compatible = "ti,keystone-navigator-qmss",
1744	},
1745	{
1746		.compatible = "ti,66ak2g-navss-qm",
1747		.data	= (void *)QMSS_66AK2G,
1748	},
1749	{},
1750};
1751MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);
1752
1753static int knav_queue_probe(struct platform_device *pdev)
1754{
1755	struct device_node *node = pdev->dev.of_node;
1756	struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
1757	const struct of_device_id *match;
1758	struct device *dev = &pdev->dev;
1759	u32 temp[2];
1760	int ret;
1761
1762	if (!node) {
1763		dev_err(dev, "device tree info unavailable\n");
1764		return -ENODEV;
1765	}
1766
1767	kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
1768	if (!kdev) {
1769		dev_err(dev, "memory allocation failed\n");
1770		return -ENOMEM;
1771	}
1772
1773	match = of_match_device(of_match_ptr(keystone_qmss_of_match), dev);
1774	if (match && match->data)
1775		kdev->version = QMSS_66AK2G;
1776
1777	platform_set_drvdata(pdev, kdev);
1778	kdev->dev = dev;
1779	INIT_LIST_HEAD(&kdev->queue_ranges);
1780	INIT_LIST_HEAD(&kdev->qmgrs);
1781	INIT_LIST_HEAD(&kdev->pools);
1782	INIT_LIST_HEAD(&kdev->regions);
1783	INIT_LIST_HEAD(&kdev->pdsps);
1784
1785	pm_runtime_enable(&pdev->dev);
1786	ret = pm_runtime_resume_and_get(&pdev->dev);
1787	if (ret < 0) {
1788		pm_runtime_disable(&pdev->dev);
1789		dev_err(dev, "Failed to enable QMSS\n");
1790		return ret;
1791	}
1792
1793	if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
1794		dev_err(dev, "queue-range not specified\n");
1795		ret = -ENODEV;
1796		goto err;
1797	}
1798	kdev->base_id    = temp[0];
1799	kdev->num_queues = temp[1];
1800
1801	/* Initialize queue managers using device tree configuration */
1802	qmgrs =  of_get_child_by_name(node, "qmgrs");
1803	if (!qmgrs) {
1804		dev_err(dev, "queue manager info not specified\n");
1805		ret = -ENODEV;
1806		goto err;
1807	}
1808	ret = knav_queue_init_qmgrs(kdev, qmgrs);
1809	of_node_put(qmgrs);
1810	if (ret)
1811		goto err;
1812
1813	/* get pdsp configuration values from device tree */
1814	pdsps =  of_get_child_by_name(node, "pdsps");
1815	if (pdsps) {
1816		ret = knav_queue_init_pdsps(kdev, pdsps);
1817		if (ret)
1818			goto err;
1819
1820		ret = knav_queue_start_pdsps(kdev);
1821		if (ret)
1822			goto err;
1823	}
1824	of_node_put(pdsps);
1825
1826	/* get usable queue range values from device tree */
1827	queue_pools = of_get_child_by_name(node, "queue-pools");
1828	if (!queue_pools) {
1829		dev_err(dev, "queue-pools not specified\n");
1830		ret = -ENODEV;
1831		goto err;
1832	}
1833	ret = knav_setup_queue_pools(kdev, queue_pools);
1834	of_node_put(queue_pools);
1835	if (ret)
1836		goto err;
1837
1838	ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
1839	if (ret) {
1840		dev_err(kdev->dev, "could not setup linking ram\n");
1841		goto err;
1842	}
1843
1844	ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
1845	if (ret) {
1846		/*
1847		 * nothing really, we have one linking ram already, so we just
1848		 * live within our means
1849		 */
1850	}
1851
1852	ret = knav_queue_setup_link_ram(kdev);
1853	if (ret)
1854		goto err;
1855
1856	regions = of_get_child_by_name(node, "descriptor-regions");
1857	if (!regions) {
1858		dev_err(dev, "descriptor-regions not specified\n");
1859		ret = -ENODEV;
1860		goto err;
1861	}
1862	ret = knav_queue_setup_regions(kdev, regions);
1863	of_node_put(regions);
1864	if (ret)
1865		goto err;
1866
1867	ret = knav_queue_init_queues(kdev);
1868	if (ret < 0) {
1869		dev_err(dev, "hwqueue initialization failed\n");
1870		goto err;
1871	}
1872
1873	debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
1874			    &knav_queue_debug_fops);
1875	device_ready = true;
1876	return 0;
1877
1878err:
1879	knav_queue_stop_pdsps(kdev);
1880	knav_queue_free_regions(kdev);
1881	knav_free_queue_ranges(kdev);
1882	pm_runtime_put_sync(&pdev->dev);
1883	pm_runtime_disable(&pdev->dev);
1884	return ret;
1885}
1886
1887static int knav_queue_remove(struct platform_device *pdev)
1888{
1889	/* TODO: Free resources */
1890	pm_runtime_put_sync(&pdev->dev);
1891	pm_runtime_disable(&pdev->dev);
1892	return 0;
1893}
1894
1895static struct platform_driver keystone_qmss_driver = {
1896	.probe		= knav_queue_probe,
1897	.remove		= knav_queue_remove,
1898	.driver		= {
1899		.name	= "keystone-navigator-qmss",
1900		.of_match_table = keystone_qmss_of_match,
1901	},
1902};
1903module_platform_driver(keystone_qmss_driver);
1904
1905MODULE_LICENSE("GPL v2");
1906MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
1907MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
1908MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");