v5.9
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME

#include <linux/atomic.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <soc/qcom/cmd-db.h>
#include <soc/qcom/tcs.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>

#include "rpmh-internal.h"

#define CREATE_TRACE_POINTS
#include "trace-rpmh.h"

#define RSC_DRV_TCS_OFFSET		672
#define RSC_DRV_CMD_OFFSET		20

/* DRV HW Solver Configuration Information Register */
#define DRV_SOLVER_CONFIG		0x04
#define DRV_HW_SOLVER_MASK		1
#define DRV_HW_SOLVER_SHIFT		24

/* DRV TCS Configuration Information Register */
#define DRV_PRNT_CHLD_CONFIG		0x0C
#define DRV_NUM_TCS_MASK		0x3F
#define DRV_NUM_TCS_SHIFT		6
#define DRV_NCPT_MASK			0x1F
#define DRV_NCPT_SHIFT			27

/* Offsets for common TCS Registers, one bit per TCS */
#define RSC_DRV_IRQ_ENABLE		0x00
#define RSC_DRV_IRQ_STATUS		0x04
#define RSC_DRV_IRQ_CLEAR		0x08	/* w/o; write 1 to clear */

/*
 * Offsets for per TCS Registers.
 *
 * TCSes start at 0x10 from tcs_base and are stored one after another.
 * Multiply tcs_id by RSC_DRV_TCS_OFFSET to find a given TCS and add one
 * of the below to find a register.
 */
#define RSC_DRV_CMD_WAIT_FOR_CMPL	0x10	/* 1 bit per command */
#define RSC_DRV_CONTROL			0x14
#define RSC_DRV_STATUS			0x18	/* zero if tcs is busy */
#define RSC_DRV_CMD_ENABLE		0x1C	/* 1 bit per command */

/*
 * Offsets for per command in a TCS.
 *
 * Commands (up to 16) start at 0x30 in a TCS; multiply command index
 * by RSC_DRV_CMD_OFFSET and add one of the below to find a register.
 */
#define RSC_DRV_CMD_MSGID		0x30
#define RSC_DRV_CMD_ADDR		0x34
#define RSC_DRV_CMD_DATA		0x38
#define RSC_DRV_CMD_STATUS		0x3C
#define RSC_DRV_CMD_RESP_DATA		0x40

#define TCS_AMC_MODE_ENABLE		BIT(16)
#define TCS_AMC_MODE_TRIGGER		BIT(24)

/* TCS CMD register bit mask */
#define CMD_MSGID_LEN			8
#define CMD_MSGID_RESP_REQ		BIT(8)
#define CMD_MSGID_WRITE			BIT(16)
#define CMD_STATUS_ISSUED		BIT(8)
#define CMD_STATUS_COMPL		BIT(16)

/*
 * Here's a high level overview of how all the registers in RPMH work
 * together:
 *
 * - The main rpmh-rsc address is the base of a register space that can
 *   be used to find overall configuration of the hardware
 *   (DRV_PRNT_CHLD_CONFIG). Also found within the rpmh-rsc register
 *   space are all the TCS blocks. The offset of the TCS blocks is
 *   specified in the device tree by "qcom,tcs-offset" and used to
 *   compute tcs_base.
 * - TCS blocks come one after another. Type, count, and order are
 *   specified by the device tree as "qcom,tcs-config".
 * - Each TCS block has some registers, then space for up to 16 commands.
 *   Note that though address space is reserved for 16 commands, fewer
 *   might be present. See ncpt (num cmds per TCS).
 *
 * Here's a picture:
 *
 *  +---------------------------------------------------+
 *  |RSC                                                |
 *  | ctrl                                              |
 *  |                                                   |
 *  | Drvs:                                             |
 *  | +-----------------------------------------------+ |
 *  | |DRV0                                           | |
 *  | | ctrl/config                                   | |
 *  | | IRQ                                           | |
 *  | |                                               | |
 *  | | TCSes:                                        | |
 *  | | +------------------------------------------+  | |
 *  | | |TCS0  |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15|  | |
 *  | | |      |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | +------------------------------------------+  | |
 *  | | +------------------------------------------+  | |
 *  | | |TCS1  |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15|  | |
 *  | | |      |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | +------------------------------------------+  | |
 *  | | +------------------------------------------+  | |
 *  | | |TCS2  |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15|  | |
 *  | | |      |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | +------------------------------------------+  | |
 *  | |                    ......                     | |
 *  | +-----------------------------------------------+ |
 *  | +-----------------------------------------------+ |
 *  | |DRV1                                           | |
 *  | | (same as DRV0)                                | |
 *  | +-----------------------------------------------+ |
 *  |                      ......                       |
 *  +---------------------------------------------------+
 */
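
/*
 * Worked example of the layout above (an illustrative sketch using only
 * the offsets defined in this file): with RSC_DRV_TCS_OFFSET = 672 and
 * RSC_DRV_CMD_OFFSET = 20, the DATA register of command 2 in TCS 3 is at
 *
 *   tcs_base + 3 * 672 + RSC_DRV_CMD_DATA + 2 * 20
 *
 * which is exactly the address that tcs_cmd_addr() below computes.
 */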

static inline void __iomem *
tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id)
{
	return drv->tcs_base + RSC_DRV_TCS_OFFSET * tcs_id + reg;
}

static inline void __iomem *
tcs_cmd_addr(const struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
{
	return tcs_reg_addr(drv, reg, tcs_id) + RSC_DRV_CMD_OFFSET * cmd_id;
}

static u32 read_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
			int cmd_id)
{
	return readl_relaxed(tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}

static u32 read_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id)
{
	return readl_relaxed(tcs_reg_addr(drv, reg, tcs_id));
}

static void write_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
			  int cmd_id, u32 data)
{
	writel_relaxed(data, tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}

static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id,
			  u32 data)
{
	writel_relaxed(data, tcs_reg_addr(drv, reg, tcs_id));
}

static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id,
			       u32 data)
{
	int i;

	writel(data, tcs_reg_addr(drv, reg, tcs_id));

	/*
	 * Wait until we read back the same value.  Use a counter rather than
	 * ktime for timeout since this may be called after timekeeping stops.
	 */
	for (i = 0; i < USEC_PER_SEC; i++) {
		if (readl(tcs_reg_addr(drv, reg, tcs_id)) == data)
			return;
		udelay(1);
	}
	pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
	       data, tcs_id, reg);
}

/**
 * tcs_is_free() - Return if a TCS is totally free.
 * @drv:    The RSC controller.
 * @tcs_id: The global ID of this TCS.
 *
 * Returns true if nobody has claimed this TCS (by setting tcs_in_use).
 *
 * Context: Must be called with the drv->lock held.
 *
 * Return: true if the given TCS is free.
 */
static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
{
	return !test_bit(tcs_id, drv->tcs_in_use);
}

/**
 * tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake).
 * @drv:  The RSC controller.
 * @type: SLEEP_TCS or WAKE_TCS
 *
 * This will clear the "slots" variable of the given tcs_group and also
 * tell the hardware to forget about all entries.
 *
 * The caller must ensure that no other RPMH actions are happening when this
 * function is called, since otherwise the device may immediately become
 * used again even before this function exits.
 */
static void tcs_invalidate(struct rsc_drv *drv, int type)
{
	int m;
	struct tcs_group *tcs = &drv->tcs[type];

	/* Caller ensures nobody else is running so no lock */
	if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS))
		return;

	for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
		write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
		write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0);
	}
	bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
}

/**
 * rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes.
 * @drv: The RSC controller.
 *
 * The caller must ensure that no other RPMH actions are happening when this
 * function is called, since otherwise the device may immediately become
 * used again even before this function exits.
 */
void rpmh_rsc_invalidate(struct rsc_drv *drv)
{
	tcs_invalidate(drv, SLEEP_TCS);
	tcs_invalidate(drv, WAKE_TCS);
}

/**
 * get_tcs_for_msg() - Get the tcs_group used to send the given message.
 * @drv: The RSC controller.
 * @msg: The message we want to send.
 *
 * This is normally pretty straightforward except if we are trying to send
 * an ACTIVE_ONLY message but don't have any active_only TCSes.
 *
 * Return: A pointer to a tcs_group or an ERR_PTR.
 */
static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
					 const struct tcs_request *msg)
{
	int type;
	struct tcs_group *tcs;

	switch (msg->state) {
	case RPMH_ACTIVE_ONLY_STATE:
		type = ACTIVE_TCS;
		break;
	case RPMH_WAKE_ONLY_STATE:
		type = WAKE_TCS;
		break;
	case RPMH_SLEEP_STATE:
		type = SLEEP_TCS;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	/*
	 * If we are making an active request on an RSC that does not have a
	 * dedicated TCS for active state use, then re-purpose a wake TCS to
	 * send active votes. This is safe because we ensure any active-only
	 * transfers have finished before we use it (maybe by running from
	 * the last CPU in PM code).
	 */
	tcs = &drv->tcs[type];
	if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
		tcs = &drv->tcs[WAKE_TCS];

	return tcs;
}

/**
 * get_req_from_tcs() - Get a stashed request that was xfering on the given TCS.
 * @drv:    The RSC controller.
 * @tcs_id: The global ID of this TCS.
 *
 * For ACTIVE_ONLY transfers we want to call back into the client when the
 * transfer finishes. To do this we need the "request" that the client
 * originally provided us. This function grabs the request that we stashed
 * when we started the transfer.
 *
 * This only makes sense for ACTIVE_ONLY transfers since those are the only
 * ones we track sending (the only ones we enable interrupts for and the only
 * ones we call back to the client for).
 *
 * Return: The stashed request.
 */
static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
						  int tcs_id)
{
	struct tcs_group *tcs;
	int i;

	for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &drv->tcs[i];
		if (tcs->mask & BIT(tcs_id))
			return tcs->req[tcs_id - tcs->offset];
	}

	return NULL;
}

/**
 * __tcs_set_trigger() - Start xfer on a TCS or unset trigger on a borrowed TCS
 * @drv:     The controller.
 * @tcs_id:  The global ID of this TCS.
 * @trigger: If true then untrigger/retrigger. If false then just untrigger.
 *
 * In the normal case we only ever call with "trigger=true" to start a
 * transfer. That will un-trigger/disable the TCS from the last transfer
 * then trigger/enable for this transfer.
 *
 * If we borrowed a wake TCS for an active-only transfer we'll also call
 * this function with "trigger=false" to just do the un-trigger/disable
 * before using the TCS for wake purposes again.
 *
 * Note that the AP is only in charge of triggering active-only transfers.
 * The AP never triggers sleep/wake values using this function.
 */
static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
{
	u32 enable;

	/*
	 * HW req: Clear the DRV_CONTROL and enable TCS again
	 * While clearing ensure that the AMC mode trigger is cleared
	 * and then the mode enable is cleared.
	 */
	enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id);
	enable &= ~TCS_AMC_MODE_TRIGGER;
	write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
	enable &= ~TCS_AMC_MODE_ENABLE;
	write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);

	if (trigger) {
		/* Enable the AMC mode on the TCS and then trigger the TCS */
		enable = TCS_AMC_MODE_ENABLE;
		write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
		enable |= TCS_AMC_MODE_TRIGGER;
		write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
	}
}

/**
 * enable_tcs_irq() - Enable or disable interrupts on the given TCS.
 * @drv:     The controller.
 * @tcs_id:  The global ID of this TCS.
 * @enable:  If true then enable; if false then disable
 *
 * We only ever call this when we borrow a wake TCS for an active-only
 * transfer. For active-only TCSes interrupts are always left enabled.
 */
static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
{
	u32 data;

	data = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_ENABLE);
	if (enable)
		data |= BIT(tcs_id);
	else
		data &= ~BIT(tcs_id);
	writel_relaxed(data, drv->tcs_base + RSC_DRV_IRQ_ENABLE);
}

/**
 * tcs_tx_done() - TX Done interrupt handler.
 * @irq: The IRQ number (ignored).
 * @p:   Pointer to "struct rsc_drv".
 *
 * Called for ACTIVE_ONLY transfers (those are the only ones we enable the
 * IRQ for) when a transfer is done.
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t tcs_tx_done(int irq, void *p)
{
	struct rsc_drv *drv = p;
	int i, j, err = 0;
	unsigned long irq_status;
	const struct tcs_request *req;
	struct tcs_cmd *cmd;

	irq_status = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_STATUS);

	for_each_set_bit(i, &irq_status, BITS_PER_LONG) {
		req = get_req_from_tcs(drv, i);
		if (!req) {
			WARN_ON(1);
			goto skip;
		}

		err = 0;
		for (j = 0; j < req->num_cmds; j++) {
			u32 sts;

			cmd = &req->cmds[j];
			sts = read_tcs_cmd(drv, RSC_DRV_CMD_STATUS, i, j);
			if (!(sts & CMD_STATUS_ISSUED) ||
			   ((req->wait_for_compl || cmd->wait) &&
			   !(sts & CMD_STATUS_COMPL))) {
				pr_err("Incomplete request: %s: addr=%#x data=%#x",
				       drv->name, cmd->addr, cmd->data);
				err = -EIO;
			}
		}

		trace_rpmh_tx_done(drv, i, req, err);

		/*
		 * If wake tcs was re-purposed for sending active
		 * votes, clear AMC trigger & enable modes and
		 * disable interrupt for this TCS
		 */
		if (!drv->tcs[ACTIVE_TCS].num_tcs)
			__tcs_set_trigger(drv, i, false);
skip:
		/* Reclaim the TCS */
		write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
		write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, i, 0);
		writel_relaxed(BIT(i), drv->tcs_base + RSC_DRV_IRQ_CLEAR);
		spin_lock(&drv->lock);
		clear_bit(i, drv->tcs_in_use);
		/*
		 * Disable interrupt for WAKE TCS to avoid being
		 * spammed with interrupts coming when the solver
		 * sends its wake votes.
		 */
		if (!drv->tcs[ACTIVE_TCS].num_tcs)
			enable_tcs_irq(drv, i, false);
		spin_unlock(&drv->lock);
		if (req)
			rpmh_tx_done(req, err);
	}

	return IRQ_HANDLED;
}

/**
 * __tcs_buffer_write() - Write to TCS hardware from a request; don't trigger.
 * @drv:    The controller.
 * @tcs_id: The global ID of this TCS.
 * @cmd_id: The index within the TCS to start writing.
 * @msg:    The message we want to send, which will contain several addr/data
 *          pairs to program (but few enough that they all fit in one TCS).
 *
 * This is used for all types of transfers (active, sleep, and wake).
 */
static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
			       const struct tcs_request *msg)
{
	u32 msgid, cmd_msgid;
	u32 cmd_enable = 0;
	u32 cmd_complete;
	struct tcs_cmd *cmd;
	int i, j;

	cmd_msgid = CMD_MSGID_LEN;
	cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
	cmd_msgid |= CMD_MSGID_WRITE;

	cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id);

	for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
		cmd = &msg->cmds[i];
		cmd_enable |= BIT(j);
		cmd_complete |= cmd->wait << j;
		msgid = cmd_msgid;
		msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;

		write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
		write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
		write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
		trace_rpmh_send_msg_rcuidle(drv, tcs_id, j, msgid, cmd);
	}

	write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
	cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);
	write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
}
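
/*
 * MSGID encoding worked example (an illustrative sketch): with
 * CMD_MSGID_LEN = 8, CMD_MSGID_WRITE = BIT(16) and
 * CMD_MSGID_RESP_REQ = BIT(8), a fire-and-forget write command is
 * programmed with msgid = 0x10008, and one that requests a response
 * (wait_for_compl or cmd->wait set) with msgid = 0x10108.
 */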

/**
 * check_for_req_inflight() - Look to see if conflicting cmds are in flight.
 * @drv: The controller.
 * @tcs: A pointer to the tcs_group used for ACTIVE_ONLY transfers.
 * @msg: The message we want to send, which will contain several addr/data
 *       pairs to program (but few enough that they all fit in one TCS).
 *
 * This will walk through the TCSes in the group and check if any of them
 * appear to be sending to addresses referenced in the message. If it finds
 * one it'll return -EBUSY.
 *
 * Only for use for active-only transfers.
 *
 * Must be called with the drv->lock held since that protects tcs_in_use.
 *
 * Return: 0 if nothing in flight or -EBUSY if we should try again later.
 *         The caller must re-enable interrupts between tries since that's
 *         the only way tcs_is_free() will ever return true and the only way
 *         RSC_DRV_CMD_ENABLE will ever be cleared.
 */
static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
				  const struct tcs_request *msg)
{
	unsigned long curr_enabled;
	u32 addr;
	int i, j, k;
	int tcs_id = tcs->offset;

	for (i = 0; i < tcs->num_tcs; i++, tcs_id++) {
		if (tcs_is_free(drv, tcs_id))
			continue;

		curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);

		for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
			addr = read_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
			for (k = 0; k < msg->num_cmds; k++) {
				if (addr == msg->cmds[k].addr)
					return -EBUSY;
			}
		}
	}

	return 0;
}

/**
 * find_free_tcs() - Find free tcs in the given tcs_group; only for active.
 * @tcs: A pointer to the active-only tcs_group (or the wake tcs_group if
 *       we borrowed it because there are zero active-only ones).
 *
 * Must be called with the drv->lock held since that protects tcs_in_use.
 *
 * Return: The first tcs that's free.
 */
static int find_free_tcs(struct tcs_group *tcs)
{
	int i;

	for (i = 0; i < tcs->num_tcs; i++) {
		if (tcs_is_free(tcs->drv, tcs->offset + i))
			return tcs->offset + i;
	}

	return -EBUSY;
}

/**
 * tcs_write() - Store messages into a TCS right now, or return -EBUSY.
 * @drv: The controller.
 * @msg: The data to be sent.
 *
 * Grabs a TCS for ACTIVE_ONLY transfers and writes the messages to it.
 *
 * If there are no free TCSes for ACTIVE_ONLY transfers or if a command for
 * the same address is already transferring returns -EBUSY which means the
 * client should retry shortly.
 *
 * Return: 0 on success, -EBUSY if client should retry, or an error.
 *         Client should have interrupts enabled for a bit before retrying.
 */
static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
{
	struct tcs_group *tcs;
	int tcs_id;
	unsigned long flags;
	int ret;

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	spin_lock_irqsave(&drv->lock, flags);
	/*
	 * The h/w does not like it if we send a request to the same address
	 * when one is already in flight or being processed.
	 */
	ret = check_for_req_inflight(drv, tcs, msg);
	if (ret)
		goto unlock;

	ret = find_free_tcs(tcs);
	if (ret < 0)
		goto unlock;
	tcs_id = ret;

	tcs->req[tcs_id - tcs->offset] = msg;
	set_bit(tcs_id, drv->tcs_in_use);
	if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
		/*
		 * Clear previously programmed WAKE commands in selected
		 * repurposed TCS to avoid triggering them. tcs->slots will be
		 * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
		 */
		write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
		write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
		enable_tcs_irq(drv, tcs_id, true);
	}
	spin_unlock_irqrestore(&drv->lock, flags);

	/*
	 * These two can be done after the lock is released because:
	 * - We marked "tcs_in_use" under lock.
	 * - Once "tcs_in_use" has been marked nobody else could be writing
	 *   to these registers until the interrupt goes off.
	 * - The interrupt can't go off until we trigger w/ the last line
	 *   of __tcs_set_trigger() below.
	 */
	__tcs_buffer_write(drv, tcs_id, 0, msg);
	__tcs_set_trigger(drv, tcs_id, true);

	return 0;
unlock:
	spin_unlock_irqrestore(&drv->lock, flags);
	return ret;
}

/**
 * rpmh_rsc_send_data() - Write / trigger active-only message.
 * @drv: The controller.
 * @msg: The data to be sent.
 *
 * NOTES:
 * - This is only used for "ACTIVE_ONLY" since the limitations of this
 *   function don't make sense for sleep/wake cases.
 * - To do the transfer, we will grab a whole TCS for ourselves--we don't
 *   try to share. If there are none available we'll wait indefinitely
 *   for a free one.
 * - This function will not wait for the commands to be finished, only for
 *   data to be programmed into the RPMh. See rpmh_tx_done() which will
 *   be called when the transfer is fully complete.
 * - This function must be called with interrupts enabled. If the hardware
 *   is busy doing someone else's transfer we need that transfer to fully
 *   finish so that we can have the hardware, and to fully finish it needs
 *   the interrupt handler to run. If the interrupt is set to run on the
 *   active CPU this can never happen if interrupts are disabled.
 *
 * Return: 0 on success, -EINVAL on error.
 */
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
	int ret;

	do {
		ret = tcs_write(drv, msg);
		if (ret == -EBUSY) {
			pr_info_ratelimited("TCS Busy, retrying RPMH message send: addr=%#x\n",
					    msg->cmds[0].addr);
			udelay(10);
		}
	} while (ret == -EBUSY);

	return ret;
}
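
/*
 * Illustrative client-side sketch (not part of this file; the field names
 * come from struct tcs_cmd / struct tcs_request in <soc/qcom/tcs.h>, while
 * the target address is hypothetical):
 *
 *   struct tcs_cmd cmd = {
 *           .addr = 0x30000,        // cmd-db address of some resource
 *           .data = 0x1,
 *           .wait = true,           // require completion for this command
 *   };
 *   struct tcs_request req = {
 *           .state = RPMH_ACTIVE_ONLY_STATE,
 *           .wait_for_compl = true,
 *           .num_cmds = 1,
 *           .cmds = &cmd,
 *   };
 *
 *   ret = rpmh_rsc_send_data(drv, &req);  // retries internally on -EBUSY
 */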

/**
 * find_slots() - Find a place to write the given message.
 * @tcs:    The tcs group to search.
 * @msg:    The message we want to find room for.
 * @tcs_id: If we return 0 from the function, we return the global ID of the
 *          TCS to write to here.
 * @cmd_id: If we return 0 from the function, we return the index of
 *          the command array of the returned TCS where the client should
 *          start writing the message.
 *
 * Only for use on sleep/wake TCSes since those are the only ones we maintain
 * tcs->slots for.
 *
 * Return: -ENOMEM if there was no room, else 0.
 */
static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
		      int *tcs_id, int *cmd_id)
{
	int slot, offset;
	int i = 0;

	/* Do over, until we can fit the full payload in a single TCS */
	do {
		slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
						  i, msg->num_cmds, 0);
		if (slot >= tcs->num_tcs * tcs->ncpt)
			return -ENOMEM;
		i += tcs->ncpt;
	} while (slot + msg->num_cmds - 1 >= i);

	bitmap_set(tcs->slots, slot, msg->num_cmds);

	offset = slot / tcs->ncpt;
	*tcs_id = offset + tcs->offset;
	*cmd_id = slot % tcs->ncpt;

	return 0;
}
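
/*
 * Worked example of the slot math above (an illustrative sketch): with
 * ncpt = 16, a returned slot of 37 means TCS index 37 / 16 = 2 within the
 * group (so *tcs_id = tcs->offset + 2) and command index 37 % 16 = 5
 * (so *cmd_id = 5). The do/while above guarantees the whole message fits
 * inside that one TCS's window of ncpt commands.
 */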

/**
 * rpmh_rsc_write_ctrl_data() - Write request to controller but don't trigger.
 * @drv: The controller.
 * @msg: The data to be written to the controller.
 *
 * This should only be called for sleep/wake state, never active-only
 * state.
 *
 * The caller must ensure that no other RPMH actions are happening and the
 * controller is idle when this function is called since it runs lockless.
 *
 * Return: 0 if no error; else -error.
 */
int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
	struct tcs_group *tcs;
	int tcs_id = 0, cmd_id = 0;
	int ret;

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	/* find the TCS id and the command in the TCS to write to */
	ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
	if (!ret)
		__tcs_buffer_write(drv, tcs_id, cmd_id, msg);

	return ret;
}

/**
 * rpmh_rsc_ctrlr_is_busy() - Check if any of the AMCs are busy.
 * @drv: The controller
 *
 * Checks if any of the AMCs are busy in handling ACTIVE sets.
 * This is called from the last CPU powering down before flushing
 * SLEEP and WAKE sets. If any AMC is busy, the controller cannot enter
 * power collapse, so the last CPU's PM notification is denied.
 *
 * Context: Must be called with the drv->lock held.
 *
 * Return:
 * * False		- AMCs are idle
 * * True		- AMCs are busy
 */
static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
{
	int m;
	struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS];

	/*
	 * If we made an active request on an RSC that does not have a
	 * dedicated TCS for active state use, then we re-purposed wake TCSes
	 * to send active votes, so check that those re-purposed wake TCSes
	 * are not busy.
	 */
	if (!tcs->num_tcs)
		tcs = &drv->tcs[WAKE_TCS];

	for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
		if (!tcs_is_free(drv, m))
			return true;
	}

	return false;
}

/**
 * rpmh_rsc_cpu_pm_callback() - Check if any of the AMCs are busy.
 * @nfb:    Pointer to the notifier block in struct rsc_drv.
 * @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT.
 * @v:      Unused
 *
 * This function is given to cpu_pm_register_notifier so we can be informed
 * about when CPUs go down. When all CPUs go down we know no more active
 * transfers will be started so we write sleep/wake sets. This function gets
 * called from cpuidle code paths and also at system suspend time.
 *
 * If it's the last CPU going down and the AMCs are not busy, this writes the
 * cached sleep and wake messages to the TCSes. The firmware then takes care
 * of triggering them when entering the deepest low power modes.
 *
 * Return: See cpu_pm_register_notifier()
 */
static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb,
				    unsigned long action, void *v)
{
	struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm);
	int ret = NOTIFY_OK;
	int cpus_in_pm;

	switch (action) {
	case CPU_PM_ENTER:
		cpus_in_pm = atomic_inc_return(&drv->cpus_in_pm);
		/*
		 * NOTE: comments for num_online_cpus() point out that it's
		 * only a snapshot so we need to be careful. It should be OK
		 * for us to use, though.  It's important for us not to miss
		 * if we're the last CPU going down so it would only be a
		 * problem if a CPU went offline right after we did the check
		 * AND that CPU was not idle AND that CPU was the last non-idle
		 * CPU. That can't happen. CPUs would have to come out of idle
		 * before the CPU could go offline.
		 */
		if (cpus_in_pm < num_online_cpus())
			return NOTIFY_OK;
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		atomic_dec(&drv->cpus_in_pm);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}

	/*
	 * It's likely we're on the last CPU. Grab the drv->lock and write
	 * out the sleep/wake commands to RPMH hardware. Grabbing the lock
	 * means that if we race with another CPU coming up we are still
	 * guaranteed to be safe. If another CPU came up just after we checked
	 * and has grabbed the lock or started an active transfer then we'll
	 * notice we're busy and abort. If another CPU comes up after we start
	 * flushing it will be blocked from starting an active transfer until
	 * we're done flushing. If another CPU starts an active transfer after
	 * we release the lock we're still OK because we're no longer the last
	 * CPU.
	 */
	if (spin_trylock(&drv->lock)) {
		if (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client))
			ret = NOTIFY_BAD;
		spin_unlock(&drv->lock);
	} else {
		/* Another CPU must be up */
		return NOTIFY_OK;
	}

	if (ret == NOTIFY_BAD) {
		/* Double-check if we're here because someone else is up */
		if (cpus_in_pm < num_online_cpus())
			ret = NOTIFY_OK;
		else
			/* We won't be called w/ CPU_PM_ENTER_FAILED */
			atomic_dec(&drv->cpus_in_pm);
	}

	return ret;
}
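
/*
 * Illustrative device-tree fragment for the properties that
 * rpmh_probe_tcs_config() parses below (the values are an assumed example,
 * not taken from a specific board):
 *
 *   qcom,tcs-offset = <0xd00>;
 *   qcom,tcs-config = <ACTIVE_TCS  2>, <SLEEP_TCS 3>,
 *                     <WAKE_TCS    3>, <CONTROL_TCS 1>;
 */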

static int rpmh_probe_tcs_config(struct platform_device *pdev,
				 struct rsc_drv *drv, void __iomem *base)
{
	struct tcs_type_config {
		u32 type;
		u32 n;
	} tcs_cfg[TCS_TYPE_NR] = { { 0 } };
	struct device_node *dn = pdev->dev.of_node;
	u32 config, max_tcs, ncpt, offset;
	int i, ret, n, st = 0;
	struct tcs_group *tcs;

	ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
	if (ret)
		return ret;
	drv->tcs_base = base + offset;

	config = readl_relaxed(base + DRV_PRNT_CHLD_CONFIG);

	max_tcs = config;
	max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
	max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);

	ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
	ncpt = ncpt >> DRV_NCPT_SHIFT;
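
	/*
	 * For example (a sketch of the bit math above): with
	 * DRV_NUM_TCS_SHIFT = 6, the TCS count for DRV n sits in config bits
	 * [6n + 5 : 6n], so drv->id == 2 reads bits [17:12]; ncpt always
	 * comes from bits [31:27].
	 */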

	n = of_property_count_u32_elems(dn, "qcom,tcs-config");
	if (n != 2 * TCS_TYPE_NR)
		return -EINVAL;

	for (i = 0; i < TCS_TYPE_NR; i++) {
		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
						 i * 2, &tcs_cfg[i].type);
		if (ret)
			return ret;
		if (tcs_cfg[i].type >= TCS_TYPE_NR)
			return -EINVAL;

		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
						 i * 2 + 1, &tcs_cfg[i].n);
		if (ret)
			return ret;
		if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
			return -EINVAL;
	}

	for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &drv->tcs[tcs_cfg[i].type];
		if (tcs->drv)
			return -EINVAL;
		tcs->drv = drv;
		tcs->type = tcs_cfg[i].type;
		tcs->num_tcs = tcs_cfg[i].n;
		tcs->ncpt = ncpt;

		if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
			continue;

		if (st + tcs->num_tcs > max_tcs ||
		    st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
			return -EINVAL;

		tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
		tcs->offset = st;
		st += tcs->num_tcs;
	}

	drv->num_tcs = st;

	return 0;
}

static int rpmh_rsc_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct rsc_drv *drv;
	struct resource *res;
	char drv_id[10] = {0};
	int ret, irq;
	u32 solver_config;
	void __iomem *base;

	/*
	 * Even though RPMh doesn't directly use cmd-db, all of its children
	 * do. To avoid adding this check to our children we'll do it now.
	 */
	ret = cmd_db_ready();
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Command DB not available (%d)\n",
									ret);
		return ret;
	}

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
	if (ret)
		return ret;

	drv->name = of_get_property(dn, "label", NULL);
	if (!drv->name)
		drv->name = dev_name(&pdev->dev);

	snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	ret = rpmh_probe_tcs_config(pdev, drv, base);
	if (ret)
		return ret;

	spin_lock_init(&drv->lock);
	bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);

	irq = platform_get_irq(pdev, drv->id);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
			       IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
			       drv->name, drv);
	if (ret)
		return ret;

	/*
	 * CPU PM notifications are not required for controllers that support
	 * 'HW solver' mode, where the controller can autonomously execute low
	 * power modes in order to power down.
	 */
	solver_config = readl_relaxed(base + DRV_SOLVER_CONFIG);
	solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT;
	solver_config = solver_config >> DRV_HW_SOLVER_SHIFT;
	if (!solver_config) {
		drv->rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback;
		cpu_pm_register_notifier(&drv->rsc_pm);
	}

	/* Enable the active TCS to send requests immediately */
	writel_relaxed(drv->tcs[ACTIVE_TCS].mask,
		       drv->tcs_base + RSC_DRV_IRQ_ENABLE);

	spin_lock_init(&drv->client.cache_lock);
	INIT_LIST_HEAD(&drv->client.cache);
	INIT_LIST_HEAD(&drv->client.batch_cache);

	dev_set_drvdata(&pdev->dev, drv);

	return devm_of_platform_populate(&pdev->dev);
}

static const struct of_device_id rpmh_drv_match[] = {
	{ .compatible = "qcom,rpmh-rsc", },
	{ }
};

static struct platform_driver rpmh_driver = {
	.probe = rpmh_rsc_probe,
	.driver = {
		  .name = "rpmh",
		  .of_match_table = rpmh_drv_match,
		  .suppress_bind_attrs = true,
	},
};

static int __init rpmh_driver_init(void)
{
	return platform_driver_register(&rpmh_driver);
}
arch_initcall(rpmh_driver_init);
v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME

#include <linux/atomic.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <clocksource/arm_arch_timer.h>
#include <soc/qcom/cmd-db.h>
#include <soc/qcom/tcs.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>

#include "rpmh-internal.h"

#define CREATE_TRACE_POINTS
#include "trace-rpmh.h"

#define RSC_DRV_ID			0

#define MAJOR_VER_MASK			0xFF
#define MAJOR_VER_SHIFT			16
#define MINOR_VER_MASK			0xFF
#define MINOR_VER_SHIFT			8

enum {
	RSC_DRV_TCS_OFFSET,
	RSC_DRV_CMD_OFFSET,
	DRV_SOLVER_CONFIG,
	DRV_PRNT_CHLD_CONFIG,
	RSC_DRV_IRQ_ENABLE,
	RSC_DRV_IRQ_STATUS,
	RSC_DRV_IRQ_CLEAR,
	RSC_DRV_CMD_WAIT_FOR_CMPL,
	RSC_DRV_CONTROL,
	RSC_DRV_STATUS,
	RSC_DRV_CMD_ENABLE,
	RSC_DRV_CMD_MSGID,
	RSC_DRV_CMD_ADDR,
	RSC_DRV_CMD_DATA,
	RSC_DRV_CMD_STATUS,
	RSC_DRV_CMD_RESP_DATA,
};

/* DRV HW Solver Configuration Information Register */
#define DRV_HW_SOLVER_MASK		1
#define DRV_HW_SOLVER_SHIFT		24

/* DRV TCS Configuration Information Register */
#define DRV_NUM_TCS_MASK		0x3F
#define DRV_NUM_TCS_SHIFT		6
#define DRV_NCPT_MASK			0x1F
#define DRV_NCPT_SHIFT			27

/* Offsets for CONTROL TCS Registers */
#define RSC_DRV_CTL_TCS_DATA_HI		0x38
#define RSC_DRV_CTL_TCS_DATA_HI_MASK	0xFFFFFF
#define RSC_DRV_CTL_TCS_DATA_HI_VALID	BIT(31)
#define RSC_DRV_CTL_TCS_DATA_LO		0x40
#define RSC_DRV_CTL_TCS_DATA_LO_MASK	0xFFFFFFFF
#define RSC_DRV_CTL_TCS_DATA_SIZE	32

#define TCS_AMC_MODE_ENABLE		BIT(16)
#define TCS_AMC_MODE_TRIGGER		BIT(24)

/* TCS CMD register bit mask */
#define CMD_MSGID_LEN			8
#define CMD_MSGID_RESP_REQ		BIT(8)
#define CMD_MSGID_WRITE			BIT(16)
#define CMD_STATUS_ISSUED		BIT(8)
#define CMD_STATUS_COMPL		BIT(16)

/*
 * Here's a high level overview of how all the registers in RPMH work
 * together:
 *
 * - The main rpmh-rsc address is the base of a register space that can
 *   be used to find overall configuration of the hardware
 *   (DRV_PRNT_CHLD_CONFIG). Also found within the rpmh-rsc register
 *   space are all the TCS blocks. The offset of the TCS blocks is
 *   specified in the device tree by "qcom,tcs-offset" and used to
 *   compute tcs_base.
 * - TCS blocks come one after another. Type, count, and order are
 *   specified by the device tree as "qcom,tcs-config".
 * - Each TCS block has some registers, then space for up to 16 commands.
 *   Note that though address space is reserved for 16 commands, fewer
 *   might be present. See ncpt (num cmds per TCS).
 *
 * Here's a picture:
 *
 *  +---------------------------------------------------+
 *  |RSC                                                |
 *  | ctrl                                              |
 *  |                                                   |
 *  | Drvs:                                             |
 *  | +-----------------------------------------------+ |
 *  | |DRV0                                           | |
 *  | | ctrl/config                                   | |
 *  | | IRQ                                           | |
 *  | |                                               | |
 *  | | TCSes:                                        | |
 *  | | +------------------------------------------+  | |
 *  | | |TCS0  |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15|  | |
 *  | | |      |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | +------------------------------------------+  | |
 *  | | +------------------------------------------+  | |
 *  | | |TCS1  |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15|  | |
 *  | | |      |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | +------------------------------------------+  | |
 *  | | +------------------------------------------+  | |
 *  | | |TCS2  |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15|  | |
 *  | | |      |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | +------------------------------------------+  | |
 *  | |                    ......                     | |
 *  | +-----------------------------------------------+ |
 *  | +-----------------------------------------------+ |
 *  | |DRV1                                           | |
 *  | | (same as DRV0)                                | |
 *  | +-----------------------------------------------+ |
 *  |                      ......                       |
 *  +---------------------------------------------------+
 */

#define USECS_TO_CYCLES(time_usecs)			\
	xloops_to_cycles((time_usecs) * 0x10C7UL)

static inline unsigned long xloops_to_cycles(u64 xloops)
{
	return (xloops * loops_per_jiffy * HZ) >> 32;
}
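
/*
 * Note (an explanatory aside, mirroring the kernel's __const_udelay()
 * convention): 0x10C7 is roughly 2^32 / 10^6, so
 * usecs * 0x10C7 * loops_per_jiffy * HZ >> 32 is approximately
 * usecs * (loops per second) / 10^6, i.e. the number of delay-loop
 * cycles in the given number of microseconds.
 */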

static u32 rpmh_rsc_reg_offset_ver_2_7[] = {
	[RSC_DRV_TCS_OFFSET]		= 672,
	[RSC_DRV_CMD_OFFSET]		= 20,
	[DRV_SOLVER_CONFIG]		= 0x04,
	[DRV_PRNT_CHLD_CONFIG]		= 0x0C,
	[RSC_DRV_IRQ_ENABLE]		= 0x00,
	[RSC_DRV_IRQ_STATUS]		= 0x04,
	[RSC_DRV_IRQ_CLEAR]		= 0x08,
	[RSC_DRV_CMD_WAIT_FOR_CMPL]	= 0x10,
	[RSC_DRV_CONTROL]		= 0x14,
	[RSC_DRV_STATUS]		= 0x18,
	[RSC_DRV_CMD_ENABLE]		= 0x1C,
	[RSC_DRV_CMD_MSGID]		= 0x30,
	[RSC_DRV_CMD_ADDR]		= 0x34,
	[RSC_DRV_CMD_DATA]		= 0x38,
	[RSC_DRV_CMD_STATUS]		= 0x3C,
	[RSC_DRV_CMD_RESP_DATA]		= 0x40,
};

static u32 rpmh_rsc_reg_offset_ver_3_0[] = {
	[RSC_DRV_TCS_OFFSET]		= 672,
	[RSC_DRV_CMD_OFFSET]		= 24,
	[DRV_SOLVER_CONFIG]		= 0x04,
	[DRV_PRNT_CHLD_CONFIG]		= 0x0C,
	[RSC_DRV_IRQ_ENABLE]		= 0x00,
	[RSC_DRV_IRQ_STATUS]		= 0x04,
	[RSC_DRV_IRQ_CLEAR]		= 0x08,
	[RSC_DRV_CMD_WAIT_FOR_CMPL]	= 0x20,
	[RSC_DRV_CONTROL]		= 0x24,
	[RSC_DRV_STATUS]		= 0x28,
	[RSC_DRV_CMD_ENABLE]		= 0x2C,
	[RSC_DRV_CMD_MSGID]		= 0x34,
	[RSC_DRV_CMD_ADDR]		= 0x38,
	[RSC_DRV_CMD_DATA]		= 0x3C,
	[RSC_DRV_CMD_STATUS]		= 0x40,
	[RSC_DRV_CMD_RESP_DATA]		= 0x44,
};
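
/*
 * Note: in code not shown in this excerpt, the probe is expected to pick
 * one of the two tables above based on the DRV major version read from the
 * hardware (see RSC_DRV_ID and the MAJOR_VER_MASK/SHIFT definitions near
 * the top), storing the result in drv->regs, which all of the accessors
 * below index.
 */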

static inline void __iomem *
tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id)
{
	return drv->tcs_base + drv->regs[RSC_DRV_TCS_OFFSET] * tcs_id + reg;
}

static inline void __iomem *
tcs_cmd_addr(const struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
{
	return tcs_reg_addr(drv, reg, tcs_id) + drv->regs[RSC_DRV_CMD_OFFSET] * cmd_id;
}

static u32 read_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
			int cmd_id)
{
	return readl_relaxed(tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}

static u32 read_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id)
{
	return readl_relaxed(tcs_reg_addr(drv, reg, tcs_id));
}

static void write_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
			  int cmd_id, u32 data)
{
	writel_relaxed(data, tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}

static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id,
			  u32 data)
{
	writel_relaxed(data, tcs_reg_addr(drv, reg, tcs_id));
}

static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id,
			       u32 data)
{
	int i;

	writel(data, tcs_reg_addr(drv, reg, tcs_id));

	/*
	 * Wait until we read back the same value.  Use a counter rather than
	 * ktime for timeout since this may be called after timekeeping stops.
	 */
	for (i = 0; i < USEC_PER_SEC; i++) {
		if (readl(tcs_reg_addr(drv, reg, tcs_id)) == data)
			return;
		udelay(1);
	}
	pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
	       data, tcs_id, reg);
}

/**
 * tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake).
 * @drv:  The RSC controller.
 * @type: SLEEP_TCS or WAKE_TCS
 *
 * This will clear the "slots" variable of the given tcs_group and also
 * tell the hardware to forget about all entries.
 *
 * The caller must ensure that no other RPMH actions are happening when this
 * function is called, since otherwise the device may immediately become
 * used again even before this function exits.
 */
static void tcs_invalidate(struct rsc_drv *drv, int type)
{
	int m;
	struct tcs_group *tcs = &drv->tcs[type];

	/* Caller ensures nobody else is running so no lock */
	if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS))
		return;

	for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++)
		write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], m, 0);

	bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
}

/**
 * rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes.
 * @drv: The RSC controller.
 *
 * The caller must ensure that no other RPMH actions are happening when this
 * function is called, since otherwise the device may immediately become
 * used again even before this function exits.
 */
void rpmh_rsc_invalidate(struct rsc_drv *drv)
{
	tcs_invalidate(drv, SLEEP_TCS);
	tcs_invalidate(drv, WAKE_TCS);
}

/**
 * get_tcs_for_msg() - Get the tcs_group used to send the given message.
 * @drv: The RSC controller.
 * @msg: The message we want to send.
 *
 * This is normally pretty straightforward except if we are trying to send
 * an ACTIVE_ONLY message but don't have any active_only TCSes.
 *
 * Return: A pointer to a tcs_group or an ERR_PTR.
 */
static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
					 const struct tcs_request *msg)
{
	int type;
	struct tcs_group *tcs;

	switch (msg->state) {
	case RPMH_ACTIVE_ONLY_STATE:
		type = ACTIVE_TCS;
		break;
	case RPMH_WAKE_ONLY_STATE:
		type = WAKE_TCS;
		break;
	case RPMH_SLEEP_STATE:
		type = SLEEP_TCS;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	/*
	 * If we are making an active request on an RSC that does not have a
	 * dedicated TCS for active state use, then re-purpose a wake TCS to
	 * send active votes. This is safe because we ensure any active-only
	 * transfers have finished before we use it (maybe by running from
	 * the last CPU in PM code).
	 */
	tcs = &drv->tcs[type];
	if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
		tcs = &drv->tcs[WAKE_TCS];

	return tcs;
}

/**
 * get_req_from_tcs() - Get a stashed request that was xfering on the given TCS.
 * @drv:    The RSC controller.
 * @tcs_id: The global ID of this TCS.
 *
 * For ACTIVE_ONLY transfers we want to call back into the client when the
 * transfer finishes. To do this we need the "request" that the client
 * originally provided us. This function grabs the request that we stashed
 * when we started the transfer.
 *
 * This only makes sense for ACTIVE_ONLY transfers since those are the only
 * ones we track sending (the only ones we enable interrupts for and the only
 * ones we call back to the client for).
 *
 * Return: The stashed request.
 */
static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
						  int tcs_id)
{
	struct tcs_group *tcs;
	int i;

	for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &drv->tcs[i];
		if (tcs->mask & BIT(tcs_id))
			return tcs->req[tcs_id - tcs->offset];
	}

	return NULL;
}

/**
 * __tcs_set_trigger() - Start xfer on a TCS or unset trigger on a borrowed TCS
 * @drv:     The controller.
 * @tcs_id:  The global ID of this TCS.
 * @trigger: If true then untrigger/retrigger. If false then just untrigger.
 *
 * In the normal case we only ever call with "trigger=true" to start a
 * transfer. That will un-trigger/disable the TCS from the last transfer
 * then trigger/enable for this transfer.
 *
 * If we borrowed a wake TCS for an active-only transfer we'll also call
 * this function with "trigger=false" to just do the un-trigger/disable
 * before using the TCS for wake purposes again.
 *
 * Note that the AP is only in charge of triggering active-only transfers.
 * The AP never triggers sleep/wake values using this function.
 */
static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
{
	u32 enable;
	u32 reg = drv->regs[RSC_DRV_CONTROL];

	/*
	 * HW req: Clear the DRV_CONTROL and enable TCS again
	 * While clearing ensure that the AMC mode trigger is cleared
	 * and then the mode enable is cleared.
	 */
	enable = read_tcs_reg(drv, reg, tcs_id);
	enable &= ~TCS_AMC_MODE_TRIGGER;
	write_tcs_reg_sync(drv, reg, tcs_id, enable);
	enable &= ~TCS_AMC_MODE_ENABLE;
	write_tcs_reg_sync(drv, reg, tcs_id, enable);

	if (trigger) {
		/* Enable the AMC mode on the TCS and then trigger the TCS */
		enable = TCS_AMC_MODE_ENABLE;
		write_tcs_reg_sync(drv, reg, tcs_id, enable);
		enable |= TCS_AMC_MODE_TRIGGER;
		write_tcs_reg(drv, reg, tcs_id, enable);
	}
}

/**
 * enable_tcs_irq() - Enable or disable interrupts on the given TCS.
 * @drv:     The controller.
 * @tcs_id:  The global ID of this TCS.
 * @enable:  If true then enable; if false then disable
 *
 * We only ever call this when we borrow a wake TCS for an active-only
 * transfer. For active-only TCSes interrupts are always left enabled.
 */
static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
{
	u32 data;
	u32 reg = drv->regs[RSC_DRV_IRQ_ENABLE];

	data = readl_relaxed(drv->tcs_base + reg);
	if (enable)
		data |= BIT(tcs_id);
	else
		data &= ~BIT(tcs_id);
	writel_relaxed(data, drv->tcs_base + reg);
}

/**
 * tcs_tx_done() - TX Done interrupt handler.
 * @irq: The IRQ number (ignored).
 * @p:   Pointer to "struct rsc_drv".
 *
 * Called for ACTIVE_ONLY transfers (those are the only ones we enable the
 * IRQ for) when a transfer is done.
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t tcs_tx_done(int irq, void *p)
{
	struct rsc_drv *drv = p;
	int i;
	unsigned long irq_status;
	const struct tcs_request *req;

	irq_status = readl_relaxed(drv->tcs_base + drv->regs[RSC_DRV_IRQ_STATUS]);

	for_each_set_bit(i, &irq_status, BITS_PER_TYPE(u32)) {
		req = get_req_from_tcs(drv, i);
		if (WARN_ON(!req))
			goto skip;

		trace_rpmh_tx_done(drv, i, req);
 455
 456		/*
 457		 * If wake tcs was re-purposed for sending active
 458		 * votes, clear AMC trigger & enable modes and
 459		 * disable interrupt for this TCS
 460		 */
 461		if (!drv->tcs[ACTIVE_TCS].num_tcs)
 462			__tcs_set_trigger(drv, i, false);
 463skip:
 464		/* Reclaim the TCS */
 465		write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i, 0);
 466		writel_relaxed(BIT(i), drv->tcs_base + drv->regs[RSC_DRV_IRQ_CLEAR]);
 
 467		spin_lock(&drv->lock);
 468		clear_bit(i, drv->tcs_in_use);
 469		/*
 470		 * Disable interrupt for WAKE TCS to avoid being
 471		 * spammed with interrupts coming when the solver
 472		 * sends its wake votes.
 473		 */
 474		if (!drv->tcs[ACTIVE_TCS].num_tcs)
 475			enable_tcs_irq(drv, i, false);
 476		spin_unlock(&drv->lock);
 477		wake_up(&drv->tcs_wait);
 478		if (req)
 479			rpmh_tx_done(req);
 480	}
 481
 482	return IRQ_HANDLED;
 483}
 484
 485/**
 486 * __tcs_buffer_write() - Write to TCS hardware from a request; don't trigger.
 487 * @drv:    The controller.
 488 * @tcs_id: The global ID of this TCS.
 489 * @cmd_id: The index within the TCS to start writing.
 490 * @msg:    The message we want to send, which will contain several addr/data
 491 *          pairs to program (but few enough that they all fit in one TCS).
 492 *
 493 * This is used for all types of transfers (active, sleep, and wake).
 494 */
 495static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
 496			       const struct tcs_request *msg)
 497{
 498	u32 msgid;
 499	u32 cmd_msgid = CMD_MSGID_LEN | CMD_MSGID_WRITE;
 500	u32 cmd_enable = 0;
 
 501	struct tcs_cmd *cmd;
 502	int i, j;
 503
 504	/* Convert all commands to RR when the request has wait_for_compl set */
 505	cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
 
 
 
 506
 507	for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
 508		cmd = &msg->cmds[i];
 509		cmd_enable |= BIT(j);
 
 510		msgid = cmd_msgid;
 511		/*
 512		 * Additionally, if the cmd->wait is set, make the command
 513		 * response reqd even if the overall request was fire-n-forget.
 514		 */
 515		msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;
 516
 517		write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_MSGID], tcs_id, j, msgid);
 518		write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], tcs_id, j, cmd->addr);
 519		write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_DATA], tcs_id, j, cmd->data);
 520		trace_rpmh_send_msg(drv, tcs_id, msg->state, j, msgid, cmd);
 521	}
 522
 523	cmd_enable |= read_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id);
 524	write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, cmd_enable);
 
 525}
 526
 527/**
 528 * check_for_req_inflight() - Look to see if conflicting cmds are in flight.
 529 * @drv: The controller.
 530 * @tcs: A pointer to the tcs_group used for ACTIVE_ONLY transfers.
 531 * @msg: The message we want to send, which will contain several addr/data
 532 *       pairs to program (but few enough that they all fit in one TCS).
 533 *
 534 * This will walk through the TCSes in the group and check if any of them
 535 * appear to be sending to addresses referenced in the message. If it finds
 536 * one it'll return -EBUSY.
 537 *
 538 * Only for use for active-only transfers.
 539 *
 540 * Must be called with the drv->lock held since that protects tcs_in_use.
 541 *
 542 * Return: 0 if nothing in flight or -EBUSY if we should try again later.
 543 *         The caller must re-enable interrupts between tries since that's
 544 *         the only way tcs_in_use will ever be updated and the only way
 545 *         RSC_DRV_CMD_ENABLE will ever be cleared.
 546 */
 547static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
 548				  const struct tcs_request *msg)
 549{
 550	unsigned long curr_enabled;
 551	u32 addr;
 552	int j, k;
 553	int i = tcs->offset;
 
 
 
 
 554
 555	for_each_set_bit_from(i, drv->tcs_in_use, tcs->offset + tcs->num_tcs) {
 556		curr_enabled = read_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i);
 557
 558		for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
 559			addr = read_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], i, j);
 560			for (k = 0; k < msg->num_cmds; k++) {
 561				if (cmd_db_match_resource_addr(msg->cmds[k].addr, addr))
 562					return -EBUSY;
 563			}
 564		}
 565	}
 566
 567	return 0;
 568}
 569
 570/**
 571 * find_free_tcs() - Find free tcs in the given tcs_group; only for active.
 572 * @tcs: A pointer to the active-only tcs_group (or the wake tcs_group if
 573 *       we borrowed it because there are zero active-only ones).
 574 *
 575 * Must be called with the drv->lock held since that protects tcs_in_use.
 576 *
 577 * Return: The first tcs that's free or -EBUSY if all in use.
 578 */
 579static int find_free_tcs(struct tcs_group *tcs)
 580{
 581	const struct rsc_drv *drv = tcs->drv;
 582	unsigned long i;
 583	unsigned long max = tcs->offset + tcs->num_tcs;
 584
 585	i = find_next_zero_bit(drv->tcs_in_use, max, tcs->offset);
 586	if (i >= max)
 587		return -EBUSY;
 588
 589	return i;
 590}
 591
 592/**
 593 * claim_tcs_for_req() - Claim a tcs in the given tcs_group; only for active.
 594 * @drv: The controller.
 595 * @tcs: The tcs_group used for ACTIVE_ONLY transfers.
 596 * @msg: The data to be sent.
 597 *
 598 * Claims a tcs in the given tcs_group while making sure that no existing cmd
 599 * is in flight that would conflict with the one in @msg.
 600 *
 601 * Context: Must be called with the drv->lock held since that protects
 602 * tcs_in_use.
 603 *
 604 * Return: The id of the claimed tcs or -EBUSY if a matching msg is in flight
 605 * or the tcs_group is full.
 606 */
 607static int claim_tcs_for_req(struct rsc_drv *drv, struct tcs_group *tcs,
 608			     const struct tcs_request *msg)
 609{
 610	int ret;
 611
 612	/*
 613	 * The h/w does not like if we send a request to the same address,
 614	 * when one is already in-flight or being processed.
 615	 */
 616	ret = check_for_req_inflight(drv, tcs, msg);
 617	if (ret)
 618		return ret;
 619
 620	return find_free_tcs(tcs);
 621}
 622
 623/**
 624 * rpmh_rsc_send_data() - Write / trigger active-only message.
 625 * @drv: The controller.
 626 * @msg: The data to be sent.
 627 *
 628 * NOTES:
 629 * - This is only used for "ACTIVE_ONLY" since the limitations of this
 630 *   function don't make sense for sleep/wake cases.
 631 * - To do the transfer, we will grab a whole TCS for ourselves--we don't
 632 *   try to share. If there are none available we'll wait indefinitely
 633 *   for a free one.
 634 * - This function will not wait for the commands to be finished, only for
 635 *   data to be programmed into the RPMh. See rpmh_tx_done() which will
 636 *   be called when the transfer is fully complete.
 637 * - This function must be called with interrupts enabled. If the hardware
 638 *   is busy doing someone else's transfer we need that transfer to fully
 639 *   finish so that we can have the hardware, and to fully finish it needs
 640 *   the interrupt handler to run. If the interrupt is set to run on the
 641 *   active CPU, this can never happen while interrupts are disabled.
 642 *
 643 * Return: 0 on success, -EINVAL on error.
 644 */
 645int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
 646{
 647	struct tcs_group *tcs;
 648	int tcs_id;
 649
 650	might_sleep();
 651
 652	tcs = get_tcs_for_msg(drv, msg);
 653	if (IS_ERR(tcs))
 654		return PTR_ERR(tcs);
 655
 656	spin_lock_irq(&drv->lock);
 657
 658	/* Wait forever for a free tcs. It better be there eventually! */
 659	wait_event_lock_irq(drv->tcs_wait,
 660			    (tcs_id = claim_tcs_for_req(drv, tcs, msg)) >= 0,
 661			    drv->lock);
 662
 663	tcs->req[tcs_id - tcs->offset] = msg;
 664	set_bit(tcs_id, drv->tcs_in_use);
 665	if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
 666		/*
 667		 * Clear previously programmed WAKE commands in selected
 668		 * repurposed TCS to avoid triggering them. tcs->slots will be
 669		 * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
 670		 */
 671		write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, 0);
 672		enable_tcs_irq(drv, tcs_id, true);
 673	}
 674	spin_unlock_irq(&drv->lock);
 675
 676	/*
 677	 * These two can be done after the lock is released because:
 678	 * - We marked "tcs_in_use" under lock.
 679	 * - Once "tcs_in_use" has been marked nobody else could be writing
 680	 *   to these registers until the interrupt goes off.
 681	 * - The interrupt can't go off until we trigger w/ the last line
 682	 *   of __tcs_set_trigger() below.
 683	 */
 684	__tcs_buffer_write(drv, tcs_id, 0, msg);
 685	__tcs_set_trigger(drv, tcs_id, true);
 686
 687	return 0;
 688}
 689
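    /*
     * Usage sketch (editor's illustration; in the kernel this path is
     * driven by rpmh.c on behalf of client drivers, and "foo" is a
     * hypothetical cmd-db resource name):
     *
     *	struct tcs_cmd cmd = {
     *		.addr = cmd_db_read_addr("foo"),
     *		.data = 0x1,
     *		.wait = false,
     *	};
     *	struct tcs_request req = {
     *		.state = RPMH_ACTIVE_ONLY_STATE,
     *		.wait_for_compl = true,
     *		.num_cmds = 1,
     *		.cmds = &cmd,
     *	};
     *
     *	ret = rpmh_rsc_send_data(drv, &req);
     */
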
 690/**
 691 * find_slots() - Find a place to write the given message.
 692 * @tcs:    The tcs group to search.
 693 * @msg:    The message we want to find room for.
 694 * @tcs_id: If we return 0 from the function, we return the global ID of the
 695 *          TCS to write to here.
 696 * @cmd_id: If we return 0 from the function, we return the index of
 697 *          the command array of the returned TCS where the client should
 698 *          start writing the message.
 699 *
 700 * Only for use on sleep/wake TCSes since those are the only ones we maintain
 701 * tcs->slots for.
 702 *
 703 * Return: -ENOMEM if there was no room, else 0.
 704 */
 705static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
 706		      int *tcs_id, int *cmd_id)
 707{
 708	int slot, offset;
 709	int i = 0;
 710
 711	/* Retry until the full payload fits within a single TCS */
 712	do {
 713		slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
 714						  i, msg->num_cmds, 0);
 715		if (slot >= tcs->num_tcs * tcs->ncpt)
 716			return -ENOMEM;
 717		i += tcs->ncpt;
 718	} while (slot + msg->num_cmds - 1 >= i);
 719
 720	bitmap_set(tcs->slots, slot, msg->num_cmds);
 721
 722	offset = slot / tcs->ncpt;
 723	*tcs_id = offset + tcs->offset;
 724	*cmd_id = slot % tcs->ncpt;
 725
 726	return 0;
 727}
 728
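    /*
     * Worked example (editor's sketch): with ncpt == 16, a 3-command
     * message that lands at slot == 35 occupies slots 35..37, entirely
     * inside the third TCS of the group:
     *
     *	offset  = 35 / 16 == 2
     *	*tcs_id = 2 + tcs->offset
     *	*cmd_id = 35 % 16 == 3
     *
     * The do/while above guarantees the payload never straddles a TCS
     * boundary: the loop only exits once slot + num_cmds - 1 falls below
     * the next multiple of ncpt.
     */
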
 729/**
 730 * rpmh_rsc_write_ctrl_data() - Write request to controller but don't trigger.
 731 * @drv: The controller.
 732 * @msg: The data to be written to the controller.
 733 *
 734 * This should only be called for sleep/wake state, never active-only
 735 * state.
 736 *
 737 * The caller must ensure that no other RPMH actions are happening and the
 738 * controller is idle when this function is called since it runs lockless.
 739 *
 740 * Return: 0 if no error; else -error.
 741 */
 742int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
 743{
 744	struct tcs_group *tcs;
 745	int tcs_id = 0, cmd_id = 0;
 746	int ret;
 747
 748	tcs = get_tcs_for_msg(drv, msg);
 749	if (IS_ERR(tcs))
 750		return PTR_ERR(tcs);
 751
 752	/* find the TCS id and the command in the TCS to write to */
 753	ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
 754	if (!ret)
 755		__tcs_buffer_write(drv, tcs_id, cmd_id, msg);
 756
 757	return ret;
 758}
 759
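    /*
     * Flow sketch (editor's note): rpmh_flush() walks the client's cached
     * sleep/wake entries and calls this once per message, roughly:
     *
     *	req.state = RPMH_SLEEP_STATE;
     *	ret = rpmh_rsc_write_ctrl_data(drv, &req);
     *
     * Nothing is triggered here; the firmware fires the programmed TCSes
     * on its own when the subsystem enters its low power state.
     */
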
 760/**
 761 * rpmh_rsc_ctrlr_is_busy() - Check if any of the AMCs are busy.
 762 * @drv: The controller
 763 *
 764 * Checks if any of the AMCs are busy in handling ACTIVE sets.
 765 * This is called from the last CPU powering down, before flushing the
 766 * SLEEP and WAKE sets. If any AMC is busy, the controller cannot enter
 767 * power collapse, so the last CPU's PM notification is denied.
 768 *
 769 * Context: Must be called with the drv->lock held.
 770 *
 771 * Return:
 772 * * False		- AMCs are idle
 773 * * True		- AMCs are busy
 774 */
 775static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
 776{
 777	unsigned long set;
 778	const struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS];
 779	unsigned long max;
 780
 781	/*
 782	 * If we made an active request on an RSC that has no dedicated TCS
 783	 * for active state use, then check the re-purposed wake TCSes
 784	 * instead, because the wake TCSes carried the active requests in
 785	 * this case.
 786	 */
 787	if (!tcs->num_tcs)
 788		tcs = &drv->tcs[WAKE_TCS];
 789
 790	max = tcs->offset + tcs->num_tcs;
 791	set = find_next_bit(drv->tcs_in_use, max, tcs->offset);
 792
 793	return set < max;
 794}
 795
 796/**
 797 * rpmh_rsc_write_next_wakeup() - Write next wakeup in CONTROL_TCS.
 798 * @drv: The controller
 799 *
 800 * Writes maximum wakeup cycles when called from suspend.
 801 * Writes earliest hrtimer wakeup when called from idle.
 802 */
 803void rpmh_rsc_write_next_wakeup(struct rsc_drv *drv)
 804{
 805	ktime_t now, wakeup;
 806	u64 wakeup_us, wakeup_cycles = ~0;
 807	u32 lo, hi;
 808
 809	if (!drv->tcs[CONTROL_TCS].num_tcs || !drv->genpd_nb.notifier_call)
 810		return;
 811
 812	/* Set highest time when system (timekeeping) is suspended */
 813	if (system_state == SYSTEM_SUSPEND)
 814		goto exit;
 815
 816	/* Find the earliest hrtimer wakeup from online cpus */
 817	wakeup = dev_pm_genpd_get_next_hrtimer(drv->dev);
 818
 819	/* Find the relative wakeup in kernel time scale */
 820	now = ktime_get();
 821	wakeup = ktime_sub(wakeup, now);
 822	wakeup_us = ktime_to_us(wakeup);
 823
 824	/* Convert the wakeup to arch timer scale */
 825	wakeup_cycles = USECS_TO_CYCLES(wakeup_us);
 826	wakeup_cycles += arch_timer_read_counter();
 827
 828exit:
 829	lo = wakeup_cycles & RSC_DRV_CTL_TCS_DATA_LO_MASK;
 830	hi = wakeup_cycles >> RSC_DRV_CTL_TCS_DATA_SIZE;
 831	hi &= RSC_DRV_CTL_TCS_DATA_HI_MASK;
 832	hi |= RSC_DRV_CTL_TCS_DATA_HI_VALID;
 833
 834	writel_relaxed(lo, drv->base + RSC_DRV_CTL_TCS_DATA_LO);
 835	writel_relaxed(hi, drv->base + RSC_DRV_CTL_TCS_DATA_HI);
 836}
 837
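    /*
     * Worked example (editor's sketch, assuming a 32-bit LO field, i.e.
     * RSC_DRV_CTL_TCS_DATA_SIZE == 32): a wakeup of 0x123456789 timer
     * cycles splits as:
     *
     *	lo = 0x23456789
     *	hi = 0x1 | RSC_DRV_CTL_TCS_DATA_HI_VALID
     *
     * When called during system suspend, wakeup_cycles stays at ~0 so the
     * furthest possible wakeup gets programmed.
     */
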
 838/**
 839 * rpmh_rsc_cpu_pm_callback() - Check if any of the AMCs are busy.
 840 * @nfb:    Pointer to the notifier block in struct rsc_drv.
 841 * @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT.
 842 * @v:      Unused
 843 *
 844 * This function is given to cpu_pm_register_notifier so we can be informed
 845 * about when CPUs go down. When all CPUs go down we know no more active
 846 * transfers will be started so we write sleep/wake sets. This function gets
 847 * called from cpuidle code paths and also at system suspend time.
 848 *
 849 * If it is the last CPU going down and the AMCs are not busy, this writes
 850 * the cached sleep and wake messages to the TCSes. The firmware then takes
 851 * care of triggering them when entering the deepest low power modes.
 852 *
 853 * Return: See cpu_pm_register_notifier()
 854 */
 855static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb,
 856				    unsigned long action, void *v)
 857{
 858	struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm);
 859	int ret = NOTIFY_OK;
 860	int cpus_in_pm;
 861
 862	switch (action) {
 863	case CPU_PM_ENTER:
 864		cpus_in_pm = atomic_inc_return(&drv->cpus_in_pm);
 865		/*
 866		 * NOTE: comments for num_online_cpus() point out that it's
 867		 * only a snapshot so we need to be careful. It should be OK
 868		 * for us to use, though.  It's important for us not to miss
 869		 * if we're the last CPU going down so it would only be a
 870		 * problem if a CPU went offline right after we did the check
 871		 * AND that CPU was not idle AND that CPU was the last non-idle
 872		 * CPU. That can't happen. CPUs would have to come out of idle
 873		 * before the CPU could go offline.
 874		 */
 875		if (cpus_in_pm < num_online_cpus())
 876			return NOTIFY_OK;
 877		break;
 878	case CPU_PM_ENTER_FAILED:
 879	case CPU_PM_EXIT:
 880		atomic_dec(&drv->cpus_in_pm);
 881		return NOTIFY_OK;
 882	default:
 883		return NOTIFY_DONE;
 884	}
 885
 886	/*
 887	 * It's likely we're on the last CPU. Grab the drv->lock and write
 888	 * out the sleep/wake commands to RPMH hardware. Grabbing the lock
 889	 * means that if we race with another CPU coming up we are still
 890	 * guaranteed to be safe. If another CPU came up just after we checked
 891	 * and has grabbed the lock or started an active transfer then we'll
 892	 * notice we're busy and abort. If another CPU comes up after we start
 893	 * flushing it will be blocked from starting an active transfer until
 894	 * we're done flushing. If another CPU starts an active transfer after
 895	 * we release the lock we're still OK because we're no longer the last
 896	 * CPU.
 897	 */
 898	if (spin_trylock(&drv->lock)) {
 899		if (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client))
 900			ret = NOTIFY_BAD;
 901		spin_unlock(&drv->lock);
 902	} else {
 903		/* Another CPU must be up */
 904		return NOTIFY_OK;
 905	}
 906
 907	if (ret == NOTIFY_BAD) {
 908		/* Double-check if we're here because someone else is up */
 909		if (cpus_in_pm < num_online_cpus())
 910			ret = NOTIFY_OK;
 911		else
 912			/* We won't be called w/ CPU_PM_ENTER_FAILED */
 913			atomic_dec(&drv->cpus_in_pm);
 914	}
 915
 916	return ret;
 917}
 918
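    /*
     * Counting example (editor's note): on a 4-CPU system the first three
     * CPU_PM_ENTER calls see cpus_in_pm < num_online_cpus() and return
     * early; only the fourth caller attempts the sleep/wake flush. On a
     * failed flush (NOTIFY_BAD) the callback decrements its own increment,
     * because cpu_pm will not deliver CPU_PM_ENTER_FAILED to the notifier
     * that vetoed the transition.
     */
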
 919/**
 920 * rpmh_rsc_pd_callback() - Check if any of the AMCs are busy.
 921 * @nfb:    Pointer to the genpd notifier block in struct rsc_drv.
 922 * @action: GENPD_NOTIFY_PRE_OFF, GENPD_NOTIFY_OFF, GENPD_NOTIFY_PRE_ON or GENPD_NOTIFY_ON.
 923 * @v:      Unused
 924 *
 925 * This function is given to dev_pm_genpd_add_notifier() so we can be informed
 926 * about when the cluster power domain is going down. When the cluster goes
 927 * down we know no more active transfers will be started, so we write the
 928 * sleep/wake sets. This function gets called from cpuidle code paths and also at system suspend time.
 929 *
 930 * If the AMCs are not busy, this writes the cached sleep and wake messages
 931 * to the TCSes. The firmware then takes care of triggering them when entering the deepest low power modes.
 932 *
 933 * Return:
 934 * * NOTIFY_OK          - success
 935 * * NOTIFY_BAD         - failure
 936 */
 937static int rpmh_rsc_pd_callback(struct notifier_block *nfb,
 938				unsigned long action, void *v)
 939{
 940	struct rsc_drv *drv = container_of(nfb, struct rsc_drv, genpd_nb);
 941
 942	/* We don't need to lock as genpd on/off are serialized */
 943	if ((action == GENPD_NOTIFY_PRE_OFF) &&
 944	    (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client)))
 945		return NOTIFY_BAD;
 946
 947	return NOTIFY_OK;
 948}
 949
 950static int rpmh_rsc_pd_attach(struct rsc_drv *drv, struct device *dev)
 951{
 952	int ret;
 953
 954	pm_runtime_enable(dev);
 955	drv->genpd_nb.notifier_call = rpmh_rsc_pd_callback;
 956	ret = dev_pm_genpd_add_notifier(dev, &drv->genpd_nb);
 957	if (ret)
 958		pm_runtime_disable(dev);
 959
 960	return ret;
 961}
 962
 963static int rpmh_probe_tcs_config(struct platform_device *pdev, struct rsc_drv *drv)
 964{
 965	struct tcs_type_config {
 966		u32 type;
 967		u32 n;
 968	} tcs_cfg[TCS_TYPE_NR] = { { 0 } };
 969	struct device_node *dn = pdev->dev.of_node;
 970	u32 config, max_tcs, ncpt, offset;
 971	int i, ret, n, st = 0;
 972	struct tcs_group *tcs;
 973
 974	ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
 975	if (ret)
 976		return ret;
 977	drv->tcs_base = drv->base + offset;
 978
 979	config = readl_relaxed(drv->base + drv->regs[DRV_PRNT_CHLD_CONFIG]);
 980
 981	max_tcs = config;
 982	max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
 983	max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);
 984
 985	ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
 986	ncpt = ncpt >> DRV_NCPT_SHIFT;
 987
 988	n = of_property_count_u32_elems(dn, "qcom,tcs-config");
 989	if (n != 2 * TCS_TYPE_NR)
 990		return -EINVAL;
 991
 992	for (i = 0; i < TCS_TYPE_NR; i++) {
 993		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
 994						 i * 2, &tcs_cfg[i].type);
 995		if (ret)
 996			return ret;
 997		if (tcs_cfg[i].type >= TCS_TYPE_NR)
 998			return -EINVAL;
 999
1000		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
1001						 i * 2 + 1, &tcs_cfg[i].n);
1002		if (ret)
1003			return ret;
1004		if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
1005			return -EINVAL;
1006	}
1007
1008	for (i = 0; i < TCS_TYPE_NR; i++) {
1009		tcs = &drv->tcs[tcs_cfg[i].type];
1010		if (tcs->drv)
1011			return -EINVAL;
1012		tcs->drv = drv;
1013		tcs->type = tcs_cfg[i].type;
1014		tcs->num_tcs = tcs_cfg[i].n;
1015		tcs->ncpt = ncpt;
1016
1017		if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
1018			continue;
1019
1020		if (st + tcs->num_tcs > max_tcs ||
1021		    st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
1022			return -EINVAL;
1023
1024		tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
1025		tcs->offset = st;
1026		st += tcs->num_tcs;
1027	}
1028
1029	drv->num_tcs = st;
1030
1031	return 0;
1032}
1033
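    /*
     * Decode example (editor's sketch): DRV_PRNT_CHLD_CONFIG packs one
     * 6-bit TCS count per DRV, so for drv->id == 2 the field sits at bit
     * 12 (DRV_NUM_TCS_SHIFT * 2). Written shift-then-mask, the reads above
     * are equivalent to:
     *
     *	max_tcs = (config >> (DRV_NUM_TCS_SHIFT * drv->id)) & DRV_NUM_TCS_MASK;
     *	ncpt    = (config >> DRV_NCPT_SHIFT) & DRV_NCPT_MASK;
     */
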
1034static int rpmh_rsc_probe(struct platform_device *pdev)
1035{
1036	struct device_node *dn = pdev->dev.of_node;
1037	struct rsc_drv *drv;
1038	char drv_id[10] = {0};
1039	int ret, irq;
1040	u32 solver_config;
1041	u32 rsc_id;
1042
1043	/*
1044	 * Even though RPMh doesn't directly use cmd-db, all of its children
1045	 * do. To avoid adding this check to our children we'll do it now.
1046	 */
1047	ret = cmd_db_ready();
1048	if (ret)
1049		return dev_err_probe(&pdev->dev, ret,
1050				     "Command DB not available\n");
1051
1052	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
1053	if (!drv)
1054		return -ENOMEM;
1055
1056	ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
1057	if (ret)
1058		return ret;
1059
1060	drv->name = of_get_property(dn, "label", NULL);
1061	if (!drv->name)
1062		drv->name = dev_name(&pdev->dev);
1063
1064	snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
1065	drv->base = devm_platform_ioremap_resource_byname(pdev, drv_id);
1066	if (IS_ERR(drv->base))
1067		return PTR_ERR(drv->base);
1068
1069	rsc_id = readl_relaxed(drv->base + RSC_DRV_ID);
1070	drv->ver.major = rsc_id & (MAJOR_VER_MASK << MAJOR_VER_SHIFT);
1071	drv->ver.major >>= MAJOR_VER_SHIFT;
1072	drv->ver.minor = rsc_id & (MINOR_VER_MASK << MINOR_VER_SHIFT);
1073	drv->ver.minor >>= MINOR_VER_SHIFT;
1074
1075	if (drv->ver.major == 3)
1076		drv->regs = rpmh_rsc_reg_offset_ver_3_0;
1077	else
1078		drv->regs = rpmh_rsc_reg_offset_ver_2_7;
1079
1080	ret = rpmh_probe_tcs_config(pdev, drv);
1081	if (ret)
1082		return ret;
1083
1084	spin_lock_init(&drv->lock);
1085	init_waitqueue_head(&drv->tcs_wait);
1086	bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);
1087
1088	irq = platform_get_irq(pdev, drv->id);
1089	if (irq < 0)
1090		return irq;
1091
1092	ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
1093			       IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
1094			       drv->name, drv);
1095	if (ret)
1096		return ret;
1097
1098	/*
1099	 * CPU PM/genpd notifications are not required for controllers that
1100	 * support 'HW solver' mode, where the hardware autonomously executes
1101	 * low power modes to power down.
1102	 */
1103	solver_config = readl_relaxed(drv->base + drv->regs[DRV_SOLVER_CONFIG]);
1104	solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT;
1105	solver_config = solver_config >> DRV_HW_SOLVER_SHIFT;
1106	if (!solver_config) {
1107		if (pdev->dev.pm_domain) {
1108			ret = rpmh_rsc_pd_attach(drv, &pdev->dev);
1109			if (ret)
1110				return ret;
1111		} else {
1112			drv->rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback;
1113			cpu_pm_register_notifier(&drv->rsc_pm);
1114		}
1115	}
1116
1117	/* Enable the active TCS to send requests immediately */
1118	writel_relaxed(drv->tcs[ACTIVE_TCS].mask,
1119		       drv->tcs_base + drv->regs[RSC_DRV_IRQ_ENABLE]);
1120
1121	spin_lock_init(&drv->client.cache_lock);
1122	INIT_LIST_HEAD(&drv->client.cache);
1123	INIT_LIST_HEAD(&drv->client.batch_cache);
1124
1125	dev_set_drvdata(&pdev->dev, drv);
1126	drv->dev = &pdev->dev;
1127
1128	ret = devm_of_platform_populate(&pdev->dev);
1129	if (ret && pdev->dev.pm_domain) {
1130		dev_pm_genpd_remove_notifier(&pdev->dev);
1131		pm_runtime_disable(&pdev->dev);
1132	}
1133
1134	return ret;
1135}
1136
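    /*
     * Binding sketch (editor's note, abbreviated; property values are
     * illustrative, not authoritative. See the upstream qcom,rpmh-rsc
     * binding for the real contract):
     *
     *	apps_rsc: rsc@18200000 {
     *		compatible = "qcom,rpmh-rsc";
     *		reg = <0x18200000 0x10000>;
     *		reg-names = "drv-2";
     *		qcom,drv-id = <2>;
     *		qcom,tcs-offset = <0xd00>;
     *		qcom,tcs-config = <ACTIVE_TCS 2>, <SLEEP_TCS 3>,
     *				  <WAKE_TCS 3>, <CONTROL_TCS 1>;
     *	};
     */
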
1137static const struct of_device_id rpmh_drv_match[] = {
1138	{ .compatible = "qcom,rpmh-rsc", },
1139	{ }
1140};
1141MODULE_DEVICE_TABLE(of, rpmh_drv_match);
1142
1143static struct platform_driver rpmh_driver = {
1144	.probe = rpmh_rsc_probe,
1145	.driver = {
1146		  .name = "rpmh",
1147		  .of_match_table = rpmh_drv_match,
1148		  .suppress_bind_attrs = true,
1149	},
1150};
1151
1152static int __init rpmh_driver_init(void)
1153{
1154	return platform_driver_register(&rpmh_driver);
1155}
1156core_initcall(rpmh_driver_init);
1157
1158MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Driver");
1159MODULE_LICENSE("GPL v2");