v4.6
   1/*
   2 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
   3 * Author: Marc Zyngier <marc.zyngier@arm.com>
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License version 2 as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  16 */
  17
  18#include <linux/bitmap.h>
  19#include <linux/cpu.h>
  20#include <linux/delay.h>
  21#include <linux/interrupt.h>
  22#include <linux/log2.h>
  23#include <linux/mm.h>
  24#include <linux/msi.h>
  25#include <linux/of.h>
  26#include <linux/of_address.h>
  27#include <linux/of_irq.h>
  28#include <linux/of_pci.h>
  29#include <linux/of_platform.h>
  30#include <linux/percpu.h>
  31#include <linux/slab.h>
  32
  33#include <linux/irqchip.h>
  34#include <linux/irqchip/arm-gic-v3.h>
  35
  36#include <asm/cacheflush.h>
  37#include <asm/cputype.h>
  38#include <asm/exception.h>
  39
  40#include "irq-gic-common.h"
  41
  42#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
  43#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
  44
  45#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
  46
  47/*
  48 * Collection structure - just an ID, and a redistributor address to
  49 * ping. We use one per CPU as a bag of interrupts assigned to this
  50 * CPU.
  51 */
  52struct its_collection {
  53	u64			target_address;
  54	u16			col_id;
  55};
  56
  57/*
  58 * The ITS structure - contains most of the infrastructure, with the
  59 * top-level MSI domain, the command queue, the collections, and the
  60 * list of devices writing to it.
  61 */
  62struct its_node {
  63	raw_spinlock_t		lock;
  64	struct list_head	entry;
  65	void __iomem		*base;
  66	unsigned long		phys_base;
  67	struct its_cmd_block	*cmd_base;
  68	struct its_cmd_block	*cmd_write;
  69	struct {
  70		void		*base;
  71		u32		order;
  72	} tables[GITS_BASER_NR_REGS];
  73	struct its_collection	*collections;
  74	struct list_head	its_device_list;
  75	u64			flags;
  76	u32			ite_size;
  77};
  78
  79#define ITS_ITT_ALIGN		SZ_256
  80
  81/* Convert page order to size in bytes */
  82#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))
  83
  84struct event_lpi_map {
  85	unsigned long		*lpi_map;
  86	u16			*col_map;
  87	irq_hw_number_t		lpi_base;
  88	int			nr_lpis;
  89};
  90
  91/*
  92 * The ITS view of a device - belongs to an ITS, a collection, owns an
  93 * interrupt translation table, and a list of interrupts.
  94 */
  95struct its_device {
  96	struct list_head	entry;
  97	struct its_node		*its;
  98	struct event_lpi_map	event_map;
  99	void			*itt;
 100	u32			nr_ites;
 101	u32			device_id;
 102};
 103
 104static LIST_HEAD(its_nodes);
 105static DEFINE_SPINLOCK(its_lock);
 106static struct rdists *gic_rdists;
 107
 108#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
 109#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
 110
 111static struct its_collection *dev_event_to_col(struct its_device *its_dev,
 112					       u32 event)
 113{
 114	struct its_node *its = its_dev->its;
 115
 116	return its->collections + its_dev->event_map.col_map[event];
 117}
 118
 119/*
 120 * ITS command descriptors - parameters to be encoded in a command
 121 * block.
 122 */
 123struct its_cmd_desc {
 124	union {
 125		struct {
 126			struct its_device *dev;
 127			u32 event_id;
 128		} its_inv_cmd;
 129
 130		struct {
 131			struct its_device *dev;
 132			u32 event_id;
 133		} its_int_cmd;
 134
 135		struct {
 136			struct its_device *dev;
 137			int valid;
 138		} its_mapd_cmd;
 139
 140		struct {
 141			struct its_collection *col;
 142			int valid;
 143		} its_mapc_cmd;
 144
 145		struct {
 146			struct its_device *dev;
 147			u32 phys_id;
 148			u32 event_id;
 149		} its_mapvi_cmd;
 150
 151		struct {
 152			struct its_device *dev;
 153			struct its_collection *col;
 154			u32 event_id;
 155		} its_movi_cmd;
 156
 157		struct {
 158			struct its_device *dev;
 159			u32 event_id;
 160		} its_discard_cmd;
 161
 162		struct {
 163			struct its_collection *col;
 164		} its_invall_cmd;
 165	};
 166};
 167
 168/*
 169 * The ITS command block, which is what the ITS actually parses.
 170 */
 171struct its_cmd_block {
 172	u64	raw_cmd[4];
 173};
 174
 175#define ITS_CMD_QUEUE_SZ		SZ_64K
 176#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
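/*
 * Each command block is four 64bit words (32 bytes), so a 64kB queue
 * holds 2048 commands.
 */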
 177
 178typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
 179						    struct its_cmd_desc *);
 180
 181static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
 182{
 183	cmd->raw_cmd[0] &= ~0xffUL;
 184	cmd->raw_cmd[0] |= cmd_nr;
 185}
 186
 187static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
 188{
 189	cmd->raw_cmd[0] &= BIT_ULL(32) - 1;
 190	cmd->raw_cmd[0] |= ((u64)devid) << 32;
 191}
 192
 193static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
 194{
 195	cmd->raw_cmd[1] &= ~0xffffffffUL;
 196	cmd->raw_cmd[1] |= id;
 197}
 198
 199static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
 200{
 201	cmd->raw_cmd[1] &= 0xffffffffUL;
 202	cmd->raw_cmd[1] |= ((u64)phys_id) << 32;
 203}
 204
 205static void its_encode_size(struct its_cmd_block *cmd, u8 size)
 206{
 207	cmd->raw_cmd[1] &= ~0x1fUL;
 208	cmd->raw_cmd[1] |= size & 0x1f;
 209}
 210
 211static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
 212{
 213	cmd->raw_cmd[2] &= ~0xffffffffffffUL;
 214	cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL;
 215}
 216
 217static void its_encode_valid(struct its_cmd_block *cmd, int valid)
 218{
 219	cmd->raw_cmd[2] &= ~(1UL << 63);
 220	cmd->raw_cmd[2] |= ((u64)!!valid) << 63;
 221}
 222
 223static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
 224{
 225	cmd->raw_cmd[2] &= ~(0xffffffffUL << 16);
 226	cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16));
 227}
 228
 229static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
 230{
 231	cmd->raw_cmd[2] &= ~0xffffUL;
 232	cmd->raw_cmd[2] |= col;
 233}
 234
 235static inline void its_fixup_cmd(struct its_cmd_block *cmd)
 236{
 237	/* Let's fixup BE commands */
 238	cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
 239	cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
 240	cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
 241	cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
 242}
 243
 244static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
 245						 struct its_cmd_desc *desc)
 246{
 247	unsigned long itt_addr;
 248	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
 249
 250	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
 251	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
 252
 253	its_encode_cmd(cmd, GITS_CMD_MAPD);
 254	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
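	/* The MAPD Size field is the number of EventID bits, minus one */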
 255	its_encode_size(cmd, size - 1);
 256	its_encode_itt(cmd, itt_addr);
 257	its_encode_valid(cmd, desc->its_mapd_cmd.valid);
 258
 259	its_fixup_cmd(cmd);
 260
 261	return NULL;
 262}
 263
 264static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
 265						 struct its_cmd_desc *desc)
 266{
 267	its_encode_cmd(cmd, GITS_CMD_MAPC);
 268	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
 269	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
 270	its_encode_valid(cmd, desc->its_mapc_cmd.valid);
 271
 272	its_fixup_cmd(cmd);
 273
 274	return desc->its_mapc_cmd.col;
 275}
 276
 277static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd,
 278						  struct its_cmd_desc *desc)
 279{
 280	struct its_collection *col;
 281
 282	col = dev_event_to_col(desc->its_mapvi_cmd.dev,
 283			       desc->its_mapvi_cmd.event_id);
 284
 285	its_encode_cmd(cmd, GITS_CMD_MAPVI);
 286	its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id);
 287	its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id);
 288	its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id);
 289	its_encode_collection(cmd, col->col_id);
 290
 291	its_fixup_cmd(cmd);
 292
 293	return col;
 294}
 295
 296static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
 297						 struct its_cmd_desc *desc)
 298{
 299	struct its_collection *col;
 300
 301	col = dev_event_to_col(desc->its_movi_cmd.dev,
 302			       desc->its_movi_cmd.event_id);
 303
 304	its_encode_cmd(cmd, GITS_CMD_MOVI);
 305	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
 306	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
 307	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
 308
 309	its_fixup_cmd(cmd);
 310
 311	return col;
 312}
 313
 314static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
 315						    struct its_cmd_desc *desc)
 316{
 317	struct its_collection *col;
 318
 319	col = dev_event_to_col(desc->its_discard_cmd.dev,
 320			       desc->its_discard_cmd.event_id);
 321
 322	its_encode_cmd(cmd, GITS_CMD_DISCARD);
 323	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
 324	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
 325
 326	its_fixup_cmd(cmd);
 327
 328	return col;
 329}
 330
 331static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
 332						struct its_cmd_desc *desc)
 333{
 334	struct its_collection *col;
 335
 336	col = dev_event_to_col(desc->its_inv_cmd.dev,
 337			       desc->its_inv_cmd.event_id);
 338
 339	its_encode_cmd(cmd, GITS_CMD_INV);
 340	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
 341	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
 342
 343	its_fixup_cmd(cmd);
 344
 345	return col;
 346}
 347
 348static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
 349						   struct its_cmd_desc *desc)
 350{
 351	its_encode_cmd(cmd, GITS_CMD_INVALL);
 352	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
 353
 354	its_fixup_cmd(cmd);
 355
 356	return NULL;
 357}
 358
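/*
 * GITS_CREADR and GITS_CWRITER hold byte offsets into the command
 * queue, so command pointers are converted to and from byte offsets.
 */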
 359static u64 its_cmd_ptr_to_offset(struct its_node *its,
 360				 struct its_cmd_block *ptr)
 361{
 362	return (ptr - its->cmd_base) * sizeof(*ptr);
 363}
 364
 365static int its_queue_full(struct its_node *its)
 366{
 367	int widx;
 368	int ridx;
 369
 370	widx = its->cmd_write - its->cmd_base;
 371	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
 372
 373	/* This is incredibly unlikely to happen, unless the ITS locks up. */
 374	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
 375		return 1;
 376
 377	return 0;
 378}
 379
 380static struct its_cmd_block *its_allocate_entry(struct its_node *its)
 381{
 382	struct its_cmd_block *cmd;
 383	u32 count = 1000000;	/* 1s! */
 384
 385	while (its_queue_full(its)) {
 386		count--;
 387		if (!count) {
 388			pr_err_ratelimited("ITS queue not draining\n");
 389			return NULL;
 390		}
 391		cpu_relax();
 392		udelay(1);
 393	}
 394
 395	cmd = its->cmd_write++;
 396
 397	/* Handle queue wrapping */
 398	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
 399		its->cmd_write = its->cmd_base;
 400
 401	return cmd;
 402}
 403
 404static struct its_cmd_block *its_post_commands(struct its_node *its)
 405{
 406	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
 407
 408	writel_relaxed(wr, its->base + GITS_CWRITER);
 409
 410	return its->cmd_write;
 411}
 412
 413static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
 414{
 415	/*
 416	 * Make sure the commands written to memory are observable by
 417	 * the ITS.
 418	 */
 419	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
 420		__flush_dcache_area(cmd, sizeof(*cmd));
 421	else
 422		dsb(ishst);
 423}
 424
 425static void its_wait_for_range_completion(struct its_node *its,
 426					  struct its_cmd_block *from,
 427					  struct its_cmd_block *to)
 428{
 429	u64 rd_idx, from_idx, to_idx;
 430	u32 count = 1000000;	/* 1s! */
 431
 432	from_idx = its_cmd_ptr_to_offset(its, from);
 433	to_idx = its_cmd_ptr_to_offset(its, to);
 434
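	/*
	 * The commands are considered consumed once the ITS read pointer
	 * reaches 'to', or has wrapped around below 'from'.
	 */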
 435	while (1) {
 436		rd_idx = readl_relaxed(its->base + GITS_CREADR);
 437		if (rd_idx >= to_idx || rd_idx < from_idx)
 438			break;
 439
 440		count--;
 441		if (!count) {
 442			pr_err_ratelimited("ITS queue timeout\n");
 443			return;
 444		}
 445		cpu_relax();
 446		udelay(1);
 447	}
 448}
 449
 450static void its_send_single_command(struct its_node *its,
 451				    its_cmd_builder_t builder,
 452				    struct its_cmd_desc *desc)
 453{
 454	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
 455	struct its_collection *sync_col;
 456	unsigned long flags;
 457
 458	raw_spin_lock_irqsave(&its->lock, flags);
 459
 460	cmd = its_allocate_entry(its);
 461	if (!cmd) {		/* We're soooooo screwed... */
 462		pr_err_ratelimited("ITS can't allocate, dropping command\n");
 463		raw_spin_unlock_irqrestore(&its->lock, flags);
 464		return;
 465	}
 466	sync_col = builder(cmd, desc);
 467	its_flush_cmd(its, cmd);
 468
 469	if (sync_col) {
 470		sync_cmd = its_allocate_entry(its);
 471		if (!sync_cmd) {
 472			pr_err_ratelimited("ITS can't SYNC, skipping\n");
 473			goto post;
 474		}
 475		its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
 476		its_encode_target(sync_cmd, sync_col->target_address);
 477		its_fixup_cmd(sync_cmd);
 478		its_flush_cmd(its, sync_cmd);
 479	}
 480
 481post:
 482	next_cmd = its_post_commands(its);
 483	raw_spin_unlock_irqrestore(&its->lock, flags);
 484
 485	its_wait_for_range_completion(its, cmd, next_cmd);
 486}
 487
 488static void its_send_inv(struct its_device *dev, u32 event_id)
 489{
 490	struct its_cmd_desc desc;
 491
 492	desc.its_inv_cmd.dev = dev;
 493	desc.its_inv_cmd.event_id = event_id;
 494
 495	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
 496}
 497
 498static void its_send_mapd(struct its_device *dev, int valid)
 499{
 500	struct its_cmd_desc desc;
 501
 502	desc.its_mapd_cmd.dev = dev;
 503	desc.its_mapd_cmd.valid = !!valid;
 504
 505	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
 506}
 507
 508static void its_send_mapc(struct its_node *its, struct its_collection *col,
 509			  int valid)
 510{
 511	struct its_cmd_desc desc;
 512
 513	desc.its_mapc_cmd.col = col;
 514	desc.its_mapc_cmd.valid = !!valid;
 515
 516	its_send_single_command(its, its_build_mapc_cmd, &desc);
 517}
 518
 519static void its_send_mapvi(struct its_device *dev, u32 irq_id, u32 id)
 520{
 521	struct its_cmd_desc desc;
 522
 523	desc.its_mapvi_cmd.dev = dev;
 524	desc.its_mapvi_cmd.phys_id = irq_id;
 525	desc.its_mapvi_cmd.event_id = id;
 526
 527	its_send_single_command(dev->its, its_build_mapvi_cmd, &desc);
 528}
 529
 530static void its_send_movi(struct its_device *dev,
 531			  struct its_collection *col, u32 id)
 532{
 533	struct its_cmd_desc desc;
 534
 535	desc.its_movi_cmd.dev = dev;
 536	desc.its_movi_cmd.col = col;
 537	desc.its_movi_cmd.event_id = id;
 538
 539	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
 540}
 541
 542static void its_send_discard(struct its_device *dev, u32 id)
 543{
 544	struct its_cmd_desc desc;
 545
 546	desc.its_discard_cmd.dev = dev;
 547	desc.its_discard_cmd.event_id = id;
 548
 549	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
 550}
 551
 552static void its_send_invall(struct its_node *its, struct its_collection *col)
 553{
 554	struct its_cmd_desc desc;
 555
 556	desc.its_invall_cmd.col = col;
 557
 558	its_send_single_command(its, its_build_invall_cmd, &desc);
 559}
 560
 561/*
 562 * irqchip functions - assumes MSI, mostly.
 563 */
 564
 565static inline u32 its_get_event_id(struct irq_data *d)
 566{
 567	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 568	return d->hwirq - its_dev->event_map.lpi_base;
 569}
 570
 571static void lpi_set_config(struct irq_data *d, bool enable)
 572{
 573	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 574	irq_hw_number_t hwirq = d->hwirq;
 575	u32 id = its_get_event_id(d);
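	/* LPI INTIDs start at 8192; the property table holds one config byte per LPI */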
 576	u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192;
 577
 578	if (enable)
 579		*cfg |= LPI_PROP_ENABLED;
 580	else
 581		*cfg &= ~LPI_PROP_ENABLED;
 582
 583	/*
 584	 * Make the above write visible to the redistributors.
 585	 * And yes, we're flushing exactly: One. Single. Byte.
 586	 * Humpf...
 587	 */
 588	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
 589		__flush_dcache_area(cfg, sizeof(*cfg));
 590	else
 591		dsb(ishst);
 592	its_send_inv(its_dev, id);
 593}
 594
 595static void its_mask_irq(struct irq_data *d)
 596{
 597	lpi_set_config(d, false);
 598}
 599
 600static void its_unmask_irq(struct irq_data *d)
 601{
 602	lpi_set_config(d, true);
 603}
 604
 605static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 606			    bool force)
 607{
 608	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
 609	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 610	struct its_collection *target_col;
 611	u32 id = its_get_event_id(d);
 612
 613	if (cpu >= nr_cpu_ids)
 614		return -EINVAL;
 615
 616	target_col = &its_dev->its->collections[cpu];
 617	its_send_movi(its_dev, target_col, id);
 618	its_dev->event_map.col_map[id] = cpu;
 619
 620	return IRQ_SET_MASK_OK_DONE;
 621}
 622
 623static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
 624{
 625	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 626	struct its_node *its;
 627	u64 addr;
 628
 629	its = its_dev->its;
 630	addr = its->phys_base + GITS_TRANSLATER;
 631
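	/* The doorbell is GITS_TRANSLATER, and the event ID is the MSI payload */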
 632	msg->address_lo		= addr & ((1UL << 32) - 1);
 633	msg->address_hi		= addr >> 32;
 634	msg->data		= its_get_event_id(d);
 635}
 636
 637static struct irq_chip its_irq_chip = {
 638	.name			= "ITS",
 639	.irq_mask		= its_mask_irq,
 640	.irq_unmask		= its_unmask_irq,
 641	.irq_eoi		= irq_chip_eoi_parent,
 642	.irq_set_affinity	= its_set_affinity,
 643	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
 644};
 645
 646/*
 647 * How we allocate LPIs:
 648 *
 649 * The GIC has id_bits bits for interrupt identifiers. From there, we
 650 * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
 651 * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
 652 * bits to the right.
 653 *
 654 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
 655 */
 656#define IRQS_PER_CHUNK_SHIFT	5
 657#define IRQS_PER_CHUNK		(1 << IRQS_PER_CHUNK_SHIFT)
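/*
 * For example, with id_bits == 16 this is ((1UL << 16) - 8192) >> 5,
 * i.e. 1792 chunks of 32 LPIs each.
 */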
 658
 659static unsigned long *lpi_bitmap;
 660static u32 lpi_chunks;
 661static DEFINE_SPINLOCK(lpi_lock);
 662
 663static int its_lpi_to_chunk(int lpi)
 664{
 665	return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
 666}
 667
 668static int its_chunk_to_lpi(int chunk)
 669{
 670	return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
 671}
 672
 673static int __init its_lpi_init(u32 id_bits)
 674{
 675	lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
 676
 677	lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
 678			     GFP_KERNEL);
 679	if (!lpi_bitmap) {
 680		lpi_chunks = 0;
 681		return -ENOMEM;
 682	}
 683
 684	pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
 685	return 0;
 686}
 687
 688static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
 689{
 690	unsigned long *bitmap = NULL;
 691	int chunk_id;
 692	int nr_chunks;
 693	int i;
 694
 695	nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);
 696
 697	spin_lock(&lpi_lock);
 698
 699	do {
 700		chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
 701						      0, nr_chunks, 0);
 702		if (chunk_id < lpi_chunks)
 703			break;
 704
 705		nr_chunks--;
 706	} while (nr_chunks > 0);
 707
 708	if (!nr_chunks)
 709		goto out;
 710
 711	bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof (long),
 712			 GFP_ATOMIC);
 713	if (!bitmap)
 714		goto out;
 715
 716	for (i = 0; i < nr_chunks; i++)
 717		set_bit(chunk_id + i, lpi_bitmap);
 718
 719	*base = its_chunk_to_lpi(chunk_id);
 720	*nr_ids = nr_chunks * IRQS_PER_CHUNK;
 721
 722out:
 723	spin_unlock(&lpi_lock);
 724
 725	if (!bitmap)
 726		*base = *nr_ids = 0;
 727
 728	return bitmap;
 729}
 730
 731static void its_lpi_free(struct event_lpi_map *map)
 732{
 733	int base = map->lpi_base;
 734	int nr_ids = map->nr_lpis;
 735	int lpi;
 736
 737	spin_lock(&lpi_lock);
 738
 739	for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
 740		int chunk = its_lpi_to_chunk(lpi);
 741		BUG_ON(chunk > lpi_chunks);
 742		if (test_bit(chunk, lpi_bitmap)) {
 743			clear_bit(chunk, lpi_bitmap);
 744		} else {
 745			pr_err("Bad LPI chunk %d\n", chunk);
 746		}
 747	}
 748
 749	spin_unlock(&lpi_lock);
 750
 751	kfree(map->lpi_map);
 752	kfree(map->col_map);
 753}
 754
 755/*
 756 * We allocate 64kB for PROPBASE. That gives us at most 64K LPIs to
 757 * deal with (one configuration byte per interrupt). PENDBASE has to
 758 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 759 */
 760#define LPI_PROPBASE_SZ		SZ_64K
 761#define LPI_PENDBASE_SZ		(LPI_PROPBASE_SZ / 8 + SZ_1K)
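/*
 * With a 64kB PROPBASE this is 8kB of pending bits for the LPIs, plus
 * 1kB covering the 8192 reserved SGI/PPI/SPI IDs.
 */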
 762
 763/*
 764 * This is how many bits of ID we need, including the useless ones.
 765 */
 766#define LPI_NRBITS		ilog2(LPI_PROPBASE_SZ + SZ_8K)
 767
 768#define LPI_PROP_DEFAULT_PRIO	0xa0
 769
 770static int __init its_alloc_lpi_tables(void)
 771{
 772	phys_addr_t paddr;
 773
 774	gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,
 775					   get_order(LPI_PROPBASE_SZ));
 776	if (!gic_rdists->prop_page) {
 777		pr_err("Failed to allocate PROPBASE\n");
 778		return -ENOMEM;
 779	}
 780
 781	paddr = page_to_phys(gic_rdists->prop_page);
 782	pr_info("GIC: using LPI property table @%pa\n", &paddr);
 783
 784	/* Priority 0xa0, Group-1, disabled */
 785	memset(page_address(gic_rdists->prop_page),
 786	       LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
 787	       LPI_PROPBASE_SZ);
 788
 789	/* Make sure the GIC will observe the written configuration */
 790	__flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
 791
 792	return 0;
 793}
 794
 795static const char *its_base_type_string[] = {
 796	[GITS_BASER_TYPE_DEVICE]	= "Devices",
 797	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
 798	[GITS_BASER_TYPE_CPU]		= "Physical CPUs",
 799	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
 800	[GITS_BASER_TYPE_RESERVED5] 	= "Reserved (5)",
 801	[GITS_BASER_TYPE_RESERVED6] 	= "Reserved (6)",
 802	[GITS_BASER_TYPE_RESERVED7] 	= "Reserved (7)",
 803};
 804
 805static void its_free_tables(struct its_node *its)
 806{
 807	int i;
 808
 809	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
 810		if (its->tables[i].base) {
 811			free_pages((unsigned long)its->tables[i].base,
 812				   its->tables[i].order);
 813			its->tables[i].base = NULL;
 814		}
 815	}
 816}
 817
 818static int its_alloc_tables(const char *node_name, struct its_node *its)
 819{
 820	int err;
 821	int i;
 822	int psz = SZ_64K;
 823	u64 shr = GITS_BASER_InnerShareable;
 824	u64 cache;
 825	u64 typer;
 826	u32 ids;
 827
 828	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
 829		/*
 830		 * erratum 22375: only alloc 8MB table size
 831		 * erratum 24313: ignore memory access type
 832		 */
 833		cache	= 0;
 834		ids	= 0x14;			/* 20 bits, 8MB */
 835	} else {
 836		cache	= GITS_BASER_WaWb;
 837		typer	= readq_relaxed(its->base + GITS_TYPER);
 838		ids	= GITS_TYPER_DEVBITS(typer);
 839	}
 840
 841	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
 842		u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
 843		u64 type = GITS_BASER_TYPE(val);
 844		u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
 845		int order = get_order(psz);
 846		int alloc_pages;
 847		u64 tmp;
 848		void *base;
 849
 850		if (type == GITS_BASER_TYPE_NONE)
 851			continue;
 852
 853		/*
 854		 * Allocate as many entries as required to fit the
 855		 * range of device IDs that the ITS can grok... The ID
 856		 * space being incredibly sparse, this results in a
 857		 * massive waste of memory.
 858		 *
 859		 * For other tables, only allocate a single page.
 860		 */
 861		if (type == GITS_BASER_TYPE_DEVICE) {
 862			/*
 863			 * 'order' was initialized earlier to the default page
  864			 * granule of the ITS.  We can't have an allocation
 865			 * smaller than that.  If the requested allocation
 866			 * is smaller, round up to the default page granule.
 867			 */
 868			order = max(get_order((1UL << ids) * entry_size),
 869				    order);
 870			if (order >= MAX_ORDER) {
 871				order = MAX_ORDER - 1;
 872				pr_warn("%s: Device Table too large, reduce its page order to %u\n",
 873					node_name, order);
 874			}
 875		}
 876
 877retry_alloc_baser:
 878		alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
 879		if (alloc_pages > GITS_BASER_PAGES_MAX) {
 880			alloc_pages = GITS_BASER_PAGES_MAX;
 881			order = get_order(GITS_BASER_PAGES_MAX * psz);
 882			pr_warn("%s: Device Table too large, reduce its page order to %u (%u pages)\n",
 883				node_name, order, alloc_pages);
 884		}
 885
 886		base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 887		if (!base) {
 888			err = -ENOMEM;
 889			goto out_free;
 890		}
 891
 892		its->tables[i].base = base;
 893		its->tables[i].order = order;
 894
 895retry_baser:
 896		val = (virt_to_phys(base) 				 |
 897		       (type << GITS_BASER_TYPE_SHIFT)			 |
 898		       ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
 899		       cache						 |
 900		       shr						 |
 901		       GITS_BASER_VALID);
 902
 903		switch (psz) {
 904		case SZ_4K:
 905			val |= GITS_BASER_PAGE_SIZE_4K;
 906			break;
 907		case SZ_16K:
 908			val |= GITS_BASER_PAGE_SIZE_16K;
 909			break;
 910		case SZ_64K:
 911			val |= GITS_BASER_PAGE_SIZE_64K;
 912			break;
 913		}
 914
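		/* GITS_BASER.Size is the number of allocated pages, minus one */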
 915		val |= alloc_pages - 1;
 916
 917		writeq_relaxed(val, its->base + GITS_BASER + i * 8);
 918		tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
 919
 920		if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
 921			/*
 922			 * Shareability didn't stick. Just use
 923			 * whatever the read reported, which is likely
 924			 * to be the only thing this redistributor
 925			 * supports. If that's zero, make it
 926			 * non-cacheable as well.
 927			 */
 928			shr = tmp & GITS_BASER_SHAREABILITY_MASK;
 929			if (!shr) {
 930				cache = GITS_BASER_nC;
 931				__flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
 932			}
 933			goto retry_baser;
 934		}
 935
 936		if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
 937			/*
 938			 * Page size didn't stick. Let's try a smaller
 939			 * size and retry. If we reach 4K, then
 940			 * something is horribly wrong...
 941			 */
 942			free_pages((unsigned long)base, order);
 943			its->tables[i].base = NULL;
 944
 945			switch (psz) {
 946			case SZ_16K:
 947				psz = SZ_4K;
 948				goto retry_alloc_baser;
 949			case SZ_64K:
 950				psz = SZ_16K;
 951				goto retry_alloc_baser;
 952			}
 953		}
 954
 955		if (val != tmp) {
 956			pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n",
 957			       node_name, i,
 958			       (unsigned long) val, (unsigned long) tmp);
 959			err = -ENXIO;
 960			goto out_free;
 961		}
 962
 963		pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
 964			(int)(PAGE_ORDER_TO_SIZE(order) / entry_size),
 965			its_base_type_string[type],
 966			(unsigned long)virt_to_phys(base),
 967			psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
 968	}
 969
 970	return 0;
 971
 972out_free:
 973	its_free_tables(its);
 974
 975	return err;
 976}
 977
 978static int its_alloc_collections(struct its_node *its)
 979{
 980	its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
 981				   GFP_KERNEL);
 982	if (!its->collections)
 983		return -ENOMEM;
 984
 985	return 0;
 986}
 987
 988static void its_cpu_init_lpis(void)
 989{
 990	void __iomem *rbase = gic_data_rdist_rd_base();
 991	struct page *pend_page;
 992	u64 val, tmp;
 993
 994	/* If we didn't allocate the pending table yet, do it now */
 995	pend_page = gic_data_rdist()->pend_page;
 996	if (!pend_page) {
 997		phys_addr_t paddr;
 998		/*
 999		 * The pending pages have to be at least 64kB aligned,
1000		 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
1001		 */
1002		pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,
1003					get_order(max(LPI_PENDBASE_SZ, SZ_64K)));
1004		if (!pend_page) {
1005			pr_err("Failed to allocate PENDBASE for CPU%d\n",
1006			       smp_processor_id());
1007			return;
1008		}
1009
1010		/* Make sure the GIC will observe the zero-ed page */
1011		__flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ);
1012
1013		paddr = page_to_phys(pend_page);
1014		pr_info("CPU%d: using LPI pending table @%pa\n",
1015			smp_processor_id(), &paddr);
1016		gic_data_rdist()->pend_page = pend_page;
1017	}
1018
1019	/* Disable LPIs */
1020	val = readl_relaxed(rbase + GICR_CTLR);
1021	val &= ~GICR_CTLR_ENABLE_LPIS;
1022	writel_relaxed(val, rbase + GICR_CTLR);
1023
1024	/*
1025	 * Make sure any change to the table is observable by the GIC.
1026	 */
1027	dsb(sy);
1028
1029	/* set PROPBASE */
1030	val = (page_to_phys(gic_rdists->prop_page) |
1031	       GICR_PROPBASER_InnerShareable |
1032	       GICR_PROPBASER_WaWb |
1033	       ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
1034
1035	writeq_relaxed(val, rbase + GICR_PROPBASER);
1036	tmp = readq_relaxed(rbase + GICR_PROPBASER);
1037
1038	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
1039		if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
1040			/*
1041			 * The HW reports non-shareable, we must
1042			 * remove the cacheability attributes as
1043			 * well.
1044			 */
1045			val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
1046				 GICR_PROPBASER_CACHEABILITY_MASK);
1047			val |= GICR_PROPBASER_nC;
1048			writeq_relaxed(val, rbase + GICR_PROPBASER);
1049		}
1050		pr_info_once("GIC: using cache flushing for LPI property table\n");
1051		gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
1052	}
1053
1054	/* set PENDBASE */
1055	val = (page_to_phys(pend_page) |
1056	       GICR_PENDBASER_InnerShareable |
1057	       GICR_PENDBASER_WaWb);
1058
1059	writeq_relaxed(val, rbase + GICR_PENDBASER);
1060	tmp = readq_relaxed(rbase + GICR_PENDBASER);
1061
1062	if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
1063		/*
1064		 * The HW reports non-shareable, we must remove the
1065		 * cacheability attributes as well.
1066		 */
1067		val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
1068			 GICR_PENDBASER_CACHEABILITY_MASK);
1069		val |= GICR_PENDBASER_nC;
1070		writeq_relaxed(val, rbase + GICR_PENDBASER);
1071	}
1072
1073	/* Enable LPIs */
1074	val = readl_relaxed(rbase + GICR_CTLR);
1075	val |= GICR_CTLR_ENABLE_LPIS;
1076	writel_relaxed(val, rbase + GICR_CTLR);
1077
1078	/* Make sure the GIC has seen the above */
1079	dsb(sy);
1080}
1081
1082static void its_cpu_init_collection(void)
1083{
1084	struct its_node *its;
1085	int cpu;
1086
1087	spin_lock(&its_lock);
1088	cpu = smp_processor_id();
1089
1090	list_for_each_entry(its, &its_nodes, entry) {
1091		u64 target;
1092
1093		/*
1094		 * We now have to bind each collection to its target
1095		 * redistributor.
1096		 */
1097		if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
1098			/*
1099			 * This ITS wants the physical address of the
1100			 * redistributor.
1101			 */
1102			target = gic_data_rdist()->phys_base;
1103		} else {
1104			/*
1105			 * This ITS wants a linear CPU number.
1106			 */
1107			target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
1108			target = GICR_TYPER_CPU_NUMBER(target) << 16;
1109		}
1110
1111		/* Perform collection mapping */
1112		its->collections[cpu].target_address = target;
1113		its->collections[cpu].col_id = cpu;
1114
1115		its_send_mapc(its, &its->collections[cpu], 1);
1116		its_send_invall(its, &its->collections[cpu]);
1117	}
1118
1119	spin_unlock(&its_lock);
1120}
1121
1122static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
1123{
1124	struct its_device *its_dev = NULL, *tmp;
1125	unsigned long flags;
1126
1127	raw_spin_lock_irqsave(&its->lock, flags);
1128
1129	list_for_each_entry(tmp, &its->its_device_list, entry) {
1130		if (tmp->device_id == dev_id) {
1131			its_dev = tmp;
1132			break;
1133		}
1134	}
1135
1136	raw_spin_unlock_irqrestore(&its->lock, flags);
1137
1138	return its_dev;
1139}
1140
1141static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1142					    int nvecs)
1143{
1144	struct its_device *dev;
1145	unsigned long *lpi_map;
1146	unsigned long flags;
1147	u16 *col_map = NULL;
1148	void *itt;
1149	int lpi_base;
1150	int nr_lpis;
1151	int nr_ites;
1152	int sz;
1153
1154	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1155	/*
1156	 * At least one bit of EventID is being used, hence a minimum
1157	 * of two entries. No, the architecture doesn't let you
1158	 * express an ITT with a single entry.
1159	 */
1160	nr_ites = max(2UL, roundup_pow_of_two(nvecs));
1161	sz = nr_ites * its->ite_size;
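	/*
	 * Over-allocate so that the ITT can be aligned to 256 bytes, as
	 * required by MAPD (see its_build_mapd_cmd).
	 */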
1162	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
1163	itt = kzalloc(sz, GFP_KERNEL);
1164	lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
1165	if (lpi_map)
1166		col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);
1167
1168	if (!dev || !itt || !lpi_map || !col_map) {
1169		kfree(dev);
1170		kfree(itt);
1171		kfree(lpi_map);
1172		kfree(col_map);
1173		return NULL;
1174	}
1175
1176	__flush_dcache_area(itt, sz);
1177
1178	dev->its = its;
1179	dev->itt = itt;
1180	dev->nr_ites = nr_ites;
1181	dev->event_map.lpi_map = lpi_map;
1182	dev->event_map.col_map = col_map;
1183	dev->event_map.lpi_base = lpi_base;
1184	dev->event_map.nr_lpis = nr_lpis;
1185	dev->device_id = dev_id;
1186	INIT_LIST_HEAD(&dev->entry);
1187
1188	raw_spin_lock_irqsave(&its->lock, flags);
1189	list_add(&dev->entry, &its->its_device_list);
1190	raw_spin_unlock_irqrestore(&its->lock, flags);
1191
1192	/* Map device to its ITT */
1193	its_send_mapd(dev, 1);
1194
1195	return dev;
1196}
1197
1198static void its_free_device(struct its_device *its_dev)
1199{
1200	unsigned long flags;
1201
1202	raw_spin_lock_irqsave(&its_dev->its->lock, flags);
1203	list_del(&its_dev->entry);
1204	raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
1205	kfree(its_dev->itt);
1206	kfree(its_dev);
1207}
1208
1209static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
1210{
1211	int idx;
1212
1213	idx = find_first_zero_bit(dev->event_map.lpi_map,
1214				  dev->event_map.nr_lpis);
1215	if (idx == dev->event_map.nr_lpis)
1216		return -ENOSPC;
1217
1218	*hwirq = dev->event_map.lpi_base + idx;
1219	set_bit(idx, dev->event_map.lpi_map);
1220
1221	return 0;
1222}
1223
1224static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
1225			   int nvec, msi_alloc_info_t *info)
1226{
1227	struct its_node *its;
1228	struct its_device *its_dev;
1229	struct msi_domain_info *msi_info;
1230	u32 dev_id;
1231
1232	/*
1233	 * We ignore "dev" entirely, and rely on the dev_id that has
1234	 * been passed via the scratchpad. This limits this domain's
1235	 * usefulness to upper layers that definitely know that they
1236	 * are built on top of the ITS.
1237	 */
1238	dev_id = info->scratchpad[0].ul;
1239
1240	msi_info = msi_get_domain_info(domain);
1241	its = msi_info->data;
1242
1243	its_dev = its_find_device(its, dev_id);
1244	if (its_dev) {
1245		/*
1246		 * We already have seen this ID, probably through
1247		 * another alias (PCI bridge of some sort). No need to
1248		 * create the device.
1249		 */
1250		pr_debug("Reusing ITT for devID %x\n", dev_id);
1251		goto out;
1252	}
1253
1254	its_dev = its_create_device(its, dev_id, nvec);
1255	if (!its_dev)
1256		return -ENOMEM;
1257
1258	pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
1259out:
1260	info->scratchpad[0].ptr = its_dev;
1261	return 0;
1262}
1263
1264static struct msi_domain_ops its_msi_domain_ops = {
1265	.msi_prepare	= its_msi_prepare,
1266};
1267
1268static int its_irq_gic_domain_alloc(struct irq_domain *domain,
1269				    unsigned int virq,
1270				    irq_hw_number_t hwirq)
1271{
1272	struct irq_fwspec fwspec;
1273
1274	if (irq_domain_get_of_node(domain->parent)) {
1275		fwspec.fwnode = domain->parent->fwnode;
1276		fwspec.param_count = 3;
1277		fwspec.param[0] = GIC_IRQ_TYPE_LPI;
1278		fwspec.param[1] = hwirq;
1279		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
1280	} else {
1281		return -EINVAL;
1282	}
1283
1284	return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
1285}
1286
1287static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1288				unsigned int nr_irqs, void *args)
1289{
1290	msi_alloc_info_t *info = args;
1291	struct its_device *its_dev = info->scratchpad[0].ptr;
1292	irq_hw_number_t hwirq;
1293	int err;
1294	int i;
1295
1296	for (i = 0; i < nr_irqs; i++) {
1297		err = its_alloc_device_irq(its_dev, &hwirq);
1298		if (err)
1299			return err;
1300
1301		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
1302		if (err)
1303			return err;
1304
1305		irq_domain_set_hwirq_and_chip(domain, virq + i,
1306					      hwirq, &its_irq_chip, its_dev);
1307		pr_debug("ID:%d pID:%d vID:%d\n",
1308			 (int)(hwirq - its_dev->event_map.lpi_base),
1309			 (int) hwirq, virq + i);
1310	}
1311
1312	return 0;
1313}
1314
1315static void its_irq_domain_activate(struct irq_domain *domain,
1316				    struct irq_data *d)
1317{
1318	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1319	u32 event = its_get_event_id(d);
1320
1321	/* Bind the LPI to the first possible CPU */
1322	its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
1323
1324	/* Map the GIC IRQ and event to the device */
1325	its_send_mapvi(its_dev, d->hwirq, event);
1326}
1327
1328static void its_irq_domain_deactivate(struct irq_domain *domain,
1329				      struct irq_data *d)
1330{
1331	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1332	u32 event = its_get_event_id(d);
1333
1334	/* Stop the delivery of interrupts */
1335	its_send_discard(its_dev, event);
1336}
1337
1338static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
1339				unsigned int nr_irqs)
1340{
1341	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
1342	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1343	int i;
1344
1345	for (i = 0; i < nr_irqs; i++) {
1346		struct irq_data *data = irq_domain_get_irq_data(domain,
1347								virq + i);
1348		u32 event = its_get_event_id(data);
1349
1350		/* Mark interrupt index as unused */
1351		clear_bit(event, its_dev->event_map.lpi_map);
1352
1353		/* Nuke the entry in the domain */
1354		irq_domain_reset_irq_data(data);
1355	}
1356
1357	/* If all interrupts have been freed, start mopping the floor */
1358	if (bitmap_empty(its_dev->event_map.lpi_map,
1359			 its_dev->event_map.nr_lpis)) {
1360		its_lpi_free(&its_dev->event_map);
1361
1362		/* Unmap device/itt */
1363		its_send_mapd(its_dev, 0);
1364		its_free_device(its_dev);
1365	}
1366
1367	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
1368}
1369
1370static const struct irq_domain_ops its_domain_ops = {
1371	.alloc			= its_irq_domain_alloc,
1372	.free			= its_irq_domain_free,
1373	.activate		= its_irq_domain_activate,
1374	.deactivate		= its_irq_domain_deactivate,
1375};
1376
1377static int its_force_quiescent(void __iomem *base)
1378{
1379	u32 count = 1000000;	/* 1s */
1380	u32 val;
1381
1382	val = readl_relaxed(base + GITS_CTLR);
1383	if (val & GITS_CTLR_QUIESCENT)
1384		return 0;
1385
1386	/* Disable the generation of all interrupts to this ITS */
1387	val &= ~GITS_CTLR_ENABLE;
1388	writel_relaxed(val, base + GITS_CTLR);
1389
1390	/* Poll GITS_CTLR and wait until ITS becomes quiescent */
1391	while (1) {
1392		val = readl_relaxed(base + GITS_CTLR);
1393		if (val & GITS_CTLR_QUIESCENT)
1394			return 0;
1395
1396		count--;
1397		if (!count)
1398			return -EBUSY;
1399
1400		cpu_relax();
1401		udelay(1);
1402	}
1403}
1404
1405static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
1406{
1407	struct its_node *its = data;
1408
1409	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
1410}
1411
1412static const struct gic_quirk its_quirks[] = {
1413#ifdef CONFIG_CAVIUM_ERRATUM_22375
1414	{
1415		.desc	= "ITS: Cavium errata 22375, 24313",
1416		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
1417		.mask	= 0xffff0fff,
1418		.init	= its_enable_quirk_cavium_22375,
1419	},
1420#endif
1421	{
1422	}
1423};
1424
1425static void its_enable_quirks(struct its_node *its)
1426{
1427	u32 iidr = readl_relaxed(its->base + GITS_IIDR);
1428
1429	gic_enable_quirks(iidr, its_quirks, its);
1430}
1431
1432static int __init its_probe(struct device_node *node,
1433			    struct irq_domain *parent)
1434{
1435	struct resource res;
1436	struct its_node *its;
1437	void __iomem *its_base;
1438	struct irq_domain *inner_domain;
1439	u32 val;
1440	u64 baser, tmp;
1441	int err;
1442
1443	err = of_address_to_resource(node, 0, &res);
1444	if (err) {
1445		pr_warn("%s: no regs?\n", node->full_name);
1446		return -ENXIO;
1447	}
1448
1449	its_base = ioremap(res.start, resource_size(&res));
1450	if (!its_base) {
1451		pr_warn("%s: unable to map registers\n", node->full_name);
1452		return -ENOMEM;
1453	}
1454
1455	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
1456	if (val != 0x30 && val != 0x40) {
1457		pr_warn("%s: no ITS detected, giving up\n", node->full_name);
1458		err = -ENODEV;
1459		goto out_unmap;
1460	}
1461
1462	err = its_force_quiescent(its_base);
1463	if (err) {
1464		pr_warn("%s: failed to quiesce, giving up\n",
1465			node->full_name);
1466		goto out_unmap;
1467	}
1468
1469	pr_info("ITS: %s\n", node->full_name);
1470
1471	its = kzalloc(sizeof(*its), GFP_KERNEL);
1472	if (!its) {
1473		err = -ENOMEM;
1474		goto out_unmap;
1475	}
1476
1477	raw_spin_lock_init(&its->lock);
1478	INIT_LIST_HEAD(&its->entry);
1479	INIT_LIST_HEAD(&its->its_device_list);
1480	its->base = its_base;
1481	its->phys_base = res.start;
1482	its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
1483
1484	its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
1485	if (!its->cmd_base) {
1486		err = -ENOMEM;
1487		goto out_free_its;
1488	}
1489	its->cmd_write = its->cmd_base;
1490
1491	its_enable_quirks(its);
1492
1493	err = its_alloc_tables(node->full_name, its);
1494	if (err)
1495		goto out_free_cmd;
1496
1497	err = its_alloc_collections(its);
1498	if (err)
1499		goto out_free_tables;
1500
1501	baser = (virt_to_phys(its->cmd_base)	|
1502		 GITS_CBASER_WaWb		|
1503		 GITS_CBASER_InnerShareable	|
1504		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
1505		 GITS_CBASER_VALID);
1506
1507	writeq_relaxed(baser, its->base + GITS_CBASER);
1508	tmp = readq_relaxed(its->base + GITS_CBASER);
1509
1510	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
1511		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
1512			/*
1513			 * The HW reports non-shareable, we must
1514			 * remove the cacheability attributes as
1515			 * well.
1516			 */
1517			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
1518				   GITS_CBASER_CACHEABILITY_MASK);
1519			baser |= GITS_CBASER_nC;
1520			writeq_relaxed(baser, its->base + GITS_CBASER);
1521		}
1522		pr_info("ITS: using cache flushing for cmd queue\n");
1523		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
1524	}
1525
1526	writeq_relaxed(0, its->base + GITS_CWRITER);
1527	writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
1528
1529	if (of_property_read_bool(node, "msi-controller")) {
1530		struct msi_domain_info *info;
1531
1532		info = kzalloc(sizeof(*info), GFP_KERNEL);
1533		if (!info) {
1534			err = -ENOMEM;
1535			goto out_free_tables;
1536		}
1537
1538		inner_domain = irq_domain_add_tree(node, &its_domain_ops, its);
1539		if (!inner_domain) {
1540			err = -ENOMEM;
1541			kfree(info);
1542			goto out_free_tables;
1543		}
1544
1545		inner_domain->parent = parent;
1546		inner_domain->bus_token = DOMAIN_BUS_NEXUS;
1547		info->ops = &its_msi_domain_ops;
1548		info->data = its;
1549		inner_domain->host_data = info;
1550	}
1551
1552	spin_lock(&its_lock);
1553	list_add(&its->entry, &its_nodes);
1554	spin_unlock(&its_lock);
1555
1556	return 0;
1557
1558out_free_tables:
1559	its_free_tables(its);
1560out_free_cmd:
1561	kfree(its->cmd_base);
1562out_free_its:
1563	kfree(its);
1564out_unmap:
1565	iounmap(its_base);
1566	pr_err("ITS: failed probing %s (%d)\n", node->full_name, err);
1567	return err;
1568}
1569
1570static bool gic_rdists_supports_plpis(void)
1571{
1572	return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
1573}
1574
1575int its_cpu_init(void)
1576{
1577	if (!list_empty(&its_nodes)) {
1578		if (!gic_rdists_supports_plpis()) {
1579			pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
1580			return -ENXIO;
1581		}
1582		its_cpu_init_lpis();
1583		its_cpu_init_collection();
1584	}
1585
1586	return 0;
1587}
1588
1589static struct of_device_id its_device_id[] = {
1590	{	.compatible	= "arm,gic-v3-its",	},
1591	{},
1592};
1593
1594int __init its_init(struct device_node *node, struct rdists *rdists,
1595	     struct irq_domain *parent_domain)
1596{
1597	struct device_node *np;
1598
1599	for (np = of_find_matching_node(node, its_device_id); np;
1600	     np = of_find_matching_node(np, its_device_id)) {
1601		its_probe(np, parent_domain);
1602	}
1603
1604	if (list_empty(&its_nodes)) {
1605		pr_warn("ITS: No ITS available, not enabling LPIs\n");
1606		return -ENXIO;
1607	}
1608
1609	gic_rdists = rdists;
1610	its_alloc_lpi_tables();
1611	its_lpi_init(rdists->id_bits);
1612
1613	return 0;
1614}
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
   4 * Author: Marc Zyngier <marc.zyngier@arm.com>
   5 */
   6
   7#include <linux/acpi.h>
   8#include <linux/acpi_iort.h>
   9#include <linux/bitfield.h>
  10#include <linux/bitmap.h>
  11#include <linux/cpu.h>
  12#include <linux/crash_dump.h>
  13#include <linux/delay.h>
  14#include <linux/dma-iommu.h>
  15#include <linux/efi.h>
  16#include <linux/interrupt.h>
  17#include <linux/iopoll.h>
  18#include <linux/irqdomain.h>
  19#include <linux/list.h>
  20#include <linux/log2.h>
  21#include <linux/memblock.h>
  22#include <linux/mm.h>
  23#include <linux/msi.h>
  24#include <linux/of.h>
  25#include <linux/of_address.h>
  26#include <linux/of_irq.h>
  27#include <linux/of_pci.h>
  28#include <linux/of_platform.h>
  29#include <linux/percpu.h>
  30#include <linux/slab.h>
  31#include <linux/syscore_ops.h>
  32
  33#include <linux/irqchip.h>
  34#include <linux/irqchip/arm-gic-v3.h>
  35#include <linux/irqchip/arm-gic-v4.h>
  36
  37#include <asm/cputype.h>
  38#include <asm/exception.h>
  39
  40#include "irq-gic-common.h"
  41
  42#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
  43#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
  44#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
  45#define ITS_FLAGS_SAVE_SUSPEND_STATE		(1ULL << 3)
  46
  47#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
  48#define RDIST_FLAGS_RD_TABLES_PREALLOCATED	(1 << 1)
  49
  50static u32 lpi_id_bits;
  51
  52/*
  53 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
  54 * deal with (one configuration byte per interrupt). PENDBASE has to
  55 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
  56 */
  57#define LPI_NRBITS		lpi_id_bits
  58#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
  59#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
  60
  61#define LPI_PROP_DEFAULT_PRIO	GICD_INT_DEF_PRI
  62
  63/*
  64 * Collection structure - just an ID, and a redistributor address to
  65 * ping. We use one per CPU as a bag of interrupts assigned to this
  66 * CPU.
  67 */
  68struct its_collection {
  69	u64			target_address;
  70	u16			col_id;
  71};
  72
  73/*
  74 * The ITS_BASER structure - contains memory information, cached
  75 * value of BASER register configuration and ITS page size.
  76 */
  77struct its_baser {
  78	void		*base;
  79	u64		val;
  80	u32		order;
  81	u32		psz;
  82};
  83
  84struct its_device;
  85
  86/*
  87 * The ITS structure - contains most of the infrastructure, with the
  88 * top-level MSI domain, the command queue, the collections, and the
  89 * list of devices writing to it.
  90 *
  91 * dev_alloc_lock has to be taken for device allocations, while the
  92 * spinlock must be taken to parse data structures such as the device
  93 * list.
  94 */
  95struct its_node {
  96	raw_spinlock_t		lock;
  97	struct mutex		dev_alloc_lock;
  98	struct list_head	entry;
  99	void __iomem		*base;
 100	void __iomem		*sgir_base;
 101	phys_addr_t		phys_base;
 102	struct its_cmd_block	*cmd_base;
 103	struct its_cmd_block	*cmd_write;
 104	struct its_baser	tables[GITS_BASER_NR_REGS];
 105	struct its_collection	*collections;
 106	struct fwnode_handle	*fwnode_handle;
 107	u64			(*get_msi_base)(struct its_device *its_dev);
 108	u64			typer;
 109	u64			cbaser_save;
 110	u32			ctlr_save;
 111	u32			mpidr;
 112	struct list_head	its_device_list;
 113	u64			flags;
 114	unsigned long		list_nr;
 115	int			numa_node;
 116	unsigned int		msi_domain_flags;
 117	u32			pre_its_base; /* for Socionext Synquacer */
 118	int			vlpi_redist_offset;
 119};
 120
 121#define is_v4(its)		(!!((its)->typer & GITS_TYPER_VLPIS))
 122#define is_v4_1(its)		(!!((its)->typer & GITS_TYPER_VMAPP))
 123#define device_ids(its)		(FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
 124
 125#define ITS_ITT_ALIGN		SZ_256
 126
 127/* The maximum number of VPEID bits supported by VLPI commands */
 128#define ITS_MAX_VPEID_BITS						\
 129	({								\
 130		int nvpeid = 16;					\
 131		if (gic_rdists->has_rvpeid &&				\
 132		    gic_rdists->gicd_typer2 & GICD_TYPER2_VIL)		\
 133			nvpeid = 1 + (gic_rdists->gicd_typer2 &		\
 134				      GICD_TYPER2_VID);			\
 135									\
 136		nvpeid;							\
 137	})
 138#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))
 139
 140/* Convert page order to size in bytes */
 141#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))
 142
 143struct event_lpi_map {
 144	unsigned long		*lpi_map;
 145	u16			*col_map;
 146	irq_hw_number_t		lpi_base;
 147	int			nr_lpis;
 148	raw_spinlock_t		vlpi_lock;
 149	struct its_vm		*vm;
 150	struct its_vlpi_map	*vlpi_maps;
 151	int			nr_vlpis;
 152};
 153
 154/*
 155 * The ITS view of a device - belongs to an ITS, owns an interrupt
  156 * translation table, and a list of interrupts.  If some of its
 157 * LPIs are injected into a guest (GICv4), the event_map.vm field
 158 * indicates which one.
 159 */
 160struct its_device {
 161	struct list_head	entry;
 162	struct its_node		*its;
 163	struct event_lpi_map	event_map;
 164	void			*itt;
 165	u32			nr_ites;
 166	u32			device_id;
 167	bool			shared;
 168};
 169
 170static struct {
 171	raw_spinlock_t		lock;
 172	struct its_device	*dev;
 173	struct its_vpe		**vpes;
 174	int			next_victim;
 175} vpe_proxy;
 176
 177struct cpu_lpi_count {
 178	atomic_t	managed;
 179	atomic_t	unmanaged;
 180};
 181
 182static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);
 183
 184static LIST_HEAD(its_nodes);
 185static DEFINE_RAW_SPINLOCK(its_lock);
 186static struct rdists *gic_rdists;
 187static struct irq_domain *its_parent;
 188
 189static unsigned long its_list_map;
 190static u16 vmovp_seq_num;
 191static DEFINE_RAW_SPINLOCK(vmovp_lock);
 192
 193static DEFINE_IDA(its_vpeid_ida);
 194
 195#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
 196#define gic_data_rdist_cpu(cpu)		(per_cpu_ptr(gic_rdists->rdist, cpu))
 197#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
 198#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)
 199
 200/*
 201 * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
 202 * always have vSGIs mapped.
 203 */
 204static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
 205{
 206	return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
 207}
 208
 209static u16 get_its_list(struct its_vm *vm)
 210{
 211	struct its_node *its;
 212	unsigned long its_list = 0;
 213
 214	list_for_each_entry(its, &its_nodes, entry) {
 215		if (!is_v4(its))
 216			continue;
 217
 218		if (require_its_list_vmovp(vm, its))
 219			__set_bit(its->list_nr, &its_list);
 220	}
 221
 222	return (u16)its_list;
 223}
 224
 225static inline u32 its_get_event_id(struct irq_data *d)
 226{
 227	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 228	return d->hwirq - its_dev->event_map.lpi_base;
 229}
 230
 231static struct its_collection *dev_event_to_col(struct its_device *its_dev,
 232					       u32 event)
 233{
 234	struct its_node *its = its_dev->its;
 235
 236	return its->collections + its_dev->event_map.col_map[event];
 237}
 238
 239static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
 240					       u32 event)
 241{
 242	if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
 243		return NULL;
 244
 245	return &its_dev->event_map.vlpi_maps[event];
 246}
 247
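     /*
      * Return the vLPI mapping for an interrupt that has been forwarded
      * to a vCPU, or NULL if this is a purely physical LPI.
      */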
 248static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
 249{
 250	if (irqd_is_forwarded_to_vcpu(d)) {
 251		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 252		u32 event = its_get_event_id(d);
 253
 254		return dev_event_to_vlpi_map(its_dev, event);
 255	}
 256
 257	return NULL;
 258}
 259
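     /*
      * Lock a vPE against concurrent affinity changes and return the
      * CPU (collection index) it currently resides on.
      */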
 260static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
 261{
 262	raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
 263	return vpe->col_idx;
 264}
 265
 266static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
 267{
 268	raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
 269}
 270
 271static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
 272{
 273	struct its_vlpi_map *map = get_vlpi_map(d);
 274	int cpu;
 275
 276	if (map) {
 277		cpu = vpe_to_cpuid_lock(map->vpe, flags);
 278	} else {
 279		/* Physical LPIs are already locked via the irq_desc lock */
 280		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 281		cpu = its_dev->event_map.col_map[its_get_event_id(d)];
 282		/* Keep GCC quiet... */
 283		*flags = 0;
 284	}
 285
 286	return cpu;
 287}
 288
 289static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
 290{
 291	struct its_vlpi_map *map = get_vlpi_map(d);
 292
 293	if (map)
 294		vpe_to_cpuid_unlock(map->vpe, flags);
 295}
 296
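     /*
      * Sanity-check helpers: a collection's target address must be 64kB
      * aligned, and a vPE is only usable if its collection is valid.
      */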
 297static struct its_collection *valid_col(struct its_collection *col)
 298{
 299	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
 300		return NULL;
 301
 302	return col;
 303}
 304
 305static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
 306{
 307	if (valid_col(its->collections + vpe->col_idx))
 308		return vpe;
 309
 310	return NULL;
 311}
 312
 313/*
 314 * ITS command descriptors - parameters to be encoded in a command
 315 * block.
 316 */
 317struct its_cmd_desc {
 318	union {
 319		struct {
 320			struct its_device *dev;
 321			u32 event_id;
 322		} its_inv_cmd;
 323
 324		struct {
 325			struct its_device *dev;
 326			u32 event_id;
 327		} its_clear_cmd;
 328
 329		struct {
 330			struct its_device *dev;
 331			u32 event_id;
 332		} its_int_cmd;
 333
 334		struct {
 335			struct its_device *dev;
 336			int valid;
 337		} its_mapd_cmd;
 338
 339		struct {
 340			struct its_collection *col;
 341			int valid;
 342		} its_mapc_cmd;
 343
 344		struct {
 345			struct its_device *dev;
 346			u32 phys_id;
 347			u32 event_id;
 348		} its_mapti_cmd;
 349
 350		struct {
 351			struct its_device *dev;
 352			struct its_collection *col;
 353			u32 event_id;
 354		} its_movi_cmd;
 355
 356		struct {
 357			struct its_device *dev;
 358			u32 event_id;
 359		} its_discard_cmd;
 360
 361		struct {
 362			struct its_collection *col;
 363		} its_invall_cmd;
 364
 365		struct {
 366			struct its_vpe *vpe;
 367		} its_vinvall_cmd;
 368
 369		struct {
 370			struct its_vpe *vpe;
 371			struct its_collection *col;
 372			bool valid;
 373		} its_vmapp_cmd;
 374
 375		struct {
 376			struct its_vpe *vpe;
 377			struct its_device *dev;
 378			u32 virt_id;
 379			u32 event_id;
 380			bool db_enabled;
 381		} its_vmapti_cmd;
 382
 383		struct {
 384			struct its_vpe *vpe;
 385			struct its_device *dev;
 386			u32 event_id;
 387			bool db_enabled;
 388		} its_vmovi_cmd;
 389
 390		struct {
 391			struct its_vpe *vpe;
 392			struct its_collection *col;
 393			u16 seq_num;
 394			u16 its_list;
 395		} its_vmovp_cmd;
 396
 397		struct {
 398			struct its_vpe *vpe;
 399		} its_invdb_cmd;
 400
 401		struct {
 402			struct its_vpe *vpe;
 403			u8 sgi;
 404			u8 priority;
 405			bool enable;
 406			bool group;
 407			bool clear;
 408		} its_vsgi_cmd;
 409	};
 410};
 411
 412/*
 413 * The ITS command block, which is what the ITS actually parses.
 414 */
 415struct its_cmd_block {
 416	union {
 417		u64	raw_cmd[4];
 418		__le64	raw_cmd_le[4];
 419	};
 420};
 421
 422#define ITS_CMD_QUEUE_SZ		SZ_64K
 423#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
 424
 425typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
 426						    struct its_cmd_block *,
 427						    struct its_cmd_desc *);
 428
 429typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
 430					      struct its_cmd_block *,
 431					      struct its_cmd_desc *);
 432
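     /* Replace bits [h:l] of a raw command word with val. */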
 433static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
 434{
 435	u64 mask = GENMASK_ULL(h, l);
 436	*raw_cmd &= ~mask;
 437	*raw_cmd |= (val << l) & mask;
 438}
 439
 440static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
 441{
 442	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
 443}
 444
 445static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
 446{
 447	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
 448}
 449
 450static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
 451{
 452	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
 453}
 454
 455static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
 456{
 457	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
 458}
 459
 460static void its_encode_size(struct its_cmd_block *cmd, u8 size)
 461{
 462	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
 463}
 464
 465static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
 466{
 467	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
 468}
 469
 470static void its_encode_valid(struct its_cmd_block *cmd, int valid)
 471{
 472	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
 473}
 474
 475static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
 476{
 477	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
 478}
 479
 480static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
 481{
 482	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
 483}
 484
 485static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
 486{
 487	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
 488}
 489
 490static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
 491{
 492	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
 493}
 494
 495static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
 496{
 497	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
 498}
 499
 500static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
 501{
 502	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
 503}
 504
 505static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
 506{
 507	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
 508}
 509
 510static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
 511{
 512	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
 513}
 514
 515static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
 516{
 517	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
 518}
 519
 520static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
 521{
 522	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
 523}
 524
 525static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
 526{
 527	its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
 528}
 529
 530static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
 531{
 532	its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
 533}
 534
 535static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
 536{
 537	its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
 538}
 539
 540static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
 541					u32 vpe_db_lpi)
 542{
 543	its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
 544}
 545
 546static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
 547					u32 vpe_db_lpi)
 548{
 549	its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
 550}
 551
 552static void its_encode_db(struct its_cmd_block *cmd, bool db)
 553{
 554	its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
 555}
 556
 557static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
 558{
 559	its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
 560}
 561
 562static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
 563{
 564	its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
 565}
 566
 567static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
 568{
 569	its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
 570}
 571
 572static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
 573{
 574	its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
 575}
 576
 577static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
 578{
 579	its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
 580}
 581
 582static inline void its_fixup_cmd(struct its_cmd_block *cmd)
 583{
 584	/* Let's fixup BE commands */
 585	cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
 586	cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
 587	cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
 588	cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
 589}
 590
 591static struct its_collection *its_build_mapd_cmd(struct its_node *its,
 592						 struct its_cmd_block *cmd,
 593						 struct its_cmd_desc *desc)
 594{
 595	unsigned long itt_addr;
 596	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
 597
 598	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
 599	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
 600
 601	its_encode_cmd(cmd, GITS_CMD_MAPD);
 602	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
 603	its_encode_size(cmd, size - 1);
 604	its_encode_itt(cmd, itt_addr);
 605	its_encode_valid(cmd, desc->its_mapd_cmd.valid);
 606
 607	its_fixup_cmd(cmd);
 608
 609	return NULL;
 610}
 611
 612static struct its_collection *its_build_mapc_cmd(struct its_node *its,
 613						 struct its_cmd_block *cmd,
 614						 struct its_cmd_desc *desc)
 615{
 616	its_encode_cmd(cmd, GITS_CMD_MAPC);
 617	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
 618	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
 619	its_encode_valid(cmd, desc->its_mapc_cmd.valid);
 620
 621	its_fixup_cmd(cmd);
 622
 623	return desc->its_mapc_cmd.col;
 624}
 625
 626static struct its_collection *its_build_mapti_cmd(struct its_node *its,
 627						  struct its_cmd_block *cmd,
 628						  struct its_cmd_desc *desc)
 629{
 630	struct its_collection *col;
 631
 632	col = dev_event_to_col(desc->its_mapti_cmd.dev,
 633			       desc->its_mapti_cmd.event_id);
 634
 635	its_encode_cmd(cmd, GITS_CMD_MAPTI);
 636	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
 637	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
 638	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
 639	its_encode_collection(cmd, col->col_id);
 640
 641	its_fixup_cmd(cmd);
 642
 643	return valid_col(col);
 644}
 645
 646static struct its_collection *its_build_movi_cmd(struct its_node *its,
 647						 struct its_cmd_block *cmd,
 648						 struct its_cmd_desc *desc)
 649{
 650	struct its_collection *col;
 651
 652	col = dev_event_to_col(desc->its_movi_cmd.dev,
 653			       desc->its_movi_cmd.event_id);
 654
 655	its_encode_cmd(cmd, GITS_CMD_MOVI);
 656	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
 657	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
 658	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
 659
 660	its_fixup_cmd(cmd);
 661
 662	return valid_col(col);
 663}
 664
 665static struct its_collection *its_build_discard_cmd(struct its_node *its,
 666						    struct its_cmd_block *cmd,
 667						    struct its_cmd_desc *desc)
 668{
 669	struct its_collection *col;
 670
 671	col = dev_event_to_col(desc->its_discard_cmd.dev,
 672			       desc->its_discard_cmd.event_id);
 673
 674	its_encode_cmd(cmd, GITS_CMD_DISCARD);
 675	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
 676	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
 677
 678	its_fixup_cmd(cmd);
 679
 680	return valid_col(col);
 681}
 682
 683static struct its_collection *its_build_inv_cmd(struct its_node *its,
 684						struct its_cmd_block *cmd,
 685						struct its_cmd_desc *desc)
 686{
 687	struct its_collection *col;
 688
 689	col = dev_event_to_col(desc->its_inv_cmd.dev,
 690			       desc->its_inv_cmd.event_id);
 691
 692	its_encode_cmd(cmd, GITS_CMD_INV);
 693	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
 694	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
 695
 696	its_fixup_cmd(cmd);
 697
 698	return valid_col(col);
 699}
 700
 701static struct its_collection *its_build_int_cmd(struct its_node *its,
 702						struct its_cmd_block *cmd,
 703						struct its_cmd_desc *desc)
 704{
 705	struct its_collection *col;
 706
 707	col = dev_event_to_col(desc->its_int_cmd.dev,
 708			       desc->its_int_cmd.event_id);
 709
 710	its_encode_cmd(cmd, GITS_CMD_INT);
 711	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
 712	its_encode_event_id(cmd, desc->its_int_cmd.event_id);
 713
 714	its_fixup_cmd(cmd);
 715
 716	return valid_col(col);
 717}
 718
 719static struct its_collection *its_build_clear_cmd(struct its_node *its,
 720						  struct its_cmd_block *cmd,
 721						  struct its_cmd_desc *desc)
 722{
 723	struct its_collection *col;
 724
 725	col = dev_event_to_col(desc->its_clear_cmd.dev,
 726			       desc->its_clear_cmd.event_id);
 727
 728	its_encode_cmd(cmd, GITS_CMD_CLEAR);
 729	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
 730	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
 731
 732	its_fixup_cmd(cmd);
 733
 734	return valid_col(col);
 735}
 736
 737static struct its_collection *its_build_invall_cmd(struct its_node *its,
 738						   struct its_cmd_block *cmd,
 739						   struct its_cmd_desc *desc)
 740{
 741	its_encode_cmd(cmd, GITS_CMD_INVALL);
 742	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
 743
 744	its_fixup_cmd(cmd);
 745
 746	return NULL;
 747}
 748
 749static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
 750					     struct its_cmd_block *cmd,
 751					     struct its_cmd_desc *desc)
 752{
 753	its_encode_cmd(cmd, GITS_CMD_VINVALL);
 754	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
 755
 756	its_fixup_cmd(cmd);
 757
 758	return valid_vpe(its, desc->its_vinvall_cmd.vpe);
 759}
 760
 761static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
 762					   struct its_cmd_block *cmd,
 763					   struct its_cmd_desc *desc)
 764{
 765	unsigned long vpt_addr, vconf_addr;
 766	u64 target;
 767	bool alloc;
 768
 769	its_encode_cmd(cmd, GITS_CMD_VMAPP);
 770	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
 771	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
 772
 773	if (!desc->its_vmapp_cmd.valid) {
 774		if (is_v4_1(its)) {
 775			alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
 776			its_encode_alloc(cmd, alloc);
 777		}
 778
 779		goto out;
 780	}
 781
 782	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
 783	target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
 784
 785	its_encode_target(cmd, target);
 786	its_encode_vpt_addr(cmd, vpt_addr);
 787	its_encode_vpt_size(cmd, LPI_NRBITS - 1);
 788
 789	if (!is_v4_1(its))
 790		goto out;
 791
 792	vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
 793
 794	alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
 795
 796	its_encode_alloc(cmd, alloc);
 797
 798	/* We can only signal PTZ when alloc==1. Why do we have two bits? */
 799	its_encode_ptz(cmd, alloc);
 800	its_encode_vconf_addr(cmd, vconf_addr);
 801	its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
 802
 803out:
 804	its_fixup_cmd(cmd);
 805
 806	return valid_vpe(its, desc->its_vmapp_cmd.vpe);
 807}
 808
 809static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
 810					    struct its_cmd_block *cmd,
 811					    struct its_cmd_desc *desc)
 812{
 813	u32 db;
 814
 815	if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
 816		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
 817	else
 818		db = 1023;
 819
 820	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
 821	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
 822	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
 823	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
 824	its_encode_db_phys_id(cmd, db);
 825	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
 826
 827	its_fixup_cmd(cmd);
 828
 829	return valid_vpe(its, desc->its_vmapti_cmd.vpe);
 830}
 831
 832static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
 833					   struct its_cmd_block *cmd,
 834					   struct its_cmd_desc *desc)
 835{
 836	u32 db;
 837
 838	if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
 839		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
 840	else
 841		db = 1023;
 842
 843	its_encode_cmd(cmd, GITS_CMD_VMOVI);
 844	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
 845	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
 846	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
 847	its_encode_db_phys_id(cmd, db);
 848	its_encode_db_valid(cmd, true);
 849
 850	its_fixup_cmd(cmd);
 851
 852	return valid_vpe(its, desc->its_vmovi_cmd.vpe);
 853}
 854
 855static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
 856					   struct its_cmd_block *cmd,
 857					   struct its_cmd_desc *desc)
 858{
 859	u64 target;
 860
 861	target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
 862	its_encode_cmd(cmd, GITS_CMD_VMOVP);
 863	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
 864	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
 865	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
 866	its_encode_target(cmd, target);
 867
 868	if (is_v4_1(its)) {
 869		its_encode_db(cmd, true);
 870		its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
 871	}
 872
 873	its_fixup_cmd(cmd);
 874
 875	return valid_vpe(its, desc->its_vmovp_cmd.vpe);
 876}
 877
 878static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
 879					  struct its_cmd_block *cmd,
 880					  struct its_cmd_desc *desc)
 881{
 882	struct its_vlpi_map *map;
 883
 884	map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
 885				    desc->its_inv_cmd.event_id);
 886
 887	its_encode_cmd(cmd, GITS_CMD_INV);
 888	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
 889	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
 890
 891	its_fixup_cmd(cmd);
 892
 893	return valid_vpe(its, map->vpe);
 894}
 895
 896static struct its_vpe *its_build_vint_cmd(struct its_node *its,
 897					  struct its_cmd_block *cmd,
 898					  struct its_cmd_desc *desc)
 899{
 900	struct its_vlpi_map *map;
 901
 902	map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
 903				    desc->its_int_cmd.event_id);
 904
 905	its_encode_cmd(cmd, GITS_CMD_INT);
 906	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
 907	its_encode_event_id(cmd, desc->its_int_cmd.event_id);
 908
 909	its_fixup_cmd(cmd);
 910
 911	return valid_vpe(its, map->vpe);
 912}
 913
 914static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
 915					    struct its_cmd_block *cmd,
 916					    struct its_cmd_desc *desc)
 917{
 918	struct its_vlpi_map *map;
 919
 920	map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
 921				    desc->its_clear_cmd.event_id);
 922
 923	its_encode_cmd(cmd, GITS_CMD_CLEAR);
 924	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
 925	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
 926
 927	its_fixup_cmd(cmd);
 928
 929	return valid_vpe(its, map->vpe);
 930}
 931
 932static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
 933					   struct its_cmd_block *cmd,
 934					   struct its_cmd_desc *desc)
 935{
 936	if (WARN_ON(!is_v4_1(its)))
 937		return NULL;
 938
 939	its_encode_cmd(cmd, GITS_CMD_INVDB);
 940	its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
 941
 942	its_fixup_cmd(cmd);
 943
 944	return valid_vpe(its, desc->its_invdb_cmd.vpe);
 945}
 946
 947static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
 948					  struct its_cmd_block *cmd,
 949					  struct its_cmd_desc *desc)
 950{
 951	if (WARN_ON(!is_v4_1(its)))
 952		return NULL;
 953
 954	its_encode_cmd(cmd, GITS_CMD_VSGI);
 955	its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
 956	its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
 957	its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
 958	its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
 959	its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
 960	its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);
 961
 962	its_fixup_cmd(cmd);
 963
 964	return valid_vpe(its, desc->its_vsgi_cmd.vpe);
 965}
 966
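     /* Byte offset of a command slot within the command queue. */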
 967static u64 its_cmd_ptr_to_offset(struct its_node *its,
 968				 struct its_cmd_block *ptr)
 969{
 970	return (ptr - its->cmd_base) * sizeof(*ptr);
 971}
 972
 973static int its_queue_full(struct its_node *its)
 974{
 975	int widx;
 976	int ridx;
 977
 978	widx = its->cmd_write - its->cmd_base;
 979	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
 980
 981	/* This is incredibly unlikely to happen, unless the ITS locks up. */
 982	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
 983		return 1;
 984
 985	return 0;
 986}
 987
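     /*
      * Claim the next free slot in the command queue, waiting up to ~1s
      * for the ITS to drain it if the queue is full. The slot is zeroed
      * before being handed back, and the write pointer wraps around at
      * the end of the queue.
      */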
 988static struct its_cmd_block *its_allocate_entry(struct its_node *its)
 989{
 990	struct its_cmd_block *cmd;
 991	u32 count = 1000000;	/* 1s! */
 992
 993	while (its_queue_full(its)) {
 994		count--;
 995		if (!count) {
 996			pr_err_ratelimited("ITS queue not draining\n");
 997			return NULL;
 998		}
 999		cpu_relax();
1000		udelay(1);
1001	}
1002
1003	cmd = its->cmd_write++;
1004
1005	/* Handle queue wrapping */
1006	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
1007		its->cmd_write = its->cmd_base;
1008
1009	/* Clear command  */
1010	cmd->raw_cmd[0] = 0;
1011	cmd->raw_cmd[1] = 0;
1012	cmd->raw_cmd[2] = 0;
1013	cmd->raw_cmd[3] = 0;
1014
1015	return cmd;
1016}
1017
1018static struct its_cmd_block *its_post_commands(struct its_node *its)
1019{
1020	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
1021
1022	writel_relaxed(wr, its->base + GITS_CWRITER);
1023
1024	return its->cmd_write;
1025}
1026
1027static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
1028{
1029	/*
1030	 * Make sure the commands written to memory are observable by
1031	 * the ITS.
1032	 */
1033	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
1034		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
1035	else
1036		dsb(ishst);
1037}
1038
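     /*
      * Poll GITS_CREADR until the ITS read pointer has moved past the
      * last command we posted, accounting for queue wrap-around. Times
      * out after roughly a second.
      */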
1039static int its_wait_for_range_completion(struct its_node *its,
1040					 u64	prev_idx,
1041					 struct its_cmd_block *to)
1042{
1043	u64 rd_idx, to_idx, linear_idx;
1044	u32 count = 1000000;	/* 1s! */
1045
1046	/* Linearize to_idx if the command set has wrapped around */
1047	to_idx = its_cmd_ptr_to_offset(its, to);
1048	if (to_idx < prev_idx)
1049		to_idx += ITS_CMD_QUEUE_SZ;
1050
1051	linear_idx = prev_idx;
1052
1053	while (1) {
1054		s64 delta;
1055
1056		rd_idx = readl_relaxed(its->base + GITS_CREADR);
1057
1058		/*
1059		 * Compute the read pointer progress, taking the
1060		 * potential wrap-around into account.
1061		 */
1062		delta = rd_idx - prev_idx;
1063		if (rd_idx < prev_idx)
1064			delta += ITS_CMD_QUEUE_SZ;
1065
1066		linear_idx += delta;
1067		if (linear_idx >= to_idx)
1068			break;
1069
1070		count--;
1071		if (!count) {
1072			pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
1073					   to_idx, linear_idx);
1074			return -1;
1075		}
1076		prev_idx = rd_idx;
1077		cpu_relax();
1078		udelay(1);
1079	}
1080
1081	return 0;
1082}
1083
1084/* Warning, macro hell follows */
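     /*
      * BUILD_SINGLE_CMD_FUNC() expands to a function that takes the ITS
      * lock, queues one command built by 'builder', chains a SYNC/VSYNC
      * targeting whatever object the builder returns, then posts the
      * commands and waits for the ITS to consume them.
      */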
1085#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
1086void name(struct its_node *its,						\
1087	  buildtype builder,						\
1088	  struct its_cmd_desc *desc)					\
1089{									\
1090	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
1091	synctype *sync_obj;						\
1092	unsigned long flags;						\
1093	u64 rd_idx;							\
1094									\
1095	raw_spin_lock_irqsave(&its->lock, flags);			\
1096									\
1097	cmd = its_allocate_entry(its);					\
 1098	if (!cmd) {		/* We're soooooo screwed... */		\
1099		raw_spin_unlock_irqrestore(&its->lock, flags);		\
1100		return;							\
1101	}								\
1102	sync_obj = builder(its, cmd, desc);				\
1103	its_flush_cmd(its, cmd);					\
1104									\
1105	if (sync_obj) {							\
1106		sync_cmd = its_allocate_entry(its);			\
1107		if (!sync_cmd)						\
1108			goto post;					\
1109									\
1110		buildfn(its, sync_cmd, sync_obj);			\
1111		its_flush_cmd(its, sync_cmd);				\
1112	}								\
1113									\
1114post:									\
1115	rd_idx = readl_relaxed(its->base + GITS_CREADR);		\
1116	next_cmd = its_post_commands(its);				\
1117	raw_spin_unlock_irqrestore(&its->lock, flags);			\
1118									\
1119	if (its_wait_for_range_completion(its, rd_idx, next_cmd))	\
1120		pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\
1121}
1122
1123static void its_build_sync_cmd(struct its_node *its,
1124			       struct its_cmd_block *sync_cmd,
1125			       struct its_collection *sync_col)
1126{
1127	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
1128	its_encode_target(sync_cmd, sync_col->target_address);
1129
1130	its_fixup_cmd(sync_cmd);
1131}
1132
1133static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
1134			     struct its_collection, its_build_sync_cmd)
1135
1136static void its_build_vsync_cmd(struct its_node *its,
1137				struct its_cmd_block *sync_cmd,
1138				struct its_vpe *sync_vpe)
1139{
1140	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
1141	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
1142
1143	its_fixup_cmd(sync_cmd);
1144}
1145
1146static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
1147			     struct its_vpe, its_build_vsync_cmd)
1148
1149static void its_send_int(struct its_device *dev, u32 event_id)
1150{
1151	struct its_cmd_desc desc;
1152
1153	desc.its_int_cmd.dev = dev;
1154	desc.its_int_cmd.event_id = event_id;
1155
1156	its_send_single_command(dev->its, its_build_int_cmd, &desc);
1157}
1158
1159static void its_send_clear(struct its_device *dev, u32 event_id)
1160{
1161	struct its_cmd_desc desc;
1162
1163	desc.its_clear_cmd.dev = dev;
1164	desc.its_clear_cmd.event_id = event_id;
1165
1166	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
1167}
1168
1169static void its_send_inv(struct its_device *dev, u32 event_id)
1170{
1171	struct its_cmd_desc desc;
1172
1173	desc.its_inv_cmd.dev = dev;
1174	desc.its_inv_cmd.event_id = event_id;
1175
1176	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
1177}
1178
1179static void its_send_mapd(struct its_device *dev, int valid)
1180{
1181	struct its_cmd_desc desc;
1182
1183	desc.its_mapd_cmd.dev = dev;
1184	desc.its_mapd_cmd.valid = !!valid;
1185
1186	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
1187}
1188
1189static void its_send_mapc(struct its_node *its, struct its_collection *col,
1190			  int valid)
1191{
1192	struct its_cmd_desc desc;
1193
1194	desc.its_mapc_cmd.col = col;
1195	desc.its_mapc_cmd.valid = !!valid;
1196
1197	its_send_single_command(its, its_build_mapc_cmd, &desc);
1198}
1199
1200static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
1201{
1202	struct its_cmd_desc desc;
1203
1204	desc.its_mapti_cmd.dev = dev;
1205	desc.its_mapti_cmd.phys_id = irq_id;
1206	desc.its_mapti_cmd.event_id = id;
1207
1208	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
1209}
1210
1211static void its_send_movi(struct its_device *dev,
1212			  struct its_collection *col, u32 id)
1213{
1214	struct its_cmd_desc desc;
1215
1216	desc.its_movi_cmd.dev = dev;
1217	desc.its_movi_cmd.col = col;
1218	desc.its_movi_cmd.event_id = id;
1219
1220	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
1221}
1222
1223static void its_send_discard(struct its_device *dev, u32 id)
1224{
1225	struct its_cmd_desc desc;
1226
1227	desc.its_discard_cmd.dev = dev;
1228	desc.its_discard_cmd.event_id = id;
1229
1230	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
1231}
1232
1233static void its_send_invall(struct its_node *its, struct its_collection *col)
1234{
1235	struct its_cmd_desc desc;
1236
1237	desc.its_invall_cmd.col = col;
1238
1239	its_send_single_command(its, its_build_invall_cmd, &desc);
1240}
1241
1242static void its_send_vmapti(struct its_device *dev, u32 id)
1243{
1244	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1245	struct its_cmd_desc desc;
1246
1247	desc.its_vmapti_cmd.vpe = map->vpe;
1248	desc.its_vmapti_cmd.dev = dev;
1249	desc.its_vmapti_cmd.virt_id = map->vintid;
1250	desc.its_vmapti_cmd.event_id = id;
1251	desc.its_vmapti_cmd.db_enabled = map->db_enabled;
1252
1253	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
1254}
1255
1256static void its_send_vmovi(struct its_device *dev, u32 id)
1257{
1258	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1259	struct its_cmd_desc desc;
1260
1261	desc.its_vmovi_cmd.vpe = map->vpe;
1262	desc.its_vmovi_cmd.dev = dev;
1263	desc.its_vmovi_cmd.event_id = id;
1264	desc.its_vmovi_cmd.db_enabled = map->db_enabled;
1265
1266	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
1267}
1268
1269static void its_send_vmapp(struct its_node *its,
1270			   struct its_vpe *vpe, bool valid)
1271{
1272	struct its_cmd_desc desc;
1273
1274	desc.its_vmapp_cmd.vpe = vpe;
1275	desc.its_vmapp_cmd.valid = valid;
1276	desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
1277
1278	its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
1279}
1280
1281static void its_send_vmovp(struct its_vpe *vpe)
1282{
1283	struct its_cmd_desc desc = {};
1284	struct its_node *its;
1285	unsigned long flags;
1286	int col_id = vpe->col_idx;
1287
1288	desc.its_vmovp_cmd.vpe = vpe;
1289
1290	if (!its_list_map) {
1291		its = list_first_entry(&its_nodes, struct its_node, entry);
1292		desc.its_vmovp_cmd.col = &its->collections[col_id];
1293		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1294		return;
1295	}
1296
1297	/*
1298	 * Yet another marvel of the architecture. If using the
1299	 * its_list "feature", we need to make sure that all ITSs
1300	 * receive all VMOVP commands in the same order. The only way
1301	 * to guarantee this is to make vmovp a serialization point.
1302	 *
1303	 * Wall <-- Head.
1304	 */
1305	raw_spin_lock_irqsave(&vmovp_lock, flags);
1306
1307	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
1308	desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
1309
1310	/* Emit VMOVPs */
1311	list_for_each_entry(its, &its_nodes, entry) {
1312		if (!is_v4(its))
1313			continue;
1314
1315		if (!require_its_list_vmovp(vpe->its_vm, its))
1316			continue;
1317
1318		desc.its_vmovp_cmd.col = &its->collections[col_id];
1319		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1320	}
1321
1322	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1323}
1324
1325static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1326{
1327	struct its_cmd_desc desc;
1328
1329	desc.its_vinvall_cmd.vpe = vpe;
1330	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
1331}
1332
1333static void its_send_vinv(struct its_device *dev, u32 event_id)
1334{
1335	struct its_cmd_desc desc;
1336
1337	/*
1338	 * There is no real VINV command. This is just a normal INV,
1339	 * with a VSYNC instead of a SYNC.
1340	 */
1341	desc.its_inv_cmd.dev = dev;
1342	desc.its_inv_cmd.event_id = event_id;
1343
1344	its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
1345}
1346
1347static void its_send_vint(struct its_device *dev, u32 event_id)
1348{
1349	struct its_cmd_desc desc;
1350
1351	/*
1352	 * There is no real VINT command. This is just a normal INT,
1353	 * with a VSYNC instead of a SYNC.
1354	 */
1355	desc.its_int_cmd.dev = dev;
1356	desc.its_int_cmd.event_id = event_id;
1357
1358	its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
1359}
1360
1361static void its_send_vclear(struct its_device *dev, u32 event_id)
1362{
1363	struct its_cmd_desc desc;
1364
1365	/*
1366	 * There is no real VCLEAR command. This is just a normal CLEAR,
1367	 * with a VSYNC instead of a SYNC.
1368	 */
1369	desc.its_clear_cmd.dev = dev;
1370	desc.its_clear_cmd.event_id = event_id;
1371
1372	its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
1373}
1374
1375static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
1376{
1377	struct its_cmd_desc desc;
1378
1379	desc.its_invdb_cmd.vpe = vpe;
1380	its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
1381}
1382
1383/*
1384 * irqchip functions - assumes MSI, mostly.
1385 */
1386static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
1387{
1388	struct its_vlpi_map *map = get_vlpi_map(d);
1389	irq_hw_number_t hwirq;
1390	void *va;
1391	u8 *cfg;
1392
1393	if (map) {
1394		va = page_address(map->vm->vprop_page);
1395		hwirq = map->vintid;
1396
1397		/* Remember the updated property */
1398		map->properties &= ~clr;
1399		map->properties |= set | LPI_PROP_GROUP1;
1400	} else {
1401		va = gic_rdists->prop_table_va;
1402		hwirq = d->hwirq;
1403	}
1404
1405	cfg = va + hwirq - 8192;
1406	*cfg &= ~clr;
1407	*cfg |= set | LPI_PROP_GROUP1;
1408
1409	/*
1410	 * Make the above write visible to the redistributors.
1411	 * And yes, we're flushing exactly: One. Single. Byte.
1412	 * Humpf...
1413	 */
1414	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
1415		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
1416	else
1417		dsb(ishst);
1418}
1419
1420static void wait_for_syncr(void __iomem *rdbase)
1421{
1422	while (readl_relaxed(rdbase + GICR_SYNCR) & 1)
1423		cpu_relax();
1424}
1425
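     /*
      * Invalidate the cached configuration of an LPI (or vLPI) by writing
      * GICR_INVLPIR on the redistributor it is currently routed to,
      * rather than queueing an INV command on the ITS.
      */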
1426static void direct_lpi_inv(struct irq_data *d)
1427{
1428	struct its_vlpi_map *map = get_vlpi_map(d);
1429	void __iomem *rdbase;
1430	unsigned long flags;
1431	u64 val;
1432	int cpu;
1433
1434	if (map) {
1435		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1436
1437		WARN_ON(!is_v4_1(its_dev->its));
1438
1439		val  = GICR_INVLPIR_V;
1440		val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
1441		val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
1442	} else {
1443		val = d->hwirq;
1444	}
1445
1446	/* Target the redistributor this LPI is currently routed to */
1447	cpu = irq_to_cpuid_lock(d, &flags);
1448	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
1449	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
1450	gic_write_lpir(val, rdbase + GICR_INVLPIR);
1451
1452	wait_for_syncr(rdbase);
1453	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
1454	irq_to_cpuid_unlock(d, flags);
1455}
1456
1457static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1458{
1459	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1460
1461	lpi_write_config(d, clr, set);
1462	if (gic_rdists->has_direct_lpi &&
1463	    (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
1464		direct_lpi_inv(d);
1465	else if (!irqd_is_forwarded_to_vcpu(d))
1466		its_send_inv(its_dev, its_get_event_id(d));
1467	else
1468		its_send_vinv(its_dev, its_get_event_id(d));
1469}
1470
1471static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1472{
1473	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1474	u32 event = its_get_event_id(d);
1475	struct its_vlpi_map *map;
1476
1477	/*
1478	 * GICv4.1 does away with the per-LPI nonsense, nothing to do
1479	 * here.
1480	 */
1481	if (is_v4_1(its_dev->its))
1482		return;
1483
1484	map = dev_event_to_vlpi_map(its_dev, event);
1485
1486	if (map->db_enabled == enable)
1487		return;
1488
1489	map->db_enabled = enable;
1490
1491	/*
1492	 * More fun with the architecture:
1493	 *
1494	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1495	 * value or to 1023, depending on the enable bit. But that
 1496	 * would be issuing a mapping for an /existing/ DevID+EventID
1497	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1498	 * to the /same/ vPE, using this opportunity to adjust the
1499	 * doorbell. Mouahahahaha. We loves it, Precious.
1500	 */
1501	its_send_vmovi(its_dev, event);
1502}
1503
1504static void its_mask_irq(struct irq_data *d)
1505{
1506	if (irqd_is_forwarded_to_vcpu(d))
1507		its_vlpi_set_doorbell(d, false);
1508
1509	lpi_update_config(d, LPI_PROP_ENABLED, 0);
1510}
1511
1512static void its_unmask_irq(struct irq_data *d)
1513{
1514	if (irqd_is_forwarded_to_vcpu(d))
1515		its_vlpi_set_doorbell(d, true);
1516
1517	lpi_update_config(d, 0, LPI_PROP_ENABLED);
1518}
1519
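     /*
      * Per-CPU LPI accounting, used when picking a target CPU for an
      * LPI. Managed and unmanaged interrupts are tracked separately.
      */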
1520static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
1521{
1522	if (irqd_affinity_is_managed(d))
1523		return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1524
1525	return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1526}
1527
1528static void its_inc_lpi_count(struct irq_data *d, int cpu)
1529{
1530	if (irqd_affinity_is_managed(d))
1531		atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1532	else
1533		atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1534}
1535
1536static void its_dec_lpi_count(struct irq_data *d, int cpu)
1537{
1538	if (irqd_affinity_is_managed(d))
1539		atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1540	else
1541		atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1542}
1543
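     /* Pick the CPU in the mask with the fewest LPIs currently routed to it. */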
1544static unsigned int cpumask_pick_least_loaded(struct irq_data *d,
1545					      const struct cpumask *cpu_mask)
1546{
1547	unsigned int cpu = nr_cpu_ids, tmp;
1548	int count = S32_MAX;
1549
1550	for_each_cpu(tmp, cpu_mask) {
1551		int this_count = its_read_lpi_count(d, tmp);
1552		if (this_count < count) {
1553			cpu = tmp;
 1554			count = this_count;
1555		}
1556	}
1557
1558	return cpu;
1559}
1560
1561/*
1562 * As suggested by Thomas Gleixner in:
1563 * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de
1564 */
1565static int its_select_cpu(struct irq_data *d,
1566			  const struct cpumask *aff_mask)
1567{
1568	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1569	cpumask_var_t tmpmask;
1570	int cpu, node;
1571
1572	if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
1573		return -ENOMEM;
1574
1575	node = its_dev->its->numa_node;
1576
1577	if (!irqd_affinity_is_managed(d)) {
1578		/* First try the NUMA node */
1579		if (node != NUMA_NO_NODE) {
1580			/*
1581			 * Try the intersection of the affinity mask and the
1582			 * node mask (and the online mask, just to be safe).
1583			 */
1584			cpumask_and(tmpmask, cpumask_of_node(node), aff_mask);
1585			cpumask_and(tmpmask, tmpmask, cpu_online_mask);
1586
1587			/*
1588			 * Ideally, we would check if the mask is empty, and
1589			 * try again on the full node here.
1590			 *
1591			 * But it turns out that the way ACPI describes the
 1592			 * affinity for ITSs only deals with memory, and
1593			 * not target CPUs, so it cannot describe a single
1594			 * ITS placed next to two NUMA nodes.
1595			 *
 1596			 * Instead, just fall back on the online mask. This
1597			 * diverges from Thomas' suggestion above.
1598			 */
1599			cpu = cpumask_pick_least_loaded(d, tmpmask);
1600			if (cpu < nr_cpu_ids)
1601				goto out;
1602
1603			/* If we can't cross sockets, give up */
1604			if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
1605				goto out;
1606
1607			/* If the above failed, expand the search */
1608		}
1609
1610		/* Try the intersection of the affinity and online masks */
1611		cpumask_and(tmpmask, aff_mask, cpu_online_mask);
1612
1613		/* If that doesn't fly, the online mask is the last resort */
1614		if (cpumask_empty(tmpmask))
1615			cpumask_copy(tmpmask, cpu_online_mask);
1616
1617		cpu = cpumask_pick_least_loaded(d, tmpmask);
1618	} else {
1619		cpumask_and(tmpmask, irq_data_get_affinity_mask(d), cpu_online_mask);
1620
1621		/* If we cannot cross sockets, limit the search to that node */
1622		if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
1623		    node != NUMA_NO_NODE)
1624			cpumask_and(tmpmask, tmpmask, cpumask_of_node(node));
1625
1626		cpu = cpumask_pick_least_loaded(d, tmpmask);
1627	}
1628out:
1629	free_cpumask_var(tmpmask);
1630
1631	pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
1632	return cpu;
1633}
1634
1635static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1636			    bool force)
1637{
1638	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1639	struct its_collection *target_col;
1640	u32 id = its_get_event_id(d);
1641	int cpu, prev_cpu;
1642
1643	/* A forwarded interrupt should use irq_set_vcpu_affinity */
1644	if (irqd_is_forwarded_to_vcpu(d))
1645		return -EINVAL;
1646
1647	prev_cpu = its_dev->event_map.col_map[id];
1648	its_dec_lpi_count(d, prev_cpu);
1649
1650	if (!force)
1651		cpu = its_select_cpu(d, mask_val);
1652	else
1653		cpu = cpumask_pick_least_loaded(d, mask_val);
1654
1655	if (cpu < 0 || cpu >= nr_cpu_ids)
1656		goto err;
1657
 1658	/* don't set the affinity when the target cpu is the same as the current one */
1659	if (cpu != prev_cpu) {
1660		target_col = &its_dev->its->collections[cpu];
1661		its_send_movi(its_dev, target_col, id);
1662		its_dev->event_map.col_map[id] = cpu;
1663		irq_data_update_effective_affinity(d, cpumask_of(cpu));
1664	}
1665
1666	its_inc_lpi_count(d, cpu);
1667
1668	return IRQ_SET_MASK_OK_DONE;
1669
1670err:
1671	its_inc_lpi_count(d, prev_cpu);
1672	return -EINVAL;
1673}
1674
1675static u64 its_irq_get_msi_base(struct its_device *its_dev)
1676{
1677	struct its_node *its = its_dev->its;
1678
1679	return its->phys_base + GITS_TRANSLATER;
1680}
1681
1682static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1683{
1684	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1685	struct its_node *its;
1686	u64 addr;
1687
1688	its = its_dev->its;
1689	addr = its->get_msi_base(its_dev);
1690
1691	msg->address_lo		= lower_32_bits(addr);
1692	msg->address_hi		= upper_32_bits(addr);
1693	msg->data		= its_get_event_id(d);
1694
1695	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
1696}
1697
1698static int its_irq_set_irqchip_state(struct irq_data *d,
1699				     enum irqchip_irq_state which,
1700				     bool state)
1701{
1702	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1703	u32 event = its_get_event_id(d);
1704
1705	if (which != IRQCHIP_STATE_PENDING)
1706		return -EINVAL;
1707
1708	if (irqd_is_forwarded_to_vcpu(d)) {
1709		if (state)
1710			its_send_vint(its_dev, event);
1711		else
1712			its_send_vclear(its_dev, event);
1713	} else {
1714		if (state)
1715			its_send_int(its_dev, event);
1716		else
1717			its_send_clear(its_dev, event);
1718	}
1719
1720	return 0;
1721}
1722
1723/*
1724 * Two favourable cases:
1725 *
1726 * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
1727 *     for vSGI delivery
1728 *
1729 * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
1730 *     and we're better off mapping all VPEs always
1731 *
1732 * If neither (a) nor (b) is true, then we map vPEs on demand.
1733 *
1734 */
1735static bool gic_requires_eager_mapping(void)
1736{
1737	if (!its_list_map || gic_rdists->has_rvpeid)
1738		return true;
1739
1740	return false;
1741}
1742
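     /*
      * On the first vLPI of a VM routed through this ITS, map all of the
      * VM's vPEs onto it. Only needed in ITS-list mode; otherwise the
      * vPEs are always mapped.
      */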
1743static void its_map_vm(struct its_node *its, struct its_vm *vm)
1744{
1745	unsigned long flags;
1746
1747	if (gic_requires_eager_mapping())
1748		return;
1749
1750	raw_spin_lock_irqsave(&vmovp_lock, flags);
1751
1752	/*
1753	 * If the VM wasn't mapped yet, iterate over the vpes and get
1754	 * them mapped now.
1755	 */
1756	vm->vlpi_count[its->list_nr]++;
1757
1758	if (vm->vlpi_count[its->list_nr] == 1) {
1759		int i;
1760
1761		for (i = 0; i < vm->nr_vpes; i++) {
1762			struct its_vpe *vpe = vm->vpes[i];
1763			struct irq_data *d = irq_get_irq_data(vpe->irq);
1764
1765			/* Map the VPE to the first possible CPU */
1766			vpe->col_idx = cpumask_first(cpu_online_mask);
1767			its_send_vmapp(its, vpe, true);
1768			its_send_vinvall(its, vpe);
1769			irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
1770		}
1771	}
1772
1773	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1774}
1775
1776static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1777{
1778	unsigned long flags;
1779
1780	/* Not using the ITS list? Everything is always mapped. */
1781	if (gic_requires_eager_mapping())
1782		return;
1783
1784	raw_spin_lock_irqsave(&vmovp_lock, flags);
1785
1786	if (!--vm->vlpi_count[its->list_nr]) {
1787		int i;
1788
1789		for (i = 0; i < vm->nr_vpes; i++)
1790			its_send_vmapp(its, vm->vpes[i], false);
1791	}
1792
1793	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1794}
1795
1796static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1797{
1798	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1799	u32 event = its_get_event_id(d);
1800	int ret = 0;
1801
1802	if (!info->map)
1803		return -EINVAL;
1804
1805	raw_spin_lock(&its_dev->event_map.vlpi_lock);
1806
1807	if (!its_dev->event_map.vm) {
1808		struct its_vlpi_map *maps;
1809
1810		maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
1811			       GFP_ATOMIC);
1812		if (!maps) {
1813			ret = -ENOMEM;
1814			goto out;
1815		}
1816
1817		its_dev->event_map.vm = info->map->vm;
1818		its_dev->event_map.vlpi_maps = maps;
1819	} else if (its_dev->event_map.vm != info->map->vm) {
1820		ret = -EINVAL;
1821		goto out;
1822	}
1823
1824	/* Get our private copy of the mapping information */
1825	its_dev->event_map.vlpi_maps[event] = *info->map;
1826
1827	if (irqd_is_forwarded_to_vcpu(d)) {
1828		/* Already mapped, move it around */
1829		its_send_vmovi(its_dev, event);
1830	} else {
1831		/* Ensure all the VPEs are mapped on this ITS */
1832		its_map_vm(its_dev->its, info->map->vm);
1833
1834		/*
1835		 * Flag the interrupt as forwarded so that we can
1836		 * start poking the virtual property table.
1837		 */
1838		irqd_set_forwarded_to_vcpu(d);
1839
1840		/* Write out the property to the prop table */
1841		lpi_write_config(d, 0xff, info->map->properties);
1842
1843		/* Drop the physical mapping */
1844		its_send_discard(its_dev, event);
1845
1846		/* and install the virtual one */
1847		its_send_vmapti(its_dev, event);
1848
1849		/* Increment the number of VLPIs */
1850		its_dev->event_map.nr_vlpis++;
1851	}
1852
1853out:
1854	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1855	return ret;
1856}
1857
1858static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1859{
1860	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1861	struct its_vlpi_map *map;
1862	int ret = 0;
1863
1864	raw_spin_lock(&its_dev->event_map.vlpi_lock);
1865
1866	map = get_vlpi_map(d);
1867
1868	if (!its_dev->event_map.vm || !map) {
1869		ret = -EINVAL;
1870		goto out;
1871	}
1872
1873	/* Copy our mapping information to the incoming request */
1874	*info->map = *map;
1875
1876out:
1877	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1878	return ret;
1879}
1880
1881static int its_vlpi_unmap(struct irq_data *d)
1882{
1883	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1884	u32 event = its_get_event_id(d);
1885	int ret = 0;
1886
1887	raw_spin_lock(&its_dev->event_map.vlpi_lock);
1888
1889	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1890		ret = -EINVAL;
1891		goto out;
1892	}
1893
1894	/* Drop the virtual mapping */
1895	its_send_discard(its_dev, event);
1896
1897	/* and restore the physical one */
1898	irqd_clr_forwarded_to_vcpu(d);
1899	its_send_mapti(its_dev, d->hwirq, event);
1900	lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1901				    LPI_PROP_ENABLED |
1902				    LPI_PROP_GROUP1));
1903
1904	/* Potentially unmap the VM from this ITS */
1905	its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1906
1907	/*
1908	 * Drop the refcount and make the device available again if
1909	 * this was the last VLPI.
1910	 */
1911	if (!--its_dev->event_map.nr_vlpis) {
1912		its_dev->event_map.vm = NULL;
1913		kfree(its_dev->event_map.vlpi_maps);
1914	}
1915
1916out:
1917	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1918	return ret;
1919}
1920
1921static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1922{
1923	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1924
1925	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1926		return -EINVAL;
1927
1928	if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1929		lpi_update_config(d, 0xff, info->config);
1930	else
1931		lpi_write_config(d, 0xff, info->config);
1932	its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1933
1934	return 0;
1935}
1936
1937static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1938{
1939	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1940	struct its_cmd_info *info = vcpu_info;
1941
1942	/* Need a v4 ITS */
1943	if (!is_v4(its_dev->its))
1944		return -EINVAL;
1945
1946	/* Unmap request? */
1947	if (!info)
1948		return its_vlpi_unmap(d);
1949
1950	switch (info->cmd_type) {
1951	case MAP_VLPI:
1952		return its_vlpi_map(d, info);
1953
1954	case GET_VLPI:
1955		return its_vlpi_get(d, info);
1956
1957	case PROP_UPDATE_VLPI:
1958	case PROP_UPDATE_AND_INV_VLPI:
1959		return its_vlpi_prop_update(d, info);
1960
1961	default:
1962		return -EINVAL;
1963	}
1964}
1965
1966static struct irq_chip its_irq_chip = {
1967	.name			= "ITS",
1968	.irq_mask		= its_mask_irq,
1969	.irq_unmask		= its_unmask_irq,
1970	.irq_eoi		= irq_chip_eoi_parent,
1971	.irq_set_affinity	= its_set_affinity,
1972	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
1973	.irq_set_irqchip_state	= its_irq_set_irqchip_state,
1974	.irq_set_vcpu_affinity	= its_irq_set_vcpu_affinity,
1975};
1976
1977
1978/*
1979 * How we allocate LPIs:
1980 *
 1981 * lpi_range_list contains ranges of LPIs that are available to
1982 * allocate from. To allocate LPIs, just pick the first range that
1983 * fits the required allocation, and reduce it by the required
1984 * amount. Once empty, remove the range from the list.
1985 *
1986 * To free a range of LPIs, add a free range to the list, sort it and
1987 * merge the result if the new range happens to be adjacent to an
1988 * already free block.
1989 *
 1990 * The consequence of the above is that allocation cost is low, but
 1991 * freeing is expensive. We assume that freeing rarely occurs.
1992 */
1993#define ITS_MAX_LPI_NRBITS	16 /* 64K LPIs */
1994
1995static DEFINE_MUTEX(lpi_range_lock);
1996static LIST_HEAD(lpi_range_list);
1997
1998struct lpi_range {
1999	struct list_head	entry;
2000	u32			base_id;
2001	u32			span;
2002};
2003
2004static struct lpi_range *mk_lpi_range(u32 base, u32 span)
2005{
2006	struct lpi_range *range;
2007
2008	range = kmalloc(sizeof(*range), GFP_KERNEL);
2009	if (range) {
2010		range->base_id = base;
2011		range->span = span;
2012	}
2013
2014	return range;
2015}
2016
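     /*
      * Carve nr_lpis interrupt IDs out of the first free range large
      * enough to hold them, dropping the range from the list once it is
      * exhausted.
      */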
2017static int alloc_lpi_range(u32 nr_lpis, u32 *base)
2018{
2019	struct lpi_range *range, *tmp;
2020	int err = -ENOSPC;
2021
2022	mutex_lock(&lpi_range_lock);
2023
2024	list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
2025		if (range->span >= nr_lpis) {
2026			*base = range->base_id;
2027			range->base_id += nr_lpis;
2028			range->span -= nr_lpis;
2029
2030			if (range->span == 0) {
2031				list_del(&range->entry);
2032				kfree(range);
2033			}
2034
2035			err = 0;
2036			break;
2037		}
2038	}
2039
2040	mutex_unlock(&lpi_range_lock);
2041
2042	pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
2043	return err;
2044}
2045
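     /* Fold range 'a' into 'b' when the two free ranges are contiguous. */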
2046static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
2047{
2048	if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
2049		return;
2050	if (a->base_id + a->span != b->base_id)
2051		return;
2052	b->base_id = a->base_id;
2053	b->span += a->span;
2054	list_del(&a->entry);
2055	kfree(a);
2056}
2057
2058static int free_lpi_range(u32 base, u32 nr_lpis)
2059{
2060	struct lpi_range *new, *old;
2061
2062	new = mk_lpi_range(base, nr_lpis);
2063	if (!new)
2064		return -ENOMEM;
2065
2066	mutex_lock(&lpi_range_lock);
2067
2068	list_for_each_entry_reverse(old, &lpi_range_list, entry) {
2069		if (old->base_id < base)
2070			break;
2071	}
2072	/*
2073	 * old is the last element with ->base_id smaller than base,
2074	 * so new goes right after it. If there are no elements with
2075	 * ->base_id smaller than base, &old->entry ends up pointing
 2076	 * at the head of the list, and inserting new at the start of
2077	 * the list is the right thing to do in that case as well.
2078	 */
2079	list_add(&new->entry, &old->entry);
2080	/*
2081	 * Now check if we can merge with the preceding and/or
2082	 * following ranges.
2083	 */
2084	merge_lpi_ranges(old, new);
2085	merge_lpi_ranges(new, list_next_entry(new, entry));
2086
2087	mutex_unlock(&lpi_range_lock);
2088	return 0;
2089}
2090
2091static int __init its_lpi_init(u32 id_bits)
2092{
2093	u32 lpis = (1UL << id_bits) - 8192;
2094	u32 numlpis;
2095	int err;
2096
2097	numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
2098
2099	if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
2100		lpis = numlpis;
2101		pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
2102			lpis);
2103	}
2104
2105	/*
2106	 * Initializing the allocator is just the same as freeing the
2107	 * full range of LPIs.
2108	 */
2109	err = free_lpi_range(8192, lpis);
2110	pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
2111	return err;
2112}
2113
2114static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
2115{
2116	unsigned long *bitmap = NULL;
2117	int err = 0;
2118
2119	do {
2120		err = alloc_lpi_range(nr_irqs, base);
2121		if (!err)
2122			break;
2123
2124		nr_irqs /= 2;
2125	} while (nr_irqs > 0);
2126
2127	if (!nr_irqs)
2128		err = -ENOSPC;
2129
2130	if (err)
2131		goto out;
2132
2133	bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
2134	if (!bitmap)
2135		goto out;
2136
2137	*nr_ids = nr_irqs;
2138
2139out:
2140	if (!bitmap)
2141		*base = *nr_ids = 0;
2142
2143	return bitmap;
2144}
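/*
 * Note on its_lpi_alloc(): if the full request cannot be satisfied,
 * nr_irqs is halved and the allocation retried until a range fits or
 * the count reaches zero. A caller asking for 32 LPIs on a fragmented
 * list may thus get only 16 or 8 back; the actual count is returned
 * through *nr_ids.
 */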
2145
2146static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
2147{
2148	WARN_ON(free_lpi_range(base, nr_ids));
2149	kfree(bitmap);
2150}
2151
2152static void gic_reset_prop_table(void *va)
2153{
2154	/* Priority 0xa0, Group-1, disabled */
2155	memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
2156
2157	/* Make sure the GIC will observe the written configuration */
2158	gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
2159}
2160
2161static struct page *its_allocate_prop_table(gfp_t gfp_flags)
2162{
2163	struct page *prop_page;
2164
2165	prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
2166	if (!prop_page)
2167		return NULL;
2168
2169	gic_reset_prop_table(page_address(prop_page));
2170
2171	return prop_page;
2172}
2173
2174static void its_free_prop_table(struct page *prop_page)
2175{
2176	free_pages((unsigned long)page_address(prop_page),
2177		   get_order(LPI_PROPBASE_SZ));
2178}
2179
2180static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
2181{
2182	phys_addr_t start, end, addr_end;
2183	u64 i;
2184
2185	/*
2186	 * We don't bother checking for a kdump kernel as by
2187	 * construction, the LPI tables are out of this kernel's
2188	 * memory map.
2189	 */
2190	if (is_kdump_kernel())
2191		return true;
2192
2193	addr_end = addr + size - 1;
2194
2195	for_each_reserved_mem_region(i, &start, &end) {
2196		if (addr >= start && addr_end <= end)
2197			return true;
2198	}
2199
2200	/* Not found, not a good sign... */
2201	pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
2202		&addr, &addr_end);
2203	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2204	return false;
2205}
2206
2207static int gic_reserve_range(phys_addr_t addr, unsigned long size)
2208{
2209	if (efi_enabled(EFI_CONFIG_TABLES))
2210		return efi_mem_reserve_persistent(addr, size);
2211
2212	return 0;
2213}
2214
2215static int __init its_setup_lpi_prop_table(void)
2216{
2217	if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
2218		u64 val;
2219
2220		val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2221		lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
2222
2223		gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
2224		gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
2225						     LPI_PROPBASE_SZ,
2226						     MEMREMAP_WB);
2227		gic_reset_prop_table(gic_rdists->prop_table_va);
2228	} else {
2229		struct page *page;
2230
2231		lpi_id_bits = min_t(u32,
2232				    GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
2233				    ITS_MAX_LPI_NRBITS);
2234		page = its_allocate_prop_table(GFP_NOWAIT);
2235		if (!page) {
2236			pr_err("Failed to allocate PROPBASE\n");
2237			return -ENOMEM;
2238		}
2239
2240		gic_rdists->prop_table_pa = page_to_phys(page);
2241		gic_rdists->prop_table_va = page_address(page);
2242		WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
2243					  LPI_PROPBASE_SZ));
2244	}
2245
2246	pr_info("GICv3: using LPI property table @%pa\n",
2247		&gic_rdists->prop_table_pa);
2248
2249	return its_lpi_init(lpi_id_bits);
2250}
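/*
 * In other words (assuming a standard EFI-based boot): if the RD
 * tables were flagged as pre-allocated, the PROPBASER left behind by
 * the previous kernel/firmware is memremapped and simply reset;
 * otherwise a fresh property table is allocated and registered as a
 * persistent EFI reservation so that a future kexec'd kernel can find
 * it again.
 */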
2251
2252static const char *its_base_type_string[] = {
2253	[GITS_BASER_TYPE_DEVICE]	= "Devices",
2254	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
2255	[GITS_BASER_TYPE_RESERVED3]	= "Reserved (3)",
2256	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
2257	[GITS_BASER_TYPE_RESERVED5] 	= "Reserved (5)",
2258	[GITS_BASER_TYPE_RESERVED6] 	= "Reserved (6)",
2259	[GITS_BASER_TYPE_RESERVED7] 	= "Reserved (7)",
2260};
2261
2262static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
2263{
2264	u32 idx = baser - its->tables;
2265
2266	return gits_read_baser(its->base + GITS_BASER + (idx << 3));
2267}
2268
2269static void its_write_baser(struct its_node *its, struct its_baser *baser,
2270			    u64 val)
2271{
2272	u32 idx = baser - its->tables;
2273
2274	gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
2275	baser->val = its_read_baser(its, baser);
2276}
2277
2278static int its_setup_baser(struct its_node *its, struct its_baser *baser,
2279			   u64 cache, u64 shr, u32 order, bool indirect)
2280{
2281	u64 val = its_read_baser(its, baser);
2282	u64 esz = GITS_BASER_ENTRY_SIZE(val);
2283	u64 type = GITS_BASER_TYPE(val);
2284	u64 baser_phys, tmp;
2285	u32 alloc_pages, psz;
2286	struct page *page;
2287	void *base;
2288
2289	psz = baser->psz;
2290	alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
2291	if (alloc_pages > GITS_BASER_PAGES_MAX) {
2292		pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
2293			&its->phys_base, its_base_type_string[type],
2294			alloc_pages, GITS_BASER_PAGES_MAX);
2295		alloc_pages = GITS_BASER_PAGES_MAX;
2296		order = get_order(GITS_BASER_PAGES_MAX * psz);
2297	}
2298
2299	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
2300	if (!page)
2301		return -ENOMEM;
2302
2303	base = (void *)page_address(page);
2304	baser_phys = virt_to_phys(base);
2305
2306	/* Check if the physical address of the memory is above 48bits */
2307	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
2308
2309		/* 52bit PA is supported only when PageSize=64K */
2310		if (psz != SZ_64K) {
2311			pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
2312			free_pages((unsigned long)base, order);
2313			return -ENXIO;
2314		}
2315
2316		/* Convert 52bit PA to 48bit field */
2317		baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
2318	}
2319
2320retry_baser:
2321	val = (baser_phys					 |
2322		(type << GITS_BASER_TYPE_SHIFT)			 |
2323		((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)	 |
2324		((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)	 |
2325		cache						 |
2326		shr						 |
2327		GITS_BASER_VALID);
2328
2329	val |=	indirect ? GITS_BASER_INDIRECT : 0x0;
2330
2331	switch (psz) {
2332	case SZ_4K:
2333		val |= GITS_BASER_PAGE_SIZE_4K;
2334		break;
2335	case SZ_16K:
2336		val |= GITS_BASER_PAGE_SIZE_16K;
2337		break;
2338	case SZ_64K:
2339		val |= GITS_BASER_PAGE_SIZE_64K;
2340		break;
2341	}
2342
2343	its_write_baser(its, baser, val);
2344	tmp = baser->val;
2345
2346	if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
2347		/*
2348		 * Shareability didn't stick. Just use
2349		 * whatever the read reported, which is likely
2350		 * to be the only thing this redistributor
2351		 * supports. If that's zero, make it
2352		 * non-cacheable as well.
2353		 */
2354		shr = tmp & GITS_BASER_SHAREABILITY_MASK;
2355		if (!shr) {
2356			cache = GITS_BASER_nC;
2357			gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
2358		}
2359		goto retry_baser;
2360	}
2361
2362	if (val != tmp) {
2363		pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
2364		       &its->phys_base, its_base_type_string[type],
2365		       val, tmp);
2366		free_pages((unsigned long)base, order);
2367		return -ENXIO;
2368	}
2369
2370	baser->order = order;
2371	baser->base = base;
2372	baser->psz = psz;
2373	tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
2374
2375	pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
2376		&its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
2377		its_base_type_string[type],
2378		(unsigned long)virt_to_phys(base),
2379		indirect ? "indirect" : "flat", (int)esz,
2380		psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
2381
2382	return 0;
2383}
2384
2385static bool its_parse_indirect_baser(struct its_node *its,
2386				     struct its_baser *baser,
2387				     u32 *order, u32 ids)
2388{
2389	u64 tmp = its_read_baser(its, baser);
2390	u64 type = GITS_BASER_TYPE(tmp);
2391	u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
2392	u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
2393	u32 new_order = *order;
2394	u32 psz = baser->psz;
2395	bool indirect = false;
2396
2397	/* No need to enable Indirection if memory requirement < (psz*2)bytes */
2398	if ((esz << ids) > (psz * 2)) {
2399		/*
2400		 * Find out whether hw supports a single or two-level table by
2401		 * reading the bit at offset '62' after writing '1' to it.
2402		 */
2403		its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
2404		indirect = !!(baser->val & GITS_BASER_INDIRECT);
2405
2406		if (indirect) {
2407			/*
2408			 * The size of a lvl2 table is equal to the ITS page size,
2409			 * 'psz'. To compute the lvl1 table size, subtract from
2410			 * 'ids' (the ID width reported by the ITS hardware) the
2411			 * ID bits covered by a single lvl2 page, and account for
2412			 * each remaining ID with one lvl1 entry.
2413			 */
2414			ids -= ilog2(psz / (int)esz);
2415			esz = GITS_LVL1_ENTRY_SIZE;
2416		}
2417	}
2418
2419	/*
2420	 * Allocate as many entries as required to fit the
2421	 * range of device IDs that the ITS can grok... The ID
2422	 * space being incredibly sparse, this results in a
2423	 * massive waste of memory if the two-level device table
2424	 * feature is not supported by the hardware.
2425	 */
2426	new_order = max_t(u32, get_order(esz << ids), new_order);
2427	if (new_order >= MAX_ORDER) {
2428		new_order = MAX_ORDER - 1;
2429		ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
2430		pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
2431			&its->phys_base, its_base_type_string[type],
2432			device_ids(its), ids);
2433	}
2434
2435	*order = new_order;
2436
2437	return indirect;
2438}
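/*
 * Worked example (hypothetical numbers): with psz = 64K, esz = 8 bytes
 * and ids = 20, a flat table would need 8 << 20 = 8MB, so indirection
 * is attempted. Each 64K lvl2 page then covers 64K / 8 = 8192 IDs
 * (13 bits), leaving 20 - 13 = 7 bits for the lvl1 table, i.e. 128
 * entries of GITS_LVL1_ENTRY_SIZE (8 bytes) = 1KB.
 */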
2439
2440static u32 compute_common_aff(u64 val)
2441{
2442	u32 aff, clpiaff;
2443
2444	aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
2445	clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);
2446
2447	return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
2448}
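/*
 * CommonLPIAff describes the affinity level at which redistributors
 * share their LPI/vPE configuration. The mask above keeps the matching
 * upper bytes of the affinity: 0 keeps nothing (all RDs match), 1
 * keeps Aff3, 2 keeps Aff3.Aff2, and 3 keeps Aff3.Aff2.Aff1.
 */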
2449
2450static u32 compute_its_aff(struct its_node *its)
2451{
2452	u64 val;
2453	u32 svpet;
2454
2455	/*
2456	 * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
2457	 * the resulting affinity. We then use that to see if this matches
2458	 * our own affinity.
2459	 */
2460	svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
2461	val  = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
2462	val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
2463	return compute_common_aff(val);
2464}
2465
2466static struct its_node *find_sibling_its(struct its_node *cur_its)
2467{
2468	struct its_node *its;
2469	u32 aff;
2470
2471	if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
2472		return NULL;
2473
2474	aff = compute_its_aff(cur_its);
2475
2476	list_for_each_entry(its, &its_nodes, entry) {
2477		u64 baser;
2478
2479		if (!is_v4_1(its) || its == cur_its)
2480			continue;
2481
2482		if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2483			continue;
2484
2485		if (aff != compute_its_aff(its))
2486			continue;
2487
2488		/* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2489		baser = its->tables[2].val;
2490		if (!(baser & GITS_BASER_VALID))
2491			continue;
2492
2493		return its;
2494	}
2495
2496	return NULL;
2497}
2498
2499static void its_free_tables(struct its_node *its)
2500{
2501	int i;
2502
2503	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2504		if (its->tables[i].base) {
2505			free_pages((unsigned long)its->tables[i].base,
2506				   its->tables[i].order);
2507			its->tables[i].base = NULL;
2508		}
2509	}
2510}
2511
2512static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
2513{
2514	u64 psz = SZ_64K;
2515
2516	while (psz) {
2517		u64 val, gpsz;
2518
2519		val = its_read_baser(its, baser);
2520		val &= ~GITS_BASER_PAGE_SIZE_MASK;
2521
2522		switch (psz) {
2523		case SZ_64K:
2524			gpsz = GITS_BASER_PAGE_SIZE_64K;
2525			break;
2526		case SZ_16K:
2527			gpsz = GITS_BASER_PAGE_SIZE_16K;
2528			break;
2529		case SZ_4K:
2530		default:
2531			gpsz = GITS_BASER_PAGE_SIZE_4K;
2532			break;
2533		}
2534
2535		gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT;
2536
2537		val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz);
2538		its_write_baser(its, baser, val);
2539
2540		if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz)
2541			break;
2542
2543		switch (psz) {
2544		case SZ_64K:
2545			psz = SZ_16K;
2546			break;
2547		case SZ_16K:
2548			psz = SZ_4K;
2549			break;
2550		case SZ_4K:
2551		default:
2552			return -1;
2553		}
2554	}
2555
2556	baser->psz = psz;
2557	return 0;
2558}
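/*
 * The probe above simply tries 64K, then 16K, then 4K ITS pages and
 * keeps the first size that reads back unchanged from GITS_BASERn;
 * -1 is only returned if even 4K pages are rejected.
 */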
2559
2560static int its_alloc_tables(struct its_node *its)
2561{
2562	u64 shr = GITS_BASER_InnerShareable;
2563	u64 cache = GITS_BASER_RaWaWb;
2564	int err, i;
2565
2566	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
2567		/* erratum 24313: ignore memory access type */
2568		cache = GITS_BASER_nCnB;
2569
2570	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2571		struct its_baser *baser = its->tables + i;
2572		u64 val = its_read_baser(its, baser);
2573		u64 type = GITS_BASER_TYPE(val);
2574		bool indirect = false;
2575		u32 order;
2576
2577		if (type == GITS_BASER_TYPE_NONE)
2578			continue;
2579
2580		if (its_probe_baser_psz(its, baser)) {
2581			its_free_tables(its);
2582			return -ENXIO;
2583		}
2584
2585		order = get_order(baser->psz);
2586
2587		switch (type) {
2588		case GITS_BASER_TYPE_DEVICE:
2589			indirect = its_parse_indirect_baser(its, baser, &order,
2590							    device_ids(its));
2591			break;
2592
2593		case GITS_BASER_TYPE_VCPU:
2594			if (is_v4_1(its)) {
2595				struct its_node *sibling;
2596
2597				WARN_ON(i != 2);
2598				if ((sibling = find_sibling_its(its))) {
2599					*baser = sibling->tables[2];
2600					its_write_baser(its, baser, baser->val);
2601					continue;
2602				}
2603			}
2604
2605			indirect = its_parse_indirect_baser(its, baser, &order,
2606							    ITS_MAX_VPEID_BITS);
2607			break;
2608		}
2609
2610		err = its_setup_baser(its, baser, cache, shr, order, indirect);
2611		if (err < 0) {
2612			its_free_tables(its);
2613			return err;
2614		}
2615
2616		/* Update settings which will be used for next BASERn */
2617		cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
2618		shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
2619	}
2620
2621	return 0;
2622}
2623
2624static u64 inherit_vpe_l1_table_from_its(void)
2625{
2626	struct its_node *its;
2627	u64 val;
2628	u32 aff;
2629
2630	val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2631	aff = compute_common_aff(val);
2632
2633	list_for_each_entry(its, &its_nodes, entry) {
2634		u64 baser, addr;
2635
2636		if (!is_v4_1(its))
2637			continue;
2638
2639		if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2640			continue;
2641
2642		if (aff != compute_its_aff(its))
2643			continue;
2644
2645		/* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2646		baser = its->tables[2].val;
2647		if (!(baser & GITS_BASER_VALID))
2648			continue;
2649
2650		/* We have a winner! */
2651		gic_data_rdist()->vpe_l1_base = its->tables[2].base;
2652
2653		val  = GICR_VPROPBASER_4_1_VALID;
2654		if (baser & GITS_BASER_INDIRECT)
2655			val |= GICR_VPROPBASER_4_1_INDIRECT;
2656		val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE,
2657				  FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser));
2658		switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) {
2659		case GIC_PAGE_SIZE_64K:
2660			addr = GITS_BASER_ADDR_48_to_52(baser);
2661			break;
2662		default:
2663			addr = baser & GENMASK_ULL(47, 12);
2664			break;
2665		}
2666		val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
2667		val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
2668				  FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
2669		val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
2670				  FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
2671		val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
2672
2673		return val;
2674	}
2675
2676	return 0;
2677}
2678
2679static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
2680{
2681	u32 aff;
2682	u64 val;
2683	int cpu;
2684
2685	val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2686	aff = compute_common_aff(val);
2687
2688	for_each_possible_cpu(cpu) {
2689		void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2690
2691		if (!base || cpu == smp_processor_id())
2692			continue;
2693
2694		val = gic_read_typer(base + GICR_TYPER);
2695		if (aff != compute_common_aff(val))
2696			continue;
2697
2698		/*
2699		 * At this point, we have a victim. This particular CPU
2700		 * has already booted, and has an affinity that matches
2701		 * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
2702		 * Make sure we don't write the Z bit in that case.
2703		 */
2704		val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2705		val &= ~GICR_VPROPBASER_4_1_Z;
2706
2707		gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2708		*mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
2709
2710		return val;
2711	}
2712
2713	return 0;
2714}
2715
2716static bool allocate_vpe_l2_table(int cpu, u32 id)
2717{
2718	void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2719	unsigned int psz, esz, idx, npg, gpsz;
2720	u64 val;
2721	struct page *page;
2722	__le64 *table;
2723
2724	if (!gic_rdists->has_rvpeid)
2725		return true;
2726
2727	/* Skip non-present CPUs */
2728	if (!base)
2729		return true;
2730
2731	val  = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2732
2733	esz  = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
2734	gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2735	npg  = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1;
2736
2737	switch (gpsz) {
2738	default:
2739		WARN_ON(1);
2740		fallthrough;
2741	case GIC_PAGE_SIZE_4K:
2742		psz = SZ_4K;
2743		break;
2744	case GIC_PAGE_SIZE_16K:
2745		psz = SZ_16K;
2746		break;
2747	case GIC_PAGE_SIZE_64K:
2748		psz = SZ_64K;
2749		break;
2750	}
2751
2752	/* Don't allow vpe_id that exceeds single, flat table limit */
2753	if (!(val & GICR_VPROPBASER_4_1_INDIRECT))
2754		return (id < (npg * psz / (esz * SZ_8)));
2755
2756	/* Compute 1st level table index & check if that exceeds table limit */
2757	idx = id >> ilog2(psz / (esz * SZ_8));
2758	if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE))
2759		return false;
2760
2761	table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2762
2763	/* Allocate memory for 2nd level table */
2764	if (!table[idx]) {
2765		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
2766		if (!page)
2767			return false;
2768
2769		/* Flush Lvl2 table to PoC if hw doesn't support coherency */
2770		if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2771			gic_flush_dcache_to_poc(page_address(page), psz);
2772
2773		table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2774
2775		/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2776		if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2777			gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
2778
2779		/* Ensure updated table contents are visible to RD hardware */
2780		dsb(sy);
2781	}
2782
2783	return true;
2784}
2785
2786static int allocate_vpe_l1_table(void)
2787{
2788	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2789	u64 val, gpsz, npg, pa;
2790	unsigned int psz = SZ_64K;
2791	unsigned int np, epp, esz;
2792	struct page *page;
2793
2794	if (!gic_rdists->has_rvpeid)
2795		return 0;
2796
2797	/*
2798	 * if VPENDBASER.Valid is set, disable any previously programmed
2799	 * VPE by setting PendingLast while clearing Valid. This has the
2800	 * effect of making sure no doorbell will be generated and we can
2801	 * then safely clear VPROPBASER.Valid.
2802	 */
2803	if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
2804		gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
2805				      vlpi_base + GICR_VPENDBASER);
2806
2807	/*
2808	 * If we can inherit the configuration from another RD, let's do
2809	 * so. Otherwise, we have to go through the allocation process. We
2810	 * assume that all RDs have the exact same requirements, as
2811	 * nothing will work otherwise.
2812	 */
2813	val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
2814	if (val & GICR_VPROPBASER_4_1_VALID)
2815		goto out;
2816
2817	gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC);
2818	if (!gic_data_rdist()->vpe_table_mask)
2819		return -ENOMEM;
2820
2821	val = inherit_vpe_l1_table_from_its();
2822	if (val & GICR_VPROPBASER_4_1_VALID)
2823		goto out;
2824
2825	/* First probe the page size */
2826	val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
2827	gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2828	val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
2829	gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2830	esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
2831
2832	switch (gpsz) {
2833	default:
2834		gpsz = GIC_PAGE_SIZE_4K;
2835		fallthrough;
2836	case GIC_PAGE_SIZE_4K:
2837		psz = SZ_4K;
2838		break;
2839	case GIC_PAGE_SIZE_16K:
2840		psz = SZ_16K;
2841		break;
2842	case GIC_PAGE_SIZE_64K:
2843		psz = SZ_64K;
2844		break;
2845	}
2846
2847	/*
2848	 * Start populating the register from scratch, including RO fields
2849	 * (which we want to print in debug cases...)
2850	 */
2851	val = 0;
2852	val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz);
2853	val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz);
2854
2855	/* How many entries per GIC page? */
2856	esz++;
2857	epp = psz / (esz * SZ_8);
2858
2859	/*
2860	 * If we need more than just a single L1 page, flag the table
2861	 * as indirect and compute the number of required L1 pages.
2862	 */
2863	if (epp < ITS_MAX_VPEID) {
2864		int nl2;
2865
2866		val |= GICR_VPROPBASER_4_1_INDIRECT;
2867
2868		/* Number of L2 pages required to cover the VPEID space */
2869		nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp);
2870
2871		/* Number of L1 pages to point to the L2 pages */
2872		npg = DIV_ROUND_UP(nl2 * SZ_8, psz);
2873	} else {
2874		npg = 1;
2875	}
2876
2877	val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
2878
2879	/* Right, that's the number of CPU pages we need for L1 */
2880	np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
2881
2882	pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
2883		 np, npg, psz, epp, esz);
2884	page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
2885	if (!page)
2886		return -ENOMEM;
2887
2888	gic_data_rdist()->vpe_l1_base = page_address(page);
2889	pa = virt_to_phys(page_address(page));
2890	WARN_ON(!IS_ALIGNED(pa, psz));
2891
2892	val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
2893	val |= GICR_VPROPBASER_RaWb;
2894	val |= GICR_VPROPBASER_InnerShareable;
2895	val |= GICR_VPROPBASER_4_1_Z;
2896	val |= GICR_VPROPBASER_4_1_VALID;
2897
2898out:
2899	gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2900	cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
2901
2902	pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
2903		 smp_processor_id(), val,
2904		 cpumask_pr_args(gic_data_rdist()->vpe_table_mask));
2905
2906	return 0;
2907}
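/*
 * Worked example (hypothetical numbers): with 4K GIC pages and an
 * entry size field of 1 (esz = 2, i.e. 16 bytes per vPE), epp is
 * 4096 / 16 = 256 entries per page. That is less than ITS_MAX_VPEID
 * (64K vPEs), so the table goes indirect: nl2 = 65536 / 256 = 256 L2
 * pages, whose pointers need 256 * 8 = 2KB of L1, i.e. a single L1
 * page (npg = 1).
 */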
2908
2909static int its_alloc_collections(struct its_node *its)
2910{
2911	int i;
2912
2913	its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
2914				   GFP_KERNEL);
2915	if (!its->collections)
2916		return -ENOMEM;
2917
2918	for (i = 0; i < nr_cpu_ids; i++)
2919		its->collections[i].target_address = ~0ULL;
2920
2921	return 0;
2922}
2923
2924static struct page *its_allocate_pending_table(gfp_t gfp_flags)
2925{
2926	struct page *pend_page;
2927
2928	pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
2929				get_order(LPI_PENDBASE_SZ));
2930	if (!pend_page)
2931		return NULL;
2932
2933	/* Make sure the GIC will observe the zero-ed page */
2934	gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
2935
2936	return pend_page;
2937}
2938
2939static void its_free_pending_table(struct page *pt)
2940{
2941	free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
2942}
2943
2944/*
2945 * Booting with kdump and LPIs enabled is generally fine. Any other
2946 * case is wrong in the absence of firmware/EFI support.
2947 */
2948static bool enabled_lpis_allowed(void)
2949{
2950	phys_addr_t addr;
2951	u64 val;
2952
2953	/* Check whether the property table is in a reserved region */
2954	val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2955	addr = val & GENMASK_ULL(51, 12);
2956
2957	return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
2958}
2959
2960static int __init allocate_lpi_tables(void)
2961{
2962	u64 val;
2963	int err, cpu;
2964
2965	/*
2966	 * If LPIs are enabled while we run this from the boot CPU,
2967	 * flag the RD tables as pre-allocated if the stars do align.
2968	 */
2969	val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
2970	if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
2971		gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
2972				      RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
2973		pr_info("GICv3: Using preallocated redistributor tables\n");
2974	}
2975
2976	err = its_setup_lpi_prop_table();
2977	if (err)
2978		return err;
2979
2980	/*
2981	 * We allocate all the pending tables anyway, as we may have a
2982	 * mix of RDs that have had LPIs enabled, and some that
2983	 * don't. We'll free the unused ones as each CPU comes online.
2984	 */
2985	for_each_possible_cpu(cpu) {
2986		struct page *pend_page;
2987
2988		pend_page = its_allocate_pending_table(GFP_NOWAIT);
2989		if (!pend_page) {
2990			pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
2991			return -ENOMEM;
2992		}
2993
2994		gic_data_rdist_cpu(cpu)->pend_page = pend_page;
2995	}
2996
2997	return 0;
2998}
2999
3000static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
3001{
3002	u32 count = 1000000;	/* 1s! */
3003	bool clean;
3004	u64 val;
3005
3006	val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
3007	val &= ~GICR_VPENDBASER_Valid;
3008	val &= ~clr;
3009	val |= set;
3010	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3011
3012	do {
3013		val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
3014		clean = !(val & GICR_VPENDBASER_Dirty);
3015		if (!clean) {
3016			count--;
3017			cpu_relax();
3018			udelay(1);
3019		}
3020	} while (!clean && count);
3021
3022	if (unlikely(val & GICR_VPENDBASER_Dirty)) {
3023		pr_err_ratelimited("ITS virtual pending table not cleaning\n");
3024		val |= GICR_VPENDBASER_PendingLast;
3025	}
3026
3027	return val;
3028}
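/*
 * The poll above spins for up to a million 1us iterations (roughly one
 * second) waiting for GICR_VPENDBASER.Dirty to clear. If it never
 * does, PendingLast is forced so that the vPE is conservatively
 * treated as still having interrupts pending.
 */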
3029
3030static void its_cpu_init_lpis(void)
3031{
3032	void __iomem *rbase = gic_data_rdist_rd_base();
3033	struct page *pend_page;
3034	phys_addr_t paddr;
3035	u64 val, tmp;
3036
3037	if (gic_data_rdist()->lpi_enabled)
3038		return;
3039
3040	val = readl_relaxed(rbase + GICR_CTLR);
3041	if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
3042	    (val & GICR_CTLR_ENABLE_LPIS)) {
3043		/*
3044		 * Check that we get the same property table on all
3045		 * RDs. If we don't, this is hopeless.
3046		 */
3047		paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
3048		paddr &= GENMASK_ULL(51, 12);
3049		if (WARN_ON(gic_rdists->prop_table_pa != paddr))
3050			add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
3051
3052		paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3053		paddr &= GENMASK_ULL(51, 16);
3054
3055		WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
3056		its_free_pending_table(gic_data_rdist()->pend_page);
3057		gic_data_rdist()->pend_page = NULL;
3058
3059		goto out;
3060	}
3061
3062	pend_page = gic_data_rdist()->pend_page;
3063	paddr = page_to_phys(pend_page);
3064	WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
3065
3066	/* set PROPBASE */
3067	val = (gic_rdists->prop_table_pa |
3068	       GICR_PROPBASER_InnerShareable |
3069	       GICR_PROPBASER_RaWaWb |
3070	       ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
3071
3072	gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3073	tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
3074
3075	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
3076		if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
3077			/*
3078			 * The HW reports non-shareable, we must
3079			 * remove the cacheability attributes as
3080			 * well.
3081			 */
3082			val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
3083				 GICR_PROPBASER_CACHEABILITY_MASK);
3084			val |= GICR_PROPBASER_nC;
3085			gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3086		}
3087		pr_info_once("GIC: using cache flushing for LPI property table\n");
3088		gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
3089	}
3090
3091	/* set PENDBASE */
3092	val = (page_to_phys(pend_page) |
3093	       GICR_PENDBASER_InnerShareable |
3094	       GICR_PENDBASER_RaWaWb);
3095
3096	gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3097	tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3098
3099	if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
3100		/*
3101		 * The HW reports non-shareable, we must remove the
3102		 * cacheability attributes as well.
3103		 */
3104		val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
3105			 GICR_PENDBASER_CACHEABILITY_MASK);
3106		val |= GICR_PENDBASER_nC;
3107		gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3108	}
3109
3110	/* Enable LPIs */
3111	val = readl_relaxed(rbase + GICR_CTLR);
3112	val |= GICR_CTLR_ENABLE_LPIS;
3113	writel_relaxed(val, rbase + GICR_CTLR);
3114
3115	if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
3116		void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3117
3118		/*
3119		 * It's possible for a CPU to receive VLPIs before it is
3120		 * scheduled as a vPE, especially for the first CPU, and any
3121		 * VLPI with an INTID larger than 2^(IDbits+1) will be considered
3122		 * out of range and dropped by the GIC.
3123		 * So we initialize IDbits to a known value to avoid VLPI drops.
3124		 */
3125		val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3126		pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
3127			smp_processor_id(), val);
3128		gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3129
3130		/*
3131		 * Also clear the Valid bit of GICR_VPENDBASER, in case some
3132		 * stale programming was left behind and could end up
3133		 * corrupting memory.
3134		 */
3135		val = its_clear_vpend_valid(vlpi_base, 0, 0);
3136	}
3137
3138	if (allocate_vpe_l1_table()) {
3139		/*
3140		 * If the allocation has failed, we're in massive trouble.
3141		 * Disable direct injection, and pray that no VM was
3142		 * already running...
3143		 */
3144		gic_rdists->has_rvpeid = false;
3145		gic_rdists->has_vlpis = false;
3146	}
3147
3148	/* Make sure the GIC has seen the above */
3149	dsb(sy);
3150out:
3151	gic_data_rdist()->lpi_enabled = true;
3152	pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
3153		smp_processor_id(),
3154		gic_data_rdist()->pend_page ? "allocated" : "reserved",
3155		&paddr);
3156}
3157
3158static void its_cpu_init_collection(struct its_node *its)
3159{
3160	int cpu = smp_processor_id();
3161	u64 target;
3162
3163	/* avoid cross-node collections and their mapping */
3164	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
3165		struct device_node *cpu_node;
3166
3167		cpu_node = of_get_cpu_node(cpu, NULL);
3168		if (its->numa_node != NUMA_NO_NODE &&
3169			its->numa_node != of_node_to_nid(cpu_node))
3170			return;
3171	}
3172
3173	/*
3174	 * We now have to bind each collection to its target
3175	 * redistributor.
3176	 */
3177	if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
3178		/*
3179		 * This ITS wants the physical address of the
3180		 * redistributor.
3181		 */
3182		target = gic_data_rdist()->phys_base;
3183	} else {
3184		/* This ITS wants a linear CPU number. */
3185		target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
3186		target = GICR_TYPER_CPU_NUMBER(target) << 16;
3187	}
3188
3189	/* Perform collection mapping */
3190	its->collections[cpu].target_address = target;
3191	its->collections[cpu].col_id = cpu;
3192
3193	its_send_mapc(its, &its->collections[cpu], 1);
3194	its_send_invall(its, &its->collections[cpu]);
3195}
3196
3197static void its_cpu_init_collections(void)
3198{
3199	struct its_node *its;
3200
3201	raw_spin_lock(&its_lock);
3202
3203	list_for_each_entry(its, &its_nodes, entry)
3204		its_cpu_init_collection(its);
3205
3206	raw_spin_unlock(&its_lock);
3207}
3208
3209static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
3210{
3211	struct its_device *its_dev = NULL, *tmp;
3212	unsigned long flags;
3213
3214	raw_spin_lock_irqsave(&its->lock, flags);
3215
3216	list_for_each_entry(tmp, &its->its_device_list, entry) {
3217		if (tmp->device_id == dev_id) {
3218			its_dev = tmp;
3219			break;
3220		}
3221	}
3222
3223	raw_spin_unlock_irqrestore(&its->lock, flags);
3224
3225	return its_dev;
3226}
3227
3228static struct its_baser *its_get_baser(struct its_node *its, u32 type)
3229{
3230	int i;
3231
3232	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3233		if (GITS_BASER_TYPE(its->tables[i].val) == type)
3234			return &its->tables[i];
3235	}
3236
3237	return NULL;
3238}
3239
3240static bool its_alloc_table_entry(struct its_node *its,
3241				  struct its_baser *baser, u32 id)
3242{
3243	struct page *page;
3244	u32 esz, idx;
3245	__le64 *table;
3246
3247	/* Don't allow device id that exceeds single, flat table limit */
3248	esz = GITS_BASER_ENTRY_SIZE(baser->val);
3249	if (!(baser->val & GITS_BASER_INDIRECT))
3250		return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
3251
3252	/* Compute 1st level table index & check if that exceeds table limit */
3253	idx = id >> ilog2(baser->psz / esz);
3254	if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
3255		return false;
3256
3257	table = baser->base;
3258
3259	/* Allocate memory for 2nd level table */
3260	if (!table[idx]) {
3261		page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
3262					get_order(baser->psz));
3263		if (!page)
3264			return false;
3265
3266		/* Flush Lvl2 table to PoC if hw doesn't support coherency */
3267		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3268			gic_flush_dcache_to_poc(page_address(page), baser->psz);
3269
3270		table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
3271
3272		/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
3273		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3274			gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
3275
3276		/* Ensure updated table contents are visible to ITS hardware */
3277		dsb(sy);
3278	}
3279
3280	return true;
3281}
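/*
 * Worked example (hypothetical numbers): with 64K ITS pages and an
 * 8-byte entry size, each lvl2 page maps 64K / 8 = 8192 IDs. A
 * DeviceID of 0x12345 therefore lands in lvl1 slot 0x12345 >> 13 = 9,
 * and a 64K lvl2 page is allocated the first time that slot is used.
 */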
3282
3283static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
3284{
3285	struct its_baser *baser;
3286
3287	baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
3288
3289	/* Don't allow device id that exceeds ITS hardware limit */
3290	if (!baser)
3291		return (ilog2(dev_id) < device_ids(its));
3292
3293	return its_alloc_table_entry(its, baser, dev_id);
3294}
3295
3296static bool its_alloc_vpe_table(u32 vpe_id)
3297{
3298	struct its_node *its;
3299	int cpu;
3300
3301	/*
3302	 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
3303	 * could try and only do it on ITSs corresponding to devices
3304	 * that have interrupts targeted at this VPE, but the
3305	 * complexity becomes crazy (and you have tons of memory
3306	 * anyway, right?).
3307	 */
3308	list_for_each_entry(its, &its_nodes, entry) {
3309		struct its_baser *baser;
3310
3311		if (!is_v4(its))
3312			continue;
3313
3314		baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
3315		if (!baser)
3316			return false;
3317
3318		if (!its_alloc_table_entry(its, baser, vpe_id))
3319			return false;
3320	}
3321
3322	/* Non v4.1? No need to iterate the RDs, bail out early. */
3323	if (!gic_rdists->has_rvpeid)
3324		return true;
3325
3326	/*
3327	 * Make sure the L2 tables are allocated for all copies of
3328	 * the L1 table on *all* v4.1 RDs.
3329	 */
3330	for_each_possible_cpu(cpu) {
3331		if (!allocate_vpe_l2_table(cpu, vpe_id))
3332			return false;
3333	}
3334
3335	return true;
3336}
3337
3338static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
3339					    int nvecs, bool alloc_lpis)
3340{
3341	struct its_device *dev;
3342	unsigned long *lpi_map = NULL;
3343	unsigned long flags;
3344	u16 *col_map = NULL;
3345	void *itt;
3346	int lpi_base;
3347	int nr_lpis;
3348	int nr_ites;
3349	int sz;
3350
3351	if (!its_alloc_device_table(its, dev_id))
3352		return NULL;
3353
3354	if (WARN_ON(!is_power_of_2(nvecs)))
3355		nvecs = roundup_pow_of_two(nvecs);
3356
3357	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3358	/*
3359	 * Even if the device wants a single LPI, the ITT must be
3360	 * sized as a power of two (and you need at least one bit...).
3361	 */
3362	nr_ites = max(2, nvecs);
3363	sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
3364	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
3365	itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
3366	if (alloc_lpis) {
3367		lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
3368		if (lpi_map)
3369			col_map = kcalloc(nr_lpis, sizeof(*col_map),
3370					  GFP_KERNEL);
3371	} else {
3372		col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
3373		nr_lpis = 0;
3374		lpi_base = 0;
3375	}
3376
3377	if (!dev || !itt ||  !col_map || (!lpi_map && alloc_lpis)) {
3378		kfree(dev);
3379		kfree(itt);
3380		kfree(lpi_map);
3381		kfree(col_map);
3382		return NULL;
3383	}
3384
3385	gic_flush_dcache_to_poc(itt, sz);
3386
3387	dev->its = its;
3388	dev->itt = itt;
3389	dev->nr_ites = nr_ites;
3390	dev->event_map.lpi_map = lpi_map;
3391	dev->event_map.col_map = col_map;
3392	dev->event_map.lpi_base = lpi_base;
3393	dev->event_map.nr_lpis = nr_lpis;
3394	raw_spin_lock_init(&dev->event_map.vlpi_lock);
3395	dev->device_id = dev_id;
3396	INIT_LIST_HEAD(&dev->entry);
3397
3398	raw_spin_lock_irqsave(&its->lock, flags);
3399	list_add(&dev->entry, &its->its_device_list);
3400	raw_spin_unlock_irqrestore(&its->lock, flags);
3401
3402	/* Map device to its ITT */
3403	its_send_mapd(dev, 1);
3404
3405	return dev;
3406}
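/*
 * ITT sizing example (hypothetical numbers): with an ITT entry size
 * field of 7 (8 bytes per ITE) and nvecs = 32, nr_ites = 32 and the
 * raw ITT is 256 bytes. The extra ITS_ITT_ALIGN - 1 bytes allocated
 * above allow the MAPD command to round the ITT address up to a
 * 256-byte boundary while staying inside the allocation.
 */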
3407
3408static void its_free_device(struct its_device *its_dev)
3409{
3410	unsigned long flags;
3411
3412	raw_spin_lock_irqsave(&its_dev->its->lock, flags);
3413	list_del(&its_dev->entry);
3414	raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
3415	kfree(its_dev->event_map.col_map);
3416	kfree(its_dev->itt);
3417	kfree(its_dev);
3418}
3419
3420static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
3421{
3422	int idx;
3423
3424	/* Find a free LPI region in lpi_map and allocate it. */
3425	idx = bitmap_find_free_region(dev->event_map.lpi_map,
3426				      dev->event_map.nr_lpis,
3427				      get_count_order(nvecs));
3428	if (idx < 0)
3429		return -ENOSPC;
3430
3431	*hwirq = dev->event_map.lpi_base + idx;
3432
3433	return 0;
3434}
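/*
 * Note that bitmap_find_free_region() works on naturally aligned,
 * power-of-2 sized regions: a request for e.g. 3 vectors consumes an
 * aligned block of 4 events in the device's event map.
 */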
3435
3436static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
3437			   int nvec, msi_alloc_info_t *info)
3438{
3439	struct its_node *its;
3440	struct its_device *its_dev;
3441	struct msi_domain_info *msi_info;
3442	u32 dev_id;
3443	int err = 0;
3444
3445	/*
3446	 * We ignore "dev" entirely, and rely on the dev_id that has
3447	 * been passed via the scratchpad. This limits this domain's
3448	 * usefulness to upper layers that definitely know that they
3449	 * are built on top of the ITS.
3450	 */
3451	dev_id = info->scratchpad[0].ul;
3452
3453	msi_info = msi_get_domain_info(domain);
3454	its = msi_info->data;
3455
3456	if (!gic_rdists->has_direct_lpi &&
3457	    vpe_proxy.dev &&
3458	    vpe_proxy.dev->its == its &&
3459	    dev_id == vpe_proxy.dev->device_id) {
3460		/* Bad luck. Get yourself a better implementation */
3461		WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
3462			  dev_id);
3463		return -EINVAL;
3464	}
3465
3466	mutex_lock(&its->dev_alloc_lock);
3467	its_dev = its_find_device(its, dev_id);
3468	if (its_dev) {
3469		/*
3470		 * We already have seen this ID, probably through
3471		 * another alias (PCI bridge of some sort). No need to
3472		 * create the device.
3473		 */
3474		its_dev->shared = true;
3475		pr_debug("Reusing ITT for devID %x\n", dev_id);
3476		goto out;
3477	}
3478
3479	its_dev = its_create_device(its, dev_id, nvec, true);
3480	if (!its_dev) {
3481		err = -ENOMEM;
3482		goto out;
3483	}
3484
3485	pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
3486out:
3487	mutex_unlock(&its->dev_alloc_lock);
3488	info->scratchpad[0].ptr = its_dev;
3489	return err;
3490}
3491
3492static struct msi_domain_ops its_msi_domain_ops = {
3493	.msi_prepare	= its_msi_prepare,
3494};
3495
3496static int its_irq_gic_domain_alloc(struct irq_domain *domain,
3497				    unsigned int virq,
3498				    irq_hw_number_t hwirq)
3499{
3500	struct irq_fwspec fwspec;
3501
3502	if (irq_domain_get_of_node(domain->parent)) {
3503		fwspec.fwnode = domain->parent->fwnode;
3504		fwspec.param_count = 3;
3505		fwspec.param[0] = GIC_IRQ_TYPE_LPI;
3506		fwspec.param[1] = hwirq;
3507		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
3508	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
3509		fwspec.fwnode = domain->parent->fwnode;
3510		fwspec.param_count = 2;
3511		fwspec.param[0] = hwirq;
3512		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
3513	} else {
3514		return -EINVAL;
3515	}
3516
3517	return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
3518}
3519
3520static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3521				unsigned int nr_irqs, void *args)
3522{
3523	msi_alloc_info_t *info = args;
3524	struct its_device *its_dev = info->scratchpad[0].ptr;
3525	struct its_node *its = its_dev->its;
3526	struct irq_data *irqd;
3527	irq_hw_number_t hwirq;
3528	int err;
3529	int i;
3530
3531	err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
3532	if (err)
3533		return err;
3534
3535	err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
3536	if (err)
3537		return err;
3538
3539	for (i = 0; i < nr_irqs; i++) {
3540		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
3541		if (err)
3542			return err;
3543
3544		irq_domain_set_hwirq_and_chip(domain, virq + i,
3545					      hwirq + i, &its_irq_chip, its_dev);
3546		irqd = irq_get_irq_data(virq + i);
3547		irqd_set_single_target(irqd);
3548		irqd_set_affinity_on_activate(irqd);
3549		pr_debug("ID:%d pID:%d vID:%d\n",
3550			 (int)(hwirq + i - its_dev->event_map.lpi_base),
3551			 (int)(hwirq + i), virq + i);
3552	}
3553
3554	return 0;
3555}
3556
3557static int its_irq_domain_activate(struct irq_domain *domain,
3558				   struct irq_data *d, bool reserve)
3559{
3560	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3561	u32 event = its_get_event_id(d);
3562	int cpu;
3563
3564	cpu = its_select_cpu(d, cpu_online_mask);
3565	if (cpu < 0 || cpu >= nr_cpu_ids)
3566		return -EINVAL;
3567
3568	its_inc_lpi_count(d, cpu);
3569	its_dev->event_map.col_map[event] = cpu;
3570	irq_data_update_effective_affinity(d, cpumask_of(cpu));
3571
3572	/* Map the GIC IRQ and event to the device */
3573	its_send_mapti(its_dev, d->hwirq, event);
3574	return 0;
3575}
3576
3577static void its_irq_domain_deactivate(struct irq_domain *domain,
3578				      struct irq_data *d)
3579{
3580	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3581	u32 event = its_get_event_id(d);
3582
3583	its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
3584	/* Stop the delivery of interrupts */
3585	its_send_discard(its_dev, event);
3586}
3587
3588static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
3589				unsigned int nr_irqs)
3590{
3591	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
3592	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3593	struct its_node *its = its_dev->its;
3594	int i;
3595
3596	bitmap_release_region(its_dev->event_map.lpi_map,
3597			      its_get_event_id(irq_domain_get_irq_data(domain, virq)),
3598			      get_count_order(nr_irqs));
3599
3600	for (i = 0; i < nr_irqs; i++) {
3601		struct irq_data *data = irq_domain_get_irq_data(domain,
3602								virq + i);
3603		/* Nuke the entry in the domain */
3604		irq_domain_reset_irq_data(data);
3605	}
3606
3607	mutex_lock(&its->dev_alloc_lock);
3608
3609	/*
3610	 * If all interrupts have been freed, start mopping the
3611	 * floor. This is conditioned on the device not being shared.
3612	 */
3613	if (!its_dev->shared &&
3614	    bitmap_empty(its_dev->event_map.lpi_map,
3615			 its_dev->event_map.nr_lpis)) {
3616		its_lpi_free(its_dev->event_map.lpi_map,
3617			     its_dev->event_map.lpi_base,
3618			     its_dev->event_map.nr_lpis);
3619
3620		/* Unmap device/itt */
3621		its_send_mapd(its_dev, 0);
3622		its_free_device(its_dev);
3623	}
3624
3625	mutex_unlock(&its->dev_alloc_lock);
3626
3627	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3628}
3629
3630static const struct irq_domain_ops its_domain_ops = {
3631	.alloc			= its_irq_domain_alloc,
3632	.free			= its_irq_domain_free,
3633	.activate		= its_irq_domain_activate,
3634	.deactivate		= its_irq_domain_deactivate,
3635};
3636
3637/*
3638 * This is insane.
3639 *
3640 * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
3641 * likely), the only way to perform an invalidate is to use a fake
3642 * device to issue an INV command, implying that the LPI has first
3643 * been mapped to some event on that device. Since this is not exactly
3644 * cheap, we try to keep that mapping around as long as possible, and
3645 * only issue an UNMAP if we're short on available slots.
3646 *
3647 * Broken by design(tm).
3648 *
3649 * GICv4.1, on the other hand, mandates that we're able to invalidate
3650 * by writing to a MMIO register. It doesn't implement the whole of
3651 * DirectLPI, but that's good enough. And most of the time, we don't
3652 * even have to invalidate anything, as the redistributor can be told
3653 * whether to generate a doorbell or not (we thus leave it enabled,
3654 * always).
3655 */
3656static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
3657{
3658	/* GICv4.1 doesn't use a proxy, so nothing to do here */
3659	if (gic_rdists->has_rvpeid)
3660		return;
3661
3662	/* Already unmapped? */
3663	if (vpe->vpe_proxy_event == -1)
3664		return;
3665
3666	its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
3667	vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
3668
3669	/*
3670	 * We don't track empty slots at all, so let's move the
3671	 * next_victim pointer if we can quickly reuse that slot
3672	 * instead of nuking an existing entry. Not clear that this is
3673	 * always a win though, and this might just generate a ripple
3674	 * effect... Let's just hope VPEs don't migrate too often.
3675	 */
3676	if (vpe_proxy.vpes[vpe_proxy.next_victim])
3677		vpe_proxy.next_victim = vpe->vpe_proxy_event;
3678
3679	vpe->vpe_proxy_event = -1;
3680}
3681
3682static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
3683{
3684	/* GICv4.1 doesn't use a proxy, so nothing to do here */
3685	if (gic_rdists->has_rvpeid)
3686		return;
3687
3688	if (!gic_rdists->has_direct_lpi) {
3689		unsigned long flags;
3690
3691		raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3692		its_vpe_db_proxy_unmap_locked(vpe);
3693		raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3694	}
3695}
3696
3697static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
3698{
3699	/* GICv4.1 doesn't use a proxy, so nothing to do here */
3700	if (gic_rdists->has_rvpeid)
3701		return;
3702
3703	/* Already mapped? */
3704	if (vpe->vpe_proxy_event != -1)
3705		return;
3706
3707	/* This slot was already allocated. Kick the other VPE out. */
3708	if (vpe_proxy.vpes[vpe_proxy.next_victim])
3709		its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
3710
3711	/* Map the new VPE instead */
3712	vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
3713	vpe->vpe_proxy_event = vpe_proxy.next_victim;
3714	vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
3715
3716	vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
3717	its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
3718}
3719
3720static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
3721{
3722	unsigned long flags;
3723	struct its_collection *target_col;
3724
3725	/* GICv4.1 doesn't use a proxy, so nothing to do here */
3726	if (gic_rdists->has_rvpeid)
3727		return;
3728
3729	if (gic_rdists->has_direct_lpi) {
3730		void __iomem *rdbase;
3731
3732		rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
3733		gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3734		wait_for_syncr(rdbase);
3735
3736		return;
3737	}
3738
3739	raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3740
3741	its_vpe_db_proxy_map_locked(vpe);
3742
3743	target_col = &vpe_proxy.dev->its->collections[to];
3744	its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
3745	vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
3746
3747	raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3748}
3749
3750static int its_vpe_set_affinity(struct irq_data *d,
3751				const struct cpumask *mask_val,
3752				bool force)
3753{
3754	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3755	int from, cpu = cpumask_first(mask_val);
3756	unsigned long flags;
3757
3758	/*
3759	 * Changing affinity is mega expensive, so let's be as lazy as
3760	 * we can and only do it if we really have to. Also, if mapped
3761	 * into the proxy device, we need to move the doorbell
3762	 * interrupt to its new location.
3763	 *
3764	 * Another thing is that changing the affinity of a vPE affects
3765	 * *other interrupts* such as all the vLPIs that are routed to
3766	 * this vPE. This means that the irq_desc lock is not enough to
3767	 * protect us, and that we must ensure nobody samples vpe->col_idx
3768	 * during the update, hence the lock below which must also be
3769	 * taken on any vLPI handling path that evaluates vpe->col_idx.
3770	 */
3771	from = vpe_to_cpuid_lock(vpe, &flags);
3772	if (from == cpu)
3773		goto out;
3774
3775	vpe->col_idx = cpu;
3776
3777	/*
3778	 * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
3779	 * is sharing its VPE table with the current one.
3780	 */
3781	if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
3782	    cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
3783		goto out;
3784
3785	its_send_vmovp(vpe);
3786	its_vpe_db_proxy_move(vpe, from, cpu);
3787
3788out:
3789	irq_data_update_effective_affinity(d, cpumask_of(cpu));
3790	vpe_to_cpuid_unlock(vpe, flags);
3791
3792	return IRQ_SET_MASK_OK_DONE;
3793}
3794
3795static void its_wait_vpt_parse_complete(void)
3796{
3797	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3798	u64 val;
3799
3800	if (!gic_rdists->has_vpend_valid_dirty)
3801		return;
3802
3803	WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER,
3804						       val,
3805						       !(val & GICR_VPENDBASER_Dirty),
3806						       10, 500));
3807}
3808
3809static void its_vpe_schedule(struct its_vpe *vpe)
3810{
3811	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3812	u64 val;
3813
3814	/* Schedule the VPE */
3815	val  = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
3816		GENMASK_ULL(51, 12);
3817	val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3818	val |= GICR_VPROPBASER_RaWb;
3819	val |= GICR_VPROPBASER_InnerShareable;
3820	gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3821
3822	val  = virt_to_phys(page_address(vpe->vpt_page)) &
3823		GENMASK_ULL(51, 16);
3824	val |= GICR_VPENDBASER_RaWaWb;
3825	val |= GICR_VPENDBASER_InnerShareable;
3826	/*
3827	 * There is no good way of finding out if the pending table is
3828	 * empty as we can race against the doorbell interrupt very
3829	 * easily. So in the end, vpe->pending_last is only an
3830	 * indication that the vcpu has something pending, not one
3831	 * that the pending table is empty. A good implementation
3832	 * would be able to read its coarse map pretty quickly anyway,
3833	 * making this a tolerable issue.
3834	 */
3835	val |= GICR_VPENDBASER_PendingLast;
3836	val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
3837	val |= GICR_VPENDBASER_Valid;
3838	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3839
3840	its_wait_vpt_parse_complete();
3841}
3842
3843static void its_vpe_deschedule(struct its_vpe *vpe)
3844{
3845	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3846	u64 val;
3847
3848	val = its_clear_vpend_valid(vlpi_base, 0, 0);
3849
3850	vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
3851	vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
3852}
3853
3854static void its_vpe_invall(struct its_vpe *vpe)
3855{
3856	struct its_node *its;
3857
3858	list_for_each_entry(its, &its_nodes, entry) {
3859		if (!is_v4(its))
3860			continue;
3861
3862		if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
3863			continue;
3864
3865		/*
3866		 * Sending a VINVALL to a single ITS is enough, as all
3867		 * we need is to reach the redistributors.
3868		 */
3869		its_send_vinvall(its, vpe);
3870		return;
3871	}
3872}
3873
3874static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
3875{
3876	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3877	struct its_cmd_info *info = vcpu_info;
3878
3879	switch (info->cmd_type) {
3880	case SCHEDULE_VPE:
3881		its_vpe_schedule(vpe);
3882		return 0;
3883
3884	case DESCHEDULE_VPE:
3885		its_vpe_deschedule(vpe);
3886		return 0;
3887
3888	case INVALL_VPE:
3889		its_vpe_invall(vpe);
3890		return 0;
3891
3892	default:
3893		return -EINVAL;
3894	}
3895}
3896
3897static void its_vpe_send_cmd(struct its_vpe *vpe,
3898			     void (*cmd)(struct its_device *, u32))
3899{
3900	unsigned long flags;
3901
3902	raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3903
3904	its_vpe_db_proxy_map_locked(vpe);
3905	cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
3906
3907	raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3908}
3909
3910static void its_vpe_send_inv(struct irq_data *d)
3911{
3912	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3913
3914	if (gic_rdists->has_direct_lpi) {
3915		void __iomem *rdbase;
3916
3917		/* Target the redistributor this VPE is currently known on */
3918		raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
3919		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3920		gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
3921		wait_for_syncr(rdbase);
3922		raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
3923	} else {
3924		its_vpe_send_cmd(vpe, its_send_inv);
3925	}
3926}
3927
3928static void its_vpe_mask_irq(struct irq_data *d)
3929{
3930	/*
3931	 * We need to mask/unmask the LPI, which is described by the parent
3932	 * irq_data. Instead of calling into the parent (which won't
3933	 * exactly do the right thing), let's simply use the
3934	 * parent_data pointer. Yes, I'm naughty.
3935	 */
3936	lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
3937	its_vpe_send_inv(d);
3938}
3939
3940static void its_vpe_unmask_irq(struct irq_data *d)
3941{
3942	/* Same hack as above... */
3943	lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
3944	its_vpe_send_inv(d);
3945}
3946
3947static int its_vpe_set_irqchip_state(struct irq_data *d,
3948				     enum irqchip_irq_state which,
3949				     bool state)
3950{
3951	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3952
3953	if (which != IRQCHIP_STATE_PENDING)
3954		return -EINVAL;
3955
3956	if (gic_rdists->has_direct_lpi) {
3957		void __iomem *rdbase;
3958
3959		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3960		if (state) {
3961			gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
3962		} else {
3963			gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3964			wait_for_syncr(rdbase);
3965		}
3966	} else {
3967		if (state)
3968			its_vpe_send_cmd(vpe, its_send_int);
3969		else
3970			its_vpe_send_cmd(vpe, its_send_clear);
3971	}
3972
3973	return 0;
3974}
3975
3976static int its_vpe_retrigger(struct irq_data *d)
3977{
3978	return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
3979}
3980
3981static struct irq_chip its_vpe_irq_chip = {
3982	.name			= "GICv4-vpe",
3983	.irq_mask		= its_vpe_mask_irq,
3984	.irq_unmask		= its_vpe_unmask_irq,
3985	.irq_eoi		= irq_chip_eoi_parent,
3986	.irq_set_affinity	= its_vpe_set_affinity,
3987	.irq_retrigger		= its_vpe_retrigger,
3988	.irq_set_irqchip_state	= its_vpe_set_irqchip_state,
3989	.irq_set_vcpu_affinity	= its_vpe_set_vcpu_affinity,
3990};
3991
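/*
 * Return the first GICv4.1-capable ITS in the system (cached in a
 * static pointer after the first lookup), or NULL if there is none.
 */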
3992static struct its_node *find_4_1_its(void)
3993{
3994	static struct its_node *its = NULL;
3995
3996	if (!its) {
3997		list_for_each_entry(its, &its_nodes, entry) {
3998			if (is_v4_1(its))
3999				return its;
4000		}
4001
4002		/* Oops? */
4003		its = NULL;
4004	}
4005
4006	return its;
4007}
4008
4009static void its_vpe_4_1_send_inv(struct irq_data *d)
4010{
4011	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4012	struct its_node *its;
4013
4014	/*
4015	 * GICv4.1 wants doorbells to be invalidated using the
4016	 * INVDB command in order to be broadcast to all RDs. Send
4017	 * it to the first valid ITS, and let the HW do its magic.
4018	 */
4019	its = find_4_1_its();
4020	if (its)
4021		its_send_invdb(its, vpe);
4022}
4023
4024static void its_vpe_4_1_mask_irq(struct irq_data *d)
4025{
4026	lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
4027	its_vpe_4_1_send_inv(d);
4028}
4029
4030static void its_vpe_4_1_unmask_irq(struct irq_data *d)
4031{
4032	lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
4033	its_vpe_4_1_send_inv(d);
4034}
4035
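/*
 * GICv4.1 residency is driven purely through GICR_VPENDBASER: making a
 * vPE resident amounts to writing its VPEID with the Valid bit (and the
 * vGrp0/vGrp1 enables) set, then waiting for the VPT parsing to finish.
 */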
4036static void its_vpe_4_1_schedule(struct its_vpe *vpe,
4037				 struct its_cmd_info *info)
4038{
4039	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4040	u64 val = 0;
4041
4042	/* Schedule the VPE */
4043	val |= GICR_VPENDBASER_Valid;
4044	val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0;
4045	val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
4046	val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
4047
4048	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
4049
4050	its_wait_vpt_parse_complete();
4051}
4052
4053static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
4054				   struct its_cmd_info *info)
4055{
4056	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4057	u64 val;
4058
4059	if (info->req_db) {
4060		unsigned long flags;
4061
4062		/*
4063		 * vPE is going to block: make the vPE non-resident with
4064		 * PendingLast clear and DB set. The GIC guarantees that if
4065		 * we read-back PendingLast clear, then a doorbell will be
4066		 * delivered when an interrupt comes.
4067		 *
4068		 * Note the locking: pending_last may also be updated by the
4069		 * doorbell interrupt handler, so take vpe_lock around the
4070		 * read-back and the update below.
4071		 */
4072		raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
4073		val = its_clear_vpend_valid(vlpi_base,
4074					    GICR_VPENDBASER_PendingLast,
4075					    GICR_VPENDBASER_4_1_DB);
4076		vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
4077		raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
4078	} else {
4079		/*
4080		 * We're not blocking, so just make the vPE non-resident
4081		 * with PendingLast set, indicating that we'll be back.
4082		 */
4083		val = its_clear_vpend_valid(vlpi_base,
4084					    0,
4085					    GICR_VPENDBASER_PendingLast);
4086		vpe->pending_last = true;
4087	}
4088}
4089
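/*
 * GICv4.1 version of a vPE-wide invalidation: poke GICR_INVALLR on the
 * redistributor this vPE is currently known on, and wait for the sync.
 */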
4090static void its_vpe_4_1_invall(struct its_vpe *vpe)
4091{
4092	void __iomem *rdbase;
4093	unsigned long flags;
4094	u64 val;
4095	int cpu;
4096
4097	val  = GICR_INVALLR_V;
4098	val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
4099
4100	/* Target the redistributor this vPE is currently known on */
4101	cpu = vpe_to_cpuid_lock(vpe, &flags);
4102	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
4103	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
4104	gic_write_lpir(val, rdbase + GICR_INVALLR);
4105
4106	wait_for_syncr(rdbase);
4107	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
4108	vpe_to_cpuid_unlock(vpe, flags);
4109}
4110
4111static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4112{
4113	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4114	struct its_cmd_info *info = vcpu_info;
4115
4116	switch (info->cmd_type) {
4117	case SCHEDULE_VPE:
4118		its_vpe_4_1_schedule(vpe, info);
4119		return 0;
4120
4121	case DESCHEDULE_VPE:
4122		its_vpe_4_1_deschedule(vpe, info);
4123		return 0;
4124
4125	case INVALL_VPE:
4126		its_vpe_4_1_invall(vpe);
4127		return 0;
4128
4129	default:
4130		return -EINVAL;
4131	}
4132}
4133
4134static struct irq_chip its_vpe_4_1_irq_chip = {
4135	.name			= "GICv4.1-vpe",
4136	.irq_mask		= its_vpe_4_1_mask_irq,
4137	.irq_unmask		= its_vpe_4_1_unmask_irq,
4138	.irq_eoi		= irq_chip_eoi_parent,
4139	.irq_set_affinity	= its_vpe_set_affinity,
4140	.irq_set_vcpu_affinity	= its_vpe_4_1_set_vcpu_affinity,
4141};
4142
4143static void its_configure_sgi(struct irq_data *d, bool clear)
4144{
4145	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4146	struct its_cmd_desc desc;
4147
4148	desc.its_vsgi_cmd.vpe = vpe;
4149	desc.its_vsgi_cmd.sgi = d->hwirq;
4150	desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
4151	desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
4152	desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
4153	desc.its_vsgi_cmd.clear = clear;
4154
4155	/*
4156	 * GICv4.1 allows us to send VSGI commands to any ITS as long as the
4157	 * destination VPE is mapped there. Since we map them eagerly at
4158	 * activation time, we're pretty sure the first GICv4.1 ITS will do.
4159	 */
4160	its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
4161}
4162
4163static void its_sgi_mask_irq(struct irq_data *d)
4164{
4165	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4166
4167	vpe->sgi_config[d->hwirq].enabled = false;
4168	its_configure_sgi(d, false);
4169}
4170
4171static void its_sgi_unmask_irq(struct irq_data *d)
4172{
4173	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4174
4175	vpe->sgi_config[d->hwirq].enabled = true;
4176	its_configure_sgi(d, false);
4177}
4178
4179static int its_sgi_set_affinity(struct irq_data *d,
4180				const struct cpumask *mask_val,
4181				bool force)
4182{
4183	/*
4184	 * There is no notion of affinity for virtual SGIs, at least
4185	 * not on the host (since they can only target a vPE).
4186	 * Tell the kernel we've done whatever it asked for.
4187	 */
4188	irq_data_update_effective_affinity(d, mask_val);
4189	return IRQ_SET_MASK_OK;
4190}
4191
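/*
 * Making a vSGI pending is done by writing the (vPE, vINTID) pair to
 * GITS_SGIR on a GICv4.1 ITS; clearing the pending state goes through a
 * VSGI command with the CLEAR bit set (see its_configure_sgi()).
 */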
4192static int its_sgi_set_irqchip_state(struct irq_data *d,
4193				     enum irqchip_irq_state which,
4194				     bool state)
4195{
4196	if (which != IRQCHIP_STATE_PENDING)
4197		return -EINVAL;
4198
4199	if (state) {
4200		struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4201		struct its_node *its = find_4_1_its();
4202		u64 val;
4203
4204		val  = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
4205		val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
4206		writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
4207	} else {
4208		its_configure_sgi(d, true);
4209	}
4210
4211	return 0;
4212}
4213
4214static int its_sgi_get_irqchip_state(struct irq_data *d,
4215				     enum irqchip_irq_state which, bool *val)
4216{
4217	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4218	void __iomem *base;
4219	unsigned long flags;
4220	u32 count = 1000000;	/* 1s! */
4221	u32 status;
4222	int cpu;
4223
4224	if (which != IRQCHIP_STATE_PENDING)
4225		return -EINVAL;
4226
4227	/*
4228	 * Locking galore! We can race against two different events:
4229	 *
4230	 * - Concurrent vPE affinity change: we must make sure it cannot
4231	 *   happen, or we'll talk to the wrong redistributor. This is
4232	 *   identical to what happens with vLPIs.
4233	 *
4234	 * - Concurrent VSGIPENDR access: As it involves accessing two
4235	 *   MMIO registers, this must be made atomic one way or another.
4236	 */
4237	cpu = vpe_to_cpuid_lock(vpe, &flags);
4238	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
4239	base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
4240	writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
4241	do {
4242		status = readl_relaxed(base + GICR_VSGIPENDR);
4243		if (!(status & GICR_VSGIPENDR_BUSY))
4244			goto out;
4245
4246		count--;
4247		if (!count) {
4248			pr_err_ratelimited("Unable to get SGI status\n");
4249			goto out;
4250		}
4251		cpu_relax();
4252		udelay(1);
4253	} while (count);
4254
4255out:
4256	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
4257	vpe_to_cpuid_unlock(vpe, flags);
4258
4259	if (!count)
4260		return -ENXIO;
4261
4262	*val = !!(status & (1 << d->hwirq));
4263
4264	return 0;
4265}
4266
4267static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4268{
4269	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4270	struct its_cmd_info *info = vcpu_info;
4271
4272	switch (info->cmd_type) {
4273	case PROP_UPDATE_VSGI:
4274		vpe->sgi_config[d->hwirq].priority = info->priority;
4275		vpe->sgi_config[d->hwirq].group = info->group;
4276		its_configure_sgi(d, false);
4277		return 0;
4278
4279	default:
4280		return -EINVAL;
4281	}
4282}
4283
4284static struct irq_chip its_sgi_irq_chip = {
4285	.name			= "GICv4.1-sgi",
4286	.irq_mask		= its_sgi_mask_irq,
4287	.irq_unmask		= its_sgi_unmask_irq,
4288	.irq_set_affinity	= its_sgi_set_affinity,
4289	.irq_set_irqchip_state	= its_sgi_set_irqchip_state,
4290	.irq_get_irqchip_state	= its_sgi_get_irqchip_state,
4291	.irq_set_vcpu_affinity	= its_sgi_set_vcpu_affinity,
4292};
4293
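/*
 * vSGI irqdomain allocation: each vPE gets exactly 16 SGIs, set up in
 * one go and starting life disabled, with default priority and group.
 */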
4294static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
4295				    unsigned int virq, unsigned int nr_irqs,
4296				    void *args)
4297{
4298	struct its_vpe *vpe = args;
4299	int i;
4300
4301	/* Yes, we do want 16 SGIs */
4302	WARN_ON(nr_irqs != 16);
4303
4304	for (i = 0; i < 16; i++) {
4305		vpe->sgi_config[i].priority = 0;
4306		vpe->sgi_config[i].enabled = false;
4307		vpe->sgi_config[i].group = false;
4308
4309		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
4310					      &its_sgi_irq_chip, vpe);
4311		irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY);
4312	}
4313
4314	return 0;
4315}
4316
4317static void its_sgi_irq_domain_free(struct irq_domain *domain,
4318				    unsigned int virq,
4319				    unsigned int nr_irqs)
4320{
4321	/* Nothing to do */
4322}
4323
4324static int its_sgi_irq_domain_activate(struct irq_domain *domain,
4325				       struct irq_data *d, bool reserve)
4326{
4327	/* Write out the initial SGI configuration */
4328	its_configure_sgi(d, false);
4329	return 0;
4330}
4331
4332static void its_sgi_irq_domain_deactivate(struct irq_domain *domain,
4333					  struct irq_data *d)
4334{
4335	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4336
4337	/*
4338	 * The VSGI command is awkward:
4339	 *
4340	 * - To change the configuration, CLEAR must be set to false,
4341	 *   leaving the pending bit unchanged.
4342	 * - To clear the pending bit, CLEAR must be set to true, leaving
4343	 *   the configuration unchanged.
4344	 *
4345	 * You just can't do both at once, hence the two commands below.
4346	 */
4347	vpe->sgi_config[d->hwirq].enabled = false;
4348	its_configure_sgi(d, false);
4349	its_configure_sgi(d, true);
4350}
4351
4352static const struct irq_domain_ops its_sgi_domain_ops = {
4353	.alloc		= its_sgi_irq_domain_alloc,
4354	.free		= its_sgi_irq_domain_free,
4355	.activate	= its_sgi_irq_domain_activate,
4356	.deactivate	= its_sgi_irq_domain_deactivate,
4357};
4358
4359static int its_vpe_id_alloc(void)
4360{
4361	return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
4362}
4363
4364static void its_vpe_id_free(u16 id)
4365{
4366	ida_simple_remove(&its_vpeid_ida, id);
4367}
4368
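/*
 * Bring a vPE into existence: allocate a VPEID, a virtual pending table
 * and (if needed) the vPE table entry backing it, then initialise the
 * software state. On GICv4.0, the doorbell proxy event starts unmapped.
 */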
4369static int its_vpe_init(struct its_vpe *vpe)
4370{
4371	struct page *vpt_page;
4372	int vpe_id;
4373
4374	/* Allocate vpe_id */
4375	vpe_id = its_vpe_id_alloc();
4376	if (vpe_id < 0)
4377		return vpe_id;
4378
4379	/* Allocate VPT */
4380	vpt_page = its_allocate_pending_table(GFP_KERNEL);
4381	if (!vpt_page) {
4382		its_vpe_id_free(vpe_id);
4383		return -ENOMEM;
4384	}
4385
4386	if (!its_alloc_vpe_table(vpe_id)) {
4387		its_vpe_id_free(vpe_id);
4388		its_free_pending_table(vpt_page);
4389		return -ENOMEM;
4390	}
4391
4392	raw_spin_lock_init(&vpe->vpe_lock);
4393	vpe->vpe_id = vpe_id;
4394	vpe->vpt_page = vpt_page;
4395	if (gic_rdists->has_rvpeid)
4396		atomic_set(&vpe->vmapp_count, 0);
4397	else
4398		vpe->vpe_proxy_event = -1;
4399
4400	return 0;
4401}
4402
4403static void its_vpe_teardown(struct its_vpe *vpe)
4404{
4405	its_vpe_db_proxy_unmap(vpe);
4406	its_vpe_id_free(vpe->vpe_id);
4407	its_free_pending_table(vpe->vpt_page);
4408}
4409
4410static void its_vpe_irq_domain_free(struct irq_domain *domain,
4411				    unsigned int virq,
4412				    unsigned int nr_irqs)
4413{
4414	struct its_vm *vm = domain->host_data;
4415	int i;
4416
4417	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
4418
4419	for (i = 0; i < nr_irqs; i++) {
4420		struct irq_data *data = irq_domain_get_irq_data(domain,
4421								virq + i);
4422		struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
4423
4424		BUG_ON(vm != vpe->its_vm);
4425
4426		clear_bit(data->hwirq, vm->db_bitmap);
4427		its_vpe_teardown(vpe);
4428		irq_domain_reset_irq_data(data);
4429	}
4430
4431	if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
4432		its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
4433		its_free_prop_table(vm->vprop_page);
4434	}
4435}
4436
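/*
 * Allocate the vPE resources for a VM: one doorbell LPI per vPE (taken
 * from a power-of-two sized chunk), a shared vPROP table, and the
 * per-vPE state. The v4.1 irqchip is used when RVPEID is supported.
 */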
4437static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
4438				    unsigned int nr_irqs, void *args)
4439{
4440	struct irq_chip *irqchip = &its_vpe_irq_chip;
4441	struct its_vm *vm = args;
4442	unsigned long *bitmap;
4443	struct page *vprop_page;
4444	int base, nr_ids, i, err = 0;
4445
4446	BUG_ON(!vm);
4447
4448	bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
4449	if (!bitmap)
4450		return -ENOMEM;
4451
4452	if (nr_ids < nr_irqs) {
4453		its_lpi_free(bitmap, base, nr_ids);
4454		return -ENOMEM;
4455	}
4456
4457	vprop_page = its_allocate_prop_table(GFP_KERNEL);
4458	if (!vprop_page) {
4459		its_lpi_free(bitmap, base, nr_ids);
4460		return -ENOMEM;
4461	}
4462
4463	vm->db_bitmap = bitmap;
4464	vm->db_lpi_base = base;
4465	vm->nr_db_lpis = nr_ids;
4466	vm->vprop_page = vprop_page;
4467
4468	if (gic_rdists->has_rvpeid)
4469		irqchip = &its_vpe_4_1_irq_chip;
4470
4471	for (i = 0; i < nr_irqs; i++) {
4472		vm->vpes[i]->vpe_db_lpi = base + i;
4473		err = its_vpe_init(vm->vpes[i]);
4474		if (err)
4475			break;
4476		err = its_irq_gic_domain_alloc(domain, virq + i,
4477					       vm->vpes[i]->vpe_db_lpi);
4478		if (err)
4479			break;
4480		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
4481					      irqchip, vm->vpes[i]);
4482		set_bit(i, bitmap);
4483	}
4484
4485	if (err) {
4486		if (i > 0)
4487			its_vpe_irq_domain_free(domain, virq, i);
4488
4489		its_lpi_free(bitmap, base, nr_ids);
4490		its_free_prop_table(vprop_page);
4491	}
4492
4493	return err;
4494}
4495
4496static int its_vpe_irq_domain_activate(struct irq_domain *domain,
4497				       struct irq_data *d, bool reserve)
4498{
4499	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4500	struct its_node *its;
4501
4502	/*
4503	 * If we use the list map, we issue VMAPP on demand... Unless
4504	 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
4505	 * so that VSGIs can work.
4506	 */
4507	if (!gic_requires_eager_mapping())
4508		return 0;
4509
4510	/* Map the VPE to the first possible CPU */
4511	vpe->col_idx = cpumask_first(cpu_online_mask);
4512
4513	list_for_each_entry(its, &its_nodes, entry) {
4514		if (!is_v4(its))
4515			continue;
4516
4517		its_send_vmapp(its, vpe, true);
4518		its_send_vinvall(its, vpe);
4519	}
4520
4521	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
4522
4523	return 0;
4524}
4525
4526static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
4527					  struct irq_data *d)
4528{
4529	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4530	struct its_node *its;
4531
4532	/*
4533	 * If we use the list map on GICv4.0, we unmap the VPE once no
4534	 * VLPIs are associated with the VM.
4535	 */
4536	if (!gic_requires_eager_mapping())
4537		return;
4538
4539	list_for_each_entry(its, &its_nodes, entry) {
4540		if (!is_v4(its))
4541			continue;
4542
4543		its_send_vmapp(its, vpe, false);
4544	}
4545}
4546
4547static const struct irq_domain_ops its_vpe_domain_ops = {
4548	.alloc			= its_vpe_irq_domain_alloc,
4549	.free			= its_vpe_irq_domain_free,
4550	.activate		= its_vpe_irq_domain_activate,
4551	.deactivate		= its_vpe_irq_domain_deactivate,
4552};
4553
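/*
 * Disable the ITS and poll (for up to a second) until it reports
 * quiescent, so that GITS_BASER<n>/GITS_CBASER can be written safely.
 */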
4554static int its_force_quiescent(void __iomem *base)
4555{
4556	u32 count = 1000000;	/* 1s */
4557	u32 val;
4558
4559	val = readl_relaxed(base + GITS_CTLR);
4560	/*
4561	 * GIC architecture specification requires the ITS to be both
4562	 * disabled and quiescent for writes to GITS_BASER<n> or
4563	 * GITS_CBASER to not have UNPREDICTABLE results.
4564	 */
4565	if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
4566		return 0;
4567
4568	/* Disable the generation of all interrupts to this ITS */
4569	val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
4570	writel_relaxed(val, base + GITS_CTLR);
4571
4572	/* Poll GITS_CTLR and wait until ITS becomes quiescent */
4573	while (1) {
4574		val = readl_relaxed(base + GITS_CTLR);
4575		if (val & GITS_CTLR_QUIESCENT)
4576			return 0;
4577
4578		count--;
4579		if (!count)
4580			return -EBUSY;
4581
4582		cpu_relax();
4583		udelay(1);
4584	}
4585}
4586
4587static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
4588{
4589	struct its_node *its = data;
4590
4591	/* erratum 22375: only allocate an 8MB device table (20 DevID bits) */
4592	its->typer &= ~GITS_TYPER_DEVBITS;
4593	its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
4594	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
4595
4596	return true;
4597}
4598
4599static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
4600{
4601	struct its_node *its = data;
4602
4603	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
4604
4605	return true;
4606}
4607
4608static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
4609{
4610	struct its_node *its = data;
4611
4612	/* On QDF2400, the size of the ITE is 16 bytes */
4613	its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
4614	its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
4615
4616	return true;
4617}
4618
4619static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
4620{
4621	struct its_node *its = its_dev->its;
4622
4623	/*
4624	 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
4625	 * which maps 32-bit writes targeted at a separate window of
4626	 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
4627	 * with device ID taken from bits [device_id_bits + 1:2] of
4628	 * the window offset.
4629	 */
4630	return its->pre_its_base + (its_dev->device_id << 2);
4631}
4632
4633static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
4634{
4635	struct its_node *its = data;
4636	u32 pre_its_window[2];
4637	u32 ids;
4638
4639	if (!fwnode_property_read_u32_array(its->fwnode_handle,
4640					   "socionext,synquacer-pre-its",
4641					   pre_its_window,
4642					   ARRAY_SIZE(pre_its_window))) {
4643
4644		its->pre_its_base = pre_its_window[0];
4645		its->get_msi_base = its_irq_get_msi_base_pre_its;
4646
4647		ids = ilog2(pre_its_window[1]) - 2;
4648		if (device_ids(its) > ids) {
4649			its->typer &= ~GITS_TYPER_DEVBITS;
4650			its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
4651		}
4652
4653		/* the pre-ITS breaks isolation, so disable MSI remapping */
4654		its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
4655		return true;
4656	}
4657	return false;
4658}
4659
4660static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
4661{
4662	struct its_node *its = data;
4663
4664	/*
4665	 * Hip07 insists on using the wrong address for the VLPI
4666	 * page. Trick it into doing the right thing...
4667	 */
4668	its->vlpi_redist_offset = SZ_128K;
4669	return true;
4670}
4671
4672static const struct gic_quirk its_quirks[] = {
4673#ifdef CONFIG_CAVIUM_ERRATUM_22375
4674	{
4675		.desc	= "ITS: Cavium errata 22375, 24313",
4676		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
4677		.mask	= 0xffff0fff,
4678		.init	= its_enable_quirk_cavium_22375,
4679	},
4680#endif
4681#ifdef CONFIG_CAVIUM_ERRATUM_23144
4682	{
4683		.desc	= "ITS: Cavium erratum 23144",
4684		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
4685		.mask	= 0xffff0fff,
4686		.init	= its_enable_quirk_cavium_23144,
4687	},
4688#endif
4689#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
4690	{
4691		.desc	= "ITS: QDF2400 erratum 0065",
4692		.iidr	= 0x00001070, /* QDF2400 ITS rev 1.x */
4693		.mask	= 0xffffffff,
4694		.init	= its_enable_quirk_qdf2400_e0065,
4695	},
4696#endif
4697#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
4698	{
4699		/*
4700		 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
4701		 * implementation, but with a 'pre-ITS' added that requires
4702		 * special handling in software.
4703		 */
4704		.desc	= "ITS: Socionext Synquacer pre-ITS",
4705		.iidr	= 0x0001143b,
4706		.mask	= 0xffffffff,
4707		.init	= its_enable_quirk_socionext_synquacer,
4708	},
4709#endif
4710#ifdef CONFIG_HISILICON_ERRATUM_161600802
4711	{
4712		.desc	= "ITS: Hip07 erratum 161600802",
4713		.iidr	= 0x00000004,
4714		.mask	= 0xffffffff,
4715		.init	= its_enable_quirk_hip07_161600802,
4716	},
4717#endif
4718	{
4719	}
4720};
4721
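/* Match GITS_IIDR against the quirk table above and apply whatever sticks. */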
4722static void its_enable_quirks(struct its_node *its)
4723{
4724	u32 iidr = readl_relaxed(its->base + GITS_IIDR);
4725
4726	gic_enable_quirks(iidr, its_quirks, its);
4727}
4728
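/*
 * syscore suspend hook: save GITS_CTLR/GITS_CBASER and force each
 * suspend-aware ITS quiescent. On failure, re-enable whatever was
 * already disabled and propagate the error.
 */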
4729static int its_save_disable(void)
4730{
4731	struct its_node *its;
4732	int err = 0;
4733
4734	raw_spin_lock(&its_lock);
4735	list_for_each_entry(its, &its_nodes, entry) {
4736		void __iomem *base;
4737
4738		if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
4739			continue;
4740
4741		base = its->base;
4742		its->ctlr_save = readl_relaxed(base + GITS_CTLR);
4743		err = its_force_quiescent(base);
4744		if (err) {
4745			pr_err("ITS@%pa: failed to quiesce: %d\n",
4746			       &its->phys_base, err);
4747			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4748			goto err;
4749		}
4750
4751		its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
4752	}
4753
4754err:
4755	if (err) {
4756		list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
4757			void __iomem *base;
4758
4759			if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
4760				continue;
4761
4762			base = its->base;
4763			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4764		}
4765	}
4766	raw_spin_unlock(&its_lock);
4767
4768	return err;
4769}
4770
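/*
 * syscore resume hook: re-quiesce each suspend-aware ITS, restore the
 * command queue and the cached GITS_BASER values, re-enable it, and
 * re-init this CPU's collection if it is stored in the ITS itself.
 */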
4771static void its_restore_enable(void)
4772{
4773	struct its_node *its;
4774	int ret;
4775
4776	raw_spin_lock(&its_lock);
4777	list_for_each_entry(its, &its_nodes, entry) {
4778		void __iomem *base;
4779		int i;
4780
4781		if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
4782			continue;
4783
4784		base = its->base;
4785
4786		/*
4787		 * Make sure that the ITS is disabled. If it fails to quiesce,
4788		 * don't restore it since writing to CBASER or BASER<n>
4789		 * registers is undefined according to the GIC v3 ITS
4790		 * Specification.
4791		 */
4792		ret = its_force_quiescent(base);
4793		if (ret) {
4794			pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
4795			       &its->phys_base, ret);
4796			continue;
4797		}
4798
4799		gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
4800
4801		/*
4802		 * Writing CBASER resets CREADR to 0, so make CWRITER and
4803		 * cmd_write line up with it.
4804		 */
4805		its->cmd_write = its->cmd_base;
4806		gits_write_cwriter(0, base + GITS_CWRITER);
4807
4808		/* Restore GITS_BASER from the value cache. */
4809		for (i = 0; i < GITS_BASER_NR_REGS; i++) {
4810			struct its_baser *baser = &its->tables[i];
4811
4812			if (!(baser->val & GITS_BASER_VALID))
4813				continue;
4814
4815			its_write_baser(its, baser, baser->val);
4816		}
4817		writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4818
4819		/*
4820		 * Reinit the collection if it's stored in the ITS. This is
4821		 * indicated by the col_id being less than the HCC field
4822		 * (CID < HCC), as specified by the GICv3 architecture.
4823		 */
4824		if (its->collections[smp_processor_id()].col_id <
4825		    GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
4826			its_cpu_init_collection(its);
4827	}
4828	raw_spin_unlock(&its_lock);
4829}
4830
4831static struct syscore_ops its_syscore_ops = {
4832	.suspend = its_save_disable,
4833	.resume = its_restore_enable,
4834};
4835
4836static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
4837{
4838	struct irq_domain *inner_domain;
4839	struct msi_domain_info *info;
4840
4841	info = kzalloc(sizeof(*info), GFP_KERNEL);
4842	if (!info)
4843		return -ENOMEM;
4844
4845	inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
4846	if (!inner_domain) {
4847		kfree(info);
4848		return -ENOMEM;
4849	}
4850
4851	inner_domain->parent = its_parent;
4852	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
4853	inner_domain->flags |= its->msi_domain_flags;
4854	info->ops = &its_msi_domain_ops;
4855	info->data = its;
4856	inner_domain->host_data = info;
4857
4858	return 0;
4859}
4860
4861static int its_init_vpe_domain(void)
4862{
4863	struct its_node *its;
4864	u32 devid;
4865	int entries;
4866
4867	if (gic_rdists->has_direct_lpi) {
4868		pr_info("ITS: Using DirectLPI for VPE invalidation\n");
4869		return 0;
4870	}
4871
4872	/* Any ITS will do, even if not v4 */
4873	its = list_first_entry(&its_nodes, struct its_node, entry);
4874
4875	entries = roundup_pow_of_two(nr_cpu_ids);
4876	vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
4877				 GFP_KERNEL);
4878	if (!vpe_proxy.vpes) {
4879		pr_err("ITS: Can't allocate GICv4 proxy device array\n");
4880		return -ENOMEM;
4881	}
4882
4883	/* Use the last possible DevID */
4884	devid = GENMASK(device_ids(its) - 1, 0);
4885	vpe_proxy.dev = its_create_device(its, devid, entries, false);
4886	if (!vpe_proxy.dev) {
4887		kfree(vpe_proxy.vpes);
4888		pr_err("ITS: Can't allocate GICv4 proxy device\n");
4889		return -ENOMEM;
4890	}
4891
4892	BUG_ON(entries > vpe_proxy.dev->nr_ites);
4893
4894	raw_spin_lock_init(&vpe_proxy.lock);
4895	vpe_proxy.next_victim = 0;
4896	pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
4897		devid, vpe_proxy.dev->nr_ites);
4898
4899	return 0;
4900}
4901
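/*
 * On GICv4.0 without VMOVP, each ITS must carry a unique ITSList number.
 * Grab the first free slot in its_list_map, program it into GITS_CTLR,
 * and read it back in case the field turns out to be read-only.
 */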
4902static int __init its_compute_its_list_map(struct resource *res,
4903					   void __iomem *its_base)
4904{
4905	int its_number;
4906	u32 ctlr;
4907
4908	/*
4909	 * This is assumed to be done early enough that we're
4910	 * guaranteed to be single-threaded, hence no
4911	 * locking. Should this change, we should address
4912	 * this.
4913	 */
4914	its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
4915	if (its_number >= GICv4_ITS_LIST_MAX) {
4916		pr_err("ITS@%pa: No ITSList entry available!\n",
4917		       &res->start);
4918		return -EINVAL;
4919	}
4920
4921	ctlr = readl_relaxed(its_base + GITS_CTLR);
4922	ctlr &= ~GITS_CTLR_ITS_NUMBER;
4923	ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
4924	writel_relaxed(ctlr, its_base + GITS_CTLR);
4925	ctlr = readl_relaxed(its_base + GITS_CTLR);
4926	if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
4927		its_number = ctlr & GITS_CTLR_ITS_NUMBER;
4928		its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
4929	}
4930
4931	if (test_and_set_bit(its_number, &its_list_map)) {
4932		pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
4933		       &res->start, its_number);
4934		return -EINVAL;
4935	}
4936
4937	return its_number;
4938}
4939
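/*
 * Probe a single ITS instance: map its registers, force it quiescent,
 * allocate the command queue, the device/collection tables and (on
 * GICv4.1) the SGIR page, program GITS_CBASER, then enable the ITS and
 * register its MSI domain.
 */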
4940static int __init its_probe_one(struct resource *res,
4941				struct fwnode_handle *handle, int numa_node)
4942{
4943	struct its_node *its;
4944	void __iomem *its_base;
4945	u32 val, ctlr;
4946	u64 baser, tmp, typer;
4947	struct page *page;
4948	int err;
4949
4950	its_base = ioremap(res->start, SZ_64K);
4951	if (!its_base) {
4952		pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
4953		return -ENOMEM;
4954	}
4955
4956	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
4957	if (val != 0x30 && val != 0x40) {
4958		pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
4959		err = -ENODEV;
4960		goto out_unmap;
4961	}
4962
4963	err = its_force_quiescent(its_base);
4964	if (err) {
4965		pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
4966		goto out_unmap;
4967	}
4968
4969	pr_info("ITS %pR\n", res);
4970
4971	its = kzalloc(sizeof(*its), GFP_KERNEL);
4972	if (!its) {
4973		err = -ENOMEM;
4974		goto out_unmap;
4975	}
4976
4977	raw_spin_lock_init(&its->lock);
4978	mutex_init(&its->dev_alloc_lock);
4979	INIT_LIST_HEAD(&its->entry);
4980	INIT_LIST_HEAD(&its->its_device_list);
4981	typer = gic_read_typer(its_base + GITS_TYPER);
4982	its->typer = typer;
4983	its->base = its_base;
4984	its->phys_base = res->start;
4985	if (is_v4(its)) {
4986		if (!(typer & GITS_TYPER_VMOVP)) {
4987			err = its_compute_its_list_map(res, its_base);
4988			if (err < 0)
4989				goto out_free_its;
4990
4991			its->list_nr = err;
4992
4993			pr_info("ITS@%pa: Using ITS number %d\n",
4994				&res->start, err);
4995		} else {
4996			pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
4997		}
4998
4999		if (is_v4_1(its)) {
5000			u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);
5001
5002			its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K);
5003			if (!its->sgir_base) {
5004				err = -ENOMEM;
5005				goto out_free_its;
5006			}
5007
5008			its->mpidr = readl_relaxed(its_base + GITS_MPIDR);
5009
5010			pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
5011				&res->start, its->mpidr, svpet);
5012		}
5013	}
5014
5015	its->numa_node = numa_node;
5016
5017	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
5018				get_order(ITS_CMD_QUEUE_SZ));
5019	if (!page) {
5020		err = -ENOMEM;
5021		goto out_unmap_sgir;
5022	}
5023	its->cmd_base = (void *)page_address(page);
5024	its->cmd_write = its->cmd_base;
5025	its->fwnode_handle = handle;
5026	its->get_msi_base = its_irq_get_msi_base;
5027	its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
5028
5029	its_enable_quirks(its);
5030
5031	err = its_alloc_tables(its);
5032	if (err)
5033		goto out_free_cmd;
5034
5035	err = its_alloc_collections(its);
5036	if (err)
5037		goto out_free_tables;
5038
5039	baser = (virt_to_phys(its->cmd_base)	|
5040		 GITS_CBASER_RaWaWb		|
5041		 GITS_CBASER_InnerShareable	|
5042		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
5043		 GITS_CBASER_VALID);
5044
5045	gits_write_cbaser(baser, its->base + GITS_CBASER);
5046	tmp = gits_read_cbaser(its->base + GITS_CBASER);
5047
5048	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
5049		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
5050			/*
5051			 * The HW reports non-shareable, we must
5052			 * remove the cacheability attributes as
5053			 * well.
5054			 */
5055			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
5056				   GITS_CBASER_CACHEABILITY_MASK);
5057			baser |= GITS_CBASER_nC;
5058			gits_write_cbaser(baser, its->base + GITS_CBASER);
5059		}
5060		pr_info("ITS: using cache flushing for cmd queue\n");
5061		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
5062	}
5063
5064	gits_write_cwriter(0, its->base + GITS_CWRITER);
5065	ctlr = readl_relaxed(its->base + GITS_CTLR);
5066	ctlr |= GITS_CTLR_ENABLE;
5067	if (is_v4(its))
5068		ctlr |= GITS_CTLR_ImDe;
5069	writel_relaxed(ctlr, its->base + GITS_CTLR);
5070
5071	if (GITS_TYPER_HCC(typer))
5072		its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;
5073
5074	err = its_init_domain(handle, its);
5075	if (err)
5076		goto out_free_tables;
5077
5078	raw_spin_lock(&its_lock);
5079	list_add(&its->entry, &its_nodes);
5080	raw_spin_unlock(&its_lock);
5081
5082	return 0;
5083
5084out_free_tables:
5085	its_free_tables(its);
5086out_free_cmd:
5087	free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
5088out_unmap_sgir:
5089	if (its->sgir_base)
5090		iounmap(its->sgir_base);
5091out_free_its:
5092	kfree(its);
5093out_unmap:
5094	iounmap(its_base);
5095	pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
5096	return err;
5097}
5098
5099static bool gic_rdists_supports_plpis(void)
5100{
5101	return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
5102}
5103
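/*
 * Runs on the local CPU before LPIs are (re-)enabled. If the
 * redistributor already has LPIs enabled and we did not do it (and no
 * pre-allocated tables were advertised), something went wrong before the
 * kernel got control: taint the kernel and try to turn them off again.
 */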
5104static int redist_disable_lpis(void)
5105{
5106	void __iomem *rbase = gic_data_rdist_rd_base();
5107	u64 timeout = USEC_PER_SEC;
5108	u64 val;
5109
5110	if (!gic_rdists_supports_plpis()) {
5111		pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
5112		return -ENXIO;
5113	}
5114
5115	val = readl_relaxed(rbase + GICR_CTLR);
5116	if (!(val & GICR_CTLR_ENABLE_LPIS))
5117		return 0;
5118
5119	/*
5120	 * If coming via a CPU hotplug event, we don't need to disable
5121	 * LPIs before trying to re-enable them. They are already
5122	 * configured and all is well in the world.
5123	 *
5124	 * If running with preallocated tables, there is nothing to do.
5125	 */
5126	if (gic_data_rdist()->lpi_enabled ||
5127	    (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
5128		return 0;
5129
5130	/*
5131	 * From that point on, we only try to do some damage control.
5132	 */
5133	pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
5134		smp_processor_id());
5135	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
5136
5137	/* Disable LPIs */
5138	val &= ~GICR_CTLR_ENABLE_LPIS;
5139	writel_relaxed(val, rbase + GICR_CTLR);
5140
5141	/* Make sure any change to GICR_CTLR is observable by the GIC */
5142	dsb(sy);
5143
5144	/*
5145	 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
5146	 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
5147	 * Error out if we time out waiting for RWP to clear.
5148	 */
5149	while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
5150		if (!timeout) {
5151			pr_err("CPU%d: Timeout while disabling LPIs\n",
5152			       smp_processor_id());
5153			return -ETIMEDOUT;
5154		}
5155		udelay(1);
5156		timeout--;
5157	}
5158
5159	/*
5160	 * After it has been written to 1, it is IMPLEMENTATION
5161	 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
5162	 * cleared to 0. Error out if clearing the bit failed.
5163	 */
5164	if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
5165		pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
5166		return -EBUSY;
5167	}
5168
5169	return 0;
5170}
5171
5172int its_cpu_init(void)
5173{
5174	if (!list_empty(&its_nodes)) {
5175		int ret;
5176
5177		ret = redist_disable_lpis();
5178		if (ret)
5179			return ret;
5180
5181		its_cpu_init_lpis();
5182		its_cpu_init_collections();
5183	}
5184
5185	return 0;
5186}
5187
5188static const struct of_device_id its_device_id[] = {
5189	{	.compatible	= "arm,gic-v3-its",	},
5190	{},
5191};
5192
5193static int __init its_of_probe(struct device_node *node)
5194{
5195	struct device_node *np;
5196	struct resource res;
5197
5198	for (np = of_find_matching_node(node, its_device_id); np;
5199	     np = of_find_matching_node(np, its_device_id)) {
5200		if (!of_device_is_available(np))
5201			continue;
5202		if (!of_property_read_bool(np, "msi-controller")) {
5203			pr_warn("%pOF: no msi-controller property, ITS ignored\n",
5204				np);
5205			continue;
5206		}
5207
5208		if (of_address_to_resource(np, 0, &res)) {
5209			pr_warn("%pOF: no regs?\n", np);
5210			continue;
5211		}
5212
5213		its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
5214	}
5215	return 0;
5216}
5217
5218#ifdef CONFIG_ACPI
5219
5220#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
5221
5222#ifdef CONFIG_ACPI_NUMA
5223struct its_srat_map {
5224	/* numa node id */
5225	u32	numa_node;
5226	/* GIC ITS ID */
5227	u32	its_id;
5228};
5229
5230static struct its_srat_map *its_srat_maps __initdata;
5231static int its_in_srat __initdata;
5232
5233static int __init acpi_get_its_numa_node(u32 its_id)
5234{
5235	int i;
5236
5237	for (i = 0; i < its_in_srat; i++) {
5238		if (its_id == its_srat_maps[i].its_id)
5239			return its_srat_maps[i].numa_node;
5240	}
5241	return NUMA_NO_NODE;
5242}
5243
5244static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
5245					  const unsigned long end)
5246{
5247	return 0;
5248}
5249
5250static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
5251			 const unsigned long end)
5252{
5253	int node;
5254	struct acpi_srat_gic_its_affinity *its_affinity;
5255
5256	its_affinity = (struct acpi_srat_gic_its_affinity *)header;
5257	if (!its_affinity)
5258		return -EINVAL;
5259
5260	if (its_affinity->header.length < sizeof(*its_affinity)) {
5261		pr_err("SRAT: Invalid header length %d in ITS affinity\n",
5262			its_affinity->header.length);
5263		return -EINVAL;
5264	}
5265
5266	node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
5267
5268	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
5269		pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
5270		return 0;
5271	}
5272
5273	its_srat_maps[its_in_srat].numa_node = node;
5274	its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
5275	its_in_srat++;
5276	pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
5277		its_affinity->proximity_domain, its_affinity->its_id, node);
5278
5279	return 0;
5280}
5281
5282static void __init acpi_table_parse_srat_its(void)
5283{
5284	int count;
5285
5286	count = acpi_table_parse_entries(ACPI_SIG_SRAT,
5287			sizeof(struct acpi_table_srat),
5288			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
5289			gic_acpi_match_srat_its, 0);
5290	if (count <= 0)
5291		return;
5292
5293	its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
5294				      GFP_KERNEL);
5295	if (!its_srat_maps) {
5296		pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
5297		return;
5298	}
5299
5300	acpi_table_parse_entries(ACPI_SIG_SRAT,
5301			sizeof(struct acpi_table_srat),
5302			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
5303			gic_acpi_parse_srat_its, 0);
5304}
5305
5306/* free the its_srat_maps after ITS probing */
5307static void __init acpi_its_srat_maps_free(void)
5308{
5309	kfree(its_srat_maps);
5310}
5311#else
5312static void __init acpi_table_parse_srat_its(void)	{ }
5313static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
5314static void __init acpi_its_srat_maps_free(void) { }
5315#endif
5316
5317static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
5318					  const unsigned long end)
5319{
5320	struct acpi_madt_generic_translator *its_entry;
5321	struct fwnode_handle *dom_handle;
5322	struct resource res;
5323	int err;
5324
5325	its_entry = (struct acpi_madt_generic_translator *)header;
5326	memset(&res, 0, sizeof(res));
5327	res.start = its_entry->base_address;
5328	res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
5329	res.flags = IORESOURCE_MEM;
5330
5331	dom_handle = irq_domain_alloc_fwnode(&res.start);
5332	if (!dom_handle) {
5333		pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
5334		       &res.start);
5335		return -ENOMEM;
5336	}
5337
5338	err = iort_register_domain_token(its_entry->translation_id, res.start,
5339					 dom_handle);
5340	if (err) {
5341		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
5342		       &res.start, its_entry->translation_id);
5343		goto dom_err;
5344	}
5345
5346	err = its_probe_one(&res, dom_handle,
5347			acpi_get_its_numa_node(its_entry->translation_id));
5348	if (!err)
5349		return 0;
5350
5351	iort_deregister_domain_token(its_entry->translation_id);
5352dom_err:
5353	irq_domain_free_fwnode(dom_handle);
5354	return err;
5355}
5356
5357static void __init its_acpi_probe(void)
5358{
5359	acpi_table_parse_srat_its();
5360	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
5361			      gic_acpi_parse_madt_its, 0);
5362	acpi_its_srat_maps_free();
5363}
5364#else
5365static void __init its_acpi_probe(void) { }
5366#endif
5367
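/*
 * Top-level ITS init, called by the GICv3 driver once the distributor
 * and redistributors are up: discover the ITSes (DT or ACPI), allocate
 * the LPI tables, and set up GICv4/GICv4.1 support when both the ITSes
 * and the redistributors advertise it.
 */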
5368int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
5369		    struct irq_domain *parent_domain)
5370{
5371	struct device_node *of_node;
5372	struct its_node *its;
5373	bool has_v4 = false;
5374	bool has_v4_1 = false;
5375	int err;
5376
5377	gic_rdists = rdists;
5378
5379	its_parent = parent_domain;
5380	of_node = to_of_node(handle);
5381	if (of_node)
5382		its_of_probe(of_node);
5383	else
5384		its_acpi_probe();
5385
5386	if (list_empty(&its_nodes)) {
5387		pr_warn("ITS: No ITS available, not enabling LPIs\n");
5388		return -ENXIO;
5389	}
5390
5391	err = allocate_lpi_tables();
5392	if (err)
5393		return err;
5394
5395	list_for_each_entry(its, &its_nodes, entry) {
5396		has_v4 |= is_v4(its);
5397		has_v4_1 |= is_v4_1(its);
5398	}
5399
5400	/* Don't bother with inconsistent systems */
5401	if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
5402		rdists->has_rvpeid = false;
5403
5404	if (has_v4 && rdists->has_vlpis) {
5405		const struct irq_domain_ops *sgi_ops;
5406
5407		if (has_v4_1)
5408			sgi_ops = &its_sgi_domain_ops;
5409		else
5410			sgi_ops = NULL;
5411
5412		if (its_init_vpe_domain() ||
5413		    its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
5414			rdists->has_vlpis = false;
5415			pr_err("ITS: Disabling GICv4 support\n");
5416		}
5417	}
5418
5419	register_syscore_ops(&its_syscore_ops);
5420
5421	return 0;
5422}