// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
#define ITS_FLAGS_FORCE_NON_SHAREABLE		(1ULL << 3)

#define RD_LOCAL_LPI_ENABLED			BIT(0)
#define RD_LOCAL_PENDTABLE_PREALLOCATED		BIT(1)
#define RD_LOCAL_MEMRESERVE_DONE		BIT(2)

static u32 lpi_id_bits;

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
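 *
 * For example, with lpi_id_bits = 16 the property table is exactly
 * 64kB (one byte per INTID), while the corresponding 8kB of pending
 * bits is rounded up to 64kB by the alignment requirement.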
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)

#define LPI_PROP_DEFAULT_PRIO	GICD_INT_DEF_PRI

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
	void		*base;
	u64		val;
	u32		order;
	u32		psz;
};

struct its_device;

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 *
 * dev_alloc_lock has to be taken for device allocations, while the
 * spinlock must be taken to parse data structures such as the device
 * list.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct mutex		dev_alloc_lock;
	struct list_head	entry;
	void __iomem		*base;
	void __iomem		*sgir_base;
	phys_addr_t		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct fwnode_handle	*fwnode_handle;
	u64			(*get_msi_base)(struct its_device *its_dev);
	u64			typer;
	u64			cbaser_save;
	u32			ctlr_save;
	u32			mpidr;
	struct list_head	its_device_list;
	u64			flags;
	unsigned long		list_nr;
	int			numa_node;
	unsigned int		msi_domain_flags;
	u32			pre_its_base;	/* for Socionext Synquacer */
	int			vlpi_redist_offset;
};

#define is_v4(its)		(!!((its)->typer & GITS_TYPER_VLPIS))
#define is_v4_1(its)		(!!((its)->typer & GITS_TYPER_VMAPP))
#define device_ids(its)		(FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)

#define ITS_ITT_ALIGN		SZ_256

/* The maximum number of VPEID bits supported by VLPI commands */
#define ITS_MAX_VPEID_BITS						\
	({								\
		int nvpeid = 16;					\
		if (gic_rdists->has_rvpeid &&				\
		    gic_rdists->gicd_typer2 & GICD_TYPER2_VIL)		\
			nvpeid = 1 + (gic_rdists->gicd_typer2 &		\
				      GICD_TYPER2_VID);			\
									\
		nvpeid;							\
	})
#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))

struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
	raw_spinlock_t		vlpi_lock;
	struct its_vm		*vm;
	struct its_vlpi_map	*vlpi_maps;
	int			nr_vlpis;
};

/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts. If some of its LPIs
 * are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
	bool			shared;
};

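/*
 * GICv4.0 doorbell proxying (a sketch of the mechanism, which is
 * implemented further down the file): without DirectLPI, vPE
 * doorbells cannot be invalidated at the redistributor, so they are
 * temporarily mapped as events of this proxy device and driven with
 * regular ITS commands instead.
 */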
static struct {
	raw_spinlock_t		lock;
	struct its_device	*dev;
	struct its_vpe		**vpes;
	int			next_victim;
} vpe_proxy;

struct cpu_lpi_count {
	atomic_t	managed;
	atomic_t	unmanaged;
};

static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);

static LIST_HEAD(its_nodes);
static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_cpu(cpu)		(per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)

/*
 * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
 * always have vSGIs mapped.
 */
static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
{
	return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
}

static bool rdists_support_shareable(void)
{
	return !(gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE);
}

static u16 get_its_list(struct its_vm *vm)
{
	struct its_node *its;
	unsigned long its_list = 0;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		if (require_its_list_vmovp(vm, its))
			__set_bit(its->list_nr, &its_list);
	}

	return (u16)its_list;
}

static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
						  u32 event)
{
	if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
		return NULL;

	return &its_dev->event_map.vlpi_maps[event];
}

static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d)) {
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
		u32 event = its_get_event_id(d);

		return dev_event_to_vlpi_map(its_dev, event);
	}

	return NULL;
}

static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
{
	raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
	return vpe->col_idx;
}

static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
}

static struct irq_chip its_vpe_irq_chip;

static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
{
	struct its_vpe *vpe = NULL;
	int cpu;

	if (d->chip == &its_vpe_irq_chip) {
		vpe = irq_data_get_irq_chip_data(d);
	} else {
		struct its_vlpi_map *map = get_vlpi_map(d);
		if (map)
			vpe = map->vpe;
	}

	if (vpe) {
		cpu = vpe_to_cpuid_lock(vpe, flags);
	} else {
		/* Physical LPIs are already locked via the irq_desc lock */
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
		cpu = its_dev->event_map.col_map[its_get_event_id(d)];
		/* Keep GCC quiet... */
		*flags = 0;
	}

	return cpu;
}

static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
{
	struct its_vpe *vpe = NULL;

	if (d->chip == &its_vpe_irq_chip) {
		vpe = irq_data_get_irq_chip_data(d);
	} else {
		struct its_vlpi_map *map = get_vlpi_map(d);
		if (map)
			vpe = map->vpe;
	}

	if (vpe)
		vpe_to_cpuid_unlock(vpe, flags);
}

static struct its_collection *valid_col(struct its_collection *col)
{
	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
		return NULL;

	return col;
}

static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
{
	if (valid_col(its->collections + vpe->col_idx))
		return vpe;

	return NULL;
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_clear_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapti_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;

		struct {
			struct its_vpe *vpe;
		} its_vinvall_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			bool valid;
		} its_vmapp_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 virt_id;
			u32 event_id;
			bool db_enabled;
		} its_vmapti_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 event_id;
			bool db_enabled;
		} its_vmovi_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			u16 seq_num;
			u16 its_list;
		} its_vmovp_cmd;

		struct {
			struct its_vpe *vpe;
		} its_invdb_cmd;

		struct {
			struct its_vpe *vpe;
			u8 sgi;
			u8 priority;
			bool enable;
			bool group;
			bool clear;
		} its_vsgi_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	union {
		u64	raw_cmd[4];
		__le64	raw_cmd_le[4];
	};
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))

typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
						    struct its_cmd_block *,
						    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
					      struct its_cmd_block *,
					      struct its_cmd_desc *);

static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);
	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}

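/*
 * Each helper below drops one field into its architected slot of the
 * 32-byte command: its_encode_devid(), for instance, places the
 * DeviceID in bits [63:32] of the command's first doubleword.
 */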
static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}

static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
{
	its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
}

static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
{
	its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
}

static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
{
	its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
}

static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
					u32 vpe_db_lpi)
{
	its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
}

static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
					u32 vpe_db_lpi)
{
	its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
}

static void its_encode_db(struct its_cmd_block *cmd, bool db)
{
	its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
}

static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
{
	its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
}

static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
{
	its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
}

static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
{
	its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
}

static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
{
	its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
}

static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
{
	its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

static struct its_collection *its_build_mapd_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapti_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapti_cmd.dev,
			       desc->its_mapti_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPTI);
	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_movi_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_discard_cmd(struct its_node *its,
						    struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_inv_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_int_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_int_cmd.dev,
			       desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_clear_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_clear_cmd.dev,
			       desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_invall_cmd(struct its_node *its,
						   struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return desc->its_invall_cmd.col;
}

static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
					     struct its_cmd_block *cmd,
					     struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_VINVALL);
	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vinvall_cmd.vpe);
}

static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	unsigned long vpt_addr, vconf_addr;
	u64 target;
	bool alloc;

	its_encode_cmd(cmd, GITS_CMD_VMAPP);
	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);

	if (!desc->its_vmapp_cmd.valid) {
		if (is_v4_1(its)) {
			alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
			its_encode_alloc(cmd, alloc);
		}

		goto out;
	}

	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
	target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;

	its_encode_target(cmd, target);
	its_encode_vpt_addr(cmd, vpt_addr);
	its_encode_vpt_size(cmd, LPI_NRBITS - 1);

	if (!is_v4_1(its))
		goto out;

	vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));

	alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);

	its_encode_alloc(cmd, alloc);

	/*
	 * GICv4.1 provides a way to get the VLPI state, which needs the vPE
	 * to be unmapped first, and in this case, we may remap the vPE
	 * back while the VPT is not empty. So we can't assume that the
	 * VPT is empty on map. This is why we never advertise PTZ.
	 */
	its_encode_ptz(cmd, false);
	its_encode_vconf_addr(cmd, vconf_addr);
	its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);

out:
	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapp_cmd.vpe);
}

static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	u32 db;

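	/* INTID 1023 is the architectural spurious ID, i.e. "no doorbell" */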
	if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapti_cmd.vpe);
}

static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u32 db;

	if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMOVI);
	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_db_valid(cmd, true);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovi_cmd.vpe);
}

static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u64 target;

	target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
	its_encode_cmd(cmd, GITS_CMD_VMOVP);
	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
	its_encode_target(cmd, target);

	if (is_v4_1(its)) {
		its_encode_db(cmd, true);
		its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
	}

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovp_cmd.vpe);
}

static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
					  struct its_cmd_block *cmd,
					  struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
				    desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}

static struct its_vpe *its_build_vint_cmd(struct its_node *its,
					  struct its_cmd_block *cmd,
					  struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
				    desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}

static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
				    desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}

static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	if (WARN_ON(!is_v4_1(its)))
		return NULL;

	its_encode_cmd(cmd, GITS_CMD_INVDB);
	its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_invdb_cmd.vpe);
}

static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
					  struct its_cmd_block *cmd,
					  struct its_cmd_desc *desc)
{
	if (WARN_ON(!is_v4_1(its)))
		return NULL;

	its_encode_cmd(cmd, GITS_CMD_VSGI);
	its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
	its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
	its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
	its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
	its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
	its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vsgi_cmd.vpe);
}

static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}

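/*
 * The command queue is a 64kB ring of 32-byte commands (2048 slots);
 * GITS_CREADR and GITS_CWRITER hold byte offsets into it, hence the
 * conversions between pointers, byte offsets and slot indices here
 * and below.
 */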
static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}

static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	/* Clear command */
	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

	return cmd;
}

static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}

static int its_wait_for_range_completion(struct its_node *its,
					 u64 prev_idx,
					 struct its_cmd_block *to)
{
	u64 rd_idx, to_idx, linear_idx;
	u32 count = 1000000;	/* 1s! */

	/* Linearize to_idx if the command set has wrapped around */
	to_idx = its_cmd_ptr_to_offset(its, to);
	if (to_idx < prev_idx)
		to_idx += ITS_CMD_QUEUE_SZ;

	linear_idx = prev_idx;

	while (1) {
		s64 delta;

		rd_idx = readl_relaxed(its->base + GITS_CREADR);

		/*
		 * Compute the read pointer progress, taking the
		 * potential wrap-around into account.
		 */
		delta = rd_idx - prev_idx;
		if (rd_idx < prev_idx)
			delta += ITS_CMD_QUEUE_SZ;

		linear_idx += delta;
		if (linear_idx >= to_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
					   to_idx, linear_idx);
			return -1;
		}
		prev_idx = rd_idx;
		cpu_relax();
		udelay(1);
	}

	return 0;
}

/* Warning, macro hell follows */
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
void name(struct its_node *its,						\
	  buildtype builder,						\
	  struct its_cmd_desc *desc)					\
{									\
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
	synctype *sync_obj;						\
	unsigned long flags;						\
	u64 rd_idx;							\
									\
	raw_spin_lock_irqsave(&its->lock, flags);			\
									\
	cmd = its_allocate_entry(its);					\
	if (!cmd) {		/* We're soooooo screwed... */		\
		raw_spin_unlock_irqrestore(&its->lock, flags);		\
		return;							\
	}								\
	sync_obj = builder(its, cmd, desc);				\
	its_flush_cmd(its, cmd);					\
									\
	if (sync_obj) {							\
		sync_cmd = its_allocate_entry(its);			\
		if (!sync_cmd)						\
			goto post;					\
									\
		buildfn(its, sync_cmd, sync_obj);			\
		its_flush_cmd(its, sync_cmd);				\
	}								\
									\
post:									\
	rd_idx = readl_relaxed(its->base + GITS_CREADR);		\
	next_cmd = its_post_commands(its);				\
	raw_spin_unlock_irqrestore(&its->lock, flags);			\
									\
	if (its_wait_for_range_completion(its, rd_idx, next_cmd))	\
		pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\
}

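/*
 * The macro above expands to a typed command emitter: for instance,
 * its_send_single_command() below takes an its_cmd_builder_t and, if
 * the builder returns a collection, chases the command with a SYNC
 * targeting it before waiting for the queue to drain past both.
 */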
static void its_build_sync_cmd(struct its_node *its,
			       struct its_cmd_block *sync_cmd,
			       struct its_collection *sync_col)
{
	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
	its_encode_target(sync_cmd, sync_col->target_address);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
			     struct its_collection, its_build_sync_cmd)

static void its_build_vsync_cmd(struct its_node *its,
				struct its_cmd_block *sync_cmd,
				struct its_vpe *sync_vpe)
{
	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
			     struct its_vpe, its_build_vsync_cmd)

static void its_send_int(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_int_cmd, &desc);
}

static void its_send_clear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
}

static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}

static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapti_cmd.dev = dev;
	desc.its_mapti_cmd.phys_id = irq_id;
	desc.its_mapti_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}

static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}

static void its_send_vmapti(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
	struct its_cmd_desc desc;

	desc.its_vmapti_cmd.vpe = map->vpe;
	desc.its_vmapti_cmd.dev = dev;
	desc.its_vmapti_cmd.virt_id = map->vintid;
	desc.its_vmapti_cmd.event_id = id;
	desc.its_vmapti_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
}

static void its_send_vmovi(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
	struct its_cmd_desc desc;

	desc.its_vmovi_cmd.vpe = map->vpe;
	desc.its_vmovi_cmd.dev = dev;
	desc.its_vmovi_cmd.event_id = id;
	desc.its_vmovi_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}

static void its_send_vmapp(struct its_node *its,
			   struct its_vpe *vpe, bool valid)
{
	struct its_cmd_desc desc;

	desc.its_vmapp_cmd.vpe = vpe;
	desc.its_vmapp_cmd.valid = valid;
	desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];

	its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
}

static void its_send_vmovp(struct its_vpe *vpe)
{
	struct its_cmd_desc desc = {};
	struct its_node *its;
	unsigned long flags;
	int col_id = vpe->col_idx;

	desc.its_vmovp_cmd.vpe = vpe;

	if (!its_list_map) {
		its = list_first_entry(&its_nodes, struct its_node, entry);
		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
		return;
	}

	/*
	 * Yet another marvel of the architecture. If using the
	 * its_list "feature", we need to make sure that all ITSs
	 * receive all VMOVP commands in the same order. The only way
	 * to guarantee this is to make vmovp a serialization point.
	 *
	 * Wall <-- Head.
	 */
	raw_spin_lock_irqsave(&vmovp_lock, flags);

	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
	desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);

	/* Emit VMOVPs */
	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		if (!require_its_list_vmovp(vpe->its_vm, its))
			continue;

		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
	struct its_cmd_desc desc;

	desc.its_vinvall_cmd.vpe = vpe;
	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}

static void its_send_vinv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	/*
	 * There is no real VINV command. This is just a normal INV,
	 * with a VSYNC instead of a SYNC.
	 */
	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
}

static void its_send_vint(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	/*
	 * There is no real VINT command. This is just a normal INT,
	 * with a VSYNC instead of a SYNC.
	 */
	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
}

static void its_send_vclear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	/*
	 * There is no real VCLEAR command. This is just a normal CLEAR,
	 * with a VSYNC instead of a SYNC.
	 */
	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
}

static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
{
	struct its_cmd_desc desc;

	desc.its_invdb_cmd.vpe = vpe;
	its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
}

/*
 * irqchip functions - assumes MSI, mostly.
 */
static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_vlpi_map *map = get_vlpi_map(d);
	irq_hw_number_t hwirq;
	void *va;
	u8 *cfg;

	if (map) {
		va = page_address(map->vm->vprop_page);
		hwirq = map->vintid;

		/* Remember the updated property */
		map->properties &= ~clr;
		map->properties |= set | LPI_PROP_GROUP1;
	} else {
		va = gic_rdists->prop_table_va;
		hwirq = d->hwirq;
	}

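	/*
	 * LPI INTIDs start at 8192, so the configuration byte for
	 * this interrupt lives at offset (hwirq - 8192) in the table.
	 */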
	cfg = va + hwirq - 8192;
	*cfg &= ~clr;
	*cfg |= set | LPI_PROP_GROUP1;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 * Humpf...
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
	else
		dsb(ishst);
}

static void wait_for_syncr(void __iomem *rdbase)
{
	while (readl_relaxed(rdbase + GICR_SYNCR) & 1)
		cpu_relax();
}

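/*
 * With GICR_TYPER.DirectLPI (or on GICv4.1), an LPI can be
 * invalidated by writing its INTID (or vPEID/vINTID pair) straight
 * into GICR_INVLPIR, sparing a full ITS INV+SYNC round trip.
 */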
static void __direct_lpi_inv(struct irq_data *d, u64 val)
{
	void __iomem *rdbase;
	unsigned long flags;
	int cpu;

	/* Target the redistributor this LPI is currently routed to */
	cpu = irq_to_cpuid_lock(d, &flags);
	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);

	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
	gic_write_lpir(val, rdbase + GICR_INVLPIR);
	wait_for_syncr(rdbase);

	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
	irq_to_cpuid_unlock(d, flags);
}

static void direct_lpi_inv(struct irq_data *d)
{
	struct its_vlpi_map *map = get_vlpi_map(d);
	u64 val;

	if (map) {
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);

		WARN_ON(!is_v4_1(its_dev->its));

		val = GICR_INVLPIR_V;
		val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
		val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
	} else {
		val = d->hwirq;
	}

	__direct_lpi_inv(d, val);
}

static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	lpi_write_config(d, clr, set);
	if (gic_rdists->has_direct_lpi &&
	    (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
		direct_lpi_inv(d);
	else if (!irqd_is_forwarded_to_vcpu(d))
		its_send_inv(its_dev, its_get_event_id(d));
	else
		its_send_vinv(its_dev, its_get_event_id(d));
}

static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	struct its_vlpi_map *map;

	/*
	 * GICv4.1 does away with the per-LPI nonsense, nothing to do
	 * here.
	 */
	if (is_v4_1(its_dev->its))
		return;

	map = dev_event_to_vlpi_map(its_dev, event);

	if (map->db_enabled == enable)
		return;

	map->db_enabled = enable;

	/*
	 * More fun with the architecture:
	 *
	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
	 * value or to 1023, depending on the enable bit. But that
	 * would be issuing a mapping for an /existing/ DevID+EventID
	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
	 * to the /same/ vPE, using this opportunity to adjust the
	 * doorbell. Mouahahahaha. We loves it, Precious.
	 */
	its_send_vmovi(its_dev, event);
}

static void its_mask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, false);

	lpi_update_config(d, LPI_PROP_ENABLED, 0);
}

static void its_unmask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, true);

	lpi_update_config(d, 0, LPI_PROP_ENABLED);
}

static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
{
	if (irqd_affinity_is_managed(d))
		return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);

	return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
}

static void its_inc_lpi_count(struct irq_data *d, int cpu)
{
	if (irqd_affinity_is_managed(d))
		atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
	else
		atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
}

static void its_dec_lpi_count(struct irq_data *d, int cpu)
{
	if (irqd_affinity_is_managed(d))
		atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
	else
		atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
}

static unsigned int cpumask_pick_least_loaded(struct irq_data *d,
					      const struct cpumask *cpu_mask)
{
	unsigned int cpu = nr_cpu_ids, tmp;
	int count = S32_MAX;

	for_each_cpu(tmp, cpu_mask) {
		int this_count = its_read_lpi_count(d, tmp);
		if (this_count < count) {
			cpu = tmp;
			count = this_count;
		}
	}

	return cpu;
}

/*
 * As suggested by Thomas Gleixner in:
 * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de
 */
static int its_select_cpu(struct irq_data *d,
			  const struct cpumask *aff_mask)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	static DEFINE_RAW_SPINLOCK(tmpmask_lock);
	static struct cpumask __tmpmask;
	struct cpumask *tmpmask;
	unsigned long flags;
	int cpu, node;

	node = its_dev->its->numa_node;
	tmpmask = &__tmpmask;

	raw_spin_lock_irqsave(&tmpmask_lock, flags);

	if (!irqd_affinity_is_managed(d)) {
		/* First try the NUMA node */
		if (node != NUMA_NO_NODE) {
			/*
			 * Try the intersection of the affinity mask and the
			 * node mask (and the online mask, just to be safe).
			 */
			cpumask_and(tmpmask, cpumask_of_node(node), aff_mask);
			cpumask_and(tmpmask, tmpmask, cpu_online_mask);

			/*
			 * Ideally, we would check if the mask is empty, and
			 * try again on the full node here.
			 *
			 * But it turns out that the way ACPI describes the
			 * affinity for ITSs only deals with memory, not
			 * target CPUs, so it cannot describe a single ITS
			 * placed next to two NUMA nodes.
			 *
			 * Instead, just fall back on the online mask. This
			 * diverges from Thomas' suggestion above.
			 */
			cpu = cpumask_pick_least_loaded(d, tmpmask);
			if (cpu < nr_cpu_ids)
				goto out;

			/* If we can't cross sockets, give up */
			if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
				goto out;

			/* If the above failed, expand the search */
		}

		/* Try the intersection of the affinity and online masks */
		cpumask_and(tmpmask, aff_mask, cpu_online_mask);

		/* If that doesn't fly, the online mask is the last resort */
		if (cpumask_empty(tmpmask))
			cpumask_copy(tmpmask, cpu_online_mask);

		cpu = cpumask_pick_least_loaded(d, tmpmask);
	} else {
		cpumask_copy(tmpmask, aff_mask);

		/* If we cannot cross sockets, limit the search to that node */
		if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
		    node != NUMA_NO_NODE)
			cpumask_and(tmpmask, tmpmask, cpumask_of_node(node));

		cpu = cpumask_pick_least_loaded(d, tmpmask);
	}
out:
	raw_spin_unlock_irqrestore(&tmpmask_lock, flags);

	pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
	return cpu;
}

static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);
	int cpu, prev_cpu;

	/* A forwarded interrupt should use irq_set_vcpu_affinity */
	if (irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	prev_cpu = its_dev->event_map.col_map[id];
	its_dec_lpi_count(d, prev_cpu);

	if (!force)
		cpu = its_select_cpu(d, mask_val);
	else
		cpu = cpumask_pick_least_loaded(d, mask_val);

	if (cpu < 0 || cpu >= nr_cpu_ids)
		goto err;

	/* Don't set the affinity if the target CPU is the current one */
	if (cpu != prev_cpu) {
		target_col = &its_dev->its->collections[cpu];
		its_send_movi(its_dev, target_col, id);
		its_dev->event_map.col_map[id] = cpu;
		irq_data_update_effective_affinity(d, cpumask_of(cpu));
	}

	its_inc_lpi_count(d, cpu);

	return IRQ_SET_MASK_OK_DONE;

err:
	its_inc_lpi_count(d, prev_cpu);
	return -EINVAL;
}

static u64 its_irq_get_msi_base(struct its_device *its_dev)
{
	struct its_node *its = its_dev->its;

	return its->phys_base + GITS_TRANSLATER;
}

static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its;
	u64 addr;

	its = its_dev->its;
	addr = its->get_msi_base(its_dev);

	msg->address_lo		= lower_32_bits(addr);
	msg->address_hi		= upper_32_bits(addr);
	msg->data		= its_get_event_id(d);

	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
}

static int its_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which,
				     bool state)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	if (irqd_is_forwarded_to_vcpu(d)) {
		if (state)
			its_send_vint(its_dev, event);
		else
			its_send_vclear(its_dev, event);
	} else {
		if (state)
			its_send_int(its_dev, event);
		else
			its_send_clear(its_dev, event);
	}

	return 0;
}

static int its_irq_retrigger(struct irq_data *d)
{
	return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
}

/*
 * Two favourable cases:
 *
 * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
 *     for vSGI delivery
 *
 * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
 *     and we're better off mapping all VPEs always
 *
 * If neither (a) nor (b) is true, then we map vPEs on demand.
 */
static bool gic_requires_eager_mapping(void)
{
	if (!its_list_map || gic_rdists->has_rvpeid)
		return true;

	return false;
}

static void its_map_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	if (gic_requires_eager_mapping())
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	/*
	 * If the VM wasn't mapped yet, iterate over the vpes and get
	 * them mapped now.
	 */
	vm->vlpi_count[its->list_nr]++;

	if (vm->vlpi_count[its->list_nr] == 1) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++) {
			struct its_vpe *vpe = vm->vpes[i];
			struct irq_data *d = irq_get_irq_data(vpe->irq);

			/* Map the VPE to the first possible CPU */
			vpe->col_idx = cpumask_first(cpu_online_mask);
			its_send_vmapp(its, vpe, true);
			its_send_vinvall(its, vpe);
			irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
		}
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
	if (gic_requires_eager_mapping())
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	if (!--vm->vlpi_count[its->list_nr]) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++)
			its_send_vmapp(its, vm->vpes[i], false);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

1838
1839static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1840{
1841 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1842 u32 event = its_get_event_id(d);
1843 int ret = 0;
1844
1845 if (!info->map)
1846 return -EINVAL;
1847
1848 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1849
1850 if (!its_dev->event_map.vm) {
1851 struct its_vlpi_map *maps;
1852
1853 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
1854 GFP_ATOMIC);
1855 if (!maps) {
1856 ret = -ENOMEM;
1857 goto out;
1858 }
1859
1860 its_dev->event_map.vm = info->map->vm;
1861 its_dev->event_map.vlpi_maps = maps;
1862 } else if (its_dev->event_map.vm != info->map->vm) {
1863 ret = -EINVAL;
1864 goto out;
1865 }
1866
1867 /* Get our private copy of the mapping information */
1868 its_dev->event_map.vlpi_maps[event] = *info->map;
1869
1870 if (irqd_is_forwarded_to_vcpu(d)) {
1871 /* Already mapped, move it around */
1872 its_send_vmovi(its_dev, event);
1873 } else {
1874 /* Ensure all the VPEs are mapped on this ITS */
1875 its_map_vm(its_dev->its, info->map->vm);
1876
1877 /*
1878 * Flag the interrupt as forwarded so that we can
1879 * start poking the virtual property table.
1880 */
1881 irqd_set_forwarded_to_vcpu(d);
1882
1883 /* Write out the property to the prop table */
1884 lpi_write_config(d, 0xff, info->map->properties);
1885
1886 /* Drop the physical mapping */
1887 its_send_discard(its_dev, event);
1888
1889 /* and install the virtual one */
1890 its_send_vmapti(its_dev, event);
1891
1892 /* Increment the number of VLPIs */
1893 its_dev->event_map.nr_vlpis++;
1894 }
1895
1896out:
1897 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1898 return ret;
1899}
1900
1901static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1902{
1903 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1904 struct its_vlpi_map *map;
1905 int ret = 0;
1906
1907 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1908
1909 map = get_vlpi_map(d);
1910
1911 if (!its_dev->event_map.vm || !map) {
1912 ret = -EINVAL;
1913 goto out;
1914 }
1915
1916 /* Copy our mapping information to the incoming request */
1917 *info->map = *map;
1918
1919out:
1920 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1921 return ret;
1922}
1923
1924static int its_vlpi_unmap(struct irq_data *d)
1925{
1926 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1927 u32 event = its_get_event_id(d);
1928 int ret = 0;
1929
1930 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1931
1932 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1933 ret = -EINVAL;
1934 goto out;
1935 }
1936
1937 /* Drop the virtual mapping */
1938 its_send_discard(its_dev, event);
1939
1940 /* and restore the physical one */
1941 irqd_clr_forwarded_to_vcpu(d);
1942 its_send_mapti(its_dev, d->hwirq, event);
1943 lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1944 LPI_PROP_ENABLED |
1945 LPI_PROP_GROUP1));
1946
1947 /* Potentially unmap the VM from this ITS */
1948 its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1949
1950 /*
1951 * Drop the refcount and make the device available again if
1952 * this was the last VLPI.
1953 */
1954 if (!--its_dev->event_map.nr_vlpis) {
1955 its_dev->event_map.vm = NULL;
1956 kfree(its_dev->event_map.vlpi_maps);
1957 }
1958
1959out:
1960 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1961 return ret;
1962}
1963
1964static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1965{
1966 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1967
1968 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1969 return -EINVAL;
1970
1971 if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1972 lpi_update_config(d, 0xff, info->config);
1973 else
1974 lpi_write_config(d, 0xff, info->config);
1975 its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1976
1977 return 0;
1978}
1979
1980static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1981{
1982 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1983 struct its_cmd_info *info = vcpu_info;
1984
1985 /* Need a v4 ITS */
1986 if (!is_v4(its_dev->its))
1987 return -EINVAL;
1988
1989 /* Unmap request? */
1990 if (!info)
1991 return its_vlpi_unmap(d);
1992
1993 switch (info->cmd_type) {
1994 case MAP_VLPI:
1995 return its_vlpi_map(d, info);
1996
1997 case GET_VLPI:
1998 return its_vlpi_get(d, info);
1999
2000 case PROP_UPDATE_VLPI:
2001 case PROP_UPDATE_AND_INV_VLPI:
2002 return its_vlpi_prop_update(d, info);
2003
2004 default:
2005 return -EINVAL;
2006 }
2007}
2008
2009static struct irq_chip its_irq_chip = {
2010 .name = "ITS",
2011 .irq_mask = its_mask_irq,
2012 .irq_unmask = its_unmask_irq,
2013 .irq_eoi = irq_chip_eoi_parent,
2014 .irq_set_affinity = its_set_affinity,
2015 .irq_compose_msi_msg = its_irq_compose_msi_msg,
2016 .irq_set_irqchip_state = its_irq_set_irqchip_state,
2017 .irq_retrigger = its_irq_retrigger,
2018 .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
2019};
2020
2021
/*
 * How we allocate LPIs:
 *
 * lpi_range_list contains ranges of LPIs that are available to
 * allocate from. To allocate LPIs, just pick the first range that
 * fits the required allocation, and reduce it by the required
 * amount. Once empty, remove the range from the list.
 *
 * To free a range of LPIs, add a free range to the list, sort it and
 * merge the result if the new range happens to be adjacent to an
 * already free block.
 *
 * The consequence of the above is that allocation cost is low, but
 * freeing is expensive. We assume that freeing rarely occurs.
 */
#define ITS_MAX_LPI_NRBITS	16 /* 64K LPIs */

static DEFINE_MUTEX(lpi_range_lock);
static LIST_HEAD(lpi_range_list);

struct lpi_range {
	struct list_head	entry;
	u32			base_id;
	u32			span;
};

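/*
 * Worked example of the scheme described above: allocating 32 LPIs
 * from a free range [8192, 65535] shrinks it to [8224, 65535];
 * freeing them re-inserts [8192, 8223], which then merges back with
 * its neighbour in free_lpi_range().
 */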
static struct lpi_range *mk_lpi_range(u32 base, u32 span)
{
	struct lpi_range *range;

	range = kmalloc(sizeof(*range), GFP_KERNEL);
	if (range) {
		range->base_id = base;
		range->span = span;
	}

	return range;
}

static int alloc_lpi_range(u32 nr_lpis, u32 *base)
{
	struct lpi_range *range, *tmp;
	int err = -ENOSPC;

	mutex_lock(&lpi_range_lock);

	list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
		if (range->span >= nr_lpis) {
			*base = range->base_id;
			range->base_id += nr_lpis;
			range->span -= nr_lpis;

			if (range->span == 0) {
				list_del(&range->entry);
				kfree(range);
			}

			err = 0;
			break;
		}
	}

	mutex_unlock(&lpi_range_lock);

	pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
	return err;
}

static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
{
	if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
		return;
	if (a->base_id + a->span != b->base_id)
		return;
	b->base_id = a->base_id;
	b->span += a->span;
	list_del(&a->entry);
	kfree(a);
}

static int free_lpi_range(u32 base, u32 nr_lpis)
{
	struct lpi_range *new, *old;

	new = mk_lpi_range(base, nr_lpis);
	if (!new)
		return -ENOMEM;

	mutex_lock(&lpi_range_lock);

	list_for_each_entry_reverse(old, &lpi_range_list, entry) {
		if (old->base_id < base)
			break;
	}
2116 /*
2117 * old is the last element with ->base_id smaller than base,
2118 * so new goes right after it. If there are no elements with
2119 * ->base_id smaller than base, &old->entry ends up pointing
 * at the head of the list, and inserting new at the start of
2121 * the list is the right thing to do in that case as well.
2122 */
2123 list_add(&new->entry, &old->entry);
2124 /*
2125 * Now check if we can merge with the preceding and/or
2126 * following ranges.
2127 */
2128 merge_lpi_ranges(old, new);
2129 merge_lpi_ranges(new, list_next_entry(new, entry));
2130
2131 mutex_unlock(&lpi_range_lock);
2132 return 0;
2133}
2134
2135static int __init its_lpi_init(u32 id_bits)
2136{
2137 u32 lpis = (1UL << id_bits) - 8192;
2138 u32 numlpis;
2139 int err;
2140
2141 numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
2142
2143 if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
2144 lpis = numlpis;
2145 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
2146 lpis);
2147 }
2148
2149 /*
2150 * Initializing the allocator is just the same as freeing the
2151 * full range of LPIs.
2152 */
2153 err = free_lpi_range(8192, lpis);
2154 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
2155 return err;
2156}
2157
2158static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
2159{
2160 unsigned long *bitmap = NULL;
2161 int err = 0;
2162
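	/*
	 * If no free range is large enough, halve the request and
	 * retry; the caller learns the actual count via *nr_ids.
	 */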
2163 do {
2164 err = alloc_lpi_range(nr_irqs, base);
2165 if (!err)
2166 break;
2167
2168 nr_irqs /= 2;
2169 } while (nr_irqs > 0);
2170
2171 if (!nr_irqs)
2172 err = -ENOSPC;
2173
2174 if (err)
2175 goto out;
2176
2177 bitmap = bitmap_zalloc(nr_irqs, GFP_ATOMIC);
2178 if (!bitmap)
2179 goto out;
2180
2181 *nr_ids = nr_irqs;
2182
2183out:
2184 if (!bitmap)
2185 *base = *nr_ids = 0;
2186
2187 return bitmap;
2188}
2189
2190static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
2191{
2192 WARN_ON(free_lpi_range(base, nr_ids));
2193 bitmap_free(bitmap);
2194}
2195
2196static void gic_reset_prop_table(void *va)
2197{
2198 /* Priority 0xa0, Group-1, disabled */
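	/*
	 * Each LPI gets one configuration byte: bit 0 enables the LPI,
	 * bit 1 is LPI_PROP_GROUP1, and bits [7:2] hold the priority.
	 */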
2199 memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
2200
2201 /* Make sure the GIC will observe the written configuration */
2202 gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
2203}
2204
2205static struct page *its_allocate_prop_table(gfp_t gfp_flags)
2206{
2207 struct page *prop_page;
2208
2209 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
2210 if (!prop_page)
2211 return NULL;
2212
2213 gic_reset_prop_table(page_address(prop_page));
2214
2215 return prop_page;
2216}
2217
2218static void its_free_prop_table(struct page *prop_page)
2219{
2220 free_pages((unsigned long)page_address(prop_page),
2221 get_order(LPI_PROPBASE_SZ));
2222}
2223
2224static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
2225{
2226 phys_addr_t start, end, addr_end;
2227 u64 i;
2228
2229 /*
2230 * We don't bother checking for a kdump kernel as by
2231 * construction, the LPI tables are out of this kernel's
2232 * memory map.
2233 */
2234 if (is_kdump_kernel())
2235 return true;
2236
2237 addr_end = addr + size - 1;
2238
2239 for_each_reserved_mem_range(i, &start, &end) {
2240 if (addr >= start && addr_end <= end)
2241 return true;
2242 }
2243
2244 /* Not found, not a good sign... */
2245 pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
2246 &addr, &addr_end);
2247 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2248 return false;
2249}
2250
2251static int gic_reserve_range(phys_addr_t addr, unsigned long size)
2252{
2253 if (efi_enabled(EFI_CONFIG_TABLES))
2254 return efi_mem_reserve_persistent(addr, size);
2255
2256 return 0;
2257}
2258
2259static int __init its_setup_lpi_prop_table(void)
2260{
2261 if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
2262 u64 val;
2263
2264 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2265 lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
2266
2267 gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
2268 gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
2269 LPI_PROPBASE_SZ,
2270 MEMREMAP_WB);
2271 gic_reset_prop_table(gic_rdists->prop_table_va);
2272 } else {
2273 struct page *page;
2274
2275 lpi_id_bits = min_t(u32,
2276 GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
2277 ITS_MAX_LPI_NRBITS);
2278 page = its_allocate_prop_table(GFP_NOWAIT);
2279 if (!page) {
2280 pr_err("Failed to allocate PROPBASE\n");
2281 return -ENOMEM;
2282 }
2283
2284 gic_rdists->prop_table_pa = page_to_phys(page);
2285 gic_rdists->prop_table_va = page_address(page);
2286 WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
2287 LPI_PROPBASE_SZ));
2288 }
2289
2290 pr_info("GICv3: using LPI property table @%pa\n",
2291 &gic_rdists->prop_table_pa);
2292
2293 return its_lpi_init(lpi_id_bits);
2294}
2295
2296static const char *its_base_type_string[] = {
2297 [GITS_BASER_TYPE_DEVICE] = "Devices",
2298 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
2299 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
2300 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
2301 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
2302 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
2303 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
2304};
2305
2306static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
2307{
2308 u32 idx = baser - its->tables;
2309
2310 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
2311}
2312
2313static void its_write_baser(struct its_node *its, struct its_baser *baser,
2314 u64 val)
2315{
2316 u32 idx = baser - its->tables;
2317
2318 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
2319 baser->val = its_read_baser(its, baser);
2320}
2321
2322static int its_setup_baser(struct its_node *its, struct its_baser *baser,
2323 u64 cache, u64 shr, u32 order, bool indirect)
2324{
2325 u64 val = its_read_baser(its, baser);
2326 u64 esz = GITS_BASER_ENTRY_SIZE(val);
2327 u64 type = GITS_BASER_TYPE(val);
2328 u64 baser_phys, tmp;
2329 u32 alloc_pages, psz;
2330 struct page *page;
2331 void *base;
2332
2333 psz = baser->psz;
2334 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
2335 if (alloc_pages > GITS_BASER_PAGES_MAX) {
2336 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
2337 &its->phys_base, its_base_type_string[type],
2338 alloc_pages, GITS_BASER_PAGES_MAX);
2339 alloc_pages = GITS_BASER_PAGES_MAX;
2340 order = get_order(GITS_BASER_PAGES_MAX * psz);
2341 }
2342
2343 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
2344 if (!page)
2345 return -ENOMEM;
2346
2347 base = (void *)page_address(page);
2348 baser_phys = virt_to_phys(base);
2349
2350 /* Check if the physical address of the memory is above 48bits */
2351 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
2352
2353 /* 52bit PA is supported only when PageSize=64K */
2354 if (psz != SZ_64K) {
2355 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
2356 free_pages((unsigned long)base, order);
2357 return -ENXIO;
2358 }
2359
2360 /* Convert 52bit PA to 48bit field */
2361 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
2362 }
2363
2364retry_baser:
2365 val = (baser_phys |
2366 (type << GITS_BASER_TYPE_SHIFT) |
2367 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
2368 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
2369 cache |
2370 shr |
2371 GITS_BASER_VALID);
2372
2373 val |= indirect ? GITS_BASER_INDIRECT : 0x0;
2374
2375 switch (psz) {
2376 case SZ_4K:
2377 val |= GITS_BASER_PAGE_SIZE_4K;
2378 break;
2379 case SZ_16K:
2380 val |= GITS_BASER_PAGE_SIZE_16K;
2381 break;
2382 case SZ_64K:
2383 val |= GITS_BASER_PAGE_SIZE_64K;
2384 break;
2385 }
2386
2387 if (!shr)
2388 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
2389
2390 its_write_baser(its, baser, val);
2391 tmp = baser->val;
2392
2393 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
2394 /*
2395 * Shareability didn't stick. Just use
2396 * whatever the read reported, which is likely
		 * to be the only thing this ITS
2398 * supports. If that's zero, make it
2399 * non-cacheable as well.
2400 */
2401 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
2402 if (!shr)
2403 cache = GITS_BASER_nC;
2404
2405 goto retry_baser;
2406 }
2407
2408 if (val != tmp) {
2409 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
2410 &its->phys_base, its_base_type_string[type],
2411 val, tmp);
2412 free_pages((unsigned long)base, order);
2413 return -ENXIO;
2414 }
2415
2416 baser->order = order;
2417 baser->base = base;
2418 baser->psz = psz;
2419 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
2420
2421 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
2422 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
2423 its_base_type_string[type],
2424 (unsigned long)virt_to_phys(base),
2425 indirect ? "indirect" : "flat", (int)esz,
2426 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
2427
2428 return 0;
2429}
2430
2431static bool its_parse_indirect_baser(struct its_node *its,
2432 struct its_baser *baser,
2433 u32 *order, u32 ids)
2434{
2435 u64 tmp = its_read_baser(its, baser);
2436 u64 type = GITS_BASER_TYPE(tmp);
2437 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
2438 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
2439 u32 new_order = *order;
2440 u32 psz = baser->psz;
2441 bool indirect = false;
2442
2443 /* No need to enable Indirection if memory requirement < (psz*2)bytes */
	/* No need to enable indirection if the memory requirement is < (psz * 2) bytes */
2445 /*
		 * Find out whether hw supports a single or two-level table
		 * by reading bit 62 back after writing '1' to it.
2448 */
2449 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
2450 indirect = !!(baser->val & GITS_BASER_INDIRECT);
2451
2452 if (indirect) {
			/*
			 * Each lvl2 table is a single ITS page ('psz' bytes)
			 * and covers psz/esz IDs. To size the lvl1 table,
			 * subtract the ID bits covered by one lvl2 table from
			 * 'ids' (as reported by the ITS hardware), then
			 * multiply by the lvl1 table entry size.
			 */
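			/*
			 * Worked example (illustrative numbers): with
			 * psz = SZ_64K and esz = 8 bytes, each lvl2 page
			 * covers 64K / 8 = 8192 IDs, so 13 bits are
			 * subtracted from 'ids' here.
			 */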
2460 ids -= ilog2(psz / (int)esz);
2461 esz = GITS_LVL1_ENTRY_SIZE;
2462 }
2463 }
2464
2465 /*
2466 * Allocate as many entries as required to fit the
2467 * range of device IDs that the ITS can grok... The ID
2468 * space being incredibly sparse, this results in a
	 * massive waste of memory if the two-level device table
	 * feature is not supported by the hardware.
2471 */
2472 new_order = max_t(u32, get_order(esz << ids), new_order);
2473 if (new_order > MAX_PAGE_ORDER) {
2474 new_order = MAX_PAGE_ORDER;
2475 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
2476 pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
2477 &its->phys_base, its_base_type_string[type],
2478 device_ids(its), ids);
2479 }
2480
2481 *order = new_order;
2482
2483 return indirect;
2484}
2485
2486static u32 compute_common_aff(u64 val)
2487{
2488 u32 aff, clpiaff;
2489
2490 aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
2491 clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);
2492
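	/*
	 * Illustrative: CommonLPIAff == 2 keeps only the top two
	 * affinity bytes (Aff3.Aff2), so CPUs sharing those affinity
	 * levels compare as equal here.
	 */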
2493 return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
2494}
2495
2496static u32 compute_its_aff(struct its_node *its)
2497{
2498 u64 val;
2499 u32 svpet;
2500
2501 /*
2502 * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
	 * the resulting affinity. We then use that to see if it matches
	 * our own affinity.
2505 */
2506 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
2507 val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
2508 val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
2509 return compute_common_aff(val);
2510}
2511
2512static struct its_node *find_sibling_its(struct its_node *cur_its)
2513{
2514 struct its_node *its;
2515 u32 aff;
2516
2517 if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
2518 return NULL;
2519
2520 aff = compute_its_aff(cur_its);
2521
2522 list_for_each_entry(its, &its_nodes, entry) {
2523 u64 baser;
2524
2525 if (!is_v4_1(its) || its == cur_its)
2526 continue;
2527
2528 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2529 continue;
2530
2531 if (aff != compute_its_aff(its))
2532 continue;
2533
2534 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2535 baser = its->tables[2].val;
2536 if (!(baser & GITS_BASER_VALID))
2537 continue;
2538
2539 return its;
2540 }
2541
2542 return NULL;
2543}
2544
2545static void its_free_tables(struct its_node *its)
2546{
2547 int i;
2548
2549 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2550 if (its->tables[i].base) {
2551 free_pages((unsigned long)its->tables[i].base,
2552 its->tables[i].order);
2553 its->tables[i].base = NULL;
2554 }
2555 }
2556}
2557
2558static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
2559{
2560 u64 psz = SZ_64K;
2561
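	/* Probe downwards: try 64K pages first, then 16K, then 4K. */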
2562 while (psz) {
2563 u64 val, gpsz;
2564
2565 val = its_read_baser(its, baser);
2566 val &= ~GITS_BASER_PAGE_SIZE_MASK;
2567
2568 switch (psz) {
2569 case SZ_64K:
2570 gpsz = GITS_BASER_PAGE_SIZE_64K;
2571 break;
2572 case SZ_16K:
2573 gpsz = GITS_BASER_PAGE_SIZE_16K;
2574 break;
2575 case SZ_4K:
2576 default:
2577 gpsz = GITS_BASER_PAGE_SIZE_4K;
2578 break;
2579 }
2580
2581 gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT;
2582
2583 val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz);
2584 its_write_baser(its, baser, val);
2585
2586 if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz)
2587 break;
2588
2589 switch (psz) {
2590 case SZ_64K:
2591 psz = SZ_16K;
2592 break;
2593 case SZ_16K:
2594 psz = SZ_4K;
2595 break;
2596 case SZ_4K:
2597 default:
2598 return -1;
2599 }
2600 }
2601
2602 baser->psz = psz;
2603 return 0;
2604}
2605
2606static int its_alloc_tables(struct its_node *its)
2607{
2608 u64 shr = GITS_BASER_InnerShareable;
2609 u64 cache = GITS_BASER_RaWaWb;
2610 int err, i;
2611
2612 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
2613 /* erratum 24313: ignore memory access type */
2614 cache = GITS_BASER_nCnB;
2615
2616 if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) {
2617 cache = GITS_BASER_nC;
2618 shr = 0;
2619 }
2620
2621 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2622 struct its_baser *baser = its->tables + i;
2623 u64 val = its_read_baser(its, baser);
2624 u64 type = GITS_BASER_TYPE(val);
2625 bool indirect = false;
2626 u32 order;
2627
2628 if (type == GITS_BASER_TYPE_NONE)
2629 continue;
2630
2631 if (its_probe_baser_psz(its, baser)) {
2632 its_free_tables(its);
2633 return -ENXIO;
2634 }
2635
2636 order = get_order(baser->psz);
2637
2638 switch (type) {
2639 case GITS_BASER_TYPE_DEVICE:
2640 indirect = its_parse_indirect_baser(its, baser, &order,
2641 device_ids(its));
2642 break;
2643
2644 case GITS_BASER_TYPE_VCPU:
2645 if (is_v4_1(its)) {
2646 struct its_node *sibling;
2647
2648 WARN_ON(i != 2);
2649 if ((sibling = find_sibling_its(its))) {
2650 *baser = sibling->tables[2];
2651 its_write_baser(its, baser, baser->val);
2652 continue;
2653 }
2654 }
2655
2656 indirect = its_parse_indirect_baser(its, baser, &order,
2657 ITS_MAX_VPEID_BITS);
2658 break;
2659 }
2660
2661 err = its_setup_baser(its, baser, cache, shr, order, indirect);
2662 if (err < 0) {
2663 its_free_tables(its);
2664 return err;
2665 }
2666
2667 /* Update settings which will be used for next BASERn */
2668 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
2669 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
2670 }
2671
2672 return 0;
2673}
2674
2675static u64 inherit_vpe_l1_table_from_its(void)
2676{
2677 struct its_node *its;
2678 u64 val;
2679 u32 aff;
2680
2681 val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2682 aff = compute_common_aff(val);
2683
2684 list_for_each_entry(its, &its_nodes, entry) {
2685 u64 baser, addr;
2686
2687 if (!is_v4_1(its))
2688 continue;
2689
2690 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2691 continue;
2692
2693 if (aff != compute_its_aff(its))
2694 continue;
2695
2696 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2697 baser = its->tables[2].val;
2698 if (!(baser & GITS_BASER_VALID))
2699 continue;
2700
2701 /* We have a winner! */
2702 gic_data_rdist()->vpe_l1_base = its->tables[2].base;
2703
2704 val = GICR_VPROPBASER_4_1_VALID;
2705 if (baser & GITS_BASER_INDIRECT)
2706 val |= GICR_VPROPBASER_4_1_INDIRECT;
2707 val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE,
2708 FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser));
2709 switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) {
2710 case GIC_PAGE_SIZE_64K:
2711 addr = GITS_BASER_ADDR_48_to_52(baser);
2712 break;
2713 default:
2714 addr = baser & GENMASK_ULL(47, 12);
2715 break;
2716 }
2717 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
2718 if (rdists_support_shareable()) {
2719 val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
2720 FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
2721 val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
2722 FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
2723 }
2724 val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
2725
2726 return val;
2727 }
2728
2729 return 0;
2730}
2731
2732static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
2733{
2734 u32 aff;
2735 u64 val;
2736 int cpu;
2737
2738 val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2739 aff = compute_common_aff(val);
2740
2741 for_each_possible_cpu(cpu) {
2742 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2743
2744 if (!base || cpu == smp_processor_id())
2745 continue;
2746
2747 val = gic_read_typer(base + GICR_TYPER);
2748 if (aff != compute_common_aff(val))
2749 continue;
2750
2751 /*
2752 * At this point, we have a victim. This particular CPU
2753 * has already booted, and has an affinity that matches
2754 * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
2755 * Make sure we don't write the Z bit in that case.
2756 */
2757 val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2758 val &= ~GICR_VPROPBASER_4_1_Z;
2759
2760 gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2761 *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
2762
2763 return val;
2764 }
2765
2766 return 0;
2767}
2768
2769static bool allocate_vpe_l2_table(int cpu, u32 id)
2770{
2771 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2772 unsigned int psz, esz, idx, npg, gpsz;
2773 u64 val;
2774 struct page *page;
2775 __le64 *table;
2776
2777 if (!gic_rdists->has_rvpeid)
2778 return true;
2779
2780 /* Skip non-present CPUs */
2781 if (!base)
2782 return true;
2783
2784 val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2785
2786 esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
2787 gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2788 npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1;
2789
2790 switch (gpsz) {
2791 default:
2792 WARN_ON(1);
2793 fallthrough;
2794 case GIC_PAGE_SIZE_4K:
2795 psz = SZ_4K;
2796 break;
2797 case GIC_PAGE_SIZE_16K:
2798 psz = SZ_16K;
2799 break;
2800 case GIC_PAGE_SIZE_64K:
2801 psz = SZ_64K;
2802 break;
2803 }
2804
	/* Don't allow a vpe_id that exceeds the single, flat table limit */
2806 if (!(val & GICR_VPROPBASER_4_1_INDIRECT))
2807 return (id < (npg * psz / (esz * SZ_8)));
2808
2809 /* Compute 1st level table index & check if that exceeds table limit */
2810 idx = id >> ilog2(psz / (esz * SZ_8));
2811 if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE))
2812 return false;
2813
2814 table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2815
2816 /* Allocate memory for 2nd level table */
2817 if (!table[idx]) {
2818 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
2819 if (!page)
2820 return false;
2821
2822 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2823 if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2824 gic_flush_dcache_to_poc(page_address(page), psz);
2825
2826 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2827
2828 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2829 if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2830 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
2831
2832 /* Ensure updated table contents are visible to RD hardware */
2833 dsb(sy);
2834 }
2835
2836 return true;
2837}
2838
2839static int allocate_vpe_l1_table(void)
2840{
2841 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2842 u64 val, gpsz, npg, pa;
2843 unsigned int psz = SZ_64K;
2844 unsigned int np, epp, esz;
2845 struct page *page;
2846
2847 if (!gic_rdists->has_rvpeid)
2848 return 0;
2849
2850 /*
	 * If VPENDBASER.Valid is set, disable any previously programmed
2852 * VPE by setting PendingLast while clearing Valid. This has the
2853 * effect of making sure no doorbell will be generated and we can
2854 * then safely clear VPROPBASER.Valid.
2855 */
2856 if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
2857 gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
2858 vlpi_base + GICR_VPENDBASER);
2859
2860 /*
2861 * If we can inherit the configuration from another RD, let's do
2862 * so. Otherwise, we have to go through the allocation process. We
2863 * assume that all RDs have the exact same requirements, as
2864 * nothing will work otherwise.
2865 */
2866 val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
2867 if (val & GICR_VPROPBASER_4_1_VALID)
2868 goto out;
2869
2870 gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC);
2871 if (!gic_data_rdist()->vpe_table_mask)
2872 return -ENOMEM;
2873
2874 val = inherit_vpe_l1_table_from_its();
2875 if (val & GICR_VPROPBASER_4_1_VALID)
2876 goto out;
2877
2878 /* First probe the page size */
2879 val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
2880 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2881 val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
2882 gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2883 esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
2884
2885 switch (gpsz) {
2886 default:
2887 gpsz = GIC_PAGE_SIZE_4K;
2888 fallthrough;
2889 case GIC_PAGE_SIZE_4K:
2890 psz = SZ_4K;
2891 break;
2892 case GIC_PAGE_SIZE_16K:
2893 psz = SZ_16K;
2894 break;
2895 case GIC_PAGE_SIZE_64K:
2896 psz = SZ_64K;
2897 break;
2898 }
2899
2900 /*
2901 * Start populating the register from scratch, including RO fields
2902 * (which we want to print in debug cases...)
2903 */
2904 val = 0;
2905 val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz);
2906 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz);
2907
2908 /* How many entries per GIC page? */
2909 esz++;
2910 epp = psz / (esz * SZ_8);
2911
2912 /*
2913 * If we need more than just a single L1 page, flag the table
2914 * as indirect and compute the number of required L1 pages.
2915 */
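	/*
	 * Worked example (illustrative numbers): with 4K pages and
	 * 16-byte entries, epp = 256, so covering 64K vPEIDs takes
	 * nl2 = 256 L2 pages, whose 8-byte descriptors fit in a
	 * single L1 page (npg = 1).
	 */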
2916 if (epp < ITS_MAX_VPEID) {
2917 int nl2;
2918
2919 val |= GICR_VPROPBASER_4_1_INDIRECT;
2920
2921 /* Number of L2 pages required to cover the VPEID space */
2922 nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp);
2923
2924 /* Number of L1 pages to point to the L2 pages */
2925 npg = DIV_ROUND_UP(nl2 * SZ_8, psz);
2926 } else {
2927 npg = 1;
2928 }
2929
2930 val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
2931
2932 /* Right, that's the number of CPU pages we need for L1 */
2933 np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
2934
2935 pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
2936 np, npg, psz, epp, esz);
2937 page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
2938 if (!page)
2939 return -ENOMEM;
2940
2941 gic_data_rdist()->vpe_l1_base = page_address(page);
2942 pa = virt_to_phys(page_address(page));
2943 WARN_ON(!IS_ALIGNED(pa, psz));
2944
2945 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
2946 if (rdists_support_shareable()) {
2947 val |= GICR_VPROPBASER_RaWb;
2948 val |= GICR_VPROPBASER_InnerShareable;
2949 }
2950 val |= GICR_VPROPBASER_4_1_Z;
2951 val |= GICR_VPROPBASER_4_1_VALID;
2952
2953out:
2954 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2955 cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
2956
2957 pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
2958 smp_processor_id(), val,
2959 cpumask_pr_args(gic_data_rdist()->vpe_table_mask));
2960
2961 return 0;
2962}
2963
2964static int its_alloc_collections(struct its_node *its)
2965{
2966 int i;
2967
2968 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
2969 GFP_KERNEL);
2970 if (!its->collections)
2971 return -ENOMEM;
2972
2973 for (i = 0; i < nr_cpu_ids; i++)
2974 its->collections[i].target_address = ~0ULL;
2975
2976 return 0;
2977}
2978
2979static struct page *its_allocate_pending_table(gfp_t gfp_flags)
2980{
2981 struct page *pend_page;
2982
2983 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
2984 get_order(LPI_PENDBASE_SZ));
2985 if (!pend_page)
2986 return NULL;
2987
2988 /* Make sure the GIC will observe the zero-ed page */
2989 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
2990
2991 return pend_page;
2992}
2993
2994static void its_free_pending_table(struct page *pt)
2995{
2996 free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
2997}
2998
2999/*
3000 * Booting with kdump and LPIs enabled is generally fine. Any other
3001 * case is wrong in the absence of firmware/EFI support.
3002 */
3003static bool enabled_lpis_allowed(void)
3004{
3005 phys_addr_t addr;
3006 u64 val;
3007
3008 /* Check whether the property table is in a reserved region */
3009 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
3010 addr = val & GENMASK_ULL(51, 12);
3011
3012 return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
3013}
3014
3015static int __init allocate_lpi_tables(void)
3016{
3017 u64 val;
3018 int err, cpu;
3019
3020 /*
3021 * If LPIs are enabled while we run this from the boot CPU,
3022 * flag the RD tables as pre-allocated if the stars do align.
3023 */
3024 val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
3025 if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
3026 gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
3027 RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
3028 pr_info("GICv3: Using preallocated redistributor tables\n");
3029 }
3030
3031 err = its_setup_lpi_prop_table();
3032 if (err)
3033 return err;
3034
3035 /*
3036 * We allocate all the pending tables anyway, as we may have a
3037 * mix of RDs that have had LPIs enabled, and some that
3038 * don't. We'll free the unused ones as each CPU comes online.
3039 */
3040 for_each_possible_cpu(cpu) {
3041 struct page *pend_page;
3042
3043 pend_page = its_allocate_pending_table(GFP_NOWAIT);
3044 if (!pend_page) {
3045 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
3046 return -ENOMEM;
3047 }
3048
3049 gic_data_rdist_cpu(cpu)->pend_page = pend_page;
3050 }
3051
3052 return 0;
3053}
3054
3055static u64 read_vpend_dirty_clear(void __iomem *vlpi_base)
3056{
3057 u32 count = 1000000; /* 1s! */
3058 bool clean;
3059 u64 val;
3060
3061 do {
3062 val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
3063 clean = !(val & GICR_VPENDBASER_Dirty);
3064 if (!clean) {
3065 count--;
3066 cpu_relax();
3067 udelay(1);
3068 }
3069 } while (!clean && count);
3070
3071 if (unlikely(!clean))
3072 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
3073
3074 return val;
3075}
3076
3077static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
3078{
3079 u64 val;
3080
3081 /* Make sure we wait until the RD is done with the initial scan */
3082 val = read_vpend_dirty_clear(vlpi_base);
3083 val &= ~GICR_VPENDBASER_Valid;
3084 val &= ~clr;
3085 val |= set;
3086 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3087
3088 val = read_vpend_dirty_clear(vlpi_base);
3089 if (unlikely(val & GICR_VPENDBASER_Dirty))
3090 val |= GICR_VPENDBASER_PendingLast;
3091
3092 return val;
3093}
3094
3095static void its_cpu_init_lpis(void)
3096{
3097 void __iomem *rbase = gic_data_rdist_rd_base();
3098 struct page *pend_page;
3099 phys_addr_t paddr;
3100 u64 val, tmp;
3101
3102 if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED)
3103 return;
3104
3105 val = readl_relaxed(rbase + GICR_CTLR);
3106 if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
3107 (val & GICR_CTLR_ENABLE_LPIS)) {
3108 /*
3109 * Check that we get the same property table on all
3110 * RDs. If we don't, this is hopeless.
3111 */
3112 paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
3113 paddr &= GENMASK_ULL(51, 12);
3114 if (WARN_ON(gic_rdists->prop_table_pa != paddr))
3115 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
3116
3117 paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3118 paddr &= GENMASK_ULL(51, 16);
3119
3120 WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
3121 gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED;
3122
3123 goto out;
3124 }
3125
3126 pend_page = gic_data_rdist()->pend_page;
3127 paddr = page_to_phys(pend_page);
3128
3129 /* set PROPBASE */
3130 val = (gic_rdists->prop_table_pa |
3131 GICR_PROPBASER_InnerShareable |
3132 GICR_PROPBASER_RaWaWb |
3133 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
3134
3135 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3136 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
3137
3138 if (!rdists_support_shareable())
3139 tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK;
3140
3141 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
3142 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
3143 /*
3144 * The HW reports non-shareable, we must
3145 * remove the cacheability attributes as
3146 * well.
3147 */
3148 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
3149 GICR_PROPBASER_CACHEABILITY_MASK);
3150 val |= GICR_PROPBASER_nC;
3151 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3152 }
3153 pr_info_once("GIC: using cache flushing for LPI property table\n");
3154 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
3155 }
3156
3157 /* set PENDBASE */
3158 val = (page_to_phys(pend_page) |
3159 GICR_PENDBASER_InnerShareable |
3160 GICR_PENDBASER_RaWaWb);
3161
3162 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3163 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3164
3165 if (!rdists_support_shareable())
3166 tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK;
3167
3168 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
3169 /*
3170 * The HW reports non-shareable, we must remove the
3171 * cacheability attributes as well.
3172 */
3173 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
3174 GICR_PENDBASER_CACHEABILITY_MASK);
3175 val |= GICR_PENDBASER_nC;
3176 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3177 }
3178
3179 /* Enable LPIs */
3180 val = readl_relaxed(rbase + GICR_CTLR);
3181 val |= GICR_CTLR_ENABLE_LPIS;
3182 writel_relaxed(val, rbase + GICR_CTLR);
3183
3184out:
3185 if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
3186 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3187
3188 /*
		 * It's possible for a CPU to receive VLPIs before it is
		 * scheduled as a vPE, especially for the first CPU, and a
		 * VLPI with an INTID larger than 2^(IDbits+1) will be
		 * considered out of range and dropped by the GIC.
		 * So we initialize IDbits to a known value to avoid such drops.
3194 */
3195 val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3196 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
3197 smp_processor_id(), val);
3198 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3199
3200 /*
		 * Also clear the Valid bit of GICR_VPENDBASER, in case some
		 * ancient programming got left in and could end up
		 * corrupting memory.
3204 */
3205 val = its_clear_vpend_valid(vlpi_base, 0, 0);
3206 }
3207
3208 if (allocate_vpe_l1_table()) {
3209 /*
3210 * If the allocation has failed, we're in massive trouble.
3211 * Disable direct injection, and pray that no VM was
3212 * already running...
3213 */
3214 gic_rdists->has_rvpeid = false;
3215 gic_rdists->has_vlpis = false;
3216 }
3217
3218 /* Make sure the GIC has seen the above */
3219 dsb(sy);
3220 gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
3221 pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
3222 smp_processor_id(),
3223 gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ?
3224 "reserved" : "allocated",
3225 &paddr);
3226}
3227
3228static void its_cpu_init_collection(struct its_node *its)
3229{
3230 int cpu = smp_processor_id();
3231 u64 target;
3232
	/* Avoid cross-node collections and their mapping */
3234 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
3235 struct device_node *cpu_node;
3236
3237 cpu_node = of_get_cpu_node(cpu, NULL);
3238 if (its->numa_node != NUMA_NO_NODE &&
3239 its->numa_node != of_node_to_nid(cpu_node))
3240 return;
3241 }
3242
3243 /*
3244 * We now have to bind each collection to its target
3245 * redistributor.
3246 */
3247 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
3248 /*
3249 * This ITS wants the physical address of the
3250 * redistributor.
3251 */
3252 target = gic_data_rdist()->phys_base;
3253 } else {
3254 /* This ITS wants a linear CPU number. */
3255 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
3256 target = GICR_TYPER_CPU_NUMBER(target) << 16;
3257 }
3258
3259 /* Perform collection mapping */
3260 its->collections[cpu].target_address = target;
3261 its->collections[cpu].col_id = cpu;
3262
3263 its_send_mapc(its, &its->collections[cpu], 1);
3264 its_send_invall(its, &its->collections[cpu]);
3265}
3266
3267static void its_cpu_init_collections(void)
3268{
3269 struct its_node *its;
3270
3271 raw_spin_lock(&its_lock);
3272
3273 list_for_each_entry(its, &its_nodes, entry)
3274 its_cpu_init_collection(its);
3275
3276 raw_spin_unlock(&its_lock);
3277}
3278
3279static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
3280{
3281 struct its_device *its_dev = NULL, *tmp;
3282 unsigned long flags;
3283
3284 raw_spin_lock_irqsave(&its->lock, flags);
3285
3286 list_for_each_entry(tmp, &its->its_device_list, entry) {
3287 if (tmp->device_id == dev_id) {
3288 its_dev = tmp;
3289 break;
3290 }
3291 }
3292
3293 raw_spin_unlock_irqrestore(&its->lock, flags);
3294
3295 return its_dev;
3296}
3297
3298static struct its_baser *its_get_baser(struct its_node *its, u32 type)
3299{
3300 int i;
3301
3302 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3303 if (GITS_BASER_TYPE(its->tables[i].val) == type)
3304 return &its->tables[i];
3305 }
3306
3307 return NULL;
3308}
3309
3310static bool its_alloc_table_entry(struct its_node *its,
3311 struct its_baser *baser, u32 id)
3312{
3313 struct page *page;
3314 u32 esz, idx;
3315 __le64 *table;
3316
	/* Don't allow a device id that exceeds the single, flat table limit */
3318 esz = GITS_BASER_ENTRY_SIZE(baser->val);
3319 if (!(baser->val & GITS_BASER_INDIRECT))
3320 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
3321
3322 /* Compute 1st level table index & check if that exceeds table limit */
3323 idx = id >> ilog2(baser->psz / esz);
3324 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
3325 return false;
3326
3327 table = baser->base;
3328
3329 /* Allocate memory for 2nd level table */
3330 if (!table[idx]) {
3331 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
3332 get_order(baser->psz));
3333 if (!page)
3334 return false;
3335
3336 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
3337 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3338 gic_flush_dcache_to_poc(page_address(page), baser->psz);
3339
3340 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
3341
3342 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
3343 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3344 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
3345
3346 /* Ensure updated table contents are visible to ITS hardware */
3347 dsb(sy);
3348 }
3349
3350 return true;
3351}
3352
3353static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
3354{
3355 struct its_baser *baser;
3356
3357 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
3358
	/* Don't allow a device id that exceeds the ITS hardware limit */
3360 if (!baser)
3361 return (ilog2(dev_id) < device_ids(its));
3362
3363 return its_alloc_table_entry(its, baser, dev_id);
3364}
3365
3366static bool its_alloc_vpe_table(u32 vpe_id)
3367{
3368 struct its_node *its;
3369 int cpu;
3370
3371 /*
3372 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
3373 * could try and only do it on ITSs corresponding to devices
3374 * that have interrupts targeted at this VPE, but the
3375 * complexity becomes crazy (and you have tons of memory
3376 * anyway, right?).
3377 */
3378 list_for_each_entry(its, &its_nodes, entry) {
3379 struct its_baser *baser;
3380
3381 if (!is_v4(its))
3382 continue;
3383
3384 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
3385 if (!baser)
3386 return false;
3387
3388 if (!its_alloc_table_entry(its, baser, vpe_id))
3389 return false;
3390 }
3391
	/* Non v4.1? No need to iterate the RDs; bail out early. */
3393 if (!gic_rdists->has_rvpeid)
3394 return true;
3395
3396 /*
3397 * Make sure the L2 tables are allocated for all copies of
3398 * the L1 table on *all* v4.1 RDs.
3399 */
3400 for_each_possible_cpu(cpu) {
3401 if (!allocate_vpe_l2_table(cpu, vpe_id))
3402 return false;
3403 }
3404
3405 return true;
3406}
3407
3408static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
3409 int nvecs, bool alloc_lpis)
3410{
3411 struct its_device *dev;
3412 unsigned long *lpi_map = NULL;
3413 unsigned long flags;
3414 u16 *col_map = NULL;
3415 void *itt;
3416 int lpi_base;
3417 int nr_lpis;
3418 int nr_ites;
3419 int sz;
3420
3421 if (!its_alloc_device_table(its, dev_id))
3422 return NULL;
3423
3424 if (WARN_ON(!is_power_of_2(nvecs)))
3425 nvecs = roundup_pow_of_two(nvecs);
3426
3427 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3428 /*
3429 * Even if the device wants a single LPI, the ITT must be
3430 * sized as a power of two (and you need at least one bit...).
3431 */
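	/*
	 * Illustrative: a device asking for a single MSI still gets a
	 * two-entry ITT, since MAPD encodes the ITT size as a number
	 * of EventID bits and needs at least one bit.
	 */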
3432 nr_ites = max(2, nvecs);
3433 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
3434 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
3435 itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
3436 if (alloc_lpis) {
3437 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
3438 if (lpi_map)
3439 col_map = kcalloc(nr_lpis, sizeof(*col_map),
3440 GFP_KERNEL);
3441 } else {
3442 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
3443 nr_lpis = 0;
3444 lpi_base = 0;
3445 }
3446
3447 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
3448 kfree(dev);
3449 kfree(itt);
3450 bitmap_free(lpi_map);
3451 kfree(col_map);
3452 return NULL;
3453 }
3454
3455 gic_flush_dcache_to_poc(itt, sz);
3456
3457 dev->its = its;
3458 dev->itt = itt;
3459 dev->nr_ites = nr_ites;
3460 dev->event_map.lpi_map = lpi_map;
3461 dev->event_map.col_map = col_map;
3462 dev->event_map.lpi_base = lpi_base;
3463 dev->event_map.nr_lpis = nr_lpis;
3464 raw_spin_lock_init(&dev->event_map.vlpi_lock);
3465 dev->device_id = dev_id;
3466 INIT_LIST_HEAD(&dev->entry);
3467
3468 raw_spin_lock_irqsave(&its->lock, flags);
3469 list_add(&dev->entry, &its->its_device_list);
3470 raw_spin_unlock_irqrestore(&its->lock, flags);
3471
3472 /* Map device to its ITT */
3473 its_send_mapd(dev, 1);
3474
3475 return dev;
3476}
3477
3478static void its_free_device(struct its_device *its_dev)
3479{
3480 unsigned long flags;
3481
3482 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
3483 list_del(&its_dev->entry);
3484 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
3485 kfree(its_dev->event_map.col_map);
3486 kfree(its_dev->itt);
3487 kfree(its_dev);
3488}
3489
3490static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
3491{
3492 int idx;
	/* Find a free LPI region in lpi_map and allocate it. */
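	/* The region size is rounded up to a power of two by get_count_order(). */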
3494 /* Find a free LPI region in lpi_map and allocate them. */
3495 idx = bitmap_find_free_region(dev->event_map.lpi_map,
3496 dev->event_map.nr_lpis,
3497 get_count_order(nvecs));
3498 if (idx < 0)
3499 return -ENOSPC;
3500
3501 *hwirq = dev->event_map.lpi_base + idx;
3502
3503 return 0;
3504}
3505
3506static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
3507 int nvec, msi_alloc_info_t *info)
3508{
3509 struct its_node *its;
3510 struct its_device *its_dev;
3511 struct msi_domain_info *msi_info;
3512 u32 dev_id;
3513 int err = 0;
3514
3515 /*
3516 * We ignore "dev" entirely, and rely on the dev_id that has
3517 * been passed via the scratchpad. This limits this domain's
3518 * usefulness to upper layers that definitely know that they
3519 * are built on top of the ITS.
3520 */
3521 dev_id = info->scratchpad[0].ul;
3522
3523 msi_info = msi_get_domain_info(domain);
3524 its = msi_info->data;
3525
3526 if (!gic_rdists->has_direct_lpi &&
3527 vpe_proxy.dev &&
3528 vpe_proxy.dev->its == its &&
3529 dev_id == vpe_proxy.dev->device_id) {
3530 /* Bad luck. Get yourself a better implementation */
3531 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
3532 dev_id);
3533 return -EINVAL;
3534 }
3535
3536 mutex_lock(&its->dev_alloc_lock);
3537 its_dev = its_find_device(its, dev_id);
3538 if (its_dev) {
3539 /*
		 * We have already seen this ID, probably through
3541 * another alias (PCI bridge of some sort). No need to
3542 * create the device.
3543 */
3544 its_dev->shared = true;
3545 pr_debug("Reusing ITT for devID %x\n", dev_id);
3546 goto out;
3547 }
3548
3549 its_dev = its_create_device(its, dev_id, nvec, true);
3550 if (!its_dev) {
3551 err = -ENOMEM;
3552 goto out;
3553 }
3554
3555 if (info->flags & MSI_ALLOC_FLAGS_PROXY_DEVICE)
3556 its_dev->shared = true;
3557
3558 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
3559out:
3560 mutex_unlock(&its->dev_alloc_lock);
3561 info->scratchpad[0].ptr = its_dev;
3562 return err;
3563}
3564
3565static struct msi_domain_ops its_msi_domain_ops = {
3566 .msi_prepare = its_msi_prepare,
3567};
3568
3569static int its_irq_gic_domain_alloc(struct irq_domain *domain,
3570 unsigned int virq,
3571 irq_hw_number_t hwirq)
3572{
3573 struct irq_fwspec fwspec;
3574
3575 if (irq_domain_get_of_node(domain->parent)) {
3576 fwspec.fwnode = domain->parent->fwnode;
3577 fwspec.param_count = 3;
3578 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
3579 fwspec.param[1] = hwirq;
3580 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
3581 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
3582 fwspec.fwnode = domain->parent->fwnode;
3583 fwspec.param_count = 2;
3584 fwspec.param[0] = hwirq;
3585 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
3586 } else {
3587 return -EINVAL;
3588 }
3589
3590 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
3591}
3592
3593static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3594 unsigned int nr_irqs, void *args)
3595{
3596 msi_alloc_info_t *info = args;
3597 struct its_device *its_dev = info->scratchpad[0].ptr;
3598 struct its_node *its = its_dev->its;
3599 struct irq_data *irqd;
3600 irq_hw_number_t hwirq;
3601 int err;
3602 int i;
3603
3604 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
3605 if (err)
3606 return err;
3607
3608 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
3609 if (err)
3610 return err;
3611
3612 for (i = 0; i < nr_irqs; i++) {
3613 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
3614 if (err)
3615 return err;
3616
3617 irq_domain_set_hwirq_and_chip(domain, virq + i,
3618 hwirq + i, &its_irq_chip, its_dev);
3619 irqd = irq_get_irq_data(virq + i);
3620 irqd_set_single_target(irqd);
3621 irqd_set_affinity_on_activate(irqd);
3622 irqd_set_resend_when_in_progress(irqd);
3623 pr_debug("ID:%d pID:%d vID:%d\n",
3624 (int)(hwirq + i - its_dev->event_map.lpi_base),
3625 (int)(hwirq + i), virq + i);
3626 }
3627
3628 return 0;
3629}
3630
3631static int its_irq_domain_activate(struct irq_domain *domain,
3632 struct irq_data *d, bool reserve)
3633{
3634 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3635 u32 event = its_get_event_id(d);
3636 int cpu;
3637
3638 cpu = its_select_cpu(d, cpu_online_mask);
3639 if (cpu < 0 || cpu >= nr_cpu_ids)
3640 return -EINVAL;
3641
3642 its_inc_lpi_count(d, cpu);
3643 its_dev->event_map.col_map[event] = cpu;
3644 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3645
3646 /* Map the GIC IRQ and event to the device */
3647 its_send_mapti(its_dev, d->hwirq, event);
3648 return 0;
3649}
3650
3651static void its_irq_domain_deactivate(struct irq_domain *domain,
3652 struct irq_data *d)
3653{
3654 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3655 u32 event = its_get_event_id(d);
3656
3657 its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
3658 /* Stop the delivery of interrupts */
3659 its_send_discard(its_dev, event);
3660}
3661
3662static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
3663 unsigned int nr_irqs)
3664{
3665 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
3666 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3667 struct its_node *its = its_dev->its;
3668 int i;
3669
3670 bitmap_release_region(its_dev->event_map.lpi_map,
3671 its_get_event_id(irq_domain_get_irq_data(domain, virq)),
3672 get_count_order(nr_irqs));
3673
3674 for (i = 0; i < nr_irqs; i++) {
3675 struct irq_data *data = irq_domain_get_irq_data(domain,
3676 virq + i);
3677 /* Nuke the entry in the domain */
3678 irq_domain_reset_irq_data(data);
3679 }
3680
3681 mutex_lock(&its->dev_alloc_lock);
3682
3683 /*
3684 * If all interrupts have been freed, start mopping the
3685 * floor. This is conditioned on the device not being shared.
3686 */
3687 if (!its_dev->shared &&
3688 bitmap_empty(its_dev->event_map.lpi_map,
3689 its_dev->event_map.nr_lpis)) {
3690 its_lpi_free(its_dev->event_map.lpi_map,
3691 its_dev->event_map.lpi_base,
3692 its_dev->event_map.nr_lpis);
3693
3694 /* Unmap device/itt */
3695 its_send_mapd(its_dev, 0);
3696 its_free_device(its_dev);
3697 }
3698
3699 mutex_unlock(&its->dev_alloc_lock);
3700
3701 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3702}
3703
3704static const struct irq_domain_ops its_domain_ops = {
3705 .alloc = its_irq_domain_alloc,
3706 .free = its_irq_domain_free,
3707 .activate = its_irq_domain_activate,
3708 .deactivate = its_irq_domain_deactivate,
3709};
3710
3711/*
3712 * This is insane.
3713 *
3714 * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
3715 * likely), the only way to perform an invalidate is to use a fake
3716 * device to issue an INV command, implying that the LPI has first
3717 * been mapped to some event on that device. Since this is not exactly
3718 * cheap, we try to keep that mapping around as long as possible, and
3719 * only issue an UNMAP if we're short on available slots.
3720 *
3721 * Broken by design(tm).
3722 *
3723 * GICv4.1, on the other hand, mandates that we're able to invalidate
3724 * by writing to a MMIO register. It doesn't implement the whole of
3725 * DirectLPI, but that's good enough. And most of the time, we don't
3726 * even have to invalidate anything, as the redistributor can be told
3727 * whether to generate a doorbell or not (we thus leave it enabled,
3728 * always).
3729 */
3730static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
3731{
3732 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3733 if (gic_rdists->has_rvpeid)
3734 return;
3735
3736 /* Already unmapped? */
3737 if (vpe->vpe_proxy_event == -1)
3738 return;
3739
3740 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
3741 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
3742
3743 /*
3744 * We don't track empty slots at all, so let's move the
3745 * next_victim pointer if we can quickly reuse that slot
3746 * instead of nuking an existing entry. Not clear that this is
3747 * always a win though, and this might just generate a ripple
3748 * effect... Let's just hope VPEs don't migrate too often.
3749 */
3750 if (vpe_proxy.vpes[vpe_proxy.next_victim])
3751 vpe_proxy.next_victim = vpe->vpe_proxy_event;
3752
3753 vpe->vpe_proxy_event = -1;
3754}
3755
3756static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
3757{
3758 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3759 if (gic_rdists->has_rvpeid)
3760 return;
3761
3762 if (!gic_rdists->has_direct_lpi) {
3763 unsigned long flags;
3764
3765 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3766 its_vpe_db_proxy_unmap_locked(vpe);
3767 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3768 }
3769}
3770
3771static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
3772{
3773 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3774 if (gic_rdists->has_rvpeid)
3775 return;
3776
3777 /* Already mapped? */
3778 if (vpe->vpe_proxy_event != -1)
3779 return;
3780
3781 /* This slot was already allocated. Kick the other VPE out. */
3782 if (vpe_proxy.vpes[vpe_proxy.next_victim])
3783 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
3784
3785 /* Map the new VPE instead */
3786 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
3787 vpe->vpe_proxy_event = vpe_proxy.next_victim;
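	/* Advance the victim slot round-robin across the proxy ITT. */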
3788 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
3789
3790 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
3791 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
3792}
3793
3794static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
3795{
3796 unsigned long flags;
3797 struct its_collection *target_col;
3798
3799 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3800 if (gic_rdists->has_rvpeid)
3801 return;
3802
3803 if (gic_rdists->has_direct_lpi) {
3804 void __iomem *rdbase;
3805
3806 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
3807 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3808 wait_for_syncr(rdbase);
3809
3810 return;
3811 }
3812
3813 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3814
3815 its_vpe_db_proxy_map_locked(vpe);
3816
3817 target_col = &vpe_proxy.dev->its->collections[to];
3818 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
3819 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
3820
3821 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3822}
3823
3824static int its_vpe_set_affinity(struct irq_data *d,
3825 const struct cpumask *mask_val,
3826 bool force)
3827{
3828 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3829 struct cpumask common, *table_mask;
3830 unsigned long flags;
3831 int from, cpu;
3832
3833 /*
3834 * Changing affinity is mega expensive, so let's be as lazy as
3835 * we can and only do it if we really have to. Also, if mapped
3836 * into the proxy device, we need to move the doorbell
3837 * interrupt to its new location.
3838 *
3839 * Another thing is that changing the affinity of a vPE affects
3840 * *other interrupts* such as all the vLPIs that are routed to
3841 * this vPE. This means that the irq_desc lock is not enough to
3842 * protect us, and that we must ensure nobody samples vpe->col_idx
3843 * during the update, hence the lock below which must also be
3844 * taken on any vLPI handling path that evaluates vpe->col_idx.
3845 */
3846 from = vpe_to_cpuid_lock(vpe, &flags);
3847 table_mask = gic_data_rdist_cpu(from)->vpe_table_mask;
3848
3849 /*
3850 * If we are offered another CPU in the same GICv4.1 ITS
3851 * affinity, pick this one. Otherwise, any CPU will do.
3852 */
3853 if (table_mask && cpumask_and(&common, mask_val, table_mask))
3854 cpu = cpumask_test_cpu(from, &common) ? from : cpumask_first(&common);
3855 else
3856 cpu = cpumask_first(mask_val);
3857
3858 if (from == cpu)
3859 goto out;
3860
3861 vpe->col_idx = cpu;
3862
3863 its_send_vmovp(vpe);
3864 its_vpe_db_proxy_move(vpe, from, cpu);
3865
3866out:
3867 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3868 vpe_to_cpuid_unlock(vpe, flags);
3869
3870 return IRQ_SET_MASK_OK_DONE;
3871}
3872
3873static void its_wait_vpt_parse_complete(void)
3874{
3875 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3876 u64 val;
3877
3878 if (!gic_rdists->has_vpend_valid_dirty)
3879 return;
3880
3881 WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER,
3882 val,
3883 !(val & GICR_VPENDBASER_Dirty),
3884 1, 500));
3885}
3886
3887static void its_vpe_schedule(struct its_vpe *vpe)
3888{
3889 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3890 u64 val;
3891
3892 /* Schedule the VPE */
3893 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
3894 GENMASK_ULL(51, 12);
3895 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3896 if (rdists_support_shareable()) {
3897 val |= GICR_VPROPBASER_RaWb;
3898 val |= GICR_VPROPBASER_InnerShareable;
3899 }
3900 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3901
3902 val = virt_to_phys(page_address(vpe->vpt_page)) &
3903 GENMASK_ULL(51, 16);
3904 if (rdists_support_shareable()) {
3905 val |= GICR_VPENDBASER_RaWaWb;
3906 val |= GICR_VPENDBASER_InnerShareable;
3907 }
3908 /*
3909 * There is no good way of finding out if the pending table is
3910 * empty as we can race against the doorbell interrupt very
3911 * easily. So in the end, vpe->pending_last is only an
3912 * indication that the vcpu has something pending, not one
3913 * that the pending table is empty. A good implementation
3914 * would be able to read its coarse map pretty quickly anyway,
3915 * making this a tolerable issue.
3916 */
3917 val |= GICR_VPENDBASER_PendingLast;
3918 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
3919 val |= GICR_VPENDBASER_Valid;
3920 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3921}
3922
3923static void its_vpe_deschedule(struct its_vpe *vpe)
3924{
3925 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3926 u64 val;
3927
3928 val = its_clear_vpend_valid(vlpi_base, 0, 0);
3929
3930 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
3931 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
3932}
3933
3934static void its_vpe_invall(struct its_vpe *vpe)
3935{
3936 struct its_node *its;
3937
3938 list_for_each_entry(its, &its_nodes, entry) {
3939 if (!is_v4(its))
3940 continue;
3941
3942 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
3943 continue;
3944
3945 /*
3946 * Sending a VINVALL to a single ITS is enough, as all
3947 * we need is to reach the redistributors.
3948 */
3949 its_send_vinvall(its, vpe);
3950 return;
3951 }
3952}
3953
3954static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
3955{
3956 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3957 struct its_cmd_info *info = vcpu_info;
3958
3959 switch (info->cmd_type) {
3960 case SCHEDULE_VPE:
3961 its_vpe_schedule(vpe);
3962 return 0;
3963
3964 case DESCHEDULE_VPE:
3965 its_vpe_deschedule(vpe);
3966 return 0;
3967
3968 case COMMIT_VPE:
3969 its_wait_vpt_parse_complete();
3970 return 0;
3971
3972 case INVALL_VPE:
3973 its_vpe_invall(vpe);
3974 return 0;
3975
3976 default:
3977 return -EINVAL;
3978 }
3979}
3980
3981static void its_vpe_send_cmd(struct its_vpe *vpe,
3982 void (*cmd)(struct its_device *, u32))
3983{
3984 unsigned long flags;
3985
3986 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3987
3988 its_vpe_db_proxy_map_locked(vpe);
3989 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
3990
3991 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3992}
3993
3994static void its_vpe_send_inv(struct irq_data *d)
3995{
3996 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3997
3998 if (gic_rdists->has_direct_lpi)
3999 __direct_lpi_inv(d, d->parent_data->hwirq);
4000 else
4001 its_vpe_send_cmd(vpe, its_send_inv);
4002}
4003
4004static void its_vpe_mask_irq(struct irq_data *d)
4005{
4006 /*
	 * We need to mask the LPI, which is described by the parent
	 * irq_data. Instead of calling into the parent (which won't
	 * exactly do the right thing), let's simply use the
4010 * parent_data pointer. Yes, I'm naughty.
4011 */
	lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
	its_vpe_send_inv(d);
}

static void its_vpe_unmask_irq(struct irq_data *d)
{
	/* Same hack as above... */
	lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
	its_vpe_send_inv(d);
}

static int its_vpe_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which,
				     bool state)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);

	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

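	/*
	 * The doorbell pending state is set/cleared either directly
	 * via the redistributor (DirectLPI) or with INT/CLEAR commands
	 * routed through the proxy device.
	 */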
	if (gic_rdists->has_direct_lpi) {
		void __iomem *rdbase;

		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
		if (state) {
			gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
		} else {
			gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
			wait_for_syncr(rdbase);
		}
	} else {
		if (state)
			its_vpe_send_cmd(vpe, its_send_int);
		else
			its_vpe_send_cmd(vpe, its_send_clear);
	}

	return 0;
}

static int its_vpe_retrigger(struct irq_data *d)
{
	return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
}

static struct irq_chip its_vpe_irq_chip = {
	.name			= "GICv4-vpe",
	.irq_mask		= its_vpe_mask_irq,
	.irq_unmask		= its_vpe_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_vpe_set_affinity,
	.irq_retrigger		= its_vpe_retrigger,
	.irq_set_irqchip_state	= its_vpe_set_irqchip_state,
	.irq_set_vcpu_affinity	= its_vpe_set_vcpu_affinity,
};

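/*
 * Find (and cache) the first GICv4.1-capable ITS. Any v4.1-specific
 * command (INVDB, VSGI) can be sent via any such ITS, so one is enough.
 */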
static struct its_node *find_4_1_its(void)
{
	static struct its_node *its = NULL;

	if (!its) {
		list_for_each_entry(its, &its_nodes, entry) {
			if (is_v4_1(its))
				return its;
		}

		/* Oops? */
		its = NULL;
	}

	return its;
}

static void its_vpe_4_1_send_inv(struct irq_data *d)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_node *its;

	/*
	 * GICv4.1 wants doorbells to be invalidated using the
	 * INVDB command in order to be broadcast to all RDs. Send
	 * it to the first valid ITS, and let the HW do its magic.
	 */
	its = find_4_1_its();
	if (its)
		its_send_invdb(its, vpe);
}

static void its_vpe_4_1_mask_irq(struct irq_data *d)
{
	lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
	its_vpe_4_1_send_inv(d);
}

static void its_vpe_4_1_unmask_irq(struct irq_data *d)
{
	lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
	its_vpe_4_1_send_inv(d);
}

static void its_vpe_4_1_schedule(struct its_vpe *vpe,
				 struct its_cmd_info *info)
{
	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
	u64 val = 0;

	/* Schedule the VPE */
	val |= GICR_VPENDBASER_Valid;
	val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0;
	val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
	val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);

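	/*
	 * Unlike GICv4.0, there is no need to program a VPT address
	 * here: the redistributor already knows about this vPE thanks
	 * to VMAPP, so making it resident only takes its ID and the
	 * group enable bits.
	 */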
	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
}

static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
				   struct its_cmd_info *info)
{
	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
	u64 val;

	if (info->req_db) {
		unsigned long flags;

		/*
		 * vPE is going to block: make the vPE non-resident with
		 * PendingLast clear and DB set. The GIC guarantees that if
		 * we read-back PendingLast clear, then a doorbell will be
		 * delivered when an interrupt comes.
		 *
		 * Note the locking to deal with the update of pending_last
		 * from the doorbell interrupt handler, which can run
		 * concurrently with us.
		 */
		raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
		val = its_clear_vpend_valid(vlpi_base,
					    GICR_VPENDBASER_PendingLast,
					    GICR_VPENDBASER_4_1_DB);
		vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
		raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
	} else {
		/*
		 * We're not blocking, so just make the vPE non-resident
		 * with PendingLast set, indicating that we'll be back.
		 */
		val = its_clear_vpend_valid(vlpi_base,
					    0,
					    GICR_VPENDBASER_PendingLast);
		vpe->pending_last = true;
	}
}

static void its_vpe_4_1_invall(struct its_vpe *vpe)
{
	void __iomem *rdbase;
	unsigned long flags;
	u64 val;
	int cpu;

	val = GICR_INVALLR_V;
	val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);

	/* Target the redistributor this vPE is currently known on */
	cpu = vpe_to_cpuid_lock(vpe, &flags);
	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
	gic_write_lpir(val, rdbase + GICR_INVALLR);

	wait_for_syncr(rdbase);
	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
	vpe_to_cpuid_unlock(vpe, flags);
}

static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_cmd_info *info = vcpu_info;

	switch (info->cmd_type) {
	case SCHEDULE_VPE:
		its_vpe_4_1_schedule(vpe, info);
		return 0;

	case DESCHEDULE_VPE:
		its_vpe_4_1_deschedule(vpe, info);
		return 0;

	case COMMIT_VPE:
		its_wait_vpt_parse_complete();
		return 0;

	case INVALL_VPE:
		its_vpe_4_1_invall(vpe);
		return 0;

	default:
		return -EINVAL;
	}
}

static struct irq_chip its_vpe_4_1_irq_chip = {
	.name			= "GICv4.1-vpe",
	.irq_mask		= its_vpe_4_1_mask_irq,
	.irq_unmask		= its_vpe_4_1_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_vpe_set_affinity,
	.irq_set_vcpu_affinity	= its_vpe_4_1_set_vcpu_affinity,
};

static void its_configure_sgi(struct irq_data *d, bool clear)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_cmd_desc desc;

	desc.its_vsgi_cmd.vpe = vpe;
	desc.its_vsgi_cmd.sgi = d->hwirq;
	desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
	desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
	desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
	desc.its_vsgi_cmd.clear = clear;

	/*
	 * GICv4.1 allows us to send VSGI commands to any ITS as long as the
	 * destination VPE is mapped there. Since we map them eagerly at
	 * activation time, we're pretty sure the first GICv4.1 ITS will do.
	 */
	its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
}

static void its_sgi_mask_irq(struct irq_data *d)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);

	vpe->sgi_config[d->hwirq].enabled = false;
	its_configure_sgi(d, false);
}

static void its_sgi_unmask_irq(struct irq_data *d)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);

	vpe->sgi_config[d->hwirq].enabled = true;
	its_configure_sgi(d, false);
}

static int its_sgi_set_affinity(struct irq_data *d,
				const struct cpumask *mask_val,
				bool force)
{
	/*
	 * There is no notion of affinity for virtual SGIs, at least
	 * not on the host (since they can only target a vPE).
	 * Tell the kernel we've done whatever it asked for.
	 */
	irq_data_update_effective_affinity(d, mask_val);
	return IRQ_SET_MASK_OK;
}

static int its_sgi_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which,
				     bool state)
{
	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	if (state) {
		struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
		struct its_node *its = find_4_1_its();
		u64 val;

		val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
		val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
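		/*
		 * sgir_base maps the 64kB frame at phys_base + 128K,
		 * while GITS_SGIR is an offset from the ITS base,
		 * hence the SZ_128K adjustment.
		 */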
		writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
	} else {
		its_configure_sgi(d, true);
	}

	return 0;
}

static int its_sgi_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	void __iomem *base;
	unsigned long flags;
	u32 count = 1000000;	/* 1s! */
	u32 status;
	int cpu;

	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	/*
	 * Locking galore! We can race against two different events:
	 *
	 * - Concurrent vPE affinity change: we must make sure it cannot
	 *   happen, or we'll talk to the wrong redistributor. This is
	 *   identical to what happens with vLPIs.
	 *
	 * - Concurrent VSGIPENDR access: As it involves accessing two
	 *   MMIO registers, this must be made atomic one way or another.
	 */
	cpu = vpe_to_cpuid_lock(vpe, &flags);
	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
	base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
	writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
	do {
		status = readl_relaxed(base + GICR_VSGIPENDR);
		if (!(status & GICR_VSGIPENDR_BUSY))
			goto out;

		count--;
		if (!count) {
			pr_err_ratelimited("Unable to get SGI status\n");
			goto out;
		}
		cpu_relax();
		udelay(1);
	} while (count);

out:
	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
	vpe_to_cpuid_unlock(vpe, flags);

	if (!count)
		return -ENXIO;

	*val = !!(status & (1 << d->hwirq));

	return 0;
}

static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_cmd_info *info = vcpu_info;

	switch (info->cmd_type) {
	case PROP_UPDATE_VSGI:
		vpe->sgi_config[d->hwirq].priority = info->priority;
		vpe->sgi_config[d->hwirq].group = info->group;
		its_configure_sgi(d, false);
		return 0;

	default:
		return -EINVAL;
	}
}

static struct irq_chip its_sgi_irq_chip = {
	.name			= "GICv4.1-sgi",
	.irq_mask		= its_sgi_mask_irq,
	.irq_unmask		= its_sgi_unmask_irq,
	.irq_set_affinity	= its_sgi_set_affinity,
	.irq_set_irqchip_state	= its_sgi_set_irqchip_state,
	.irq_get_irqchip_state	= its_sgi_get_irqchip_state,
	.irq_set_vcpu_affinity	= its_sgi_set_vcpu_affinity,
};

static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct its_vpe *vpe = args;
	int i;

	/* Yes, we do want 16 SGIs */
	WARN_ON(nr_irqs != 16);

	for (i = 0; i < 16; i++) {
		vpe->sgi_config[i].priority = 0;
		vpe->sgi_config[i].enabled = false;
		vpe->sgi_config[i].group = false;

		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
					      &its_sgi_irq_chip, vpe);
		irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY);
	}

	return 0;
}

static void its_sgi_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq,
				    unsigned int nr_irqs)
{
	/* Nothing to do */
}

static int its_sgi_irq_domain_activate(struct irq_domain *domain,
				       struct irq_data *d, bool reserve)
{
	/* Write out the initial SGI configuration */
	its_configure_sgi(d, false);
	return 0;
}

static void its_sgi_irq_domain_deactivate(struct irq_domain *domain,
					  struct irq_data *d)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);

	/*
	 * The VSGI command is awkward:
	 *
	 * - To change the configuration, CLEAR must be set to false,
	 *   leaving the pending bit unchanged.
	 * - To clear the pending bit, CLEAR must be set to true, leaving
	 *   the configuration unchanged.
	 *
	 * You just can't do both at once, hence the two commands below.
	 */
	vpe->sgi_config[d->hwirq].enabled = false;
	its_configure_sgi(d, false);
	its_configure_sgi(d, true);
}

static const struct irq_domain_ops its_sgi_domain_ops = {
	.alloc		= its_sgi_irq_domain_alloc,
	.free		= its_sgi_irq_domain_free,
	.activate	= its_sgi_irq_domain_activate,
	.deactivate	= its_sgi_irq_domain_deactivate,
};

static int its_vpe_id_alloc(void)
{
	return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
}

static void its_vpe_id_free(u16 id)
{
	ida_simple_remove(&its_vpeid_ida, id);
}

static int its_vpe_init(struct its_vpe *vpe)
{
	struct page *vpt_page;
	int vpe_id;

	/* Allocate vpe_id */
	vpe_id = its_vpe_id_alloc();
	if (vpe_id < 0)
		return vpe_id;

	/* Allocate VPT */
	vpt_page = its_allocate_pending_table(GFP_KERNEL);
	if (!vpt_page) {
		its_vpe_id_free(vpe_id);
		return -ENOMEM;
	}

	if (!its_alloc_vpe_table(vpe_id)) {
		its_vpe_id_free(vpe_id);
		its_free_pending_table(vpt_page);
		return -ENOMEM;
	}

	raw_spin_lock_init(&vpe->vpe_lock);
	vpe->vpe_id = vpe_id;
	vpe->vpt_page = vpt_page;
	if (gic_rdists->has_rvpeid)
		atomic_set(&vpe->vmapp_count, 0);
	else
		vpe->vpe_proxy_event = -1;

	return 0;
}

static void its_vpe_teardown(struct its_vpe *vpe)
{
	its_vpe_db_proxy_unmap(vpe);
	its_vpe_id_free(vpe->vpe_id);
	its_free_pending_table(vpe->vpt_page);
}

static void its_vpe_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq,
				    unsigned int nr_irqs)
{
	struct its_vm *vm = domain->host_data;
	int i;

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *data = irq_domain_get_irq_data(domain,
								virq + i);
		struct its_vpe *vpe = irq_data_get_irq_chip_data(data);

		BUG_ON(vm != vpe->its_vm);

		clear_bit(data->hwirq, vm->db_bitmap);
		its_vpe_teardown(vpe);
		irq_domain_reset_irq_data(data);
	}

	if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
		its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
		its_free_prop_table(vm->vprop_page);
	}
}

static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs, void *args)
{
	struct irq_chip *irqchip = &its_vpe_irq_chip;
	struct its_vm *vm = args;
	unsigned long *bitmap;
	struct page *vprop_page;
	int base, nr_ids, i, err = 0;

	BUG_ON(!vm);

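	/* Allocate one doorbell LPI per vPE in this VM */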
	bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
	if (!bitmap)
		return -ENOMEM;

	if (nr_ids < nr_irqs) {
		its_lpi_free(bitmap, base, nr_ids);
		return -ENOMEM;
	}

	vprop_page = its_allocate_prop_table(GFP_KERNEL);
	if (!vprop_page) {
		its_lpi_free(bitmap, base, nr_ids);
		return -ENOMEM;
	}

	vm->db_bitmap = bitmap;
	vm->db_lpi_base = base;
	vm->nr_db_lpis = nr_ids;
	vm->vprop_page = vprop_page;

	if (gic_rdists->has_rvpeid)
		irqchip = &its_vpe_4_1_irq_chip;

	for (i = 0; i < nr_irqs; i++) {
		vm->vpes[i]->vpe_db_lpi = base + i;
		err = its_vpe_init(vm->vpes[i]);
		if (err)
			break;
		err = its_irq_gic_domain_alloc(domain, virq + i,
					       vm->vpes[i]->vpe_db_lpi);
		if (err)
			break;
		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
					      irqchip, vm->vpes[i]);
		set_bit(i, bitmap);
		irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
	}

	if (err) {
		if (i > 0)
			its_vpe_irq_domain_free(domain, virq, i);

		its_lpi_free(bitmap, base, nr_ids);
		its_free_prop_table(vprop_page);
	}

	return err;
}

static int its_vpe_irq_domain_activate(struct irq_domain *domain,
				       struct irq_data *d, bool reserve)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_node *its;

	/*
	 * If we use the list map, we issue VMAPP on demand... unless
	 * we're on a GICv4.1, in which case we eagerly map the VPE on
	 * all ITSs so that VSGIs can work.
	 */
	if (!gic_requires_eager_mapping())
		return 0;

	/* Map the VPE to the first possible CPU */
	vpe->col_idx = cpumask_first(cpu_online_mask);

	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		its_send_vmapp(its, vpe, true);
		its_send_vinvall(its, vpe);
	}

	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));

	return 0;
}

static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
					  struct irq_data *d)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_node *its;

	/*
	 * If we use the list map on GICv4.0, we unmap the VPE once no
	 * VLPIs are associated with the VM.
	 */
	if (!gic_requires_eager_mapping())
		return;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		its_send_vmapp(its, vpe, false);
	}

	/*
	 * There may be a direct read to the VPT after unmapping the
	 * vPE; to guarantee the validity of such a read, we make the
	 * VPT memory coherent with the CPU caches here.
	 */
	if (find_4_1_its() && !atomic_read(&vpe->vmapp_count))
		gic_flush_dcache_to_poc(page_address(vpe->vpt_page),
					LPI_PENDBASE_SZ);
}

static const struct irq_domain_ops its_vpe_domain_ops = {
	.alloc		= its_vpe_irq_domain_alloc,
	.free		= its_vpe_irq_domain_free,
	.activate	= its_vpe_irq_domain_activate,
	.deactivate	= its_vpe_irq_domain_deactivate,
};

static int its_force_quiescent(void __iomem *base)
{
	u32 count = 1000000;	/* 1s */
	u32 val;

	val = readl_relaxed(base + GITS_CTLR);
	/*
	 * GIC architecture specification requires the ITS to be both
	 * disabled and quiescent for writes to GITS_BASER<n> or
	 * GITS_CBASER to not have UNPREDICTABLE results.
	 */
	if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
		return 0;

	/* Disable the generation of all interrupts to this ITS */
	val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
	writel_relaxed(val, base + GITS_CTLR);

	/* Poll GITS_CTLR and wait until ITS becomes quiescent */
	while (1) {
		val = readl_relaxed(base + GITS_CTLR);
		if (val & GITS_CTLR_QUIESCENT)
			return 0;

		count--;
		if (!count)
			return -EBUSY;

		cpu_relax();
		udelay(1);
	}
}

static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
{
	struct its_node *its = data;

	/* erratum 22375: only alloc 8MB table size (20 bits) */
	its->typer &= ~GITS_TYPER_DEVBITS;
	its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;

	return true;
}

static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
{
	struct its_node *its = data;

	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;

	return true;
}

static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
{
	struct its_node *its = data;

	/* On QDF2400, the size of the ITE is 16Bytes */
	its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
	its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);

	return true;
}

static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
{
	struct its_node *its = its_dev->its;

	/*
	 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
	 * which maps 32-bit writes targeted at a separate window of
	 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
	 * with device ID taken from bits [device_id_bits + 1:2] of
	 * the window offset.
	 */
	return its->pre_its_base + (its_dev->device_id << 2);
}

static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
{
	struct its_node *its = data;
	u32 pre_its_window[2];
	u32 ids;

	if (!fwnode_property_read_u32_array(its->fwnode_handle,
					    "socionext,synquacer-pre-its",
					    pre_its_window,
					    ARRAY_SIZE(pre_its_window))) {

		its->pre_its_base = pre_its_window[0];
		its->get_msi_base = its_irq_get_msi_base_pre_its;

		ids = ilog2(pre_its_window[1]) - 2;
		if (device_ids(its) > ids) {
			its->typer &= ~GITS_TYPER_DEVBITS;
			its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
		}

		/* the pre-ITS breaks isolation, so disable MSI remapping */
		its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_ISOLATED_MSI;
		return true;
	}
	return false;
}

static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
{
	struct its_node *its = data;

	/*
	 * Hip07 insists on using the wrong address for the VLPI
	 * page. Trick it into doing the right thing...
	 */
	its->vlpi_redist_offset = SZ_128K;
	return true;
}

static bool __maybe_unused its_enable_rk3588001(void *data)
{
	struct its_node *its = data;

	if (!of_machine_is_compatible("rockchip,rk3588") &&
	    !of_machine_is_compatible("rockchip,rk3588s"))
		return false;

	its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
	gic_rdists->flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;

	return true;
}

static bool its_set_non_coherent(void *data)
{
	struct its_node *its = data;

	its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
	return true;
}

static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
	{
		.desc	= "ITS: Cavium errata 22375, 24313",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_22375,
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23144
	{
		.desc	= "ITS: Cavium erratum 23144",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_23144,
	},
#endif
#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
	{
		.desc	= "ITS: QDF2400 erratum 0065",
		.iidr	= 0x00001070,	/* QDF2400 ITS rev 1.x */
		.mask	= 0xffffffff,
		.init	= its_enable_quirk_qdf2400_e0065,
	},
#endif
#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
	{
		/*
		 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
		 * implementation, but with a 'pre-ITS' added that requires
		 * special handling in software.
		 */
		.desc	= "ITS: Socionext Synquacer pre-ITS",
		.iidr	= 0x0001143b,
		.mask	= 0xffffffff,
		.init	= its_enable_quirk_socionext_synquacer,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161600802
	{
		.desc	= "ITS: Hip07 erratum 161600802",
		.iidr	= 0x00000004,
		.mask	= 0xffffffff,
		.init	= its_enable_quirk_hip07_161600802,
	},
#endif
#ifdef CONFIG_ROCKCHIP_ERRATUM_3588001
	{
		.desc	= "ITS: Rockchip erratum RK3588001",
		.iidr	= 0x0201743b,
		.mask	= 0xffffffff,
		.init	= its_enable_rk3588001,
	},
#endif
	{
		.desc	= "ITS: non-coherent attribute",
		.property = "dma-noncoherent",
		.init	= its_set_non_coherent,
	},
	{
	}
};

static void its_enable_quirks(struct its_node *its)
{
	u32 iidr = readl_relaxed(its->base + GITS_IIDR);

	gic_enable_quirks(iidr, its_quirks, its);

	if (is_of_node(its->fwnode_handle))
		gic_enable_of_quirks(to_of_node(its->fwnode_handle),
				     its_quirks, its);
}

static int its_save_disable(void)
{
	struct its_node *its;
	int err = 0;

	raw_spin_lock(&its_lock);
	list_for_each_entry(its, &its_nodes, entry) {
		void __iomem *base;

		base = its->base;
		its->ctlr_save = readl_relaxed(base + GITS_CTLR);
		err = its_force_quiescent(base);
		if (err) {
			pr_err("ITS@%pa: failed to quiesce: %d\n",
			       &its->phys_base, err);
			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
			goto err;
		}

		its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
	}

err:
	if (err) {
		list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
			void __iomem *base;

			base = its->base;
			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
		}
	}
	raw_spin_unlock(&its_lock);

	return err;
}

static void its_restore_enable(void)
{
	struct its_node *its;
	int ret;

	raw_spin_lock(&its_lock);
	list_for_each_entry(its, &its_nodes, entry) {
		void __iomem *base;
		int i;

		base = its->base;

		/*
		 * Make sure that the ITS is disabled. If it fails to quiesce,
		 * don't restore it since writing to CBASER or BASER<n>
		 * registers is undefined according to the GIC v3 ITS
		 * Specification.
		 *
		 * Firmware resuming with the ITS enabled is terminally broken.
		 */
		WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
		ret = its_force_quiescent(base);
		if (ret) {
			pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
			       &its->phys_base, ret);
			continue;
		}

		gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);

		/*
		 * Writing CBASER resets CREADR to 0, so make CWRITER and
		 * cmd_write line up with it.
		 */
		its->cmd_write = its->cmd_base;
		gits_write_cwriter(0, base + GITS_CWRITER);

		/* Restore GITS_BASER from the value cache. */
		for (i = 0; i < GITS_BASER_NR_REGS; i++) {
			struct its_baser *baser = &its->tables[i];

			if (!(baser->val & GITS_BASER_VALID))
				continue;

			its_write_baser(its, baser, baser->val);
		}
		writel_relaxed(its->ctlr_save, base + GITS_CTLR);

		/*
		 * Reinit the collection if it's stored in the ITS. This is
		 * indicated by the col_id being less than the HCC field
		 * (CID < HCC), as specified in the GICv3 architecture
		 * documentation.
		 */
		if (its->collections[smp_processor_id()].col_id <
		    GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
			its_cpu_init_collection(its);
	}
	raw_spin_unlock(&its_lock);
}

static struct syscore_ops its_syscore_ops = {
	.suspend = its_save_disable,
	.resume = its_restore_enable,
};

static void __init __iomem *its_map_one(struct resource *res, int *err)
{
	void __iomem *its_base;
	u32 val;

	its_base = ioremap(res->start, SZ_64K);
	if (!its_base) {
		pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
		*err = -ENOMEM;
		return NULL;
	}

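	/* 0x30 and 0x40 are the GICv3/GICv4 architecture revisions */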
	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (val != 0x30 && val != 0x40) {
		pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
		*err = -ENODEV;
		goto out_unmap;
	}

	*err = its_force_quiescent(its_base);
	if (*err) {
		pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
		goto out_unmap;
	}

	return its_base;

out_unmap:
	iounmap(its_base);
	return NULL;
}

static int its_init_domain(struct its_node *its)
{
	struct irq_domain *inner_domain;
	struct msi_domain_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->ops = &its_msi_domain_ops;
	info->data = its;

	inner_domain = irq_domain_create_hierarchy(its_parent,
						   its->msi_domain_flags, 0,
						   its->fwnode_handle, &its_domain_ops,
						   info);
	if (!inner_domain) {
		kfree(info);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);

	return 0;
}

static int its_init_vpe_domain(void)
{
	struct its_node *its;
	u32 devid;
	int entries;

	if (gic_rdists->has_direct_lpi) {
		pr_info("ITS: Using DirectLPI for VPE invalidation\n");
		return 0;
	}

	/* Any ITS will do, even if not v4 */
	its = list_first_entry(&its_nodes, struct its_node, entry);

	entries = roundup_pow_of_two(nr_cpu_ids);
	vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
				 GFP_KERNEL);
	if (!vpe_proxy.vpes)
		return -ENOMEM;

	/* Use the last possible DevID */
	devid = GENMASK(device_ids(its) - 1, 0);
	vpe_proxy.dev = its_create_device(its, devid, entries, false);
	if (!vpe_proxy.dev) {
		kfree(vpe_proxy.vpes);
		pr_err("ITS: Can't allocate GICv4 proxy device\n");
		return -ENOMEM;
	}

	BUG_ON(entries > vpe_proxy.dev->nr_ites);

	raw_spin_lock_init(&vpe_proxy.lock);
	vpe_proxy.next_victim = 0;
	pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
		devid, vpe_proxy.dev->nr_ites);

	return 0;
}

static int __init its_compute_its_list_map(struct its_node *its)
{
	int its_number;
	u32 ctlr;

	/*
	 * This is assumed to be done early enough that we're
	 * guaranteed to be single-threaded, hence no
	 * locking. Should this change, we should address
	 * this.
	 */
	its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
	if (its_number >= GICv4_ITS_LIST_MAX) {
		pr_err("ITS@%pa: No ITSList entry available!\n",
		       &its->phys_base);
		return -EINVAL;
	}

	ctlr = readl_relaxed(its->base + GITS_CTLR);
	ctlr &= ~GITS_CTLR_ITS_NUMBER;
	ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
	writel_relaxed(ctlr, its->base + GITS_CTLR);
	ctlr = readl_relaxed(its->base + GITS_CTLR);
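	/*
	 * The ITS number field may be read-only on some implementations;
	 * if our write didn't stick, go with whatever the ITS reports.
	 */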
	if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
		its_number = ctlr & GITS_CTLR_ITS_NUMBER;
		its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
	}

	if (test_and_set_bit(its_number, &its_list_map)) {
		pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
		       &its->phys_base, its_number);
		return -EINVAL;
	}

	return its_number;
}

static int __init its_probe_one(struct its_node *its)
{
	u64 baser, tmp;
	struct page *page;
	u32 ctlr;
	int err;

	its_enable_quirks(its);

	if (is_v4(its)) {
		if (!(its->typer & GITS_TYPER_VMOVP)) {
			err = its_compute_its_list_map(its);
			if (err < 0)
				goto out;

			its->list_nr = err;

			pr_info("ITS@%pa: Using ITS number %d\n",
				&its->phys_base, err);
		} else {
			pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base);
		}

		if (is_v4_1(its)) {
			u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);

			its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K);
			if (!its->sgir_base) {
				err = -ENOMEM;
				goto out;
			}

			its->mpidr = readl_relaxed(its->base + GITS_MPIDR);

			pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
				&its->phys_base, its->mpidr, svpet);
		}
	}

	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
				get_order(ITS_CMD_QUEUE_SZ));
	if (!page) {
		err = -ENOMEM;
		goto out_unmap_sgir;
	}
	its->cmd_base = (void *)page_address(page);
	its->cmd_write = its->cmd_base;

	err = its_alloc_tables(its);
	if (err)
		goto out_free_cmd;

	err = its_alloc_collections(its);
	if (err)
		goto out_free_tables;

	baser = (virt_to_phys(its->cmd_base)	|
		 GITS_CBASER_RaWaWb		|
		 GITS_CBASER_InnerShareable	|
		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
		 GITS_CBASER_VALID);

	gits_write_cbaser(baser, its->base + GITS_CBASER);
	tmp = gits_read_cbaser(its->base + GITS_CBASER);

	if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
		tmp &= ~GITS_CBASER_SHAREABILITY_MASK;

	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
				   GITS_CBASER_CACHEABILITY_MASK);
			baser |= GITS_CBASER_nC;
			gits_write_cbaser(baser, its->base + GITS_CBASER);
		}
		pr_info("ITS: using cache flushing for cmd queue\n");
		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
	}

	gits_write_cwriter(0, its->base + GITS_CWRITER);
	ctlr = readl_relaxed(its->base + GITS_CTLR);
	ctlr |= GITS_CTLR_ENABLE;
	if (is_v4(its))
		ctlr |= GITS_CTLR_ImDe;
	writel_relaxed(ctlr, its->base + GITS_CTLR);

	err = its_init_domain(its);
	if (err)
		goto out_free_tables;

	raw_spin_lock(&its_lock);
	list_add(&its->entry, &its_nodes);
	raw_spin_unlock(&its_lock);

	return 0;

out_free_tables:
	its_free_tables(its);
out_free_cmd:
	free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_unmap_sgir:
	if (its->sgir_base)
		iounmap(its->sgir_base);
out:
	pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err);
	return err;
}

static bool gic_rdists_supports_plpis(void)
{
	return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}

static int redist_disable_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	u64 timeout = USEC_PER_SEC;
	u64 val;

	if (!gic_rdists_supports_plpis()) {
		pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
		return -ENXIO;
	}

	val = readl_relaxed(rbase + GICR_CTLR);
	if (!(val & GICR_CTLR_ENABLE_LPIS))
		return 0;

	/*
	 * If coming via a CPU hotplug event, we don't need to disable
	 * LPIs before trying to re-enable them. They are already
	 * configured and all is well in the world.
	 *
	 * If running with preallocated tables, there is nothing to do.
	 */
	if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) ||
	    (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
		return 0;

	/*
	 * From that point on, we only try to do some damage control.
	 */
	pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
		smp_processor_id());
	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);

	/* Disable LPIs */
	val &= ~GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/* Make sure any change to GICR_CTLR is observable by the GIC */
	dsb(sy);

	/*
	 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
	 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
	 * Error out if we time out waiting for RWP to clear.
	 */
	while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
		if (!timeout) {
			pr_err("CPU%d: Timeout while disabling LPIs\n",
			       smp_processor_id());
			return -ETIMEDOUT;
		}
		udelay(1);
		timeout--;
	}

	/*
	 * After it has been written to 1, it is IMPLEMENTATION
	 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
	 * cleared to 0. Error out if clearing the bit failed.
	 */
	if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
		pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
		return -EBUSY;
	}

	return 0;
}

int its_cpu_init(void)
{
	if (!list_empty(&its_nodes)) {
		int ret;

		ret = redist_disable_lpis();
		if (ret)
			return ret;

		its_cpu_init_lpis();
		its_cpu_init_collections();
	}

	return 0;
}

static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work)
{
	cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state);
	gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
}

static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work,
		    rdist_memreserve_cpuhp_cleanup_workfn);

static int its_cpu_memreserve_lpi(unsigned int cpu)
{
	struct page *pend_page;
	int ret = 0;

	/* This gets to run exactly once per CPU */
	if (gic_data_rdist()->flags & RD_LOCAL_MEMRESERVE_DONE)
		return 0;

	pend_page = gic_data_rdist()->pend_page;
	if (WARN_ON(!pend_page)) {
		ret = -ENOMEM;
		goto out;
	}
	/*
	 * If the pending table was pre-programmed, free the memory we
	 * preemptively allocated. Otherwise, reserve that memory for
	 * later kexecs.
	 */
	if (gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED) {
		its_free_pending_table(pend_page);
		gic_data_rdist()->pend_page = NULL;
	} else {
		phys_addr_t paddr = page_to_phys(pend_page);
		WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
	}

out:
	/* Last CPU being brought up gets to issue the cleanup */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask))
		schedule_work(&rdist_memreserve_cpuhp_cleanup_work);

	gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE;
	return ret;
}

/* Mark all the BASER registers as invalid before they get reprogrammed */
static int __init its_reset_one(struct resource *res)
{
	void __iomem *its_base;
	int err, i;

	its_base = its_map_one(res, &err);
	if (!its_base)
		return err;

	for (i = 0; i < GITS_BASER_NR_REGS; i++)
		gits_write_baser(0, its_base + GITS_BASER + (i << 3));

	iounmap(its_base);
	return 0;
}

static const struct of_device_id its_device_id[] = {
	{	.compatible	= "arm,gic-v3-its",	},
	{},
};

static struct its_node __init *its_node_init(struct resource *res,
					     struct fwnode_handle *handle, int numa_node)
{
	void __iomem *its_base;
	struct its_node *its;
	int err;

	its_base = its_map_one(res, &err);
	if (!its_base)
		return NULL;

	pr_info("ITS %pR\n", res);

	its = kzalloc(sizeof(*its), GFP_KERNEL);
	if (!its)
		goto out_unmap;

	raw_spin_lock_init(&its->lock);
	mutex_init(&its->dev_alloc_lock);
	INIT_LIST_HEAD(&its->entry);
	INIT_LIST_HEAD(&its->its_device_list);

	its->typer = gic_read_typer(its_base + GITS_TYPER);
	its->base = its_base;
	its->phys_base = res->start;
	its->get_msi_base = its_irq_get_msi_base;
	its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI;

	its->numa_node = numa_node;
	its->fwnode_handle = handle;

	return its;

out_unmap:
	iounmap(its_base);
	return NULL;
}

static void its_node_destroy(struct its_node *its)
{
	iounmap(its->base);
	kfree(its);
}

static int __init its_of_probe(struct device_node *node)
{
	struct device_node *np;
	struct resource res;
	int err;

	/*
	 * Make sure *all* the ITS are reset before we probe any, as
	 * they may be sharing memory. If any of the ITS fails to
	 * reset, don't even try to go any further, as this could
	 * result in something even worse.
	 */
	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		if (!of_device_is_available(np) ||
		    !of_property_read_bool(np, "msi-controller") ||
		    of_address_to_resource(np, 0, &res))
			continue;

		err = its_reset_one(&res);
		if (err)
			return err;
	}

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		struct its_node *its;

		if (!of_device_is_available(np))
			continue;
		if (!of_property_read_bool(np, "msi-controller")) {
			pr_warn("%pOF: no msi-controller property, ITS ignored\n",
				np);
			continue;
		}

		if (of_address_to_resource(np, 0, &res)) {
			pr_warn("%pOF: no regs?\n", np);
			continue;
		}

		its = its_node_init(&res, &np->fwnode, of_node_to_nid(np));
		if (!its)
			return -ENOMEM;

		err = its_probe_one(its);
		if (err) {
			its_node_destroy(its);
			return err;
		}
	}
	return 0;
}

#ifdef CONFIG_ACPI

#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)

#ifdef CONFIG_ACPI_NUMA
struct its_srat_map {
	/* numa node id */
	u32	numa_node;
	/* GIC ITS ID */
	u32	its_id;
};

static struct its_srat_map *its_srat_maps __initdata;
static int its_in_srat __initdata;

static int __init acpi_get_its_numa_node(u32 its_id)
{
	int i;

	for (i = 0; i < its_in_srat; i++) {
		if (its_id == its_srat_maps[i].its_id)
			return its_srat_maps[i].numa_node;
	}
	return NUMA_NO_NODE;
}

static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	return 0;
}

static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	int node;
	struct acpi_srat_gic_its_affinity *its_affinity;

	its_affinity = (struct acpi_srat_gic_its_affinity *)header;
	if (!its_affinity)
		return -EINVAL;

	if (its_affinity->header.length < sizeof(*its_affinity)) {
		pr_err("SRAT: Invalid header length %d in ITS affinity\n",
			its_affinity->header.length);
		return -EINVAL;
	}

	/*
	 * Note that in theory a new proximity node could be created by this
	 * entry as it is an SRAT resource allocation structure.
	 * We do not currently support doing so.
	 */
	node = pxm_to_node(its_affinity->proximity_domain);

	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
		pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
		return 0;
	}

	its_srat_maps[its_in_srat].numa_node = node;
	its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
	its_in_srat++;
	pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
		its_affinity->proximity_domain, its_affinity->its_id, node);

	return 0;
}

static void __init acpi_table_parse_srat_its(void)
{
	int count;

	count = acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_match_srat_its, 0);
	if (count <= 0)
		return;

	its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
				      GFP_KERNEL);
	if (!its_srat_maps)
		return;

	acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_parse_srat_its, 0);
}

/* free the its_srat_maps after ITS probing */
static void __init acpi_its_srat_maps_free(void)
{
	kfree(its_srat_maps);
}
#else
static void __init acpi_table_parse_srat_its(void) { }
static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
static void __init acpi_its_srat_maps_free(void) { }
#endif

static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	struct acpi_madt_generic_translator *its_entry;
	struct fwnode_handle *dom_handle;
	struct its_node *its;
	struct resource res;
	int err;

	its_entry = (struct acpi_madt_generic_translator *)header;
	memset(&res, 0, sizeof(res));
	res.start = its_entry->base_address;
	res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
	res.flags = IORESOURCE_MEM;

	dom_handle = irq_domain_alloc_fwnode(&res.start);
	if (!dom_handle) {
		pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
		       &res.start);
		return -ENOMEM;
	}

	err = iort_register_domain_token(its_entry->translation_id, res.start,
					 dom_handle);
	if (err) {
		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
		       &res.start, its_entry->translation_id);
		goto dom_err;
	}

	its = its_node_init(&res, dom_handle,
			    acpi_get_its_numa_node(its_entry->translation_id));
	if (!its) {
		err = -ENOMEM;
		goto node_err;
	}

	err = its_probe_one(its);
	if (!err)
		return 0;

node_err:
	iort_deregister_domain_token(its_entry->translation_id);
dom_err:
	irq_domain_free_fwnode(dom_handle);
	return err;
}

static int __init its_acpi_reset(union acpi_subtable_headers *header,
				 const unsigned long end)
{
	struct acpi_madt_generic_translator *its_entry;
	struct resource res;

	its_entry = (struct acpi_madt_generic_translator *)header;
	res = (struct resource) {
		.start	= its_entry->base_address,
		.end	= its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	};

	return its_reset_one(&res);
}

static void __init its_acpi_probe(void)
{
	acpi_table_parse_srat_its();
	/*
	 * Make sure *all* the ITS are reset before we probe any, as
	 * they may be sharing memory. If any of the ITS fails to
	 * reset, don't even try to go any further, as this could
	 * result in something even worse.
	 */
	if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
				  its_acpi_reset, 0) > 0)
		acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
				      gic_acpi_parse_madt_its, 0);
	acpi_its_srat_maps_free();
}
#else
static void __init its_acpi_probe(void) { }
#endif

int __init its_lpi_memreserve_init(void)
{
	int state;

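	/* Persistent reservations rely on the EFI memory map */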
	if (!efi_enabled(EFI_CONFIG_TABLES))
		return 0;

	if (list_empty(&its_nodes))
		return 0;

	gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				  "irqchip/arm/gicv3/memreserve:online",
				  its_cpu_memreserve_lpi,
				  NULL);
	if (state < 0)
		return state;

	gic_rdists->cpuhp_memreserve_state = state;

	return 0;
}

int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
		    struct irq_domain *parent_domain)
{
	struct device_node *of_node;
	struct its_node *its;
	bool has_v4 = false;
	bool has_v4_1 = false;
	int err;

	gic_rdists = rdists;

	its_parent = parent_domain;
	of_node = to_of_node(handle);
	if (of_node)
		its_of_probe(of_node);
	else
		its_acpi_probe();

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	err = allocate_lpi_tables();
	if (err)
		return err;

	list_for_each_entry(its, &its_nodes, entry) {
		has_v4 |= is_v4(its);
		has_v4_1 |= is_v4_1(its);
	}

	/* Don't bother with inconsistent systems */
	if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
		rdists->has_rvpeid = false;

	if (has_v4 && rdists->has_vlpis) {
		const struct irq_domain_ops *sgi_ops;

		if (has_v4_1)
			sgi_ops = &its_sgi_domain_ops;
		else
			sgi_ops = NULL;

		if (its_init_vpe_domain() ||
		    its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
			rdists->has_vlpis = false;
			pr_err("ITS: Disabling GICv4 support\n");
		}
	}

	register_syscore_ops(&its_syscore_ops);

	return 0;
}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
5 */
6
7#include <linux/acpi.h>
8#include <linux/acpi_iort.h>
9#include <linux/bitfield.h>
10#include <linux/bitmap.h>
11#include <linux/cpu.h>
12#include <linux/crash_dump.h>
13#include <linux/delay.h>
14#include <linux/efi.h>
15#include <linux/interrupt.h>
16#include <linux/iommu.h>
17#include <linux/iopoll.h>
18#include <linux/irqdomain.h>
19#include <linux/list.h>
20#include <linux/log2.h>
21#include <linux/memblock.h>
22#include <linux/mm.h>
23#include <linux/msi.h>
24#include <linux/of.h>
25#include <linux/of_address.h>
26#include <linux/of_irq.h>
27#include <linux/of_pci.h>
28#include <linux/of_platform.h>
29#include <linux/percpu.h>
30#include <linux/slab.h>
31#include <linux/syscore_ops.h>
32
33#include <linux/irqchip.h>
34#include <linux/irqchip/arm-gic-v3.h>
35#include <linux/irqchip/arm-gic-v4.h>
36
37#include <asm/cputype.h>
38#include <asm/exception.h>
39
40#include "irq-gic-common.h"
41
42#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
43#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
44#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
45
46#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
47#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
48
49#define RD_LOCAL_LPI_ENABLED BIT(0)
50#define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1)
51#define RD_LOCAL_MEMRESERVE_DONE BIT(2)
52
53static u32 lpi_id_bits;
54
55/*
56 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
57 * deal with (one configuration byte per interrupt). PENDBASE has to
58 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
59 */
60#define LPI_NRBITS lpi_id_bits
61#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
62#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
63
64#define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI
65
66/*
67 * Collection structure - just an ID, and a redistributor address to
68 * ping. We use one per CPU as a bag of interrupts assigned to this
69 * CPU.
70 */
71struct its_collection {
72 u64 target_address;
73 u16 col_id;
74};
75
76/*
77 * The ITS_BASER structure - contains memory information, cached
78 * value of BASER register configuration and ITS page size.
79 */
80struct its_baser {
81 void *base;
82 u64 val;
83 u32 order;
84 u32 psz;
85};
86
87struct its_device;
88
89/*
90 * The ITS structure - contains most of the infrastructure, with the
91 * top-level MSI domain, the command queue, the collections, and the
92 * list of devices writing to it.
93 *
94 * dev_alloc_lock has to be taken for device allocations, while the
95 * spinlock must be taken to parse data structures such as the device
96 * list.
97 */
98struct its_node {
99 raw_spinlock_t lock;
100 struct mutex dev_alloc_lock;
101 struct list_head entry;
102 void __iomem *base;
103 void __iomem *sgir_base;
104 phys_addr_t phys_base;
105 struct its_cmd_block *cmd_base;
106 struct its_cmd_block *cmd_write;
107 struct its_baser tables[GITS_BASER_NR_REGS];
108 struct its_collection *collections;
109 struct fwnode_handle *fwnode_handle;
110 u64 (*get_msi_base)(struct its_device *its_dev);
111 u64 typer;
112 u64 cbaser_save;
113 u32 ctlr_save;
114 u32 mpidr;
115 struct list_head its_device_list;
116 u64 flags;
117 unsigned long list_nr;
118 int numa_node;
119 unsigned int msi_domain_flags;
120 u32 pre_its_base; /* for Socionext Synquacer */
121 int vlpi_redist_offset;
122};
123
124#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS))
125#define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP))
126#define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
127
128#define ITS_ITT_ALIGN SZ_256
129
130/* The maximum number of VPEID bits supported by VLPI commands */
131#define ITS_MAX_VPEID_BITS \
132 ({ \
133 int nvpeid = 16; \
134 if (gic_rdists->has_rvpeid && \
135 gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \
136 nvpeid = 1 + (gic_rdists->gicd_typer2 & \
137 GICD_TYPER2_VID); \
138 \
139 nvpeid; \
140 })
141#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
142
143/* Convert page order to size in bytes */
144#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
145
146struct event_lpi_map {
147 unsigned long *lpi_map;
148 u16 *col_map;
149 irq_hw_number_t lpi_base;
150 int nr_lpis;
151 raw_spinlock_t vlpi_lock;
152 struct its_vm *vm;
153 struct its_vlpi_map *vlpi_maps;
154 int nr_vlpis;
155};
156
157/*
158 * The ITS view of a device - belongs to an ITS, owns an interrupt
159 * translation table, and a list of interrupts. If it some of its
160 * LPIs are injected into a guest (GICv4), the event_map.vm field
161 * indicates which one.
162 */
163struct its_device {
164 struct list_head entry;
165 struct its_node *its;
166 struct event_lpi_map event_map;
167 void *itt;
168 u32 nr_ites;
169 u32 device_id;
170 bool shared;
171};
172
173static struct {
174 raw_spinlock_t lock;
175 struct its_device *dev;
176 struct its_vpe **vpes;
177 int next_victim;
178} vpe_proxy;
179
180struct cpu_lpi_count {
181 atomic_t managed;
182 atomic_t unmanaged;
183};
184
185static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);
186
187static LIST_HEAD(its_nodes);
188static DEFINE_RAW_SPINLOCK(its_lock);
189static struct rdists *gic_rdists;
190static struct irq_domain *its_parent;
191
192static unsigned long its_list_map;
193static u16 vmovp_seq_num;
194static DEFINE_RAW_SPINLOCK(vmovp_lock);
195
196static DEFINE_IDA(its_vpeid_ida);
197
198#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
199#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
200#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
201#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
202
203/*
204 * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
205 * always have vSGIs mapped.
206 */
207static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
208{
209 return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
210}
211
212static u16 get_its_list(struct its_vm *vm)
213{
214 struct its_node *its;
215 unsigned long its_list = 0;
216
217 list_for_each_entry(its, &its_nodes, entry) {
218 if (!is_v4(its))
219 continue;
220
221 if (require_its_list_vmovp(vm, its))
222 __set_bit(its->list_nr, &its_list);
223 }
224
225 return (u16)its_list;
226}
227
228static inline u32 its_get_event_id(struct irq_data *d)
229{
230 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
231 return d->hwirq - its_dev->event_map.lpi_base;
232}
233
234static struct its_collection *dev_event_to_col(struct its_device *its_dev,
235 u32 event)
236{
237 struct its_node *its = its_dev->its;
238
239 return its->collections + its_dev->event_map.col_map[event];
240}
241
242static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
243 u32 event)
244{
245 if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
246 return NULL;
247
248 return &its_dev->event_map.vlpi_maps[event];
249}
250
251static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
252{
253 if (irqd_is_forwarded_to_vcpu(d)) {
254 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
255 u32 event = its_get_event_id(d);
256
257 return dev_event_to_vlpi_map(its_dev, event);
258 }
259
260 return NULL;
261}
262
263static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
264{
265 raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
266 return vpe->col_idx;
267}
268
269static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
270{
271 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
272}
273
274static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
275{
276 struct its_vlpi_map *map = get_vlpi_map(d);
277 int cpu;
278
279 if (map) {
280 cpu = vpe_to_cpuid_lock(map->vpe, flags);
281 } else {
282 /* Physical LPIs are already locked via the irq_desc lock */
283 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
284 cpu = its_dev->event_map.col_map[its_get_event_id(d)];
285 /* Keep GCC quiet... */
286 *flags = 0;
287 }
288
289 return cpu;
290}
291
292static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
293{
294 struct its_vlpi_map *map = get_vlpi_map(d);
295
296 if (map)
297 vpe_to_cpuid_unlock(map->vpe, flags);
298}
299
300static struct its_collection *valid_col(struct its_collection *col)
301{
302 if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
303 return NULL;
304
305 return col;
306}
307
308static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
309{
310 if (valid_col(its->collections + vpe->col_idx))
311 return vpe;
312
313 return NULL;
314}
315
316/*
317 * ITS command descriptors - parameters to be encoded in a command
318 * block.
319 */
320struct its_cmd_desc {
321 union {
322 struct {
323 struct its_device *dev;
324 u32 event_id;
325 } its_inv_cmd;
326
327 struct {
328 struct its_device *dev;
329 u32 event_id;
330 } its_clear_cmd;
331
332 struct {
333 struct its_device *dev;
334 u32 event_id;
335 } its_int_cmd;
336
337 struct {
338 struct its_device *dev;
339 int valid;
340 } its_mapd_cmd;
341
342 struct {
343 struct its_collection *col;
344 int valid;
345 } its_mapc_cmd;
346
347 struct {
348 struct its_device *dev;
349 u32 phys_id;
350 u32 event_id;
351 } its_mapti_cmd;
352
353 struct {
354 struct its_device *dev;
355 struct its_collection *col;
356 u32 event_id;
357 } its_movi_cmd;
358
359 struct {
360 struct its_device *dev;
361 u32 event_id;
362 } its_discard_cmd;
363
364 struct {
365 struct its_collection *col;
366 } its_invall_cmd;
367
368 struct {
369 struct its_vpe *vpe;
370 } its_vinvall_cmd;
371
372 struct {
373 struct its_vpe *vpe;
374 struct its_collection *col;
375 bool valid;
376 } its_vmapp_cmd;
377
378 struct {
379 struct its_vpe *vpe;
380 struct its_device *dev;
381 u32 virt_id;
382 u32 event_id;
383 bool db_enabled;
384 } its_vmapti_cmd;
385
386 struct {
387 struct its_vpe *vpe;
388 struct its_device *dev;
389 u32 event_id;
390 bool db_enabled;
391 } its_vmovi_cmd;
392
393 struct {
394 struct its_vpe *vpe;
395 struct its_collection *col;
396 u16 seq_num;
397 u16 its_list;
398 } its_vmovp_cmd;
399
400 struct {
401 struct its_vpe *vpe;
402 } its_invdb_cmd;
403
404 struct {
405 struct its_vpe *vpe;
406 u8 sgi;
407 u8 priority;
408 bool enable;
409 bool group;
410 bool clear;
411 } its_vsgi_cmd;
412 };
413};
414
415/*
416 * The ITS command block, which is what the ITS actually parses.
417 */
418struct its_cmd_block {
419 union {
420 u64 raw_cmd[4];
421 __le64 raw_cmd_le[4];
422 };
423};
424
425#define ITS_CMD_QUEUE_SZ SZ_64K
426#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
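
/*
 * A command is four 64-bit doublewords (32 bytes), so the 64kB
 * queue above holds SZ_64K / 32 == 2048 entries.
 */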
427
428typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
429 struct its_cmd_block *,
430 struct its_cmd_desc *);
431
432typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
433 struct its_cmd_block *,
434 struct its_cmd_desc *);
435
436static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
437{
438 u64 mask = GENMASK_ULL(h, l);
439 *raw_cmd &= ~mask;
440 *raw_cmd |= (val << l) & mask;
441}
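
/*
 * Illustrative expansion of the helper above (not extra driver
 * logic): placing DeviceID 0x42 into bits [63:32] of the first
 * doubleword boils down to
 *
 *	mask	    = GENMASK_ULL(63, 32);
 *	raw_cmd[0] &= ~mask;
 *	raw_cmd[0] |= (0x42ULL << 32) & mask;
 *
 * Every its_encode_*() helper below is this pattern, hardcoding
 * the doubleword and bit range the command encoding assigns to
 * its field.
 */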
442
443static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
444{
445 its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
446}
447
448static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
449{
450 its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
451}
452
453static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
454{
455 its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
456}
457
458static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
459{
460 its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
461}
462
463static void its_encode_size(struct its_cmd_block *cmd, u8 size)
464{
465 its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
466}
467
468static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
469{
470 its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
471}
472
473static void its_encode_valid(struct its_cmd_block *cmd, int valid)
474{
475 its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
476}
477
478static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
479{
480 its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
481}
482
483static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
484{
485 its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
486}
487
488static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
489{
490 its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
491}
492
493static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
494{
495 its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
496}
497
498static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
499{
500 its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
501}
502
503static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
504{
505 its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
506}
507
508static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
509{
510 its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
511}
512
513static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
514{
515 its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
516}
517
518static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
519{
520 its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
521}
522
523static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
524{
525 its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
526}
527
528static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
529{
530 its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
531}
532
533static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
534{
535 its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
536}
537
538static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
539{
540 its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
541}
542
543static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
544 u32 vpe_db_lpi)
545{
546 its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
547}
548
549static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
550 u32 vpe_db_lpi)
551{
552 its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
553}
554
555static void its_encode_db(struct its_cmd_block *cmd, bool db)
556{
557 its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
558}
559
560static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
561{
562 its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
563}
564
565static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
566{
567 its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
568}
569
570static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
571{
572 its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
573}
574
575static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
576{
577 its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
578}
579
580static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
581{
582 its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
583}
584
585static inline void its_fixup_cmd(struct its_cmd_block *cmd)
586{
587 /* Let's fixup BE commands */
588 cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
589 cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
590 cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
591 cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
592}
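
/*
 * On little-endian kernels the cpu_to_le64() calls above compile
 * away and the fixup is a no-op; actual byte-swapping only
 * happens on big-endian builds, as the ITS consumes the command
 * queue as little-endian data.
 */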
593
594static struct its_collection *its_build_mapd_cmd(struct its_node *its,
595 struct its_cmd_block *cmd,
596 struct its_cmd_desc *desc)
597{
598 unsigned long itt_addr;
599 u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
600
601 itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
602 itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
603
604 its_encode_cmd(cmd, GITS_CMD_MAPD);
605 its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
606 its_encode_size(cmd, size - 1);
607 its_encode_itt(cmd, itt_addr);
608 its_encode_valid(cmd, desc->its_mapd_cmd.valid);
609
610 its_fixup_cmd(cmd);
611
612 return NULL;
613}
614
615static struct its_collection *its_build_mapc_cmd(struct its_node *its,
616 struct its_cmd_block *cmd,
617 struct its_cmd_desc *desc)
618{
619 its_encode_cmd(cmd, GITS_CMD_MAPC);
620 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
621 its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
622 its_encode_valid(cmd, desc->its_mapc_cmd.valid);
623
624 its_fixup_cmd(cmd);
625
626 return desc->its_mapc_cmd.col;
627}
628
629static struct its_collection *its_build_mapti_cmd(struct its_node *its,
630 struct its_cmd_block *cmd,
631 struct its_cmd_desc *desc)
632{
633 struct its_collection *col;
634
635 col = dev_event_to_col(desc->its_mapti_cmd.dev,
636 desc->its_mapti_cmd.event_id);
637
638 its_encode_cmd(cmd, GITS_CMD_MAPTI);
639 its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
640 its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
641 its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
642 its_encode_collection(cmd, col->col_id);
643
644 its_fixup_cmd(cmd);
645
646 return valid_col(col);
647}
648
649static struct its_collection *its_build_movi_cmd(struct its_node *its,
650 struct its_cmd_block *cmd,
651 struct its_cmd_desc *desc)
652{
653 struct its_collection *col;
654
655 col = dev_event_to_col(desc->its_movi_cmd.dev,
656 desc->its_movi_cmd.event_id);
657
658 its_encode_cmd(cmd, GITS_CMD_MOVI);
659 its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
660 its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
661 its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
662
663 its_fixup_cmd(cmd);
664
665 return valid_col(col);
666}
667
668static struct its_collection *its_build_discard_cmd(struct its_node *its,
669 struct its_cmd_block *cmd,
670 struct its_cmd_desc *desc)
671{
672 struct its_collection *col;
673
674 col = dev_event_to_col(desc->its_discard_cmd.dev,
675 desc->its_discard_cmd.event_id);
676
677 its_encode_cmd(cmd, GITS_CMD_DISCARD);
678 its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
679 its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
680
681 its_fixup_cmd(cmd);
682
683 return valid_col(col);
684}
685
686static struct its_collection *its_build_inv_cmd(struct its_node *its,
687 struct its_cmd_block *cmd,
688 struct its_cmd_desc *desc)
689{
690 struct its_collection *col;
691
692 col = dev_event_to_col(desc->its_inv_cmd.dev,
693 desc->its_inv_cmd.event_id);
694
695 its_encode_cmd(cmd, GITS_CMD_INV);
696 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
697 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
698
699 its_fixup_cmd(cmd);
700
701 return valid_col(col);
702}
703
704static struct its_collection *its_build_int_cmd(struct its_node *its,
705 struct its_cmd_block *cmd,
706 struct its_cmd_desc *desc)
707{
708 struct its_collection *col;
709
710 col = dev_event_to_col(desc->its_int_cmd.dev,
711 desc->its_int_cmd.event_id);
712
713 its_encode_cmd(cmd, GITS_CMD_INT);
714 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
715 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
716
717 its_fixup_cmd(cmd);
718
719 return valid_col(col);
720}
721
722static struct its_collection *its_build_clear_cmd(struct its_node *its,
723 struct its_cmd_block *cmd,
724 struct its_cmd_desc *desc)
725{
726 struct its_collection *col;
727
728 col = dev_event_to_col(desc->its_clear_cmd.dev,
729 desc->its_clear_cmd.event_id);
730
731 its_encode_cmd(cmd, GITS_CMD_CLEAR);
732 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
733 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
734
735 its_fixup_cmd(cmd);
736
737 return valid_col(col);
738}
739
740static struct its_collection *its_build_invall_cmd(struct its_node *its,
741 struct its_cmd_block *cmd,
742 struct its_cmd_desc *desc)
743{
744 its_encode_cmd(cmd, GITS_CMD_INVALL);
745 its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
746
747 its_fixup_cmd(cmd);
748
749 return desc->its_invall_cmd.col;
750}
751
752static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
753 struct its_cmd_block *cmd,
754 struct its_cmd_desc *desc)
755{
756 its_encode_cmd(cmd, GITS_CMD_VINVALL);
757 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
758
759 its_fixup_cmd(cmd);
760
761 return valid_vpe(its, desc->its_vinvall_cmd.vpe);
762}
763
764static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
765 struct its_cmd_block *cmd,
766 struct its_cmd_desc *desc)
767{
768 unsigned long vpt_addr, vconf_addr;
769 u64 target;
770 bool alloc;
771
772 its_encode_cmd(cmd, GITS_CMD_VMAPP);
773 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
774 its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
775
776 if (!desc->its_vmapp_cmd.valid) {
777 if (is_v4_1(its)) {
778 alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
779 its_encode_alloc(cmd, alloc);
780 }
781
782 goto out;
783 }
784
785 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
786 target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
787
788 its_encode_target(cmd, target);
789 its_encode_vpt_addr(cmd, vpt_addr);
790 its_encode_vpt_size(cmd, LPI_NRBITS - 1);
791
792 if (!is_v4_1(its))
793 goto out;
794
795 vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
796
797 alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
798
799 its_encode_alloc(cmd, alloc);
800
801 /*
802 * GICv4.1 provides a way to get the VLPI state, which needs the vPE
803 * to be unmapped first, and in this case, we may remap the vPE
804 * back while the VPT is not empty. So we can't assume that the
805 * VPT is empty on map. This is why we never advertise PTZ.
806 */
807 its_encode_ptz(cmd, false);
808 its_encode_vconf_addr(cmd, vconf_addr);
809 its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
810
811out:
812 its_fixup_cmd(cmd);
813
814 return valid_vpe(its, desc->its_vmapp_cmd.vpe);
815}
816
817static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
818 struct its_cmd_block *cmd,
819 struct its_cmd_desc *desc)
820{
821 u32 db;
822
823 if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
824 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
825 else
826 db = 1023;
827
828 its_encode_cmd(cmd, GITS_CMD_VMAPTI);
829 its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
830 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
831 its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
832 its_encode_db_phys_id(cmd, db);
833 its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
834
835 its_fixup_cmd(cmd);
836
837 return valid_vpe(its, desc->its_vmapti_cmd.vpe);
838}
839
840static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
841 struct its_cmd_block *cmd,
842 struct its_cmd_desc *desc)
843{
844 u32 db;
845
846 if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
847 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
848 else
849 db = 1023;
850
851 its_encode_cmd(cmd, GITS_CMD_VMOVI);
852 its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
853 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
854 its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
855 its_encode_db_phys_id(cmd, db);
856 its_encode_db_valid(cmd, true);
857
858 its_fixup_cmd(cmd);
859
860 return valid_vpe(its, desc->its_vmovi_cmd.vpe);
861}
862
863static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
864 struct its_cmd_block *cmd,
865 struct its_cmd_desc *desc)
866{
867 u64 target;
868
869 target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
870 its_encode_cmd(cmd, GITS_CMD_VMOVP);
871 its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
872 its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
873 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
874 its_encode_target(cmd, target);
875
876 if (is_v4_1(its)) {
877 its_encode_db(cmd, true);
878 its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
879 }
880
881 its_fixup_cmd(cmd);
882
883 return valid_vpe(its, desc->its_vmovp_cmd.vpe);
884}
885
886static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
887 struct its_cmd_block *cmd,
888 struct its_cmd_desc *desc)
889{
890 struct its_vlpi_map *map;
891
892 map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
893 desc->its_inv_cmd.event_id);
894
895 its_encode_cmd(cmd, GITS_CMD_INV);
896 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
897 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
898
899 its_fixup_cmd(cmd);
900
901 return valid_vpe(its, map->vpe);
902}
903
904static struct its_vpe *its_build_vint_cmd(struct its_node *its,
905 struct its_cmd_block *cmd,
906 struct its_cmd_desc *desc)
907{
908 struct its_vlpi_map *map;
909
910 map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
911 desc->its_int_cmd.event_id);
912
913 its_encode_cmd(cmd, GITS_CMD_INT);
914 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
915 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
916
917 its_fixup_cmd(cmd);
918
919 return valid_vpe(its, map->vpe);
920}
921
922static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
923 struct its_cmd_block *cmd,
924 struct its_cmd_desc *desc)
925{
926 struct its_vlpi_map *map;
927
928 map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
929 desc->its_clear_cmd.event_id);
930
931 its_encode_cmd(cmd, GITS_CMD_CLEAR);
932 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
933 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
934
935 its_fixup_cmd(cmd);
936
937 return valid_vpe(its, map->vpe);
938}
939
940static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
941 struct its_cmd_block *cmd,
942 struct its_cmd_desc *desc)
943{
944 if (WARN_ON(!is_v4_1(its)))
945 return NULL;
946
947 its_encode_cmd(cmd, GITS_CMD_INVDB);
948 its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
949
950 its_fixup_cmd(cmd);
951
952 return valid_vpe(its, desc->its_invdb_cmd.vpe);
953}
954
955static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
956 struct its_cmd_block *cmd,
957 struct its_cmd_desc *desc)
958{
959 if (WARN_ON(!is_v4_1(its)))
960 return NULL;
961
962 its_encode_cmd(cmd, GITS_CMD_VSGI);
963 its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
964 its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
965 its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
966 its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
967 its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
968 its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);
969
970 its_fixup_cmd(cmd);
971
972 return valid_vpe(its, desc->its_vsgi_cmd.vpe);
973}
974
975static u64 its_cmd_ptr_to_offset(struct its_node *its,
976 struct its_cmd_block *ptr)
977{
978 return (ptr - its->cmd_base) * sizeof(*ptr);
979}
980
981static int its_queue_full(struct its_node *its)
982{
983 int widx;
984 int ridx;
985
986 widx = its->cmd_write - its->cmd_base;
987 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
988
989 /* This is incredibly unlikely to happen, unless the ITS locks up. */
990 if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
991 return 1;
992
993 return 0;
994}
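
/*
 * Illustrative reading of the test above: with 2048 entries, a
 * writer at slot 2047 and a reader at slot 0 means "full", since
 * advancing the write pointer would make it collide with the
 * read pointer. One slot is thus always sacrificed to tell a
 * full queue from an empty one.
 */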
995
996static struct its_cmd_block *its_allocate_entry(struct its_node *its)
997{
998 struct its_cmd_block *cmd;
999 u32 count = 1000000; /* 1s! */
1000
1001 while (its_queue_full(its)) {
1002 count--;
1003 if (!count) {
1004 pr_err_ratelimited("ITS queue not draining\n");
1005 return NULL;
1006 }
1007 cpu_relax();
1008 udelay(1);
1009 }
1010
1011 cmd = its->cmd_write++;
1012
1013 /* Handle queue wrapping */
1014 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
1015 its->cmd_write = its->cmd_base;
1016
1017 /* Clear command */
1018 cmd->raw_cmd[0] = 0;
1019 cmd->raw_cmd[1] = 0;
1020 cmd->raw_cmd[2] = 0;
1021 cmd->raw_cmd[3] = 0;
1022
1023 return cmd;
1024}
1025
1026static struct its_cmd_block *its_post_commands(struct its_node *its)
1027{
1028 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
1029
1030 writel_relaxed(wr, its->base + GITS_CWRITER);
1031
1032 return its->cmd_write;
1033}
1034
1035static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
1036{
1037 /*
1038 * Make sure the commands written to memory are observable by
1039 * the ITS.
1040 */
1041 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
1042 gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
1043 else
1044 dsb(ishst);
1045}
1046
1047static int its_wait_for_range_completion(struct its_node *its,
1048 u64 prev_idx,
1049 struct its_cmd_block *to)
1050{
1051 u64 rd_idx, to_idx, linear_idx;
1052 u32 count = 1000000; /* 1s! */
1053
1054 /* Linearize to_idx if the command set has wrapped around */
1055 to_idx = its_cmd_ptr_to_offset(its, to);
1056 if (to_idx < prev_idx)
1057 to_idx += ITS_CMD_QUEUE_SZ;
1058
1059 linear_idx = prev_idx;
1060
1061 while (1) {
1062 s64 delta;
1063
1064 rd_idx = readl_relaxed(its->base + GITS_CREADR);
1065
1066 /*
1067 * Compute the read pointer progress, taking the
1068 * potential wrap-around into account.
1069 */
1070 delta = rd_idx - prev_idx;
1071 if (rd_idx < prev_idx)
1072 delta += ITS_CMD_QUEUE_SZ;
1073
1074 linear_idx += delta;
1075 if (linear_idx >= to_idx)
1076 break;
1077
1078 count--;
1079 if (!count) {
1080 pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
1081 to_idx, linear_idx);
1082 return -1;
1083 }
1084 prev_idx = rd_idx;
1085 cpu_relax();
1086 udelay(1);
1087 }
1088
1089 return 0;
1090}
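
/*
 * Worked example of the wrap-around handling above, with made-up
 * values: for prev_idx == 0xff80 and a new rd_idx == 0x0040,
 * delta = 0x0040 - 0xff80 + ITS_CMD_QUEUE_SZ = 0xc0, i.e. the
 * ITS consumed 0xc0 bytes worth of commands across the wrap.
 */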
1091
1092/* Warning, macro hell follows */
1093#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \
1094void name(struct its_node *its, \
1095 buildtype builder, \
1096 struct its_cmd_desc *desc) \
1097{ \
1098 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
1099 synctype *sync_obj; \
1100 unsigned long flags; \
1101 u64 rd_idx; \
1102 \
1103 raw_spin_lock_irqsave(&its->lock, flags); \
1104 \
1105 cmd = its_allocate_entry(its); \
1106 if (!cmd) { /* We're soooooo screwed... */ \
1107 raw_spin_unlock_irqrestore(&its->lock, flags); \
1108 return; \
1109 } \
1110 sync_obj = builder(its, cmd, desc); \
1111 its_flush_cmd(its, cmd); \
1112 \
1113 if (sync_obj) { \
1114 sync_cmd = its_allocate_entry(its); \
1115 if (!sync_cmd) \
1116 goto post; \
1117 \
1118 buildfn(its, sync_cmd, sync_obj); \
1119 its_flush_cmd(its, sync_cmd); \
1120 } \
1121 \
1122post: \
1123 rd_idx = readl_relaxed(its->base + GITS_CREADR); \
1124 next_cmd = its_post_commands(its); \
1125 raw_spin_unlock_irqrestore(&its->lock, flags); \
1126 \
1127 if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
1128 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
1129}
1130
1131static void its_build_sync_cmd(struct its_node *its,
1132 struct its_cmd_block *sync_cmd,
1133 struct its_collection *sync_col)
1134{
1135 its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
1136 its_encode_target(sync_cmd, sync_col->target_address);
1137
1138 its_fixup_cmd(sync_cmd);
1139}
1140
1141static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
1142 struct its_collection, its_build_sync_cmd)
1143
1144static void its_build_vsync_cmd(struct its_node *its,
1145 struct its_cmd_block *sync_cmd,
1146 struct its_vpe *sync_vpe)
1147{
1148 its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
1149 its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
1150
1151 its_fixup_cmd(sync_cmd);
1152}
1153
1154static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
1155 struct its_vpe, its_build_vsync_cmd)
1156
1157static void its_send_int(struct its_device *dev, u32 event_id)
1158{
1159 struct its_cmd_desc desc;
1160
1161 desc.its_int_cmd.dev = dev;
1162 desc.its_int_cmd.event_id = event_id;
1163
1164 its_send_single_command(dev->its, its_build_int_cmd, &desc);
1165}
1166
1167static void its_send_clear(struct its_device *dev, u32 event_id)
1168{
1169 struct its_cmd_desc desc;
1170
1171 desc.its_clear_cmd.dev = dev;
1172 desc.its_clear_cmd.event_id = event_id;
1173
1174 its_send_single_command(dev->its, its_build_clear_cmd, &desc);
1175}
1176
1177static void its_send_inv(struct its_device *dev, u32 event_id)
1178{
1179 struct its_cmd_desc desc;
1180
1181 desc.its_inv_cmd.dev = dev;
1182 desc.its_inv_cmd.event_id = event_id;
1183
1184 its_send_single_command(dev->its, its_build_inv_cmd, &desc);
1185}
1186
1187static void its_send_mapd(struct its_device *dev, int valid)
1188{
1189 struct its_cmd_desc desc;
1190
1191 desc.its_mapd_cmd.dev = dev;
1192 desc.its_mapd_cmd.valid = !!valid;
1193
1194 its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
1195}
1196
1197static void its_send_mapc(struct its_node *its, struct its_collection *col,
1198 int valid)
1199{
1200 struct its_cmd_desc desc;
1201
1202 desc.its_mapc_cmd.col = col;
1203 desc.its_mapc_cmd.valid = !!valid;
1204
1205 its_send_single_command(its, its_build_mapc_cmd, &desc);
1206}
1207
1208static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
1209{
1210 struct its_cmd_desc desc;
1211
1212 desc.its_mapti_cmd.dev = dev;
1213 desc.its_mapti_cmd.phys_id = irq_id;
1214 desc.its_mapti_cmd.event_id = id;
1215
1216 its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
1217}
1218
1219static void its_send_movi(struct its_device *dev,
1220 struct its_collection *col, u32 id)
1221{
1222 struct its_cmd_desc desc;
1223
1224 desc.its_movi_cmd.dev = dev;
1225 desc.its_movi_cmd.col = col;
1226 desc.its_movi_cmd.event_id = id;
1227
1228 its_send_single_command(dev->its, its_build_movi_cmd, &desc);
1229}
1230
1231static void its_send_discard(struct its_device *dev, u32 id)
1232{
1233 struct its_cmd_desc desc;
1234
1235 desc.its_discard_cmd.dev = dev;
1236 desc.its_discard_cmd.event_id = id;
1237
1238 its_send_single_command(dev->its, its_build_discard_cmd, &desc);
1239}
1240
1241static void its_send_invall(struct its_node *its, struct its_collection *col)
1242{
1243 struct its_cmd_desc desc;
1244
1245 desc.its_invall_cmd.col = col;
1246
1247 its_send_single_command(its, its_build_invall_cmd, &desc);
1248}
1249
1250static void its_send_vmapti(struct its_device *dev, u32 id)
1251{
1252 struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1253 struct its_cmd_desc desc;
1254
1255 desc.its_vmapti_cmd.vpe = map->vpe;
1256 desc.its_vmapti_cmd.dev = dev;
1257 desc.its_vmapti_cmd.virt_id = map->vintid;
1258 desc.its_vmapti_cmd.event_id = id;
1259 desc.its_vmapti_cmd.db_enabled = map->db_enabled;
1260
1261 its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
1262}
1263
1264static void its_send_vmovi(struct its_device *dev, u32 id)
1265{
1266 struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1267 struct its_cmd_desc desc;
1268
1269 desc.its_vmovi_cmd.vpe = map->vpe;
1270 desc.its_vmovi_cmd.dev = dev;
1271 desc.its_vmovi_cmd.event_id = id;
1272 desc.its_vmovi_cmd.db_enabled = map->db_enabled;
1273
1274 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
1275}
1276
1277static void its_send_vmapp(struct its_node *its,
1278 struct its_vpe *vpe, bool valid)
1279{
1280 struct its_cmd_desc desc;
1281
1282 desc.its_vmapp_cmd.vpe = vpe;
1283 desc.its_vmapp_cmd.valid = valid;
1284 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
1285
1286 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
1287}
1288
1289static void its_send_vmovp(struct its_vpe *vpe)
1290{
1291 struct its_cmd_desc desc = {};
1292 struct its_node *its;
1293 unsigned long flags;
1294 int col_id = vpe->col_idx;
1295
1296 desc.its_vmovp_cmd.vpe = vpe;
1297
1298 if (!its_list_map) {
1299 its = list_first_entry(&its_nodes, struct its_node, entry);
1300 desc.its_vmovp_cmd.col = &its->collections[col_id];
1301 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1302 return;
1303 }
1304
1305 /*
1306 * Yet another marvel of the architecture. If using the
1307 * its_list "feature", we need to make sure that all ITSs
1308 * receive all VMOVP commands in the same order. The only way
1309 * to guarantee this is to make vmovp a serialization point.
1310 *
1311 * Wall <-- Head.
1312 */
1313 raw_spin_lock_irqsave(&vmovp_lock, flags);
1314
1315 desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
1316 desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
1317
1318 /* Emit VMOVPs */
1319 list_for_each_entry(its, &its_nodes, entry) {
1320 if (!is_v4(its))
1321 continue;
1322
1323 if (!require_its_list_vmovp(vpe->its_vm, its))
1324 continue;
1325
1326 desc.its_vmovp_cmd.col = &its->collections[col_id];
1327 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1328 }
1329
1330 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1331}
1332
1333static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1334{
1335 struct its_cmd_desc desc;
1336
1337 desc.its_vinvall_cmd.vpe = vpe;
1338 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
1339}
1340
1341static void its_send_vinv(struct its_device *dev, u32 event_id)
1342{
1343 struct its_cmd_desc desc;
1344
1345 /*
1346 * There is no real VINV command. This is just a normal INV,
1347 * with a VSYNC instead of a SYNC.
1348 */
1349 desc.its_inv_cmd.dev = dev;
1350 desc.its_inv_cmd.event_id = event_id;
1351
1352 its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
1353}
1354
1355static void its_send_vint(struct its_device *dev, u32 event_id)
1356{
1357 struct its_cmd_desc desc;
1358
1359 /*
1360 * There is no real VINT command. This is just a normal INT,
1361 * with a VSYNC instead of a SYNC.
1362 */
1363 desc.its_int_cmd.dev = dev;
1364 desc.its_int_cmd.event_id = event_id;
1365
1366 its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
1367}
1368
1369static void its_send_vclear(struct its_device *dev, u32 event_id)
1370{
1371 struct its_cmd_desc desc;
1372
1373 /*
1374 * There is no real VCLEAR command. This is just a normal CLEAR,
1375 * with a VSYNC instead of a SYNC.
1376 */
1377 desc.its_clear_cmd.dev = dev;
1378 desc.its_clear_cmd.event_id = event_id;
1379
1380 its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
1381}
1382
1383static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
1384{
1385 struct its_cmd_desc desc;
1386
1387 desc.its_invdb_cmd.vpe = vpe;
1388 its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
1389}
1390
1391/*
1392 * irqchip functions - assumes MSI, mostly.
1393 */
1394static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
1395{
1396 struct its_vlpi_map *map = get_vlpi_map(d);
1397 irq_hw_number_t hwirq;
1398 void *va;
1399 u8 *cfg;
1400
1401 if (map) {
1402 va = page_address(map->vm->vprop_page);
1403 hwirq = map->vintid;
1404
1405 /* Remember the updated property */
1406 map->properties &= ~clr;
1407 map->properties |= set | LPI_PROP_GROUP1;
1408 } else {
1409 va = gic_rdists->prop_table_va;
1410 hwirq = d->hwirq;
1411 }
1412
1413 cfg = va + hwirq - 8192;
1414 *cfg &= ~clr;
1415 *cfg |= set | LPI_PROP_GROUP1;
1416
1417 /*
1418 * Make the above write visible to the redistributors.
1419 * And yes, we're flushing exactly: One. Single. Byte.
1420 * Humpf...
1421 */
1422 if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
1423 gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
1424 else
1425 dsb(ishst);
1426}
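
/*
 * The "va + hwirq - 8192" indexing above relies on LPI INTIDs
 * starting at 8192: the property table holds one byte per LPI,
 * so the configuration byte for INTID n lives at offset n - 8192.
 */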
1427
1428static void wait_for_syncr(void __iomem *rdbase)
1429{
1430 while (readl_relaxed(rdbase + GICR_SYNCR) & 1)
1431 cpu_relax();
1432}
1433
1434static void direct_lpi_inv(struct irq_data *d)
1435{
1436 struct its_vlpi_map *map = get_vlpi_map(d);
1437 void __iomem *rdbase;
1438 unsigned long flags;
1439 u64 val;
1440 int cpu;
1441
1442 if (map) {
1443 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1444
1445 WARN_ON(!is_v4_1(its_dev->its));
1446
1447 val = GICR_INVLPIR_V;
1448 val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
1449 val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
1450 } else {
1451 val = d->hwirq;
1452 }
1453
1454 /* Target the redistributor this LPI is currently routed to */
1455 cpu = irq_to_cpuid_lock(d, &flags);
1456 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
1457 rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
1458 gic_write_lpir(val, rdbase + GICR_INVLPIR);
1459
1460 wait_for_syncr(rdbase);
1461 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
1462 irq_to_cpuid_unlock(d, flags);
1463}
1464
1465static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1466{
1467 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1468
1469 lpi_write_config(d, clr, set);
1470 if (gic_rdists->has_direct_lpi &&
1471 (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
1472 direct_lpi_inv(d);
1473 else if (!irqd_is_forwarded_to_vcpu(d))
1474 its_send_inv(its_dev, its_get_event_id(d));
1475 else
1476 its_send_vinv(its_dev, its_get_event_id(d));
1477}
1478
1479static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1480{
1481 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1482 u32 event = its_get_event_id(d);
1483 struct its_vlpi_map *map;
1484
1485 /*
1486 * GICv4.1 does away with the per-LPI nonsense, nothing to do
1487 * here.
1488 */
1489 if (is_v4_1(its_dev->its))
1490 return;
1491
1492 map = dev_event_to_vlpi_map(its_dev, event);
1493
1494 if (map->db_enabled == enable)
1495 return;
1496
1497 map->db_enabled = enable;
1498
1499 /*
1500 * More fun with the architecture:
1501 *
1502 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1503 * value or to 1023, depending on the enable bit. But that
1504 * would be issuing a mapping for an /existing/ DevID+EventID
1505 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1506 * to the /same/ vPE, using this opportunity to adjust the
1507 * doorbell. Mouahahahaha. We loves it, Precious.
1508 */
1509 its_send_vmovi(its_dev, event);
1510}
1511
1512static void its_mask_irq(struct irq_data *d)
1513{
1514 if (irqd_is_forwarded_to_vcpu(d))
1515 its_vlpi_set_doorbell(d, false);
1516
1517 lpi_update_config(d, LPI_PROP_ENABLED, 0);
1518}
1519
1520static void its_unmask_irq(struct irq_data *d)
1521{
1522 if (irqd_is_forwarded_to_vcpu(d))
1523 its_vlpi_set_doorbell(d, true);
1524
1525 lpi_update_config(d, 0, LPI_PROP_ENABLED);
1526}
1527
1528static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
1529{
1530 if (irqd_affinity_is_managed(d))
1531 return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1532
1533 return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1534}
1535
1536static void its_inc_lpi_count(struct irq_data *d, int cpu)
1537{
1538 if (irqd_affinity_is_managed(d))
1539 atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1540 else
1541 atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1542}
1543
1544static void its_dec_lpi_count(struct irq_data *d, int cpu)
1545{
1546 if (irqd_affinity_is_managed(d))
1547 atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1548 else
1549 atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1550}
1551
1552static unsigned int cpumask_pick_least_loaded(struct irq_data *d,
1553 const struct cpumask *cpu_mask)
1554{
1555 unsigned int cpu = nr_cpu_ids, tmp;
1556 int count = S32_MAX;
1557
1558 for_each_cpu(tmp, cpu_mask) {
1559 int this_count = its_read_lpi_count(d, tmp);
1560 if (this_count < count) {
1561 cpu = tmp;
1562 count = this_count;
1563 }
1564 }
1565
1566 return cpu;
1567}
1568
1569/*
1570 * As suggested by Thomas Gleixner in:
1571 * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de
1572 */
1573static int its_select_cpu(struct irq_data *d,
1574 const struct cpumask *aff_mask)
1575{
1576 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1577 static DEFINE_RAW_SPINLOCK(tmpmask_lock);
1578 static struct cpumask __tmpmask;
1579 struct cpumask *tmpmask;
1580 unsigned long flags;
1581 int cpu, node;
1582 node = its_dev->its->numa_node;
1583 tmpmask = &__tmpmask;
1584
1585 raw_spin_lock_irqsave(&tmpmask_lock, flags);
1586
1587 if (!irqd_affinity_is_managed(d)) {
1588 /* First try the NUMA node */
1589 if (node != NUMA_NO_NODE) {
1590 /*
1591 * Try the intersection of the affinity mask and the
1592 * node mask (and the online mask, just to be safe).
1593 */
1594 cpumask_and(tmpmask, cpumask_of_node(node), aff_mask);
1595 cpumask_and(tmpmask, tmpmask, cpu_online_mask);
1596
1597 /*
1598 * Ideally, we would check if the mask is empty, and
1599 * try again on the full node here.
1600 *
1601 * But it turns out that the way ACPI describes the
1602 * affinity for ITSs only deals with memory, and
1603 * not target CPUs, so it cannot describe a single
1604 * ITS placed next to two NUMA nodes.
1605 *
1606 * Instead, just fall back on the online mask. This
1607 * diverges from Thomas' suggestion above.
1608 */
1609 cpu = cpumask_pick_least_loaded(d, tmpmask);
1610 if (cpu < nr_cpu_ids)
1611 goto out;
1612
1613 /* If we can't cross sockets, give up */
1614 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
1615 goto out;
1616
1617 /* If the above failed, expand the search */
1618 }
1619
1620 /* Try the intersection of the affinity and online masks */
1621 cpumask_and(tmpmask, aff_mask, cpu_online_mask);
1622
1623 /* If that doesn't fly, the online mask is the last resort */
1624 if (cpumask_empty(tmpmask))
1625 cpumask_copy(tmpmask, cpu_online_mask);
1626
1627 cpu = cpumask_pick_least_loaded(d, tmpmask);
1628 } else {
1629 cpumask_copy(tmpmask, aff_mask);
1630
1631 /* If we cannot cross sockets, limit the search to that node */
1632 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
1633 node != NUMA_NO_NODE)
1634 cpumask_and(tmpmask, tmpmask, cpumask_of_node(node));
1635
1636 cpu = cpumask_pick_least_loaded(d, tmpmask);
1637 }
1638out:
1639 raw_spin_unlock_irqrestore(&tmpmask_lock, flags);
1640
1641 pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
1642 return cpu;
1643}
1644
1645static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1646 bool force)
1647{
1648 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1649 struct its_collection *target_col;
1650 u32 id = its_get_event_id(d);
1651 int cpu, prev_cpu;
1652
1653 /* A forwarded interrupt should use irq_set_vcpu_affinity */
1654 if (irqd_is_forwarded_to_vcpu(d))
1655 return -EINVAL;
1656
1657 prev_cpu = its_dev->event_map.col_map[id];
1658 its_dec_lpi_count(d, prev_cpu);
1659
1660 if (!force)
1661 cpu = its_select_cpu(d, mask_val);
1662 else
1663 cpu = cpumask_pick_least_loaded(d, mask_val);
1664
1665 if (cpu < 0 || cpu >= nr_cpu_ids)
1666 goto err;
1667
1668 /* don't set the affinity when the target cpu is the same as the current one */
1669 if (cpu != prev_cpu) {
1670 target_col = &its_dev->its->collections[cpu];
1671 its_send_movi(its_dev, target_col, id);
1672 its_dev->event_map.col_map[id] = cpu;
1673 irq_data_update_effective_affinity(d, cpumask_of(cpu));
1674 }
1675
1676 its_inc_lpi_count(d, cpu);
1677
1678 return IRQ_SET_MASK_OK_DONE;
1679
1680err:
1681 its_inc_lpi_count(d, prev_cpu);
1682 return -EINVAL;
1683}
1684
1685static u64 its_irq_get_msi_base(struct its_device *its_dev)
1686{
1687 struct its_node *its = its_dev->its;
1688
1689 return its->phys_base + GITS_TRANSLATER;
1690}
1691
1692static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1693{
1694 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1695 struct its_node *its;
1696 u64 addr;
1697
1698 its = its_dev->its;
1699 addr = its->get_msi_base(its_dev);
1700
1701 msg->address_lo = lower_32_bits(addr);
1702 msg->address_hi = upper_32_bits(addr);
1703 msg->data = its_get_event_id(d);
1704
1705 iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
1706}
1707
1708static int its_irq_set_irqchip_state(struct irq_data *d,
1709 enum irqchip_irq_state which,
1710 bool state)
1711{
1712 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1713 u32 event = its_get_event_id(d);
1714
1715 if (which != IRQCHIP_STATE_PENDING)
1716 return -EINVAL;
1717
1718 if (irqd_is_forwarded_to_vcpu(d)) {
1719 if (state)
1720 its_send_vint(its_dev, event);
1721 else
1722 its_send_vclear(its_dev, event);
1723 } else {
1724 if (state)
1725 its_send_int(its_dev, event);
1726 else
1727 its_send_clear(its_dev, event);
1728 }
1729
1730 return 0;
1731}
1732
1733static int its_irq_retrigger(struct irq_data *d)
1734{
1735 return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
1736}
1737
1738/*
1739 * Two favourable cases:
1740 *
1741 * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
1742 * for vSGI delivery
1743 *
1744 * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
1745 * and we're better off mapping all VPEs always
1746 *
1747 * If neither (a) nor (b) is true, then we map vPEs on demand.
1748 *
1749 */
1750static bool gic_requires_eager_mapping(void)
1751{
1752 if (!its_list_map || gic_rdists->has_rvpeid)
1753 return true;
1754
1755 return false;
1756}
1757
1758static void its_map_vm(struct its_node *its, struct its_vm *vm)
1759{
1760 unsigned long flags;
1761
1762 if (gic_requires_eager_mapping())
1763 return;
1764
1765 raw_spin_lock_irqsave(&vmovp_lock, flags);
1766
1767 /*
1768 * If the VM wasn't mapped yet, iterate over the vpes and get
1769 * them mapped now.
1770 */
1771 vm->vlpi_count[its->list_nr]++;
1772
1773 if (vm->vlpi_count[its->list_nr] == 1) {
1774 int i;
1775
1776 for (i = 0; i < vm->nr_vpes; i++) {
1777 struct its_vpe *vpe = vm->vpes[i];
1778 struct irq_data *d = irq_get_irq_data(vpe->irq);
1779
1780 /* Map the VPE to the first possible CPU */
1781 vpe->col_idx = cpumask_first(cpu_online_mask);
1782 its_send_vmapp(its, vpe, true);
1783 its_send_vinvall(its, vpe);
1784 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
1785 }
1786 }
1787
1788 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1789}
1790
1791static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1792{
1793 unsigned long flags;
1794
1795 /* Not using the ITS list? Everything is always mapped. */
1796 if (gic_requires_eager_mapping())
1797 return;
1798
1799 raw_spin_lock_irqsave(&vmovp_lock, flags);
1800
1801 if (!--vm->vlpi_count[its->list_nr]) {
1802 int i;
1803
1804 for (i = 0; i < vm->nr_vpes; i++)
1805 its_send_vmapp(its, vm->vpes[i], false);
1806 }
1807
1808 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1809}
1810
1811static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1812{
1813 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1814 u32 event = its_get_event_id(d);
1815 int ret = 0;
1816
1817 if (!info->map)
1818 return -EINVAL;
1819
1820 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1821
1822 if (!its_dev->event_map.vm) {
1823 struct its_vlpi_map *maps;
1824
1825 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
1826 GFP_ATOMIC);
1827 if (!maps) {
1828 ret = -ENOMEM;
1829 goto out;
1830 }
1831
1832 its_dev->event_map.vm = info->map->vm;
1833 its_dev->event_map.vlpi_maps = maps;
1834 } else if (its_dev->event_map.vm != info->map->vm) {
1835 ret = -EINVAL;
1836 goto out;
1837 }
1838
1839 /* Get our private copy of the mapping information */
1840 its_dev->event_map.vlpi_maps[event] = *info->map;
1841
1842 if (irqd_is_forwarded_to_vcpu(d)) {
1843 /* Already mapped, move it around */
1844 its_send_vmovi(its_dev, event);
1845 } else {
1846 /* Ensure all the VPEs are mapped on this ITS */
1847 its_map_vm(its_dev->its, info->map->vm);
1848
1849 /*
1850 * Flag the interrupt as forwarded so that we can
1851 * start poking the virtual property table.
1852 */
1853 irqd_set_forwarded_to_vcpu(d);
1854
1855 /* Write out the property to the prop table */
1856 lpi_write_config(d, 0xff, info->map->properties);
1857
1858 /* Drop the physical mapping */
1859 its_send_discard(its_dev, event);
1860
1861 /* and install the virtual one */
1862 its_send_vmapti(its_dev, event);
1863
1864 /* Increment the number of VLPIs */
1865 its_dev->event_map.nr_vlpis++;
1866 }
1867
1868out:
1869 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1870 return ret;
1871}
1872
1873static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1874{
1875 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1876 struct its_vlpi_map *map;
1877 int ret = 0;
1878
1879 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1880
1881 map = get_vlpi_map(d);
1882
1883 if (!its_dev->event_map.vm || !map) {
1884 ret = -EINVAL;
1885 goto out;
1886 }
1887
1888 /* Copy our mapping information to the incoming request */
1889 *info->map = *map;
1890
1891out:
1892 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1893 return ret;
1894}
1895
1896static int its_vlpi_unmap(struct irq_data *d)
1897{
1898 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1899 u32 event = its_get_event_id(d);
1900 int ret = 0;
1901
1902 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1903
1904 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1905 ret = -EINVAL;
1906 goto out;
1907 }
1908
1909 /* Drop the virtual mapping */
1910 its_send_discard(its_dev, event);
1911
1912 /* and restore the physical one */
1913 irqd_clr_forwarded_to_vcpu(d);
1914 its_send_mapti(its_dev, d->hwirq, event);
1915 lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1916 LPI_PROP_ENABLED |
1917 LPI_PROP_GROUP1));
1918
1919 /* Potentially unmap the VM from this ITS */
1920 its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1921
1922 /*
1923 * Drop the refcount and make the device available again if
1924 * this was the last VLPI.
1925 */
1926 if (!--its_dev->event_map.nr_vlpis) {
1927 its_dev->event_map.vm = NULL;
1928 kfree(its_dev->event_map.vlpi_maps);
1929 }
1930
1931out:
1932 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1933 return ret;
1934}
1935
1936static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1937{
1938 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1939
1940 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1941 return -EINVAL;
1942
1943 if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1944 lpi_update_config(d, 0xff, info->config);
1945 else
1946 lpi_write_config(d, 0xff, info->config);
1947 its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1948
1949 return 0;
1950}
1951
1952static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1953{
1954 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1955 struct its_cmd_info *info = vcpu_info;
1956
1957 /* Need a v4 ITS */
1958 if (!is_v4(its_dev->its))
1959 return -EINVAL;
1960
1961 /* Unmap request? */
1962 if (!info)
1963 return its_vlpi_unmap(d);
1964
1965 switch (info->cmd_type) {
1966 case MAP_VLPI:
1967 return its_vlpi_map(d, info);
1968
1969 case GET_VLPI:
1970 return its_vlpi_get(d, info);
1971
1972 case PROP_UPDATE_VLPI:
1973 case PROP_UPDATE_AND_INV_VLPI:
1974 return its_vlpi_prop_update(d, info);
1975
1976 default:
1977 return -EINVAL;
1978 }
1979}
1980
1981static struct irq_chip its_irq_chip = {
1982 .name = "ITS",
1983 .irq_mask = its_mask_irq,
1984 .irq_unmask = its_unmask_irq,
1985 .irq_eoi = irq_chip_eoi_parent,
1986 .irq_set_affinity = its_set_affinity,
1987 .irq_compose_msi_msg = its_irq_compose_msi_msg,
1988 .irq_set_irqchip_state = its_irq_set_irqchip_state,
1989 .irq_retrigger = its_irq_retrigger,
1990 .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
1991};
1992
1993
1994/*
1995 * How we allocate LPIs:
1996 *
1997 * lpi_range_list contains ranges of LPIs that are available to
1998 * allocate from. To allocate LPIs, just pick the first range that
1999 * fits the required allocation, and reduce it by the required
2000 * amount. Once empty, remove the range from the list.
2001 *
2002 * To free a range of LPIs, add a free range to the list, sort it and
2003 * merge the result if the new range happens to be adjacent to an
2004 * already free block.
2005 *
2006 * The consequence of the above is that allocation cost is low, but
2007 * freeing is expensive. We assume that freeing rarely occurs.
2008 */
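
/*
 * Worked example with made-up numbers: starting from a single
 * free range [8192, 8192 + N), alloc_lpi_range(32, &base) hands
 * out base == 8192 and shrinks the range to [8224, 8192 + N).
 * A later free_lpi_range(8192, 32) re-inserts that block and
 * merges it back into the adjacent free range.
 */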
2009#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
2010
2011static DEFINE_MUTEX(lpi_range_lock);
2012static LIST_HEAD(lpi_range_list);
2013
2014struct lpi_range {
2015 struct list_head entry;
2016 u32 base_id;
2017 u32 span;
2018};
2019
2020static struct lpi_range *mk_lpi_range(u32 base, u32 span)
2021{
2022 struct lpi_range *range;
2023
2024 range = kmalloc(sizeof(*range), GFP_KERNEL);
2025 if (range) {
2026 range->base_id = base;
2027 range->span = span;
2028 }
2029
2030 return range;
2031}
2032
2033static int alloc_lpi_range(u32 nr_lpis, u32 *base)
2034{
2035 struct lpi_range *range, *tmp;
2036 int err = -ENOSPC;
2037
2038 mutex_lock(&lpi_range_lock);
2039
2040 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
2041 if (range->span >= nr_lpis) {
2042 *base = range->base_id;
2043 range->base_id += nr_lpis;
2044 range->span -= nr_lpis;
2045
2046 if (range->span == 0) {
2047 list_del(&range->entry);
2048 kfree(range);
2049 }
2050
2051 err = 0;
2052 break;
2053 }
2054 }
2055
2056 mutex_unlock(&lpi_range_lock);
2057
2058 pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
2059 return err;
2060}
2061
2062static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
2063{
2064 if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
2065 return;
2066 if (a->base_id + a->span != b->base_id)
2067 return;
2068 b->base_id = a->base_id;
2069 b->span += a->span;
2070 list_del(&a->entry);
2071 kfree(a);
2072}
2073
2074static int free_lpi_range(u32 base, u32 nr_lpis)
2075{
2076 struct lpi_range *new, *old;
2077
2078 new = mk_lpi_range(base, nr_lpis);
2079 if (!new)
2080 return -ENOMEM;
2081
2082 mutex_lock(&lpi_range_lock);
2083
2084 list_for_each_entry_reverse(old, &lpi_range_list, entry) {
2085 if (old->base_id < base)
2086 break;
2087 }
2088 /*
2089 * old is the last element with ->base_id smaller than base,
2090 * so new goes right after it. If there are no elements with
2091 * ->base_id smaller than base, &old->entry ends up pointing
2092 * at the head of the list, and inserting new at the start of
2093 * the list is the right thing to do in that case as well.
2094 */
2095 list_add(&new->entry, &old->entry);
2096 /*
2097 * Now check if we can merge with the preceding and/or
2098 * following ranges.
2099 */
2100 merge_lpi_ranges(old, new);
2101 merge_lpi_ranges(new, list_next_entry(new, entry));
2102
2103 mutex_unlock(&lpi_range_lock);
2104 return 0;
2105}
2106
2107static int __init its_lpi_init(u32 id_bits)
2108{
2109 u32 lpis = (1UL << id_bits) - 8192;
2110 u32 numlpis;
2111 int err;
2112
2113 numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
2114
2115 if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
2116 lpis = numlpis;
2117 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
2118 lpis);
2119 }
2120
2121 /*
2122 * Initializing the allocator is just the same as freeing the
2123 * full range of LPIs.
2124 */
2125 err = free_lpi_range(8192, lpis);
2126 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
2127 return err;
2128}
2129
2130static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
2131{
2132 unsigned long *bitmap = NULL;
2133 int err = 0;
2134
2135 do {
2136 err = alloc_lpi_range(nr_irqs, base);
2137 if (!err)
2138 break;
2139
2140 nr_irqs /= 2;
2141 } while (nr_irqs > 0);
2142
2143 if (!nr_irqs)
2144 err = -ENOSPC;
2145
2146 if (err)
2147 goto out;
2148
2149 bitmap = bitmap_zalloc(nr_irqs, GFP_ATOMIC);
2150 if (!bitmap)
2151 goto out;
2152
2153 *nr_ids = nr_irqs;
2154
2155out:
2156 if (!bitmap)
2157 *base = *nr_ids = 0;
2158
2159 return bitmap;
2160}
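
/*
 * Note the fallback strategy above: if a contiguous block of
 * nr_irqs LPIs cannot be found, the request is halved (32 -> 16
 * -> 8...) until an allocation succeeds or the request shrinks
 * to nothing, at which point -ENOSPC is returned.
 */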
2161
2162static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
2163{
2164 WARN_ON(free_lpi_range(base, nr_ids));
2165 bitmap_free(bitmap);
2166}
2167
2168static void gic_reset_prop_table(void *va)
2169{
2170 /* Priority 0xa0, Group-1, disabled */
2171 memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
2172
2173 /* Make sure the GIC will observe the written configuration */
2174 gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
2175}
2176
2177static struct page *its_allocate_prop_table(gfp_t gfp_flags)
2178{
2179 struct page *prop_page;
2180
2181 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
2182 if (!prop_page)
2183 return NULL;
2184
2185 gic_reset_prop_table(page_address(prop_page));
2186
2187 return prop_page;
2188}
2189
2190static void its_free_prop_table(struct page *prop_page)
2191{
2192 free_pages((unsigned long)page_address(prop_page),
2193 get_order(LPI_PROPBASE_SZ));
2194}
2195
2196static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
2197{
2198 phys_addr_t start, end, addr_end;
2199 u64 i;
2200
2201 /*
2202 * We don't bother checking for a kdump kernel as, by
2203 * construction, the LPI tables are out of this kernel's
2204 * memory map.
2205 */
2206 if (is_kdump_kernel())
2207 return true;
2208
2209 addr_end = addr + size - 1;
2210
2211 for_each_reserved_mem_range(i, &start, &end) {
2212 if (addr >= start && addr_end <= end)
2213 return true;
2214 }
2215
2216 /* Not found, not a good sign... */
2217 pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
2218 &addr, &addr_end);
2219 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2220 return false;
2221}
2222
2223static int gic_reserve_range(phys_addr_t addr, unsigned long size)
2224{
2225 if (efi_enabled(EFI_CONFIG_TABLES))
2226 return efi_mem_reserve_persistent(addr, size);
2227
2228 return 0;
2229}
2230
2231static int __init its_setup_lpi_prop_table(void)
2232{
2233 if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
2234 u64 val;
2235
2236 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2237 lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
2238
2239 gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
2240 gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
2241 LPI_PROPBASE_SZ,
2242 MEMREMAP_WB);
2243 gic_reset_prop_table(gic_rdists->prop_table_va);
2244 } else {
2245 struct page *page;
2246
2247 lpi_id_bits = min_t(u32,
2248 GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
2249 ITS_MAX_LPI_NRBITS);
2250 page = its_allocate_prop_table(GFP_NOWAIT);
2251 if (!page) {
2252 pr_err("Failed to allocate PROPBASE\n");
2253 return -ENOMEM;
2254 }
2255
2256 gic_rdists->prop_table_pa = page_to_phys(page);
2257 gic_rdists->prop_table_va = page_address(page);
2258 WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
2259 LPI_PROPBASE_SZ));
2260 }
2261
2262 pr_info("GICv3: using LPI property table @%pa\n",
2263 &gic_rdists->prop_table_pa);
2264
2265 return its_lpi_init(lpi_id_bits);
2266}
2267
2268static const char *its_base_type_string[] = {
2269 [GITS_BASER_TYPE_DEVICE] = "Devices",
2270 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
2271 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
2272 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
2273 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
2274 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
2275 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
2276};
2277
2278static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
2279{
2280 u32 idx = baser - its->tables;
2281
2282 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
2283}
2284
2285static void its_write_baser(struct its_node *its, struct its_baser *baser,
2286 u64 val)
2287{
2288 u32 idx = baser - its->tables;
2289
2290 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
2291 baser->val = its_read_baser(its, baser);
2292}
2293
2294static int its_setup_baser(struct its_node *its, struct its_baser *baser,
2295 u64 cache, u64 shr, u32 order, bool indirect)
2296{
2297 u64 val = its_read_baser(its, baser);
2298 u64 esz = GITS_BASER_ENTRY_SIZE(val);
2299 u64 type = GITS_BASER_TYPE(val);
2300 u64 baser_phys, tmp;
2301 u32 alloc_pages, psz;
2302 struct page *page;
2303 void *base;
2304
2305 psz = baser->psz;
2306 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
2307 if (alloc_pages > GITS_BASER_PAGES_MAX) {
2308 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
2309 &its->phys_base, its_base_type_string[type],
2310 alloc_pages, GITS_BASER_PAGES_MAX);
2311 alloc_pages = GITS_BASER_PAGES_MAX;
2312 order = get_order(GITS_BASER_PAGES_MAX * psz);
2313 }
2314
2315 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
2316 if (!page)
2317 return -ENOMEM;
2318
2319 base = (void *)page_address(page);
2320 baser_phys = virt_to_phys(base);
2321
2322 /* Check if the physical address of the memory is above 48bits */
2323 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
2324
2325 /* 52bit PA is supported only when PageSize=64K */
2326 if (psz != SZ_64K) {
2327 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
2328 free_pages((unsigned long)base, order);
2329 return -ENXIO;
2330 }
2331
2332 /* Convert 52bit PA to 48bit field */
2333 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
2334 }
2335
2336retry_baser:
2337 val = (baser_phys |
2338 (type << GITS_BASER_TYPE_SHIFT) |
2339 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
2340 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
2341 cache |
2342 shr |
2343 GITS_BASER_VALID);
2344
2345 val |= indirect ? GITS_BASER_INDIRECT : 0x0;
2346
2347 switch (psz) {
2348 case SZ_4K:
2349 val |= GITS_BASER_PAGE_SIZE_4K;
2350 break;
2351 case SZ_16K:
2352 val |= GITS_BASER_PAGE_SIZE_16K;
2353 break;
2354 case SZ_64K:
2355 val |= GITS_BASER_PAGE_SIZE_64K;
2356 break;
2357 }
2358
2359 its_write_baser(its, baser, val);
2360 tmp = baser->val;
2361
2362 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
2363 /*
2364 * Shareability didn't stick. Just use
2365 * whatever the read reported, which is likely
2366 * to be the only thing this ITS
2367 * supports. If that's zero, make it
2368 * non-cacheable as well.
2369 */
2370 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
2371 if (!shr) {
2372 cache = GITS_BASER_nC;
2373 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
2374 }
2375 goto retry_baser;
2376 }
2377
2378 if (val != tmp) {
2379 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
2380 &its->phys_base, its_base_type_string[type],
2381 val, tmp);
2382 free_pages((unsigned long)base, order);
2383 return -ENXIO;
2384 }
2385
2386 baser->order = order;
2387 baser->base = base;
2388 baser->psz = psz;
2389 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
2390
2391 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
2392 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
2393 its_base_type_string[type],
2394 (unsigned long)virt_to_phys(base),
2395 indirect ? "indirect" : "flat", (int)esz,
2396 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
2397
2398 return 0;
2399}
2400
2401static bool its_parse_indirect_baser(struct its_node *its,
2402 struct its_baser *baser,
2403 u32 *order, u32 ids)
2404{
2405 u64 tmp = its_read_baser(its, baser);
2406 u64 type = GITS_BASER_TYPE(tmp);
2407 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
2408 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
2409 u32 new_order = *order;
2410 u32 psz = baser->psz;
2411 bool indirect = false;
2412
2413	/* No need to enable indirection if the memory required is < (psz * 2) bytes */
2414 if ((esz << ids) > (psz * 2)) {
2415 /*
2416		 * Find out whether the hw supports a single or a two-level
2417		 * table by reading bit at offset '62' after writing '1' to it.
2418 */
2419 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
2420 indirect = !!(baser->val & GITS_BASER_INDIRECT);
2421
2422 if (indirect) {
2423 /*
2424			 * The size of the lvl2 table is equal to the ITS page
2425			 * size, 'psz'. To size the lvl1 table, subtract from
2426			 * 'ids' (as reported by the ITS hardware) the ID bits
2427			 * resolved by a single lvl2 table; the lvl1 table then
2428			 * needs (1 << ids) entries of lvl1 entry size each.
2429 */
2430 ids -= ilog2(psz / (int)esz);
2431 esz = GITS_LVL1_ENTRY_SIZE;
2432 }
2433 }
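
	/*
	 * Worked example (illustrative numbers): with esz = 8 bytes,
	 * ids = 20 and psz = SZ_64K, one lvl2 page resolves
	 * ilog2(65536 / 8) = 13 ID bits, so the lvl1 table needs
	 * 2^(20 - 13) = 128 entries of GITS_LVL1_ENTRY_SIZE bytes,
	 * i.e. 1kB instead of the 8MB a flat table would require.
	 */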
2434
2435 /*
2436 * Allocate as many entries as required to fit the
2437 * range of device IDs that the ITS can grok... The ID
2438 * space being incredibly sparse, this results in a
2439	 * massive waste of memory if the two-level device table
2440	 * feature is not supported by the hardware.
2441 */
2442 new_order = max_t(u32, get_order(esz << ids), new_order);
2443 if (new_order >= MAX_ORDER) {
2444 new_order = MAX_ORDER - 1;
2445 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
2446 pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
2447 &its->phys_base, its_base_type_string[type],
2448 device_ids(its), ids);
2449 }
2450
2451 *order = new_order;
2452
2453 return indirect;
2454}
2455
2456static u32 compute_common_aff(u64 val)
2457{
2458 u32 aff, clpiaff;
2459
2460 aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
2461 clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);
2462
2463 return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
2464}
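
/*
 * A sketch of the masking above (illustrative values): CommonLPIAff = 0
 * matches all RDs (the whole affinity is cleared), 1 keeps Aff3 only,
 * 2 keeps Aff3.Aff2, and 3 keeps Aff3.Aff2.Aff1. For instance,
 * aff = 0x01020304 with clpiaff = 2 yields 0x01020000.
 */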
2465
2466static u32 compute_its_aff(struct its_node *its)
2467{
2468 u64 val;
2469 u32 svpet;
2470
2471 /*
2472 * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
2473	 * the resulting affinity. We then use that to see if it matches
2474 * our own affinity.
2475 */
2476 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
2477 val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
2478 val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
2479 return compute_common_aff(val);
2480}
2481
2482static struct its_node *find_sibling_its(struct its_node *cur_its)
2483{
2484 struct its_node *its;
2485 u32 aff;
2486
2487 if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
2488 return NULL;
2489
2490 aff = compute_its_aff(cur_its);
2491
2492 list_for_each_entry(its, &its_nodes, entry) {
2493 u64 baser;
2494
2495 if (!is_v4_1(its) || its == cur_its)
2496 continue;
2497
2498 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2499 continue;
2500
2501 if (aff != compute_its_aff(its))
2502 continue;
2503
2504 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2505 baser = its->tables[2].val;
2506 if (!(baser & GITS_BASER_VALID))
2507 continue;
2508
2509 return its;
2510 }
2511
2512 return NULL;
2513}
2514
2515static void its_free_tables(struct its_node *its)
2516{
2517 int i;
2518
2519 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2520 if (its->tables[i].base) {
2521 free_pages((unsigned long)its->tables[i].base,
2522 its->tables[i].order);
2523 its->tables[i].base = NULL;
2524 }
2525 }
2526}
2527
2528static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
2529{
2530 u64 psz = SZ_64K;
2531
2532 while (psz) {
2533 u64 val, gpsz;
2534
2535 val = its_read_baser(its, baser);
2536 val &= ~GITS_BASER_PAGE_SIZE_MASK;
2537
2538 switch (psz) {
2539 case SZ_64K:
2540 gpsz = GITS_BASER_PAGE_SIZE_64K;
2541 break;
2542 case SZ_16K:
2543 gpsz = GITS_BASER_PAGE_SIZE_16K;
2544 break;
2545 case SZ_4K:
2546 default:
2547 gpsz = GITS_BASER_PAGE_SIZE_4K;
2548 break;
2549 }
2550
2551 gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT;
2552
2553 val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz);
2554 its_write_baser(its, baser, val);
2555
2556 if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz)
2557 break;
2558
2559 switch (psz) {
2560 case SZ_64K:
2561 psz = SZ_16K;
2562 break;
2563 case SZ_16K:
2564 psz = SZ_4K;
2565 break;
2566 case SZ_4K:
2567 default:
2568 return -1;
2569 }
2570 }
2571
2572 baser->psz = psz;
2573 return 0;
2574}
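
/*
 * Probe sketch: the loop above starts from the largest ITS page size
 * (64K) and steps down, keeping the first size whose read-back matches
 * what was written; a -1 return means that not even 4K pages stuck,
 * which the caller treats as fatal for this ITS.
 */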
2575
2576static int its_alloc_tables(struct its_node *its)
2577{
2578 u64 shr = GITS_BASER_InnerShareable;
2579 u64 cache = GITS_BASER_RaWaWb;
2580 int err, i;
2581
2582 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
2583 /* erratum 24313: ignore memory access type */
2584 cache = GITS_BASER_nCnB;
2585
2586 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2587 struct its_baser *baser = its->tables + i;
2588 u64 val = its_read_baser(its, baser);
2589 u64 type = GITS_BASER_TYPE(val);
2590 bool indirect = false;
2591 u32 order;
2592
2593 if (type == GITS_BASER_TYPE_NONE)
2594 continue;
2595
2596 if (its_probe_baser_psz(its, baser)) {
2597 its_free_tables(its);
2598 return -ENXIO;
2599 }
2600
2601 order = get_order(baser->psz);
2602
2603 switch (type) {
2604 case GITS_BASER_TYPE_DEVICE:
2605 indirect = its_parse_indirect_baser(its, baser, &order,
2606 device_ids(its));
2607 break;
2608
2609 case GITS_BASER_TYPE_VCPU:
2610 if (is_v4_1(its)) {
2611 struct its_node *sibling;
2612
2613 WARN_ON(i != 2);
2614 if ((sibling = find_sibling_its(its))) {
2615 *baser = sibling->tables[2];
2616 its_write_baser(its, baser, baser->val);
2617 continue;
2618 }
2619 }
2620
2621 indirect = its_parse_indirect_baser(its, baser, &order,
2622 ITS_MAX_VPEID_BITS);
2623 break;
2624 }
2625
2626 err = its_setup_baser(its, baser, cache, shr, order, indirect);
2627 if (err < 0) {
2628 its_free_tables(its);
2629 return err;
2630 }
2631
2632		/* Update the settings that will be used for the next BASERn */
2633 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
2634 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
2635 }
2636
2637 return 0;
2638}
2639
2640static u64 inherit_vpe_l1_table_from_its(void)
2641{
2642 struct its_node *its;
2643 u64 val;
2644 u32 aff;
2645
2646 val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2647 aff = compute_common_aff(val);
2648
2649 list_for_each_entry(its, &its_nodes, entry) {
2650 u64 baser, addr;
2651
2652 if (!is_v4_1(its))
2653 continue;
2654
2655 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2656 continue;
2657
2658 if (aff != compute_its_aff(its))
2659 continue;
2660
2661 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2662 baser = its->tables[2].val;
2663 if (!(baser & GITS_BASER_VALID))
2664 continue;
2665
2666 /* We have a winner! */
2667 gic_data_rdist()->vpe_l1_base = its->tables[2].base;
2668
2669 val = GICR_VPROPBASER_4_1_VALID;
2670 if (baser & GITS_BASER_INDIRECT)
2671 val |= GICR_VPROPBASER_4_1_INDIRECT;
2672 val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE,
2673 FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser));
2674 switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) {
2675 case GIC_PAGE_SIZE_64K:
2676 addr = GITS_BASER_ADDR_48_to_52(baser);
2677 break;
2678 default:
2679 addr = baser & GENMASK_ULL(47, 12);
2680 break;
2681 }
2682 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
2683 val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
2684 FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
2685 val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
2686 FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
2687 val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
2688
2689 return val;
2690 }
2691
2692 return 0;
2693}
2694
2695static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
2696{
2697 u32 aff;
2698 u64 val;
2699 int cpu;
2700
2701 val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2702 aff = compute_common_aff(val);
2703
2704 for_each_possible_cpu(cpu) {
2705 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2706
2707 if (!base || cpu == smp_processor_id())
2708 continue;
2709
2710 val = gic_read_typer(base + GICR_TYPER);
2711 if (aff != compute_common_aff(val))
2712 continue;
2713
2714 /*
2715 * At this point, we have a victim. This particular CPU
2716 * has already booted, and has an affinity that matches
2717 * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
2718 * Make sure we don't write the Z bit in that case.
2719 */
2720 val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2721 val &= ~GICR_VPROPBASER_4_1_Z;
2722
2723 gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2724 *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
2725
2726 return val;
2727 }
2728
2729 return 0;
2730}
2731
2732static bool allocate_vpe_l2_table(int cpu, u32 id)
2733{
2734 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2735 unsigned int psz, esz, idx, npg, gpsz;
2736 u64 val;
2737 struct page *page;
2738 __le64 *table;
2739
2740 if (!gic_rdists->has_rvpeid)
2741 return true;
2742
2743 /* Skip non-present CPUs */
2744 if (!base)
2745 return true;
2746
2747 val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2748
2749 esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
2750 gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2751 npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1;
2752
2753 switch (gpsz) {
2754 default:
2755 WARN_ON(1);
2756 fallthrough;
2757 case GIC_PAGE_SIZE_4K:
2758 psz = SZ_4K;
2759 break;
2760 case GIC_PAGE_SIZE_16K:
2761 psz = SZ_16K;
2762 break;
2763 case GIC_PAGE_SIZE_64K:
2764 psz = SZ_64K;
2765 break;
2766 }
2767
2768	/* Don't allow a vpe_id that exceeds the single, flat table limit */
2769 if (!(val & GICR_VPROPBASER_4_1_INDIRECT))
2770 return (id < (npg * psz / (esz * SZ_8)));
2771
2772 /* Compute 1st level table index & check if that exceeds table limit */
2773 idx = id >> ilog2(psz / (esz * SZ_8));
2774 if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE))
2775 return false;
2776
2777 table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2778
2779 /* Allocate memory for 2nd level table */
2780 if (!table[idx]) {
2781 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
2782 if (!page)
2783 return false;
2784
2785 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2786 if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2787 gic_flush_dcache_to_poc(page_address(page), psz);
2788
2789 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2790
2791 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2792 if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2793 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
2794
2795 /* Ensure updated table contents are visible to RD hardware */
2796 dsb(sy);
2797 }
2798
2799 return true;
2800}
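
/*
 * Index math sketch (illustrative numbers): with an ENTRY_SIZE field
 * giving esz = 2 (i.e. 16 bytes per vPE entry) and psz = SZ_4K, each
 * L2 page covers 4096 / 16 = 256 vPEs, so vpe_id 1000 lands in L1
 * slot 1000 >> ilog2(256) = 3.
 */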
2801
2802static int allocate_vpe_l1_table(void)
2803{
2804 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2805 u64 val, gpsz, npg, pa;
2806 unsigned int psz = SZ_64K;
2807 unsigned int np, epp, esz;
2808 struct page *page;
2809
2810 if (!gic_rdists->has_rvpeid)
2811 return 0;
2812
2813 /*
2814	 * If VPENDBASER.Valid is set, disable any previously programmed
2815 * VPE by setting PendingLast while clearing Valid. This has the
2816 * effect of making sure no doorbell will be generated and we can
2817 * then safely clear VPROPBASER.Valid.
2818 */
2819 if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
2820 gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
2821 vlpi_base + GICR_VPENDBASER);
2822
2823 /*
2824 * If we can inherit the configuration from another RD, let's do
2825 * so. Otherwise, we have to go through the allocation process. We
2826 * assume that all RDs have the exact same requirements, as
2827 * nothing will work otherwise.
2828 */
2829 val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
2830 if (val & GICR_VPROPBASER_4_1_VALID)
2831 goto out;
2832
2833 gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC);
2834 if (!gic_data_rdist()->vpe_table_mask)
2835 return -ENOMEM;
2836
2837 val = inherit_vpe_l1_table_from_its();
2838 if (val & GICR_VPROPBASER_4_1_VALID)
2839 goto out;
2840
2841 /* First probe the page size */
2842 val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
2843 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2844 val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
2845 gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2846 esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
2847
2848 switch (gpsz) {
2849 default:
2850 gpsz = GIC_PAGE_SIZE_4K;
2851 fallthrough;
2852 case GIC_PAGE_SIZE_4K:
2853 psz = SZ_4K;
2854 break;
2855 case GIC_PAGE_SIZE_16K:
2856 psz = SZ_16K;
2857 break;
2858 case GIC_PAGE_SIZE_64K:
2859 psz = SZ_64K;
2860 break;
2861 }
2862
2863 /*
2864 * Start populating the register from scratch, including RO fields
2865 * (which we want to print in debug cases...)
2866 */
2867 val = 0;
2868 val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz);
2869 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz);
2870
2871 /* How many entries per GIC page? */
2872 esz++;
2873 epp = psz / (esz * SZ_8);
2874
2875 /*
2876 * If we need more than just a single L1 page, flag the table
2877 * as indirect and compute the number of required L1 pages.
2878 */
2879 if (epp < ITS_MAX_VPEID) {
2880 int nl2;
2881
2882 val |= GICR_VPROPBASER_4_1_INDIRECT;
2883
2884 /* Number of L2 pages required to cover the VPEID space */
2885 nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp);
2886
2887 /* Number of L1 pages to point to the L2 pages */
2888 npg = DIV_ROUND_UP(nl2 * SZ_8, psz);
2889 } else {
2890 npg = 1;
2891 }
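
	/*
	 * Sizing sketch (illustrative numbers): with psz = SZ_4K and an
	 * ENTRY_SIZE field of 1 (esz = 2, i.e. 16 bytes per entry),
	 * epp = 4096 / 16 = 256. Covering ITS_MAX_VPEID vPEs (65536
	 * with 16 VPEID bits) takes nl2 = 256 L2 pages, whose 8-byte
	 * pointers fit in npg = DIV_ROUND_UP(256 * 8, 4096) = 1 L1 page.
	 */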
2892
2893 val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
2894
2895 /* Right, that's the number of CPU pages we need for L1 */
2896 np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
2897
2898 pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
2899 np, npg, psz, epp, esz);
2900 page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
2901 if (!page)
2902 return -ENOMEM;
2903
2904 gic_data_rdist()->vpe_l1_base = page_address(page);
2905 pa = virt_to_phys(page_address(page));
2906 WARN_ON(!IS_ALIGNED(pa, psz));
2907
2908 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
2909 val |= GICR_VPROPBASER_RaWb;
2910 val |= GICR_VPROPBASER_InnerShareable;
2911 val |= GICR_VPROPBASER_4_1_Z;
2912 val |= GICR_VPROPBASER_4_1_VALID;
2913
2914out:
2915 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2916 cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
2917
2918 pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
2919 smp_processor_id(), val,
2920 cpumask_pr_args(gic_data_rdist()->vpe_table_mask));
2921
2922 return 0;
2923}
2924
2925static int its_alloc_collections(struct its_node *its)
2926{
2927 int i;
2928
2929 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
2930 GFP_KERNEL);
2931 if (!its->collections)
2932 return -ENOMEM;
2933
2934 for (i = 0; i < nr_cpu_ids; i++)
2935 its->collections[i].target_address = ~0ULL;
2936
2937 return 0;
2938}
2939
2940static struct page *its_allocate_pending_table(gfp_t gfp_flags)
2941{
2942 struct page *pend_page;
2943
2944 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
2945 get_order(LPI_PENDBASE_SZ));
2946 if (!pend_page)
2947 return NULL;
2948
2949 /* Make sure the GIC will observe the zero-ed page */
2950 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
2951
2952 return pend_page;
2953}
2954
2955static void its_free_pending_table(struct page *pt)
2956{
2957 free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
2958}
2959
2960/*
2961 * Booting with kdump and LPIs enabled is generally fine. Any other
2962 * case is wrong in the absence of firmware/EFI support.
2963 */
2964static bool enabled_lpis_allowed(void)
2965{
2966 phys_addr_t addr;
2967 u64 val;
2968
2969 /* Check whether the property table is in a reserved region */
2970 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2971 addr = val & GENMASK_ULL(51, 12);
2972
2973 return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
2974}
2975
2976static int __init allocate_lpi_tables(void)
2977{
2978 u64 val;
2979 int err, cpu;
2980
2981 /*
2982 * If LPIs are enabled while we run this from the boot CPU,
2983 * flag the RD tables as pre-allocated if the stars do align.
2984 */
2985 val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
2986 if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
2987 gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
2988 RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
2989 pr_info("GICv3: Using preallocated redistributor tables\n");
2990 }
2991
2992 err = its_setup_lpi_prop_table();
2993 if (err)
2994 return err;
2995
2996 /*
2997 * We allocate all the pending tables anyway, as we may have a
2998 * mix of RDs that have had LPIs enabled, and some that
2999 * don't. We'll free the unused ones as each CPU comes online.
3000 */
3001 for_each_possible_cpu(cpu) {
3002 struct page *pend_page;
3003
3004 pend_page = its_allocate_pending_table(GFP_NOWAIT);
3005 if (!pend_page) {
3006 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
3007 return -ENOMEM;
3008 }
3009
3010 gic_data_rdist_cpu(cpu)->pend_page = pend_page;
3011 }
3012
3013 return 0;
3014}
3015
3016static u64 read_vpend_dirty_clear(void __iomem *vlpi_base)
3017{
3018 u32 count = 1000000; /* 1s! */
3019 bool clean;
3020 u64 val;
3021
3022 do {
3023 val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
3024 clean = !(val & GICR_VPENDBASER_Dirty);
3025 if (!clean) {
3026 count--;
3027 cpu_relax();
3028 udelay(1);
3029 }
3030 } while (!clean && count);
3031
3032 if (unlikely(!clean))
3033 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
3034
3035 return val;
3036}
3037
3038static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
3039{
3040 u64 val;
3041
3042 /* Make sure we wait until the RD is done with the initial scan */
3043 val = read_vpend_dirty_clear(vlpi_base);
3044 val &= ~GICR_VPENDBASER_Valid;
3045 val &= ~clr;
3046 val |= set;
3047 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3048
3049 val = read_vpend_dirty_clear(vlpi_base);
3050 if (unlikely(val & GICR_VPENDBASER_Dirty))
3051 val |= GICR_VPENDBASER_PendingLast;
3052
3053 return val;
3054}
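
/*
 * Note: if the RD still reports Dirty once Valid has been cleared, the
 * returned value has PendingLast forced on, so that callers
 * conservatively assume the vPE may have interrupts pending.
 */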
3055
3056static void its_cpu_init_lpis(void)
3057{
3058 void __iomem *rbase = gic_data_rdist_rd_base();
3059 struct page *pend_page;
3060 phys_addr_t paddr;
3061 u64 val, tmp;
3062
3063 if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED)
3064 return;
3065
3066 val = readl_relaxed(rbase + GICR_CTLR);
3067 if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
3068 (val & GICR_CTLR_ENABLE_LPIS)) {
3069 /*
3070 * Check that we get the same property table on all
3071 * RDs. If we don't, this is hopeless.
3072 */
3073 paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
3074 paddr &= GENMASK_ULL(51, 12);
3075 if (WARN_ON(gic_rdists->prop_table_pa != paddr))
3076 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
3077
3078 paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3079 paddr &= GENMASK_ULL(51, 16);
3080
3081 WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
3082 gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED;
3083
3084 goto out;
3085 }
3086
3087 pend_page = gic_data_rdist()->pend_page;
3088 paddr = page_to_phys(pend_page);
3089
3090 /* set PROPBASE */
3091 val = (gic_rdists->prop_table_pa |
3092 GICR_PROPBASER_InnerShareable |
3093 GICR_PROPBASER_RaWaWb |
3094 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
3095
3096 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3097 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
3098
3099 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
3100 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
3101 /*
3102 * The HW reports non-shareable, we must
3103 * remove the cacheability attributes as
3104 * well.
3105 */
3106 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
3107 GICR_PROPBASER_CACHEABILITY_MASK);
3108 val |= GICR_PROPBASER_nC;
3109 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3110 }
3111 pr_info_once("GIC: using cache flushing for LPI property table\n");
3112 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
3113 }
3114
3115 /* set PENDBASE */
3116 val = (page_to_phys(pend_page) |
3117 GICR_PENDBASER_InnerShareable |
3118 GICR_PENDBASER_RaWaWb);
3119
3120 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3121 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3122
3123 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
3124 /*
3125 * The HW reports non-shareable, we must remove the
3126 * cacheability attributes as well.
3127 */
3128 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
3129 GICR_PENDBASER_CACHEABILITY_MASK);
3130 val |= GICR_PENDBASER_nC;
3131 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3132 }
3133
3134 /* Enable LPIs */
3135 val = readl_relaxed(rbase + GICR_CTLR);
3136 val |= GICR_CTLR_ENABLE_LPIS;
3137 writel_relaxed(val, rbase + GICR_CTLR);
3138
3139 if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
3140 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3141
3142 /*
3143		 * It's possible for a CPU to receive VLPIs before it is
3144		 * scheduled as a vPE, especially for the first CPU, and a
3145		 * VLPI with an INTID larger than 2^(IDbits+1) would be
3146		 * considered out of range and dropped by the GIC.
3147		 * So initialize IDbits to a known value to avoid VLPI drops.
3148 */
3149 val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3150 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
3151 smp_processor_id(), val);
3152 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3153
3154 /*
3155		 * Also clear the Valid bit of GICR_VPENDBASER, in case some
3156		 * ancient programming was left behind and could end up
3157		 * corrupting memory.
3158 */
3159 val = its_clear_vpend_valid(vlpi_base, 0, 0);
3160 }
3161
3162 if (allocate_vpe_l1_table()) {
3163 /*
3164 * If the allocation has failed, we're in massive trouble.
3165 * Disable direct injection, and pray that no VM was
3166 * already running...
3167 */
3168 gic_rdists->has_rvpeid = false;
3169 gic_rdists->has_vlpis = false;
3170 }
3171
3172 /* Make sure the GIC has seen the above */
3173 dsb(sy);
3174out:
3175 gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
3176 pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
3177 smp_processor_id(),
3178 gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ?
3179 "reserved" : "allocated",
3180 &paddr);
3181}
3182
3183static void its_cpu_init_collection(struct its_node *its)
3184{
3185 int cpu = smp_processor_id();
3186 u64 target;
3187
3188	/* Avoid cross-node collections and their mapping */
3189 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
3190 struct device_node *cpu_node;
3191
3192 cpu_node = of_get_cpu_node(cpu, NULL);
3193 if (its->numa_node != NUMA_NO_NODE &&
3194 its->numa_node != of_node_to_nid(cpu_node))
3195 return;
3196 }
3197
3198 /*
3199 * We now have to bind each collection to its target
3200 * redistributor.
3201 */
3202 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
3203 /*
3204 * This ITS wants the physical address of the
3205 * redistributor.
3206 */
3207 target = gic_data_rdist()->phys_base;
3208 } else {
3209 /* This ITS wants a linear CPU number. */
3210 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
3211 target = GICR_TYPER_CPU_NUMBER(target) << 16;
3212 }
3213
3214 /* Perform collection mapping */
3215 its->collections[cpu].target_address = target;
3216 its->collections[cpu].col_id = cpu;
3217
3218 its_send_mapc(its, &its->collections[cpu], 1);
3219 its_send_invall(its, &its->collections[cpu]);
3220}
3221
3222static void its_cpu_init_collections(void)
3223{
3224 struct its_node *its;
3225
3226 raw_spin_lock(&its_lock);
3227
3228 list_for_each_entry(its, &its_nodes, entry)
3229 its_cpu_init_collection(its);
3230
3231 raw_spin_unlock(&its_lock);
3232}
3233
3234static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
3235{
3236 struct its_device *its_dev = NULL, *tmp;
3237 unsigned long flags;
3238
3239 raw_spin_lock_irqsave(&its->lock, flags);
3240
3241 list_for_each_entry(tmp, &its->its_device_list, entry) {
3242 if (tmp->device_id == dev_id) {
3243 its_dev = tmp;
3244 break;
3245 }
3246 }
3247
3248 raw_spin_unlock_irqrestore(&its->lock, flags);
3249
3250 return its_dev;
3251}
3252
3253static struct its_baser *its_get_baser(struct its_node *its, u32 type)
3254{
3255 int i;
3256
3257 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3258 if (GITS_BASER_TYPE(its->tables[i].val) == type)
3259 return &its->tables[i];
3260 }
3261
3262 return NULL;
3263}
3264
3265static bool its_alloc_table_entry(struct its_node *its,
3266 struct its_baser *baser, u32 id)
3267{
3268 struct page *page;
3269 u32 esz, idx;
3270 __le64 *table;
3271
3272	/* Don't allow a device id that exceeds the single, flat table limit */
3273 esz = GITS_BASER_ENTRY_SIZE(baser->val);
3274 if (!(baser->val & GITS_BASER_INDIRECT))
3275 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
3276
3277 /* Compute 1st level table index & check if that exceeds table limit */
3278 idx = id >> ilog2(baser->psz / esz);
3279 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
3280 return false;
3281
3282 table = baser->base;
3283
3284 /* Allocate memory for 2nd level table */
3285 if (!table[idx]) {
3286 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
3287 get_order(baser->psz));
3288 if (!page)
3289 return false;
3290
3291 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
3292 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3293 gic_flush_dcache_to_poc(page_address(page), baser->psz);
3294
3295 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
3296
3297 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
3298 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3299 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
3300
3301 /* Ensure updated table contents are visible to ITS hardware */
3302 dsb(sy);
3303 }
3304
3305 return true;
3306}
3307
3308static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
3309{
3310 struct its_baser *baser;
3311
3312 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
3313
3314	/* Don't allow a device id that exceeds the ITS hardware limit */
3315 if (!baser)
3316 return (ilog2(dev_id) < device_ids(its));
3317
3318 return its_alloc_table_entry(its, baser, dev_id);
3319}
3320
3321static bool its_alloc_vpe_table(u32 vpe_id)
3322{
3323 struct its_node *its;
3324 int cpu;
3325
3326 /*
3327 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
3328 * could try and only do it on ITSs corresponding to devices
3329 * that have interrupts targeted at this VPE, but the
3330 * complexity becomes crazy (and you have tons of memory
3331 * anyway, right?).
3332 */
3333 list_for_each_entry(its, &its_nodes, entry) {
3334 struct its_baser *baser;
3335
3336 if (!is_v4(its))
3337 continue;
3338
3339 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
3340 if (!baser)
3341 return false;
3342
3343 if (!its_alloc_table_entry(its, baser, vpe_id))
3344 return false;
3345 }
3346
3347	/* Not v4.1? No need to iterate the RDs; bail out early. */
3348 if (!gic_rdists->has_rvpeid)
3349 return true;
3350
3351 /*
3352 * Make sure the L2 tables are allocated for all copies of
3353 * the L1 table on *all* v4.1 RDs.
3354 */
3355 for_each_possible_cpu(cpu) {
3356 if (!allocate_vpe_l2_table(cpu, vpe_id))
3357 return false;
3358 }
3359
3360 return true;
3361}
3362
3363static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
3364 int nvecs, bool alloc_lpis)
3365{
3366 struct its_device *dev;
3367 unsigned long *lpi_map = NULL;
3368 unsigned long flags;
3369 u16 *col_map = NULL;
3370 void *itt;
3371 int lpi_base;
3372 int nr_lpis;
3373 int nr_ites;
3374 int sz;
3375
3376 if (!its_alloc_device_table(its, dev_id))
3377 return NULL;
3378
3379 if (WARN_ON(!is_power_of_2(nvecs)))
3380 nvecs = roundup_pow_of_two(nvecs);
3381
3382 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3383 /*
3384 * Even if the device wants a single LPI, the ITT must be
3385 * sized as a power of two (and you need at least one bit...).
3386 */
3387 nr_ites = max(2, nvecs);
3388 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
3389 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
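
	/*
	 * ITT sizing sketch (illustrative numbers): with 4-byte ITT
	 * entries (ITT_entry_size field = 3) and nvecs = 32, sz is
	 * 32 * 4 = 128 bytes, then raised to at least ITS_ITT_ALIGN and
	 * padded by ITS_ITT_ALIGN - 1 so that a suitably aligned window
	 * always fits inside the allocation.
	 */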
3390 itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
3391 if (alloc_lpis) {
3392 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
3393 if (lpi_map)
3394 col_map = kcalloc(nr_lpis, sizeof(*col_map),
3395 GFP_KERNEL);
3396 } else {
3397 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
3398 nr_lpis = 0;
3399 lpi_base = 0;
3400 }
3401
3402 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
3403 kfree(dev);
3404 kfree(itt);
3405 bitmap_free(lpi_map);
3406 kfree(col_map);
3407 return NULL;
3408 }
3409
3410 gic_flush_dcache_to_poc(itt, sz);
3411
3412 dev->its = its;
3413 dev->itt = itt;
3414 dev->nr_ites = nr_ites;
3415 dev->event_map.lpi_map = lpi_map;
3416 dev->event_map.col_map = col_map;
3417 dev->event_map.lpi_base = lpi_base;
3418 dev->event_map.nr_lpis = nr_lpis;
3419 raw_spin_lock_init(&dev->event_map.vlpi_lock);
3420 dev->device_id = dev_id;
3421 INIT_LIST_HEAD(&dev->entry);
3422
3423 raw_spin_lock_irqsave(&its->lock, flags);
3424 list_add(&dev->entry, &its->its_device_list);
3425 raw_spin_unlock_irqrestore(&its->lock, flags);
3426
3427 /* Map device to its ITT */
3428 its_send_mapd(dev, 1);
3429
3430 return dev;
3431}
3432
3433static void its_free_device(struct its_device *its_dev)
3434{
3435 unsigned long flags;
3436
3437 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
3438 list_del(&its_dev->entry);
3439 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
3440 kfree(its_dev->event_map.col_map);
3441 kfree(its_dev->itt);
3442 kfree(its_dev);
3443}
3444
3445static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
3446{
3447 int idx;
3448
3449	/* Find a free LPI region in lpi_map and allocate it. */
3450 idx = bitmap_find_free_region(dev->event_map.lpi_map,
3451 dev->event_map.nr_lpis,
3452 get_count_order(nvecs));
3453 if (idx < 0)
3454 return -ENOSPC;
3455
3456 *hwirq = dev->event_map.lpi_base + idx;
3457
3458 return 0;
3459}
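
/*
 * Allocation sketch (illustrative numbers): for nvecs = 4,
 * get_count_order(4) = 2, so bitmap_find_free_region() returns a
 * naturally aligned 4-bit region; with lpi_base = 8192 and idx = 4,
 * the first hwirq handed out is 8196.
 */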
3460
3461static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
3462 int nvec, msi_alloc_info_t *info)
3463{
3464 struct its_node *its;
3465 struct its_device *its_dev;
3466 struct msi_domain_info *msi_info;
3467 u32 dev_id;
3468 int err = 0;
3469
3470 /*
3471 * We ignore "dev" entirely, and rely on the dev_id that has
3472 * been passed via the scratchpad. This limits this domain's
3473 * usefulness to upper layers that definitely know that they
3474 * are built on top of the ITS.
3475 */
3476 dev_id = info->scratchpad[0].ul;
3477
3478 msi_info = msi_get_domain_info(domain);
3479 its = msi_info->data;
3480
3481 if (!gic_rdists->has_direct_lpi &&
3482 vpe_proxy.dev &&
3483 vpe_proxy.dev->its == its &&
3484 dev_id == vpe_proxy.dev->device_id) {
3485 /* Bad luck. Get yourself a better implementation */
3486 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
3487 dev_id);
3488 return -EINVAL;
3489 }
3490
3491 mutex_lock(&its->dev_alloc_lock);
3492 its_dev = its_find_device(its, dev_id);
3493 if (its_dev) {
3494 /*
3495		 * We have already seen this ID, probably through
3496 * another alias (PCI bridge of some sort). No need to
3497 * create the device.
3498 */
3499 its_dev->shared = true;
3500 pr_debug("Reusing ITT for devID %x\n", dev_id);
3501 goto out;
3502 }
3503
3504 its_dev = its_create_device(its, dev_id, nvec, true);
3505 if (!its_dev) {
3506 err = -ENOMEM;
3507 goto out;
3508 }
3509
3510 if (info->flags & MSI_ALLOC_FLAGS_PROXY_DEVICE)
3511 its_dev->shared = true;
3512
3513 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
3514out:
3515 mutex_unlock(&its->dev_alloc_lock);
3516 info->scratchpad[0].ptr = its_dev;
3517 return err;
3518}
3519
3520static struct msi_domain_ops its_msi_domain_ops = {
3521 .msi_prepare = its_msi_prepare,
3522};
3523
3524static int its_irq_gic_domain_alloc(struct irq_domain *domain,
3525 unsigned int virq,
3526 irq_hw_number_t hwirq)
3527{
3528 struct irq_fwspec fwspec;
3529
3530 if (irq_domain_get_of_node(domain->parent)) {
3531 fwspec.fwnode = domain->parent->fwnode;
3532 fwspec.param_count = 3;
3533 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
3534 fwspec.param[1] = hwirq;
3535 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
3536 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
3537 fwspec.fwnode = domain->parent->fwnode;
3538 fwspec.param_count = 2;
3539 fwspec.param[0] = hwirq;
3540 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
3541 } else {
3542 return -EINVAL;
3543 }
3544
3545 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
3546}
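
/*
 * The 3-cell form above mirrors the DT interrupt encoding used by the
 * GICv3 driver (type, number, flags), so an LPI at hwirq 8192 becomes
 * { GIC_IRQ_TYPE_LPI, 8192, IRQ_TYPE_EDGE_RISING }, while fwnode-based
 * parents take the bare (hwirq, trigger) pair.
 */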
3547
3548static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3549 unsigned int nr_irqs, void *args)
3550{
3551 msi_alloc_info_t *info = args;
3552 struct its_device *its_dev = info->scratchpad[0].ptr;
3553 struct its_node *its = its_dev->its;
3554 struct irq_data *irqd;
3555 irq_hw_number_t hwirq;
3556 int err;
3557 int i;
3558
3559 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
3560 if (err)
3561 return err;
3562
3563 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
3564 if (err)
3565 return err;
3566
3567 for (i = 0; i < nr_irqs; i++) {
3568 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
3569 if (err)
3570 return err;
3571
3572 irq_domain_set_hwirq_and_chip(domain, virq + i,
3573 hwirq + i, &its_irq_chip, its_dev);
3574 irqd = irq_get_irq_data(virq + i);
3575 irqd_set_single_target(irqd);
3576 irqd_set_affinity_on_activate(irqd);
3577 pr_debug("ID:%d pID:%d vID:%d\n",
3578 (int)(hwirq + i - its_dev->event_map.lpi_base),
3579 (int)(hwirq + i), virq + i);
3580 }
3581
3582 return 0;
3583}
3584
3585static int its_irq_domain_activate(struct irq_domain *domain,
3586 struct irq_data *d, bool reserve)
3587{
3588 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3589 u32 event = its_get_event_id(d);
3590 int cpu;
3591
3592 cpu = its_select_cpu(d, cpu_online_mask);
3593 if (cpu < 0 || cpu >= nr_cpu_ids)
3594 return -EINVAL;
3595
3596 its_inc_lpi_count(d, cpu);
3597 its_dev->event_map.col_map[event] = cpu;
3598 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3599
3600 /* Map the GIC IRQ and event to the device */
3601 its_send_mapti(its_dev, d->hwirq, event);
3602 return 0;
3603}
3604
3605static void its_irq_domain_deactivate(struct irq_domain *domain,
3606 struct irq_data *d)
3607{
3608 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3609 u32 event = its_get_event_id(d);
3610
3611 its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
3612 /* Stop the delivery of interrupts */
3613 its_send_discard(its_dev, event);
3614}
3615
3616static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
3617 unsigned int nr_irqs)
3618{
3619 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
3620 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3621 struct its_node *its = its_dev->its;
3622 int i;
3623
3624 bitmap_release_region(its_dev->event_map.lpi_map,
3625 its_get_event_id(irq_domain_get_irq_data(domain, virq)),
3626 get_count_order(nr_irqs));
3627
3628 for (i = 0; i < nr_irqs; i++) {
3629 struct irq_data *data = irq_domain_get_irq_data(domain,
3630 virq + i);
3631 /* Nuke the entry in the domain */
3632 irq_domain_reset_irq_data(data);
3633 }
3634
3635 mutex_lock(&its->dev_alloc_lock);
3636
3637 /*
3638 * If all interrupts have been freed, start mopping the
3639 * floor. This is conditioned on the device not being shared.
3640 */
3641 if (!its_dev->shared &&
3642 bitmap_empty(its_dev->event_map.lpi_map,
3643 its_dev->event_map.nr_lpis)) {
3644 its_lpi_free(its_dev->event_map.lpi_map,
3645 its_dev->event_map.lpi_base,
3646 its_dev->event_map.nr_lpis);
3647
3648 /* Unmap device/itt */
3649 its_send_mapd(its_dev, 0);
3650 its_free_device(its_dev);
3651 }
3652
3653 mutex_unlock(&its->dev_alloc_lock);
3654
3655 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3656}
3657
3658static const struct irq_domain_ops its_domain_ops = {
3659 .alloc = its_irq_domain_alloc,
3660 .free = its_irq_domain_free,
3661 .activate = its_irq_domain_activate,
3662 .deactivate = its_irq_domain_deactivate,
3663};
3664
3665/*
3666 * This is insane.
3667 *
3668 * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
3669 * likely), the only way to perform an invalidate is to use a fake
3670 * device to issue an INV command, implying that the LPI has first
3671 * been mapped to some event on that device. Since this is not exactly
3672 * cheap, we try to keep that mapping around as long as possible, and
3673 * only issue an UNMAP if we're short on available slots.
3674 *
3675 * Broken by design(tm).
3676 *
3677 * GICv4.1, on the other hand, mandates that we're able to invalidate
3678 * by writing to a MMIO register. It doesn't implement the whole of
3679 * DirectLPI, but that's good enough. And most of the time, we don't
3680 * even have to invalidate anything, as the redistributor can be told
3681 * whether to generate a doorbell or not (we thus leave it enabled,
3682 * always).
3683 */
3684static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
3685{
3686 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3687 if (gic_rdists->has_rvpeid)
3688 return;
3689
3690 /* Already unmapped? */
3691 if (vpe->vpe_proxy_event == -1)
3692 return;
3693
3694 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
3695 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
3696
3697 /*
3698 * We don't track empty slots at all, so let's move the
3699 * next_victim pointer if we can quickly reuse that slot
3700 * instead of nuking an existing entry. Not clear that this is
3701 * always a win though, and this might just generate a ripple
3702 * effect... Let's just hope VPEs don't migrate too often.
3703 */
3704 if (vpe_proxy.vpes[vpe_proxy.next_victim])
3705 vpe_proxy.next_victim = vpe->vpe_proxy_event;
3706
3707 vpe->vpe_proxy_event = -1;
3708}
3709
3710static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
3711{
3712 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3713 if (gic_rdists->has_rvpeid)
3714 return;
3715
3716 if (!gic_rdists->has_direct_lpi) {
3717 unsigned long flags;
3718
3719 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3720 its_vpe_db_proxy_unmap_locked(vpe);
3721 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3722 }
3723}
3724
3725static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
3726{
3727 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3728 if (gic_rdists->has_rvpeid)
3729 return;
3730
3731 /* Already mapped? */
3732 if (vpe->vpe_proxy_event != -1)
3733 return;
3734
3735 /* This slot was already allocated. Kick the other VPE out. */
3736 if (vpe_proxy.vpes[vpe_proxy.next_victim])
3737 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
3738
3739 /* Map the new VPE instead */
3740 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
3741 vpe->vpe_proxy_event = vpe_proxy.next_victim;
3742 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
3743
3744 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
3745 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
3746}
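
/*
 * The proxy slots are handed out round-robin: next_victim wraps modulo
 * the proxy device's nr_ites, evicting whichever vPE currently occupies
 * the next slot once they are all in use.
 */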
3747
3748static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
3749{
3750 unsigned long flags;
3751 struct its_collection *target_col;
3752
3753 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3754 if (gic_rdists->has_rvpeid)
3755 return;
3756
3757 if (gic_rdists->has_direct_lpi) {
3758 void __iomem *rdbase;
3759
3760 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
3761 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3762 wait_for_syncr(rdbase);
3763
3764 return;
3765 }
3766
3767 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3768
3769 its_vpe_db_proxy_map_locked(vpe);
3770
3771 target_col = &vpe_proxy.dev->its->collections[to];
3772 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
3773 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
3774
3775 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3776}
3777
3778static int its_vpe_set_affinity(struct irq_data *d,
3779 const struct cpumask *mask_val,
3780 bool force)
3781{
3782 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3783 int from, cpu = cpumask_first(mask_val);
3784 unsigned long flags;
3785
3786 /*
3787 * Changing affinity is mega expensive, so let's be as lazy as
3788 * we can and only do it if we really have to. Also, if mapped
3789 * into the proxy device, we need to move the doorbell
3790 * interrupt to its new location.
3791 *
3792 * Another thing is that changing the affinity of a vPE affects
3793 * *other interrupts* such as all the vLPIs that are routed to
3794 * this vPE. This means that the irq_desc lock is not enough to
3795 * protect us, and that we must ensure nobody samples vpe->col_idx
3796 * during the update, hence the lock below which must also be
3797 * taken on any vLPI handling path that evaluates vpe->col_idx.
3798 */
3799 from = vpe_to_cpuid_lock(vpe, &flags);
3800 if (from == cpu)
3801 goto out;
3802
3803 vpe->col_idx = cpu;
3804
3805 /*
3806 * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
3807 * is sharing its VPE table with the current one.
3808 */
3809 if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
3810 cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
3811 goto out;
3812
3813 its_send_vmovp(vpe);
3814 its_vpe_db_proxy_move(vpe, from, cpu);
3815
3816out:
3817 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3818 vpe_to_cpuid_unlock(vpe, flags);
3819
3820 return IRQ_SET_MASK_OK_DONE;
3821}
3822
3823static void its_wait_vpt_parse_complete(void)
3824{
3825 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3826 u64 val;
3827
3828 if (!gic_rdists->has_vpend_valid_dirty)
3829 return;
3830
3831 WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER,
3832 val,
3833 !(val & GICR_VPENDBASER_Dirty),
3834 1, 500));
3835}
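
/*
 * The poll above spins at 1us intervals for at most 500us; a warning
 * here means the RD never signalled completion of the virtual pending
 * table parse.
 */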
3836
3837static void its_vpe_schedule(struct its_vpe *vpe)
3838{
3839 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3840 u64 val;
3841
3842 /* Schedule the VPE */
3843 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
3844 GENMASK_ULL(51, 12);
3845 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3846 val |= GICR_VPROPBASER_RaWb;
3847 val |= GICR_VPROPBASER_InnerShareable;
3848 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3849
3850 val = virt_to_phys(page_address(vpe->vpt_page)) &
3851 GENMASK_ULL(51, 16);
3852 val |= GICR_VPENDBASER_RaWaWb;
3853 val |= GICR_VPENDBASER_InnerShareable;
3854 /*
3855 * There is no good way of finding out if the pending table is
3856 * empty as we can race against the doorbell interrupt very
3857 * easily. So in the end, vpe->pending_last is only an
3858 * indication that the vcpu has something pending, not one
3859 * that the pending table is empty. A good implementation
3860 * would be able to read its coarse map pretty quickly anyway,
3861 * making this a tolerable issue.
3862 */
3863 val |= GICR_VPENDBASER_PendingLast;
3864 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
3865 val |= GICR_VPENDBASER_Valid;
3866 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3867}
3868
3869static void its_vpe_deschedule(struct its_vpe *vpe)
3870{
3871 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3872 u64 val;
3873
3874 val = its_clear_vpend_valid(vlpi_base, 0, 0);
3875
3876 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
3877 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
3878}
3879
3880static void its_vpe_invall(struct its_vpe *vpe)
3881{
3882 struct its_node *its;
3883
3884 list_for_each_entry(its, &its_nodes, entry) {
3885 if (!is_v4(its))
3886 continue;
3887
3888 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
3889 continue;
3890
3891 /*
3892 * Sending a VINVALL to a single ITS is enough, as all
3893 * we need is to reach the redistributors.
3894 */
3895 its_send_vinvall(its, vpe);
3896 return;
3897 }
3898}
3899
3900static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
3901{
3902 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3903 struct its_cmd_info *info = vcpu_info;
3904
3905 switch (info->cmd_type) {
3906 case SCHEDULE_VPE:
3907 its_vpe_schedule(vpe);
3908 return 0;
3909
3910 case DESCHEDULE_VPE:
3911 its_vpe_deschedule(vpe);
3912 return 0;
3913
3914 case COMMIT_VPE:
3915 its_wait_vpt_parse_complete();
3916 return 0;
3917
3918 case INVALL_VPE:
3919 its_vpe_invall(vpe);
3920 return 0;
3921
3922 default:
3923 return -EINVAL;
3924 }
3925}
3926
3927static void its_vpe_send_cmd(struct its_vpe *vpe,
3928 void (*cmd)(struct its_device *, u32))
3929{
3930 unsigned long flags;
3931
3932 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3933
3934 its_vpe_db_proxy_map_locked(vpe);
3935 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
3936
3937 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3938}
3939
3940static void its_vpe_send_inv(struct irq_data *d)
3941{
3942 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3943
3944 if (gic_rdists->has_direct_lpi) {
3945 void __iomem *rdbase;
3946
3947 /* Target the redistributor this VPE is currently known on */
3948 raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
3949 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3950 gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
3951 wait_for_syncr(rdbase);
3952 raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
3953 } else {
3954 its_vpe_send_cmd(vpe, its_send_inv);
3955 }
3956}
3957
3958static void its_vpe_mask_irq(struct irq_data *d)
3959{
3960 /*
3961	 * We need to mask the LPI, which is described by the parent
3962	 * irq_data. Instead of calling into the parent (which won't
3963	 * exactly do the right thing), let's simply use the
3964 * parent_data pointer. Yes, I'm naughty.
3965 */
3966 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
3967 its_vpe_send_inv(d);
3968}
3969
3970static void its_vpe_unmask_irq(struct irq_data *d)
3971{
3972 /* Same hack as above... */
3973 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
3974 its_vpe_send_inv(d);
3975}
3976
3977static int its_vpe_set_irqchip_state(struct irq_data *d,
3978 enum irqchip_irq_state which,
3979 bool state)
3980{
3981 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3982
3983 if (which != IRQCHIP_STATE_PENDING)
3984 return -EINVAL;
3985
3986 if (gic_rdists->has_direct_lpi) {
3987 void __iomem *rdbase;
3988
3989 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3990 if (state) {
3991 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
3992 } else {
3993 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3994 wait_for_syncr(rdbase);
3995 }
3996 } else {
3997 if (state)
3998 its_vpe_send_cmd(vpe, its_send_int);
3999 else
4000 its_vpe_send_cmd(vpe, its_send_clear);
4001 }
4002
4003 return 0;
4004}
4005
4006static int its_vpe_retrigger(struct irq_data *d)
4007{
4008 return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
4009}
4010
4011static struct irq_chip its_vpe_irq_chip = {
4012 .name = "GICv4-vpe",
4013 .irq_mask = its_vpe_mask_irq,
4014 .irq_unmask = its_vpe_unmask_irq,
4015 .irq_eoi = irq_chip_eoi_parent,
4016 .irq_set_affinity = its_vpe_set_affinity,
4017 .irq_retrigger = its_vpe_retrigger,
4018 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
4019 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
4020};
4021
4022static struct its_node *find_4_1_its(void)
4023{
4024 static struct its_node *its = NULL;
4025
4026 if (!its) {
4027 list_for_each_entry(its, &its_nodes, entry) {
4028 if (is_v4_1(its))
4029 return its;
4030 }
4031
4032 /* Oops? */
4033 its = NULL;
4034 }
4035
4036 return its;
4037}
4038
4039static void its_vpe_4_1_send_inv(struct irq_data *d)
4040{
4041 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4042 struct its_node *its;
4043
4044 /*
4045 * GICv4.1 wants doorbells to be invalidated using the
4046 * INVDB command in order to be broadcast to all RDs. Send
4047 * it to the first valid ITS, and let the HW do its magic.
4048 */
4049 its = find_4_1_its();
4050 if (its)
4051 its_send_invdb(its, vpe);
4052}
4053
4054static void its_vpe_4_1_mask_irq(struct irq_data *d)
4055{
4056 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
4057 its_vpe_4_1_send_inv(d);
4058}
4059
4060static void its_vpe_4_1_unmask_irq(struct irq_data *d)
4061{
4062 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
4063 its_vpe_4_1_send_inv(d);
4064}
4065
4066static void its_vpe_4_1_schedule(struct its_vpe *vpe,
4067 struct its_cmd_info *info)
4068{
4069 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4070 u64 val = 0;
4071
4072 /* Schedule the VPE */
4073 val |= GICR_VPENDBASER_Valid;
4074 val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0;
4075 val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
4076 val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
4077
4078 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
4079}
4080
4081static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
4082 struct its_cmd_info *info)
4083{
4084 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4085 u64 val;
4086
4087 if (info->req_db) {
4088 unsigned long flags;
4089
4090 /*
4091 * vPE is going to block: make the vPE non-resident with
4092 * PendingLast clear and DB set. The GIC guarantees that if
4093 * we read-back PendingLast clear, then a doorbell will be
4094 * delivered when an interrupt comes.
4095 *
4096		 * Note the locking to deal with the update of pending_last
4097		 * from the doorbell interrupt handler, which can run
4098		 * concurrently.
4099 */
4100 raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
4101 val = its_clear_vpend_valid(vlpi_base,
4102 GICR_VPENDBASER_PendingLast,
4103 GICR_VPENDBASER_4_1_DB);
4104 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
4105 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
4106 } else {
4107 /*
4108 * We're not blocking, so just make the vPE non-resident
4109 * with PendingLast set, indicating that we'll be back.
4110 */
4111 val = its_clear_vpend_valid(vlpi_base,
4112 0,
4113 GICR_VPENDBASER_PendingLast);
4114 vpe->pending_last = true;
4115 }
4116}
4117
4118static void its_vpe_4_1_invall(struct its_vpe *vpe)
4119{
4120 void __iomem *rdbase;
4121 unsigned long flags;
4122 u64 val;
4123 int cpu;
4124
4125 val = GICR_INVALLR_V;
4126 val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
4127
4128 /* Target the redistributor this vPE is currently known on */
4129 cpu = vpe_to_cpuid_lock(vpe, &flags);
4130 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
4131 rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
4132 gic_write_lpir(val, rdbase + GICR_INVALLR);
4133
4134 wait_for_syncr(rdbase);
4135 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
4136 vpe_to_cpuid_unlock(vpe, flags);
4137}
4138
4139static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4140{
4141 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4142 struct its_cmd_info *info = vcpu_info;
4143
4144 switch (info->cmd_type) {
4145 case SCHEDULE_VPE:
4146 its_vpe_4_1_schedule(vpe, info);
4147 return 0;
4148
4149 case DESCHEDULE_VPE:
4150 its_vpe_4_1_deschedule(vpe, info);
4151 return 0;
4152
4153 case COMMIT_VPE:
4154 its_wait_vpt_parse_complete();
4155 return 0;
4156
4157 case INVALL_VPE:
4158 its_vpe_4_1_invall(vpe);
4159 return 0;
4160
4161 default:
4162 return -EINVAL;
4163 }
4164}
4165
4166static struct irq_chip its_vpe_4_1_irq_chip = {
4167 .name = "GICv4.1-vpe",
4168 .irq_mask = its_vpe_4_1_mask_irq,
4169 .irq_unmask = its_vpe_4_1_unmask_irq,
4170 .irq_eoi = irq_chip_eoi_parent,
4171 .irq_set_affinity = its_vpe_set_affinity,
4172 .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity,
4173};
4174
4175static void its_configure_sgi(struct irq_data *d, bool clear)
4176{
4177 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4178 struct its_cmd_desc desc;
4179
4180 desc.its_vsgi_cmd.vpe = vpe;
4181 desc.its_vsgi_cmd.sgi = d->hwirq;
4182 desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
4183 desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
4184 desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
4185 desc.its_vsgi_cmd.clear = clear;
4186
4187 /*
4188 * GICv4.1 allows us to send VSGI commands to any ITS as long as the
4189 * destination VPE is mapped there. Since we map them eagerly at
4190 * activation time, we're pretty sure the first GICv4.1 ITS will do.
4191 */
4192 its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
4193}
4194
4195static void its_sgi_mask_irq(struct irq_data *d)
4196{
4197 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4198
4199 vpe->sgi_config[d->hwirq].enabled = false;
4200 its_configure_sgi(d, false);
4201}
4202
4203static void its_sgi_unmask_irq(struct irq_data *d)
4204{
4205 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4206
4207 vpe->sgi_config[d->hwirq].enabled = true;
4208 its_configure_sgi(d, false);
4209}
4210
4211static int its_sgi_set_affinity(struct irq_data *d,
4212 const struct cpumask *mask_val,
4213 bool force)
4214{
4215 /*
4216 * There is no notion of affinity for virtual SGIs, at least
4217	 * not on the host (since they can only target a vPE).
4218 * Tell the kernel we've done whatever it asked for.
4219 */
4220 irq_data_update_effective_affinity(d, mask_val);
4221 return IRQ_SET_MASK_OK;
4222}
4223
4224static int its_sgi_set_irqchip_state(struct irq_data *d,
4225 enum irqchip_irq_state which,
4226 bool state)
4227{
4228 if (which != IRQCHIP_STATE_PENDING)
4229 return -EINVAL;
4230
4231 if (state) {
4232 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4233 struct its_node *its = find_4_1_its();
4234 u64 val;
4235
4236 val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
4237 val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
4238 writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
4239 } else {
4240 its_configure_sgi(d, true);
4241 }
4242
4243 return 0;
4244}
4245
4246static int its_sgi_get_irqchip_state(struct irq_data *d,
4247 enum irqchip_irq_state which, bool *val)
4248{
4249 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4250 void __iomem *base;
4251 unsigned long flags;
4252 u32 count = 1000000; /* 1s! */
4253 u32 status;
4254 int cpu;
4255
4256 if (which != IRQCHIP_STATE_PENDING)
4257 return -EINVAL;
4258
4259 /*
4260 * Locking galore! We can race against two different events:
4261 *
4262 * - Concurrent vPE affinity change: we must make sure it cannot
4263 * happen, or we'll talk to the wrong redistributor. This is
4264 * identical to what happens with vLPIs.
4265 *
4266 * - Concurrent VSGIPENDR access: As it involves accessing two
4267 * MMIO registers, this must be made atomic one way or another.
4268 */
4269 cpu = vpe_to_cpuid_lock(vpe, &flags);
4270 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
4271 base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
4272 writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
4273 do {
4274 status = readl_relaxed(base + GICR_VSGIPENDR);
4275 if (!(status & GICR_VSGIPENDR_BUSY))
4276 goto out;
4277
4278 count--;
4279 if (!count) {
4280 pr_err_ratelimited("Unable to get SGI status\n");
4281 goto out;
4282 }
4283 cpu_relax();
4284 udelay(1);
4285 } while (count);
4286
4287out:
4288 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
4289 vpe_to_cpuid_unlock(vpe, flags);
4290
4291 if (!count)
4292 return -ENXIO;
4293
4294 *val = !!(status & (1 << d->hwirq));
4295
4296 return 0;
4297}

static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_cmd_info *info = vcpu_info;

	switch (info->cmd_type) {
	case PROP_UPDATE_VSGI:
		vpe->sgi_config[d->hwirq].priority = info->priority;
		vpe->sgi_config[d->hwirq].group = info->group;
		its_configure_sgi(d, false);
		return 0;

	default:
		return -EINVAL;
	}
}

static struct irq_chip its_sgi_irq_chip = {
	.name			= "GICv4.1-sgi",
	.irq_mask		= its_sgi_mask_irq,
	.irq_unmask		= its_sgi_unmask_irq,
	.irq_set_affinity	= its_sgi_set_affinity,
	.irq_set_irqchip_state	= its_sgi_set_irqchip_state,
	.irq_get_irqchip_state	= its_sgi_get_irqchip_state,
	.irq_set_vcpu_affinity	= its_sgi_set_vcpu_affinity,
};

static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct its_vpe *vpe = args;
	int i;

	/* Yes, we do want 16 SGIs */
	WARN_ON(nr_irqs != 16);

	for (i = 0; i < 16; i++) {
		vpe->sgi_config[i].priority = 0;
		vpe->sgi_config[i].enabled = false;
		vpe->sgi_config[i].group = false;

		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
					      &its_sgi_irq_chip, vpe);
		irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY);
	}

	return 0;
}

static void its_sgi_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq,
				    unsigned int nr_irqs)
{
	/* Nothing to do */
}

static int its_sgi_irq_domain_activate(struct irq_domain *domain,
				       struct irq_data *d, bool reserve)
{
	/* Write out the initial SGI configuration */
	its_configure_sgi(d, false);
	return 0;
}

static void its_sgi_irq_domain_deactivate(struct irq_domain *domain,
					  struct irq_data *d)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);

	/*
	 * The VSGI command is awkward:
	 *
	 * - To change the configuration, CLEAR must be set to false,
	 *   leaving the pending bit unchanged.
	 * - To clear the pending bit, CLEAR must be set to true, leaving
	 *   the configuration unchanged.
	 *
	 * You just can't do both at once, hence the two commands below.
	 */
	vpe->sgi_config[d->hwirq].enabled = false;
	its_configure_sgi(d, false);
	its_configure_sgi(d, true);
}

static const struct irq_domain_ops its_sgi_domain_ops = {
	.alloc		= its_sgi_irq_domain_alloc,
	.free		= its_sgi_irq_domain_free,
	.activate	= its_sgi_irq_domain_activate,
	.deactivate	= its_sgi_irq_domain_deactivate,
};

static int its_vpe_id_alloc(void)
{
	return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
}

static void its_vpe_id_free(u16 id)
{
	ida_simple_remove(&its_vpeid_ida, id);
}

static int its_vpe_init(struct its_vpe *vpe)
{
	struct page *vpt_page;
	int vpe_id;

	/* Allocate vpe_id */
	vpe_id = its_vpe_id_alloc();
	if (vpe_id < 0)
		return vpe_id;

	/* Allocate VPT */
	vpt_page = its_allocate_pending_table(GFP_KERNEL);
	if (!vpt_page) {
		its_vpe_id_free(vpe_id);
		return -ENOMEM;
	}

	if (!its_alloc_vpe_table(vpe_id)) {
		its_vpe_id_free(vpe_id);
		its_free_pending_table(vpt_page);
		return -ENOMEM;
	}

	raw_spin_lock_init(&vpe->vpe_lock);
	vpe->vpe_id = vpe_id;
	vpe->vpt_page = vpt_page;
	if (gic_rdists->has_rvpeid)
		atomic_set(&vpe->vmapp_count, 0);
	else
		vpe->vpe_proxy_event = -1;

	return 0;
}

static void its_vpe_teardown(struct its_vpe *vpe)
{
	its_vpe_db_proxy_unmap(vpe);
	its_vpe_id_free(vpe->vpe_id);
	its_free_pending_table(vpe->vpt_page);
}

static void its_vpe_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq,
				    unsigned int nr_irqs)
{
	struct its_vm *vm = domain->host_data;
	int i;

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *data = irq_domain_get_irq_data(domain,
								virq + i);
		struct its_vpe *vpe = irq_data_get_irq_chip_data(data);

		BUG_ON(vm != vpe->its_vm);

		clear_bit(data->hwirq, vm->db_bitmap);
		its_vpe_teardown(vpe);
		irq_domain_reset_irq_data(data);
	}

	if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
		its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
		its_free_prop_table(vm->vprop_page);
	}
}

static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs, void *args)
{
	struct irq_chip *irqchip = &its_vpe_irq_chip;
	struct its_vm *vm = args;
	unsigned long *bitmap;
	struct page *vprop_page;
	int base, nr_ids, i, err = 0;

	BUG_ON(!vm);

	bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
	if (!bitmap)
		return -ENOMEM;

	if (nr_ids < nr_irqs) {
		its_lpi_free(bitmap, base, nr_ids);
		return -ENOMEM;
	}

	vprop_page = its_allocate_prop_table(GFP_KERNEL);
	if (!vprop_page) {
		its_lpi_free(bitmap, base, nr_ids);
		return -ENOMEM;
	}

	vm->db_bitmap = bitmap;
	vm->db_lpi_base = base;
	vm->nr_db_lpis = nr_ids;
	vm->vprop_page = vprop_page;

	if (gic_rdists->has_rvpeid)
		irqchip = &its_vpe_4_1_irq_chip;

	for (i = 0; i < nr_irqs; i++) {
		vm->vpes[i]->vpe_db_lpi = base + i;
		err = its_vpe_init(vm->vpes[i]);
		if (err)
			break;
		err = its_irq_gic_domain_alloc(domain, virq + i,
					       vm->vpes[i]->vpe_db_lpi);
		if (err)
			break;
		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
					      irqchip, vm->vpes[i]);
		set_bit(i, bitmap);
	}

	if (err) {
		if (i > 0)
			its_vpe_irq_domain_free(domain, virq, i);

		its_lpi_free(bitmap, base, nr_ids);
		its_free_prop_table(vprop_page);
	}

	return err;
}
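
/*
 * Illustrative sketch, not part of this driver: a caller such as the
 * GICv4 layer is expected to allocate all the doorbells of a VM in one
 * go, which lands in its_vpe_irq_domain_alloc() above with the its_vm
 * as argument. 'vpe_domain' is a hypothetical handle to the VPE
 * irqdomain set up by its_init_v4().
 *
 *	vpe_base_irq = irq_domain_alloc_irqs(vpe_domain, vm->nr_vpes,
 *					     NUMA_NO_NODE, vm);
 *	if (vpe_base_irq <= 0)
 *		return -ENOMEM;	// no doorbells available
 */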

static int its_vpe_irq_domain_activate(struct irq_domain *domain,
				       struct irq_data *d, bool reserve)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_node *its;

	/*
	 * If we use the list map, we issue VMAPP on demand... Unless
	 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
	 * so that VSGIs can work.
	 */
	if (!gic_requires_eager_mapping())
		return 0;

	/* Map the VPE to the first possible CPU */
	vpe->col_idx = cpumask_first(cpu_online_mask);

	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		its_send_vmapp(its, vpe, true);
		its_send_vinvall(its, vpe);
	}

	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));

	return 0;
}

static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
					  struct irq_data *d)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_node *its;

	/*
	 * If we use the list map on GICv4.0, we unmap the VPE once no
	 * VLPIs are associated with the VM.
	 */
	if (!gic_requires_eager_mapping())
		return;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		its_send_vmapp(its, vpe, false);
	}

	/*
	 * There may be a direct read to the VPT after unmapping the
	 * vPE; to guarantee the validity of such a read, we make the
	 * VPT memory coherent with the CPU caches here.
	 */
	if (find_4_1_its() && !atomic_read(&vpe->vmapp_count))
		gic_flush_dcache_to_poc(page_address(vpe->vpt_page),
					LPI_PENDBASE_SZ);
}

static const struct irq_domain_ops its_vpe_domain_ops = {
	.alloc		= its_vpe_irq_domain_alloc,
	.free		= its_vpe_irq_domain_free,
	.activate	= its_vpe_irq_domain_activate,
	.deactivate	= its_vpe_irq_domain_deactivate,
};

static int its_force_quiescent(void __iomem *base)
{
	u32 count = 1000000;	/* 1s */
	u32 val;

	val = readl_relaxed(base + GITS_CTLR);
	/*
	 * GIC architecture specification requires the ITS to be both
	 * disabled and quiescent for writes to GITS_BASER<n> or
	 * GITS_CBASER to not have UNPREDICTABLE results.
	 */
	if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
		return 0;

	/* Disable the generation of all interrupts to this ITS */
	val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
	writel_relaxed(val, base + GITS_CTLR);

	/* Poll GITS_CTLR and wait until ITS becomes quiescent */
	while (1) {
		val = readl_relaxed(base + GITS_CTLR);
		if (val & GITS_CTLR_QUIESCENT)
			return 0;

		count--;
		if (!count)
			return -EBUSY;

		cpu_relax();
		udelay(1);
	}
}
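
/*
 * Side note: the polling loop above is an open-coded equivalent of the
 * <linux/iopoll.h> helpers, except that it returns -EBUSY rather than
 * -ETIMEDOUT. A sketch of the same wait using iopoll would be:
 *
 *	err = readl_relaxed_poll_timeout_atomic(base + GITS_CTLR, val,
 *						val & GITS_CTLR_QUIESCENT,
 *						1, USEC_PER_SEC);
 */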

static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
{
	struct its_node *its = data;

	/* erratum 22375: only alloc 8MB table size (20 bits) */
	its->typer &= ~GITS_TYPER_DEVBITS;
	its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;

	return true;
}

static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
{
	struct its_node *its = data;

	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;

	return true;
}

static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
{
	struct its_node *its = data;

	/* On QDF2400, the size of the ITE is 16 bytes */
	its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
	its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);

	return true;
}

static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
{
	struct its_node *its = its_dev->its;

	/*
	 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
	 * which maps 32-bit writes targeted at a separate window of
	 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
	 * with device ID taken from bits [device_id_bits + 1:2] of
	 * the window offset.
	 */
	return its->pre_its_base + (its_dev->device_id << 2);
}
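
/*
 * Worked example with illustrative values: with pre_its_base at
 * 0x58000000 and device_id 0x42, the doorbell handed to the device is
 * 0x58000000 + (0x42 << 2) = 0x58000108, and the pre-ITS recovers
 * device ID 0x42 from bits [device_id_bits + 1:2] of offset 0x108.
 */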

static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
{
	struct its_node *its = data;
	u32 pre_its_window[2];
	u32 ids;

	if (!fwnode_property_read_u32_array(its->fwnode_handle,
					    "socionext,synquacer-pre-its",
					    pre_its_window,
					    ARRAY_SIZE(pre_its_window))) {

		its->pre_its_base = pre_its_window[0];
		its->get_msi_base = its_irq_get_msi_base_pre_its;

		ids = ilog2(pre_its_window[1]) - 2;
		if (device_ids(its) > ids) {
			its->typer &= ~GITS_TYPER_DEVBITS;
			its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
		}

		/* the pre-ITS breaks isolation, so disable MSI remapping */
		its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
		return true;
	}
	return false;
}

static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
{
	struct its_node *its = data;

	/*
	 * Hip07 insists on using the wrong address for the VLPI
	 * page. Trick it into doing the right thing...
	 */
	its->vlpi_redist_offset = SZ_128K;
	return true;
}

static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
	{
		.desc	= "ITS: Cavium errata 22375, 24313",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_22375,
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23144
	{
		.desc	= "ITS: Cavium erratum 23144",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_23144,
	},
#endif
#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
	{
		.desc	= "ITS: QDF2400 erratum 0065",
		.iidr	= 0x00001070,	/* QDF2400 ITS rev 1.x */
		.mask	= 0xffffffff,
		.init	= its_enable_quirk_qdf2400_e0065,
	},
#endif
#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
	{
		/*
		 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
		 * implementation, but with a 'pre-ITS' added that requires
		 * special handling in software.
		 */
		.desc	= "ITS: Socionext Synquacer pre-ITS",
		.iidr	= 0x0001143b,
		.mask	= 0xffffffff,
		.init	= its_enable_quirk_socionext_synquacer,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161600802
	{
		.desc	= "ITS: Hip07 erratum 161600802",
		.iidr	= 0x00000004,
		.mask	= 0xffffffff,
		.init	= its_enable_quirk_hip07_161600802,
	},
#endif
	{
	}
};

static void its_enable_quirks(struct its_node *its)
{
	u32 iidr = readl_relaxed(its->base + GITS_IIDR);

	gic_enable_quirks(iidr, its_quirks, its);
}
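
/*
 * Illustrative example of how the table above is matched: the probed
 * GITS_IIDR value is masked before being compared against the entry.
 * For the ThunderX entries, mask 0xffff0fff discards the revision
 * field, so
 *
 *	(iidr & 0xffff0fff) == 0xa100034c
 *
 * holds for any pass 1.x part, whatever its minor revision.
 */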

static int its_save_disable(void)
{
	struct its_node *its;
	int err = 0;

	raw_spin_lock(&its_lock);
	list_for_each_entry(its, &its_nodes, entry) {
		void __iomem *base;

		base = its->base;
		its->ctlr_save = readl_relaxed(base + GITS_CTLR);
		err = its_force_quiescent(base);
		if (err) {
			pr_err("ITS@%pa: failed to quiesce: %d\n",
			       &its->phys_base, err);
			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
			goto err;
		}

		its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
	}

err:
	if (err) {
		list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
			void __iomem *base;

			base = its->base;
			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
		}
	}
	raw_spin_unlock(&its_lock);

	return err;
}

static void its_restore_enable(void)
{
	struct its_node *its;
	int ret;

	raw_spin_lock(&its_lock);
	list_for_each_entry(its, &its_nodes, entry) {
		void __iomem *base;
		int i;

		base = its->base;

		/*
		 * Make sure that the ITS is disabled. If it fails to quiesce,
		 * don't restore it since writing to CBASER or BASER<n>
		 * registers is undefined according to the GIC v3 ITS
		 * Specification.
		 *
		 * Firmware resuming with the ITS enabled is terminally broken.
		 */
		WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
		ret = its_force_quiescent(base);
		if (ret) {
			pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
			       &its->phys_base, ret);
			continue;
		}

		gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);

		/*
		 * Writing CBASER resets CREADR to 0, so make CWRITER and
		 * cmd_write line up with it.
		 */
		its->cmd_write = its->cmd_base;
		gits_write_cwriter(0, base + GITS_CWRITER);

		/* Restore GITS_BASER from the value cache. */
		for (i = 0; i < GITS_BASER_NR_REGS; i++) {
			struct its_baser *baser = &its->tables[i];

			if (!(baser->val & GITS_BASER_VALID))
				continue;

			its_write_baser(its, baser, baser->val);
		}
		writel_relaxed(its->ctlr_save, base + GITS_CTLR);

		/*
		 * Reinit the collection if it's stored in the ITS. This is
		 * indicated by the col_id being less than the HCC field
		 * (CID < HCC), as specified in the GICv3 documentation.
		 */
		if (its->collections[smp_processor_id()].col_id <
		    GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
			its_cpu_init_collection(its);
	}
	raw_spin_unlock(&its_lock);
}

static struct syscore_ops its_syscore_ops = {
	.suspend = its_save_disable,
	.resume = its_restore_enable,
};

static void __init __iomem *its_map_one(struct resource *res, int *err)
{
	void __iomem *its_base;
	u32 val;

	its_base = ioremap(res->start, SZ_64K);
	if (!its_base) {
		pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
		*err = -ENOMEM;
		return NULL;
	}

	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (val != 0x30 && val != 0x40) {
		pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
		*err = -ENODEV;
		goto out_unmap;
	}

	*err = its_force_quiescent(its_base);
	if (*err) {
		pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
		goto out_unmap;
	}

	return its_base;

out_unmap:
	iounmap(its_base);
	return NULL;
}

static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
{
	struct irq_domain *inner_domain;
	struct msi_domain_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
	if (!inner_domain) {
		kfree(info);
		return -ENOMEM;
	}

	inner_domain->parent = its_parent;
	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
	inner_domain->flags |= its->msi_domain_flags;
	info->ops = &its_msi_domain_ops;
	info->data = its;
	inner_domain->host_data = info;

	return 0;
}

static int its_init_vpe_domain(void)
{
	struct its_node *its;
	u32 devid;
	int entries;

	if (gic_rdists->has_direct_lpi) {
		pr_info("ITS: Using DirectLPI for VPE invalidation\n");
		return 0;
	}

	/* Any ITS will do, even if not v4 */
	its = list_first_entry(&its_nodes, struct its_node, entry);

	entries = roundup_pow_of_two(nr_cpu_ids);
	vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
				 GFP_KERNEL);
	if (!vpe_proxy.vpes)
		return -ENOMEM;

	/* Use the last possible DevID */
	devid = GENMASK(device_ids(its) - 1, 0);
	vpe_proxy.dev = its_create_device(its, devid, entries, false);
	if (!vpe_proxy.dev) {
		kfree(vpe_proxy.vpes);
		pr_err("ITS: Can't allocate GICv4 proxy device\n");
		return -ENOMEM;
	}

	BUG_ON(entries > vpe_proxy.dev->nr_ites);

	raw_spin_lock_init(&vpe_proxy.lock);
	vpe_proxy.next_victim = 0;
	pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
		devid, vpe_proxy.dev->nr_ites);

	return 0;
}

static int __init its_compute_its_list_map(struct resource *res,
					   void __iomem *its_base)
{
	int its_number;
	u32 ctlr;

	/*
	 * This is assumed to be done early enough that we're
	 * guaranteed to be single-threaded, hence no locking. Should
	 * that assumption ever stop holding, locking will need to be
	 * added here.
	 */
	its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
	if (its_number >= GICv4_ITS_LIST_MAX) {
		pr_err("ITS@%pa: No ITSList entry available!\n",
		       &res->start);
		return -EINVAL;
	}

	ctlr = readl_relaxed(its_base + GITS_CTLR);
	ctlr &= ~GITS_CTLR_ITS_NUMBER;
	ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
	writel_relaxed(ctlr, its_base + GITS_CTLR);
	ctlr = readl_relaxed(its_base + GITS_CTLR);
	if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
		its_number = ctlr & GITS_CTLR_ITS_NUMBER;
		its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
	}

	if (test_and_set_bit(its_number, &its_list_map)) {
		pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
		       &res->start, its_number);
		return -EINVAL;
	}

	return its_number;
}
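
/*
 * Illustrative example: on a two-ITS GICv4.0 system without VMOVP, the
 * first ITS to probe claims bit 0 of its_list_map and programs 0 into
 * GITS_CTLR.ITS_Number, the second claims bit 1. VMOVP commands later
 * use these numbers to identify the ITSs taking part in a vPE move.
 * If the field turns out to be read-only, the value read back is used
 * instead, and a duplicate number is treated as fatal for this ITS.
 */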

static int __init its_probe_one(struct resource *res,
				struct fwnode_handle *handle, int numa_node)
{
	struct its_node *its;
	void __iomem *its_base;
	u64 baser, tmp, typer;
	struct page *page;
	u32 ctlr;
	int err;

	its_base = its_map_one(res, &err);
	if (!its_base)
		return err;

	pr_info("ITS %pR\n", res);

	its = kzalloc(sizeof(*its), GFP_KERNEL);
	if (!its) {
		err = -ENOMEM;
		goto out_unmap;
	}

	raw_spin_lock_init(&its->lock);
	mutex_init(&its->dev_alloc_lock);
	INIT_LIST_HEAD(&its->entry);
	INIT_LIST_HEAD(&its->its_device_list);
	typer = gic_read_typer(its_base + GITS_TYPER);
	its->typer = typer;
	its->base = its_base;
	its->phys_base = res->start;
	if (is_v4(its)) {
		if (!(typer & GITS_TYPER_VMOVP)) {
			err = its_compute_its_list_map(res, its_base);
			if (err < 0)
				goto out_free_its;

			its->list_nr = err;

			pr_info("ITS@%pa: Using ITS number %d\n",
				&res->start, err);
		} else {
			pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
		}

		if (is_v4_1(its)) {
			u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);

			its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K);
			if (!its->sgir_base) {
				err = -ENOMEM;
				goto out_free_its;
			}

			its->mpidr = readl_relaxed(its_base + GITS_MPIDR);

			pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
				&res->start, its->mpidr, svpet);
		}
	}

	its->numa_node = numa_node;

	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
				get_order(ITS_CMD_QUEUE_SZ));
	if (!page) {
		err = -ENOMEM;
		goto out_unmap_sgir;
	}
	its->cmd_base = (void *)page_address(page);
	its->cmd_write = its->cmd_base;
	its->fwnode_handle = handle;
	its->get_msi_base = its_irq_get_msi_base;
	its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;

	its_enable_quirks(its);

	err = its_alloc_tables(its);
	if (err)
		goto out_free_cmd;

	err = its_alloc_collections(its);
	if (err)
		goto out_free_tables;

	baser = (virt_to_phys(its->cmd_base)	|
		 GITS_CBASER_RaWaWb		|
		 GITS_CBASER_InnerShareable	|
		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
		 GITS_CBASER_VALID);

	gits_write_cbaser(baser, its->base + GITS_CBASER);
	tmp = gits_read_cbaser(its->base + GITS_CBASER);

	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
				   GITS_CBASER_CACHEABILITY_MASK);
			baser |= GITS_CBASER_nC;
			gits_write_cbaser(baser, its->base + GITS_CBASER);
		}
		pr_info("ITS: using cache flushing for cmd queue\n");
		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
	}

	gits_write_cwriter(0, its->base + GITS_CWRITER);
	ctlr = readl_relaxed(its->base + GITS_CTLR);
	ctlr |= GITS_CTLR_ENABLE;
	if (is_v4(its))
		ctlr |= GITS_CTLR_ImDe;
	writel_relaxed(ctlr, its->base + GITS_CTLR);

	err = its_init_domain(handle, its);
	if (err)
		goto out_free_tables;

	raw_spin_lock(&its_lock);
	list_add(&its->entry, &its_nodes);
	raw_spin_unlock(&its_lock);

	return 0;

out_free_tables:
	its_free_tables(its);
out_free_cmd:
	free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_unmap_sgir:
	if (its->sgir_base)
		iounmap(its->sgir_base);
out_free_its:
	kfree(its);
out_unmap:
	iounmap(its_base);
	pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
	return err;
}
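
/*
 * Worked example for the CBASER value built above, assuming the usual
 * 64kB command queue (i.e. ITS_CMD_QUEUE_SZ == SZ_64K): the Size field
 * encodes the number of 4kB pages minus one,
 *
 *	ITS_CMD_QUEUE_SZ / SZ_4K - 1 = 64K / 4K - 1 = 15
 *
 * which is OR-ed with the queue's physical address, the RaWaWb
 * cacheability and InnerShareable shareability attributes, and the
 * Valid bit.
 */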

static bool gic_rdists_supports_plpis(void)
{
	return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}

static int redist_disable_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	u64 timeout = USEC_PER_SEC;
	u64 val;

	if (!gic_rdists_supports_plpis()) {
		pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
		return -ENXIO;
	}

	val = readl_relaxed(rbase + GICR_CTLR);
	if (!(val & GICR_CTLR_ENABLE_LPIS))
		return 0;

	/*
	 * If coming via a CPU hotplug event, we don't need to disable
	 * LPIs before trying to re-enable them. They are already
	 * configured and all is well in the world.
	 *
	 * If running with preallocated tables, there is nothing to do.
	 */
	if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) ||
	    (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
		return 0;

	/*
	 * From that point on, we only try to do some damage control.
	 */
	pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
		smp_processor_id());
	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);

	/* Disable LPIs */
	val &= ~GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/* Make sure any change to GICR_CTLR is observable by the GIC */
	dsb(sy);

	/*
	 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
	 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
	 * Error out if we time out waiting for RWP to clear.
	 */
	while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
		if (!timeout) {
			pr_err("CPU%d: Timeout while disabling LPIs\n",
			       smp_processor_id());
			return -ETIMEDOUT;
		}
		udelay(1);
		timeout--;
	}

	/*
	 * After it has been written to 1, it is IMPLEMENTATION
	 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
	 * cleared to 0. Error out if clearing the bit failed.
	 */
	if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
		pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
		return -EBUSY;
	}

	return 0;
}

int its_cpu_init(void)
{
	if (!list_empty(&its_nodes)) {
		int ret;

		ret = redist_disable_lpis();
		if (ret)
			return ret;

		its_cpu_init_lpis();
		its_cpu_init_collections();
	}

	return 0;
}

static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work)
{
	cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state);
	gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
}

static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work,
		    rdist_memreserve_cpuhp_cleanup_workfn);

static int its_cpu_memreserve_lpi(unsigned int cpu)
{
	struct page *pend_page;
	int ret = 0;

	/* This gets to run exactly once per CPU */
	if (gic_data_rdist()->flags & RD_LOCAL_MEMRESERVE_DONE)
		return 0;

	pend_page = gic_data_rdist()->pend_page;
	if (WARN_ON(!pend_page)) {
		ret = -ENOMEM;
		goto out;
	}
	/*
	 * If the pending table was pre-programmed, free the memory we
	 * preemptively allocated. Otherwise, reserve that memory for
	 * later kexecs.
	 */
	if (gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED) {
		its_free_pending_table(pend_page);
		gic_data_rdist()->pend_page = NULL;
	} else {
		phys_addr_t paddr = page_to_phys(pend_page);
		WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
	}

out:
	/* Last CPU being brought up gets to issue the cleanup */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask))
		schedule_work(&rdist_memreserve_cpuhp_cleanup_work);

	gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE;
	return ret;
}

/* Mark all the BASER registers as invalid before they get reprogrammed */
static int __init its_reset_one(struct resource *res)
{
	void __iomem *its_base;
	int err, i;

	its_base = its_map_one(res, &err);
	if (!its_base)
		return err;

	for (i = 0; i < GITS_BASER_NR_REGS; i++)
		gits_write_baser(0, its_base + GITS_BASER + (i << 3));

	iounmap(its_base);
	return 0;
}

static const struct of_device_id its_device_id[] = {
	{	.compatible	= "arm,gic-v3-its",	},
	{},
};

static int __init its_of_probe(struct device_node *node)
{
	struct device_node *np;
	struct resource res;

	/*
	 * Make sure *all* the ITS are reset before we probe any, as
	 * they may be sharing memory. If any of the ITS fails to
	 * reset, don't even try to go any further, as this could
	 * result in something even worse.
	 */
	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		int err;

		if (!of_device_is_available(np) ||
		    !of_property_read_bool(np, "msi-controller") ||
		    of_address_to_resource(np, 0, &res))
			continue;

		err = its_reset_one(&res);
		if (err)
			return err;
	}

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		if (!of_device_is_available(np))
			continue;
		if (!of_property_read_bool(np, "msi-controller")) {
			pr_warn("%pOF: no msi-controller property, ITS ignored\n",
				np);
			continue;
		}

		if (of_address_to_resource(np, 0, &res)) {
			pr_warn("%pOF: no regs?\n", np);
			continue;
		}

		its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
	}
	return 0;
}
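
/*
 * Minimal sketch of a device tree node matched by the probe loop above
 * (names and addresses are illustrative, not from a real platform):
 *
 *	its: msi-controller@2f020000 {
 *		compatible = "arm,gic-v3-its";
 *		msi-controller;
 *		#msi-cells = <1>;
 *		reg = <0x0 0x2f020000 0x0 0x20000>;
 *	};
 *
 * Nodes that are disabled, lack the "msi-controller" property or have
 * no usable reg window are skipped.
 */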

#ifdef CONFIG_ACPI

#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)

#ifdef CONFIG_ACPI_NUMA
struct its_srat_map {
	/* numa node id */
	u32	numa_node;
	/* GIC ITS ID */
	u32	its_id;
};

static struct its_srat_map *its_srat_maps __initdata;
static int its_in_srat __initdata;

static int __init acpi_get_its_numa_node(u32 its_id)
{
	int i;

	for (i = 0; i < its_in_srat; i++) {
		if (its_id == its_srat_maps[i].its_id)
			return its_srat_maps[i].numa_node;
	}
	return NUMA_NO_NODE;
}

static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	return 0;
}

static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	int node;
	struct acpi_srat_gic_its_affinity *its_affinity;

	its_affinity = (struct acpi_srat_gic_its_affinity *)header;
	if (!its_affinity)
		return -EINVAL;

	if (its_affinity->header.length < sizeof(*its_affinity)) {
		pr_err("SRAT: Invalid header length %d in ITS affinity\n",
			its_affinity->header.length);
		return -EINVAL;
	}

	/*
	 * Note that in theory a new proximity node could be created by this
	 * entry as it is an SRAT resource allocation structure.
	 * We do not currently support doing so.
	 */
	node = pxm_to_node(its_affinity->proximity_domain);

	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
		pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
		return 0;
	}

	its_srat_maps[its_in_srat].numa_node = node;
	its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
	its_in_srat++;
	pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
		its_affinity->proximity_domain, its_affinity->its_id, node);

	return 0;
}

static void __init acpi_table_parse_srat_its(void)
{
	int count;

	count = acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_match_srat_its, 0);
	if (count <= 0)
		return;

	its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
				      GFP_KERNEL);
	if (!its_srat_maps)
		return;

	acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_parse_srat_its, 0);
}

/* free the its_srat_maps after ITS probing */
static void __init acpi_its_srat_maps_free(void)
{
	kfree(its_srat_maps);
}
#else
static void __init acpi_table_parse_srat_its(void)	{ }
static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
static void __init acpi_its_srat_maps_free(void) { }
#endif

static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	struct acpi_madt_generic_translator *its_entry;
	struct fwnode_handle *dom_handle;
	struct resource res;
	int err;

	its_entry = (struct acpi_madt_generic_translator *)header;
	memset(&res, 0, sizeof(res));
	res.start = its_entry->base_address;
	res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
	res.flags = IORESOURCE_MEM;

	dom_handle = irq_domain_alloc_fwnode(&res.start);
	if (!dom_handle) {
		pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
		       &res.start);
		return -ENOMEM;
	}

	err = iort_register_domain_token(its_entry->translation_id, res.start,
					 dom_handle);
	if (err) {
		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
		       &res.start, its_entry->translation_id);
		goto dom_err;
	}

	err = its_probe_one(&res, dom_handle,
			acpi_get_its_numa_node(its_entry->translation_id));
	if (!err)
		return 0;

	iort_deregister_domain_token(its_entry->translation_id);
dom_err:
	irq_domain_free_fwnode(dom_handle);
	return err;
}

static int __init its_acpi_reset(union acpi_subtable_headers *header,
				 const unsigned long end)
{
	struct acpi_madt_generic_translator *its_entry;
	struct resource res;

	its_entry = (struct acpi_madt_generic_translator *)header;
	res = (struct resource) {
		.start	= its_entry->base_address,
		.end	= its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	};

	return its_reset_one(&res);
}

static void __init its_acpi_probe(void)
{
	acpi_table_parse_srat_its();
	/*
	 * Make sure *all* the ITS are reset before we probe any, as
	 * they may be sharing memory. If any of the ITS fails to
	 * reset, don't even try to go any further, as this could
	 * result in something even worse.
	 */
	if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
				  its_acpi_reset, 0) > 0)
		acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
				      gic_acpi_parse_madt_its, 0);
	acpi_its_srat_maps_free();
}
#else
static void __init its_acpi_probe(void) { }
#endif

int __init its_lpi_memreserve_init(void)
{
	int state;

	if (!efi_enabled(EFI_CONFIG_TABLES))
		return 0;

	if (list_empty(&its_nodes))
		return 0;

	gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				  "irqchip/arm/gicv3/memreserve:online",
				  its_cpu_memreserve_lpi,
				  NULL);
	if (state < 0)
		return state;

	gic_rdists->cpuhp_memreserve_state = state;

	return 0;
}

int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
		    struct irq_domain *parent_domain)
{
	struct device_node *of_node;
	struct its_node *its;
	bool has_v4 = false;
	bool has_v4_1 = false;
	int err;

	gic_rdists = rdists;

	its_parent = parent_domain;
	of_node = to_of_node(handle);
	if (of_node)
		its_of_probe(of_node);
	else
		its_acpi_probe();

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	err = allocate_lpi_tables();
	if (err)
		return err;

	list_for_each_entry(its, &its_nodes, entry) {
		has_v4 |= is_v4(its);
		has_v4_1 |= is_v4_1(its);
	}

	/* Don't bother with inconsistent systems */
	if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
		rdists->has_rvpeid = false;

	if (has_v4 & rdists->has_vlpis) {
		const struct irq_domain_ops *sgi_ops;

		if (has_v4_1)
			sgi_ops = &its_sgi_domain_ops;
		else
			sgi_ops = NULL;

		if (its_init_vpe_domain() ||
		    its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
			rdists->has_vlpis = false;
			pr_err("ITS: Disabling GICv4 support\n");
		}
	}

	register_syscore_ops(&its_syscore_ops);

	return 0;
}