1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * OF helpers for IOMMU
4 *
5 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
6 */
7
8#include <linux/export.h>
9#include <linux/iommu.h>
10#include <linux/limits.h>
11#include <linux/module.h>
12#include <linux/of.h>
13#include <linux/of_iommu.h>
14#include <linux/of_pci.h>
15#include <linux/pci.h>
16#include <linux/slab.h>
17#include <linux/fsl/mc.h>
18
19#define NO_IOMMU 1
20
21static int of_iommu_xlate(struct device *dev,
22 struct of_phandle_args *iommu_spec)
23{
24 const struct iommu_ops *ops;
25 struct fwnode_handle *fwnode = &iommu_spec->np->fwnode;
26 int ret;
27
28 ops = iommu_ops_from_fwnode(fwnode);
29 if ((ops && !ops->of_xlate) ||
30 !of_device_is_available(iommu_spec->np))
31 return NO_IOMMU;
32
33 ret = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
34 if (ret)
35 return ret;
36 /*
37 * The otherwise-empty fwspec handily serves to indicate the specific
38 * IOMMU device we're waiting for, which will be useful if we ever get
39 * a proper probe-ordering dependency mechanism in future.
40 */
41 if (!ops)
42 return driver_deferred_probe_check_state(dev);
43
44 if (!try_module_get(ops->owner))
45 return -ENODEV;
46
47 ret = ops->of_xlate(dev, iommu_spec);
48 module_put(ops->owner);
49 return ret;
50}
51
52static int of_iommu_configure_dev_id(struct device_node *master_np,
53 struct device *dev,
54 const u32 *id)
55{
56 struct of_phandle_args iommu_spec = { .args_count = 1 };
57 int err;
58
59 err = of_map_id(master_np, *id, "iommu-map",
60 "iommu-map-mask", &iommu_spec.np,
61 iommu_spec.args);
62 if (err)
63 return err == -ENODEV ? NO_IOMMU : err;
64
65 err = of_iommu_xlate(dev, &iommu_spec);
66 of_node_put(iommu_spec.np);
67 return err;
68}
69
70static int of_iommu_configure_dev(struct device_node *master_np,
71 struct device *dev)
72{
73 struct of_phandle_args iommu_spec;
74 int err = NO_IOMMU, idx = 0;
75
76 while (!of_parse_phandle_with_args(master_np, "iommus",
77 "#iommu-cells",
78 idx, &iommu_spec)) {
79 err = of_iommu_xlate(dev, &iommu_spec);
80 of_node_put(iommu_spec.np);
81 idx++;
82 if (err)
83 break;
84 }
85
86 return err;
87}
88
/* Context handed to of_pci_iommu_init() for each PCI DMA alias. */
struct of_pci_iommu_alias_info {
	struct device *dev;	/* the PCI device being configured */
	struct device_node *np;	/* node holding the "iommu-map" property */
};
93
94static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
95{
96 struct of_pci_iommu_alias_info *info = data;
97 u32 input_id = alias;
98
99 return of_iommu_configure_dev_id(info->np, info->dev, &input_id);
100}
101
102static int of_iommu_configure_device(struct device_node *master_np,
103 struct device *dev, const u32 *id)
104{
105 return (id) ? of_iommu_configure_dev_id(master_np, dev, id) :
106 of_iommu_configure_dev(master_np, dev);
107}
108
/**
 * of_iommu_configure - resolve the IOMMU(s) for a device from devicetree
 * @dev: device to configure
 * @master_np: DT node carrying the "iommus"/"iommu-map" description
 * @id: optional fixed master ID for "iommu-map" lookup; NULL to walk all
 *      "iommus" entries instead (ignored for PCI devices, whose aliases
 *      are enumerated via pci_for_each_dma_alias())
 *
 * Return: the device's iommu_ops on success, NULL when there is no
 * (usable) IOMMU or on any non-fatal error, or ERR_PTR(-EPROBE_DEFER)
 * when the IOMMU driver has not probed yet.
 */
const struct iommu_ops *of_iommu_configure(struct device *dev,
					   struct device_node *master_np,
					   const u32 *id)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int err = NO_IOMMU;

	if (!master_np)
		return NULL;

	if (fwspec) {
		/* A populated fwspec means a previous pass already finished. */
		if (fwspec->ops)
			return fwspec->ops;

		/* In the deferred case, start again from scratch */
		iommu_fwspec_free(dev);
	}

	/*
	 * We don't currently walk up the tree looking for a parent IOMMU.
	 * See the `Notes:' section of
	 * Documentation/devicetree/bindings/iommu/iommu.txt
	 */
	if (dev_is_pci(dev)) {
		struct of_pci_iommu_alias_info info = {
			.dev = dev,
			.np = master_np,
		};

		/* PCI RIDs can alias; configure every DMA alias of dev. */
		pci_request_acs();
		err = pci_for_each_dma_alias(to_pci_dev(dev),
					     of_pci_iommu_init, &info);
	} else {
		err = of_iommu_configure_device(master_np, dev, id);
	}

	/*
	 * Two success conditions can be represented by non-negative err here:
	 * >0 : there is no IOMMU, or one was unavailable for non-fatal reasons
	 * 0 : we found an IOMMU, and dev->fwspec is initialised appropriately
	 * <0 : any actual error
	 */
	if (!err) {
		/* The fwspec pointer changed, read it again */
		fwspec = dev_iommu_fwspec_get(dev);
		ops = fwspec->ops;
	}
	/*
	 * If we have reason to believe the IOMMU driver missed the initial
	 * probe for dev, replay it to get things in order.
	 */
	if (!err && dev->bus && !device_iommu_mapped(dev))
		err = iommu_probe_device(dev);

	/* Ignore all other errors apart from EPROBE_DEFER */
	if (err == -EPROBE_DEFER) {
		ops = ERR_PTR(err);
	} else if (err < 0) {
		dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
		ops = NULL;
	}

	return ops;
}
1/*
2 * OF helpers for IOMMU
3 *
4 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20#include <linux/export.h>
21#include <linux/iommu.h>
22#include <linux/limits.h>
23#include <linux/of.h>
24#include <linux/of_iommu.h>
25#include <linux/slab.h>
26
/*
 * Zero-filled terminator for the linker-assembled table of IOMMU
 * of_device_id entries; marks the end of __iommu_of_table.
 */
static const struct of_device_id __iommu_of_table_sentinel
	__used __section(__iommu_of_table_end);
29
30/**
31 * of_get_dma_window - Parse *dma-window property and returns 0 if found.
32 *
33 * @dn: device node
34 * @prefix: prefix for property name if any
35 * @index: index to start to parse
36 * @busno: Returns busno if supported. Otherwise pass NULL
37 * @addr: Returns address that DMA starts
38 * @size: Returns the range that DMA can handle
39 *
40 * This supports different formats flexibly. "prefix" can be
41 * configured if any. "busno" and "index" are optionally
42 * specified. Set 0(or NULL) if not used.
43 */
44int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
45 unsigned long *busno, dma_addr_t *addr, size_t *size)
46{
47 const __be32 *dma_window, *end;
48 int bytes, cur_index = 0;
49 char propname[NAME_MAX], addrname[NAME_MAX], sizename[NAME_MAX];
50
51 if (!dn || !addr || !size)
52 return -EINVAL;
53
54 if (!prefix)
55 prefix = "";
56
57 snprintf(propname, sizeof(propname), "%sdma-window", prefix);
58 snprintf(addrname, sizeof(addrname), "%s#dma-address-cells", prefix);
59 snprintf(sizename, sizeof(sizename), "%s#dma-size-cells", prefix);
60
61 dma_window = of_get_property(dn, propname, &bytes);
62 if (!dma_window)
63 return -ENODEV;
64 end = dma_window + bytes / sizeof(*dma_window);
65
66 while (dma_window < end) {
67 u32 cells;
68 const void *prop;
69
70 /* busno is one cell if supported */
71 if (busno)
72 *busno = be32_to_cpup(dma_window++);
73
74 prop = of_get_property(dn, addrname, NULL);
75 if (!prop)
76 prop = of_get_property(dn, "#address-cells", NULL);
77
78 cells = prop ? be32_to_cpup(prop) : of_n_addr_cells(dn);
79 if (!cells)
80 return -EINVAL;
81 *addr = of_read_number(dma_window, cells);
82 dma_window += cells;
83
84 prop = of_get_property(dn, sizename, NULL);
85 cells = prop ? be32_to_cpup(prop) : of_n_size_cells(dn);
86 if (!cells)
87 return -EINVAL;
88 *size = of_read_number(dma_window, cells);
89 dma_window += cells;
90
91 if (cur_index++ == index)
92 break;
93 }
94 return 0;
95}
96EXPORT_SYMBOL_GPL(of_get_dma_window);
97
/* One registered IOMMU instance: links a DT node to its driver's ops. */
struct of_iommu_node {
	struct list_head list;		/* entry in of_iommu_list */
	struct device_node *np;		/* the IOMMU's DT node (refcounted) */
	struct iommu_ops *ops;		/* ops registered for that node */
};
/* Registry populated by of_iommu_set_ops(), searched by of_iommu_get_ops(). */
static LIST_HEAD(of_iommu_list);
/* Serialises additions to and lookups in of_iommu_list. */
static DEFINE_SPINLOCK(of_iommu_lock);
105
106void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops)
107{
108 struct of_iommu_node *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
109
110 if (WARN_ON(!iommu))
111 return;
112
113 of_node_get(np);
114 INIT_LIST_HEAD(&iommu->list);
115 iommu->np = np;
116 iommu->ops = ops;
117 spin_lock(&of_iommu_lock);
118 list_add_tail(&iommu->list, &of_iommu_list);
119 spin_unlock(&of_iommu_lock);
120}
121
122struct iommu_ops *of_iommu_get_ops(struct device_node *np)
123{
124 struct of_iommu_node *node;
125 struct iommu_ops *ops = NULL;
126
127 spin_lock(&of_iommu_lock);
128 list_for_each_entry(node, &of_iommu_list, list)
129 if (node->np == np) {
130 ops = node->ops;
131 break;
132 }
133 spin_unlock(&of_iommu_lock);
134 return ops;
135}
136
137struct iommu_ops *of_iommu_configure(struct device *dev,
138 struct device_node *master_np)
139{
140 struct of_phandle_args iommu_spec;
141 struct device_node *np;
142 struct iommu_ops *ops = NULL;
143 int idx = 0;
144
145 /*
146 * We can't do much for PCI devices without knowing how
147 * device IDs are wired up from the PCI bus to the IOMMU.
148 */
149 if (dev_is_pci(dev))
150 return NULL;
151
152 /*
153 * We don't currently walk up the tree looking for a parent IOMMU.
154 * See the `Notes:' section of
155 * Documentation/devicetree/bindings/iommu/iommu.txt
156 */
157 while (!of_parse_phandle_with_args(master_np, "iommus",
158 "#iommu-cells", idx,
159 &iommu_spec)) {
160 np = iommu_spec.np;
161 ops = of_iommu_get_ops(np);
162
163 if (!ops || !ops->of_xlate || ops->of_xlate(dev, &iommu_spec))
164 goto err_put_node;
165
166 of_node_put(np);
167 idx++;
168 }
169
170 return ops;
171
172err_put_node:
173 of_node_put(np);
174 return NULL;
175}
176
177void __init of_iommu_init(void)
178{
179 struct device_node *np;
180 const struct of_device_id *match, *matches = &__iommu_of_table;
181
182 for_each_matching_node_and_match(np, matches, &match) {
183 const of_iommu_init_fn init_fn = match->data;
184
185 if (init_fn(np))
186 pr_err("Failed to initialise IOMMU %s\n",
187 of_node_full_name(np));
188 }
189}