// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_reserved_mem.h>
#include <linux/dma-direct.h> /* for bus_dma_region */
#include <linux/dma-map-ops.h>
#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/platform_device.h>

#include <asm/errno.h>
#include "of_private.h"

/**
 * of_match_device - Tell if a struct device matches an of_device_id list
 * @matches: array of of_device_id match structures to search in
 * @dev: the OF device structure to match against
 *
 * Used by a driver to check whether a platform_device present in the
 * system is in its list of supported devices.
 */
const struct of_device_id *of_match_device(const struct of_device_id *matches,
					   const struct device *dev)
{
	if (!matches || !dev->of_node || dev->of_node_reused)
		return NULL;
	return of_match_node(matches, dev->of_node);
}
EXPORT_SYMBOL(of_match_device);
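
/*
 * Example (illustrative sketch, not part of this file): a typical platform
 * driver matching its of_device_id table in probe. All "foo" names are
 * hypothetical.
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo" },
 *		{ }
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		const struct of_device_id *match;
 *
 *		match = of_match_device(foo_of_match, &pdev->dev);
 *		if (!match)
 *			return -ENODEV;
 *		return 0;
 *	}
 */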

static void
of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
{
	struct device_node *node, *of_node = dev->of_node;
	int count, i;

	if (!IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL))
		return;

	count = of_property_count_elems_of_size(of_node, "memory-region",
						sizeof(u32));
	/*
	 * If dev->of_node doesn't exist or doesn't contain memory-region, try
	 * the OF node having DMA configuration.
	 */
	if (count <= 0) {
		of_node = np;
		count = of_property_count_elems_of_size(
			of_node, "memory-region", sizeof(u32));
	}

	for (i = 0; i < count; i++) {
		node = of_parse_phandle(of_node, "memory-region", i);
		/*
		 * There might be multiple memory regions, but only one
		 * restricted-dma-pool region is allowed.
		 */
		if (of_device_is_compatible(node, "restricted-dma-pool") &&
		    of_device_is_available(node)) {
			of_node_put(node);
			break;
		}
		of_node_put(node);
	}

	/*
	 * Attempt to initialize a restricted-dma-pool region if one was found.
	 * Note that count can hold a negative error code.
	 */
	if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
		dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
}
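
/*
 * Illustrative devicetree fragment (an assumption for documentation, not
 * taken from this file): a device whose "memory-region" references a
 * reserved-memory node compatible with "restricted-dma-pool", which is
 * what the loop above searches for.
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		restricted_dma: restricted-dma@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x0 0x50000000 0x0 0x400000>;
 *		};
 *	};
 *
 *	dev@10000000 {
 *		memory-region = <&restricted_dma>;
 *	};
 */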

/**
 * of_dma_configure_id - Setup DMA configuration
 * @dev: Device to apply DMA configuration
 * @np: Pointer to OF node having DMA configuration
 * @force_dma: Whether device is to be set up by of_dma_configure() even if
 *	       DMA capability is not explicitly described by firmware.
 * @id: Optional const pointer value input id
 *
 * Try to get the device's DMA configuration from DT and update it
 * accordingly.
 *
 * If platform code needs to use its own special DMA configuration, it
 * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events
 * to fix up DMA configuration.
 */
int of_dma_configure_id(struct device *dev, struct device_node *np,
			bool force_dma, const u32 *id)
{
	const struct bus_dma_region *map = NULL;
	struct device_node *bus_np;
	u64 mask, end = 0;
	bool coherent, set_map = false;
	int ret;

	if (np == dev->of_node)
		bus_np = __of_get_dma_parent(np);
	else
		bus_np = of_node_get(np);

	ret = of_dma_get_range(bus_np, &map);
	of_node_put(bus_np);
	if (ret < 0) {
		/*
		 * For legacy reasons, we have to assume some devices need
		 * DMA configuration regardless of whether "dma-ranges" is
		 * correctly specified or not.
		 */
		if (!force_dma)
			return ret == -ENODEV ? 0 : ret;
	} else {
		/* Determine the overall bounds of all DMA regions */
		end = dma_range_map_max(map);
		set_map = true;
	}

	/*
	 * If @dev is expected to be DMA-capable then the bus code that created
	 * it should have initialised its dma_mask pointer by this point. For
	 * now, we'll continue the legacy behaviour of coercing it to the
	 * coherent mask if not, but we'll no longer do so quietly.
	 */
	if (!dev->dma_mask) {
		dev_warn(dev, "DMA mask not set\n");
		dev->dma_mask = &dev->coherent_dma_mask;
	}

	if (!end && dev->coherent_dma_mask)
		end = dev->coherent_dma_mask;
	else if (!end)
		end = (1ULL << 32) - 1;

	/*
	 * Limit coherent and dma mask based on size and default mask
	 * set by the driver.
	 */
	mask = DMA_BIT_MASK(ilog2(end) + 1);
	dev->coherent_dma_mask &= mask;
	*dev->dma_mask &= mask;
	/* ...but only set bus limit and range map if we found valid dma-ranges earlier */
	if (set_map) {
		dev->bus_dma_limit = end;
		dev->dma_range_map = map;
	}

	coherent = of_dma_is_coherent(np);
	dev_dbg(dev, "device is%sdma coherent\n",
		coherent ? " " : " not ");

	ret = of_iommu_configure(dev, np, id);
	if (ret == -EPROBE_DEFER) {
		/* Don't touch range map if it wasn't set from a valid dma-ranges */
		if (set_map)
			dev->dma_range_map = NULL;
		kfree(map);
		return -EPROBE_DEFER;
	}
	/* Take all other IOMMU errors to mean we'll just carry on without it */
	dev_dbg(dev, "device is%sbehind an iommu\n",
		!ret ? " " : " not ");

	arch_setup_dma_ops(dev, coherent);

	if (ret)
		of_dma_set_restricted_buffer(dev, np);

	return 0;
}
EXPORT_SYMBOL_GPL(of_dma_configure_id);
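
/*
 * Typical call site (sketch): bus code configures a newly created device
 * from its DT node via the of_dma_configure() wrapper, which passes a
 * NULL @id to of_dma_configure_id(). The platform bus, for example, calls
 * it with force_dma set so legacy nodes lacking "dma-ranges" still work.
 *
 *	ret = of_dma_configure(dev, dev->of_node, true);
 *	if (ret)
 *		return ret;
 */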

const void *of_device_get_match_data(const struct device *dev)
{
	const struct of_device_id *match;

	match = of_match_device(dev->driver->of_match_table, dev);
	if (!match)
		return NULL;

	return match->data;
}
EXPORT_SYMBOL(of_device_get_match_data);
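
/*
 * Example (hypothetical driver): per-compatible configuration is attached
 * through the .data member of the match table and fetched in probe.
 *
 *	static const struct foo_cfg foo_v1_cfg = { .has_quirk = true };
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo-v1", .data = &foo_v1_cfg },
 *		{ }
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		const struct foo_cfg *cfg;
 *
 *		cfg = of_device_get_match_data(&pdev->dev);
 *		if (!cfg)
 *			return -EINVAL;
 *		return 0;
 *	}
 */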

/**
 * of_device_modalias - Fill buffer with newline terminated modalias string
 * @dev: Calling device
 * @str: Modalias string
 * @len: Size of @str
 */
ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len)
{
	ssize_t sl;

	if (!dev || !dev->of_node || dev->of_node_reused)
		return -ENODEV;

	sl = of_modalias(dev->of_node, str, len - 2);
	if (sl < 0)
		return sl;
	if (sl > len - 2)
		return -ENOMEM;

	str[sl++] = '\n';
	str[sl] = 0;
	return sl;
}
EXPORT_SYMBOL_GPL(of_device_modalias);
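
/*
 * Typical use (sketch): backing a sysfs "modalias" attribute, where @str
 * is the usual PAGE_SIZE sysfs buffer. The attribute wiring shown here is
 * illustrative.
 *
 *	static ssize_t modalias_show(struct device *dev,
 *				     struct device_attribute *attr, char *buf)
 *	{
 *		return of_device_modalias(dev, buf, PAGE_SIZE);
 *	}
 */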

/**
 * of_device_uevent - Display OF related uevent information
 * @dev: Device to display the uevent information for
 * @env: Kernel object's userspace event reference to fill up
 */
void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const char *compat, *type;
	struct alias_prop *app;
	struct property *p;
	int seen = 0;

	if ((!dev) || (!dev->of_node))
		return;

	add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node);
	add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node);
	type = of_node_get_device_type(dev->of_node);
	if (type)
		add_uevent_var(env, "OF_TYPE=%s", type);

	/*
	 * Since the compatible field can contain pretty much anything
	 * it's not really legal to split it out with commas. We split it
	 * up using a number of environment variables instead.
	 */
	of_property_for_each_string(dev->of_node, "compatible", p, compat) {
		add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat);
		seen++;
	}
	add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen);

	seen = 0;
	mutex_lock(&of_mutex);
	list_for_each_entry(app, &aliases_lookup, link) {
		if (dev->of_node == app->np) {
			add_uevent_var(env, "OF_ALIAS_%d=%s", seen,
				       app->alias);
			seen++;
		}
	}
	mutex_unlock(&of_mutex);
}
EXPORT_SYMBOL_GPL(of_device_uevent);
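
/*
 * Illustrative result (made-up node): for /soc/serial@10013000 with
 * compatible = "vendor,uart", the uevent environment gains roughly:
 *
 *	OF_NAME=serial
 *	OF_FULLNAME=/soc/serial@10013000
 *	OF_COMPATIBLE_0=vendor,uart
 *	OF_COMPATIBLE_N=1
 */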

int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env)
{
	int sl;

	if ((!dev) || (!dev->of_node) || dev->of_node_reused)
		return -ENODEV;

	/* Devicetree modalias is tricky, we add it in 2 steps */
	if (add_uevent_var(env, "MODALIAS="))
		return -ENOMEM;

	sl = of_modalias(dev->of_node, &env->buf[env->buflen-1],
			 sizeof(env->buf) - env->buflen);
	if (sl < 0)
		return sl;
	if (sl >= (sizeof(env->buf) - env->buflen))
		return -ENOMEM;
	env->buflen += sl;

	return 0;
}
EXPORT_SYMBOL_GPL(of_device_uevent_modalias);

/**
 * of_device_make_bus_id - Use the device node data to assign a unique name
 * @dev: pointer to device structure that is linked to a device tree node
 *
 * This routine will first try using the translated bus address to
 * derive a unique name. If it cannot, then it will prepend names from
 * parent nodes until a unique name can be derived.
 */
void of_device_make_bus_id(struct device *dev)
{
	struct device_node *node = dev->of_node;
	const __be32 *reg;
	u64 addr;
	u32 mask;

	/* Construct the name, using parent nodes if necessary to ensure uniqueness */
	while (node->parent) {
		/*
		 * If the address can be translated, then that is as much
		 * uniqueness as we need. Make it the first component and return
		 */
		reg = of_get_property(node, "reg", NULL);
		if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) {
			if (!of_property_read_u32(node, "mask", &mask))
				dev_set_name(dev, dev_name(dev) ? "%llx.%x.%pOFn:%s" : "%llx.%x.%pOFn",
					     addr, ffs(mask) - 1, node, dev_name(dev));
			else
				dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn",
					     addr, node, dev_name(dev));
			return;
		}

		/* format arguments only used if dev_name() resolves to NULL */
		dev_set_name(dev, dev_name(dev) ? "%s:%s" : "%s",
			     kbasename(node->full_name), dev_name(dev));
		node = node->parent;
	}
}
EXPORT_SYMBOL_GPL(of_device_make_bus_id);
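
/*
 * Illustrative outcomes (hypothetical nodes): a node serial@10013000 whose
 * "reg" translates successfully is named "10013000.serial"; when no address
 * can be translated, node names are prepended instead as the loop walks up
 * the tree, giving something like "soc:serial@10013000".
 */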