// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, NVIDIA Corporation.
 */

#include <linux/device.h>
#include <linux/kref.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pid.h>
#include <linux/slab.h>

#include "context.h"
#include "dev.h"

static void host1x_memory_context_release(struct device *dev)
{
	/* context device is freed in host1x_memory_context_list_free() */
}

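/*
 * Create one context device for each entry in the host1x node's "iommu-map"
 * property, so that each client process can later be given its own IOMMU
 * address space.
 */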
int host1x_memory_context_list_init(struct host1x *host1x)
{
	struct host1x_memory_context_list *cdl = &host1x->context_list;
	struct device_node *node = host1x->dev->of_node;
	struct host1x_memory_context *ctx;
	unsigned int i;
	int err;

	cdl->devs = NULL;
	cdl->len = 0;
	mutex_init(&cdl->lock);

	err = of_property_count_u32_elems(node, "iommu-map");
	if (err < 0)
		return 0;

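	/*
	 * Each "iommu-map" entry is four u32 cells (RID base, IOMMU phandle,
	 * IOMMU base, length), so a quarter of the element count is the
	 * number of context devices to create.
	 */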
	cdl->len = err / 4;
	cdl->devs = kcalloc(cdl->len, sizeof(*cdl->devs), GFP_KERNEL);
	if (!cdl->devs)
		return -ENOMEM;

	for (i = 0; i < cdl->len; i++) {
		ctx = &cdl->devs[i];

		ctx->host = host1x;

		device_initialize(&ctx->dev);

		/*
		 * Due to an issue with T194 NVENC, only 38 bits can be used.
		 * Anyway, 256GiB of IOVA ought to be enough for anyone.
		 */
		ctx->dma_mask = DMA_BIT_MASK(38);
		ctx->dev.dma_mask = &ctx->dma_mask;
		ctx->dev.coherent_dma_mask = ctx->dma_mask;
		dev_set_name(&ctx->dev, "host1x-ctx.%d", i);
		ctx->dev.bus = &host1x_context_device_bus_type;
		ctx->dev.parent = host1x->dev;
		ctx->dev.release = host1x_memory_context_release;

		ctx->dev.dma_parms = &ctx->dma_parms;
		dma_set_max_seg_size(&ctx->dev, UINT_MAX);

		err = device_add(&ctx->dev);
		if (err) {
			dev_err(host1x->dev, "could not add context device %d: %d\n", i, err);
			put_device(&ctx->dev);
			goto unreg_devices;
		}

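		/*
		 * Route DMA/IOMMU configuration through the i-th "iommu-map"
		 * entry so that every context device ends up with its own
		 * stream ID.
		 */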
		err = of_dma_configure_id(&ctx->dev, node, true, &i);
		if (err) {
			dev_err(host1x->dev, "IOMMU configuration failed for context device %d: %d\n",
				i, err);
			device_unregister(&ctx->dev);
			goto unreg_devices;
		}

		if (!tegra_dev_iommu_get_stream_id(&ctx->dev, &ctx->stream_id) ||
		    !device_iommu_mapped(&ctx->dev)) {
			dev_err(host1x->dev, "Context device %d has no IOMMU!\n", i);
			device_unregister(&ctx->dev);

			/*
			 * This means that if IOMMU is disabled but context devices
			 * are defined in the device tree, Host1x will fail to probe.
			 * That's probably OK in this time and age.
			 */
			err = -EINVAL;

			goto unreg_devices;
		}
	}

	return 0;

unreg_devices:
	while (i--)
		device_unregister(&cdl->devs[i].dev);

	kfree(cdl->devs);
	cdl->devs = NULL;
	cdl->len = 0;

	return err;
}

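/* Tear down the context devices created by host1x_memory_context_list_init(). */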
void host1x_memory_context_list_free(struct host1x_memory_context_list *cdl)
{
	unsigned int i;

	for (i = 0; i < cdl->len; i++)
		device_unregister(&cdl->devs[i].dev);

	kfree(cdl->devs);
	cdl->len = 0;
}

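/*
 * Hand out a context device that sits behind the same IOMMU as @dev. A device
 * already owned by @pid is reused; otherwise the first free one is claimed.
 * Returns an ERR_PTR on failure: -EOPNOTSUPP when no context devices exist,
 * -EBUSY when all of them are owned by other processes.
 */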
struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
							   struct device *dev,
							   struct pid *pid)
{
	struct host1x_memory_context_list *cdl = &host1x->context_list;
	struct host1x_memory_context *free = NULL;
	int i;

	if (!cdl->len)
		return ERR_PTR(-EOPNOTSUPP);

	mutex_lock(&cdl->lock);

	for (i = 0; i < cdl->len; i++) {
		struct host1x_memory_context *cd = &cdl->devs[i];

		if (cd->dev.iommu->iommu_dev != dev->iommu->iommu_dev)
			continue;

		if (cd->owner == pid) {
			refcount_inc(&cd->ref);
			mutex_unlock(&cdl->lock);
			return cd;
		} else if (!cd->owner && !free) {
			free = cd;
		}
	}

	if (!free) {
		mutex_unlock(&cdl->lock);
		return ERR_PTR(-EBUSY);
	}

	refcount_set(&free->ref, 1);
	free->owner = get_pid(pid);

	mutex_unlock(&cdl->lock);

	return free;
}
EXPORT_SYMBOL_GPL(host1x_memory_context_alloc);

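/* Take an additional reference on a context device. */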
void host1x_memory_context_get(struct host1x_memory_context *cd)
{
	refcount_inc(&cd->ref);
}
EXPORT_SYMBOL_GPL(host1x_memory_context_get);

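/*
 * Drop a reference on a context device. When the last reference is released,
 * the owning PID reference is dropped and the device becomes available for
 * host1x_memory_context_alloc() again.
 */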
void host1x_memory_context_put(struct host1x_memory_context *cd)
{
	struct host1x_memory_context_list *cdl = &cd->host->context_list;

	if (refcount_dec_and_mutex_lock(&cd->ref, &cdl->lock)) {
		put_pid(cd->owner);
		cd->owner = NULL;
		mutex_unlock(&cdl->lock);
	}
}
EXPORT_SYMBOL_GPL(host1x_memory_context_put);