/* NOTE(review): this capture contains two revisions of kfd_iommu.c back to back —
 * an older one (pdev/device_info pointer style) followed by a newer one
 * (adev->pdev/use_iommu_v2 style, wrapped in IS_REACHABLE(CONFIG_AMD_IOMMU_V2)). */
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/printk.h>
24#include <linux/device.h>
25#include <linux/slab.h>
26#include <linux/pci.h>
27#include <linux/amd-iommu.h>
28#include "kfd_priv.h"
29#include "kfd_dbgmgr.h"
30#include "kfd_topology.h"
31#include "kfd_iommu.h"
32
/* A device is usable by KFD through IOMMUv2 only if the IOMMU driver
 * reports all three of ATS, PRI and PASID support for it.
 */
static const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP |
					AMD_IOMMU_DEVICE_FLAG_PRI_SUP |
					AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
36
37/** kfd_iommu_check_device - Check whether IOMMU is available for device
38 */
39int kfd_iommu_check_device(struct kfd_dev *kfd)
40{
41 struct amd_iommu_device_info iommu_info;
42 int err;
43
44 if (!kfd->device_info->needs_iommu_device)
45 return -ENODEV;
46
47 iommu_info.flags = 0;
48 err = amd_iommu_device_info(kfd->pdev, &iommu_info);
49 if (err)
50 return err;
51
52 if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags)
53 return -ENODEV;
54
55 return 0;
56}
57
58/** kfd_iommu_device_init - Initialize IOMMU for device
59 */
60int kfd_iommu_device_init(struct kfd_dev *kfd)
61{
62 struct amd_iommu_device_info iommu_info;
63 unsigned int pasid_limit;
64 int err;
65
66 if (!kfd->device_info->needs_iommu_device)
67 return 0;
68
69 iommu_info.flags = 0;
70 err = amd_iommu_device_info(kfd->pdev, &iommu_info);
71 if (err < 0) {
72 dev_err(kfd_device,
73 "error getting iommu info. is the iommu enabled?\n");
74 return -ENODEV;
75 }
76
77 if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
78 dev_err(kfd_device,
79 "error required iommu flags ats %i, pri %i, pasid %i\n",
80 (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
81 (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
82 (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP)
83 != 0);
84 return -ENODEV;
85 }
86
87 pasid_limit = min_t(unsigned int,
88 (unsigned int)(1 << kfd->device_info->max_pasid_bits),
89 iommu_info.max_pasids);
90
91 if (!kfd_set_pasid_limit(pasid_limit)) {
92 dev_err(kfd_device, "error setting pasid limit\n");
93 return -EBUSY;
94 }
95
96 return 0;
97}
98
99/** kfd_iommu_bind_process_to_device - Have the IOMMU bind a process
100 *
101 * Binds the given process to the given device using its PASID. This
102 * enables IOMMUv2 address translation for the process on the device.
103 *
104 * This function assumes that the process mutex is held.
105 */
106int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd)
107{
108 struct kfd_dev *dev = pdd->dev;
109 struct kfd_process *p = pdd->process;
110 int err;
111
112 if (!dev->device_info->needs_iommu_device || pdd->bound == PDD_BOUND)
113 return 0;
114
115 if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
116 pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n");
117 return -EINVAL;
118 }
119
120 err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
121 if (!err)
122 pdd->bound = PDD_BOUND;
123
124 return err;
125}
126
127/** kfd_iommu_unbind_process - Unbind process from all devices
128 *
129 * This removes all IOMMU device bindings of the process. To be used
130 * before process termination.
131 */
132void kfd_iommu_unbind_process(struct kfd_process *p)
133{
134 struct kfd_process_device *pdd;
135
136 list_for_each_entry(pdd, &p->per_device_data, per_device_list)
137 if (pdd->bound == PDD_BOUND)
138 amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
139}
140
/* Callback for process shutdown invoked by the IOMMU driver.
 *
 * Tears down the debugger registration (if this process owns it) and
 * dequeues the process from the device while the PASID is still bound.
 * Lock order here is dbgmgr mutex first, then p->mutex — keep it.
 */
static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
{
	struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);
	struct kfd_process *p;
	struct kfd_process_device *pdd;

	if (!dev)
		return;

	/*
	 * Look for the process that matches the pasid. If there is no such
	 * process, we either released it in amdkfd's own notifier, or there
	 * is a bug. Unfortunately, there is no way to tell...
	 */
	p = kfd_lookup_process_by_pasid(pasid);
	if (!p)
		return;

	pr_debug("Unbinding process 0x%x from IOMMU\n", pasid);

	mutex_lock(kfd_get_dbgmgr_mutex());

	/* If the dying process owns the debug manager, unregister and free it */
	if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
		if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
			kfd_dbgmgr_destroy(dev->dbgmgr);
			dev->dbgmgr = NULL;
		}
	}

	mutex_unlock(kfd_get_dbgmgr_mutex());

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(dev, p);
	if (pdd)
		/* For GPU relying on IOMMU, we need to dequeue here
		 * when PASID is still bound.
		 */
		kfd_process_dequeue_from_device(pdd);

	mutex_unlock(&p->mutex);

	/* Drop the reference taken by kfd_lookup_process_by_pasid() */
	kfd_unref_process(p);
}
186
187/* This function called by IOMMU driver on PPR failure */
188static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
189 unsigned long address, u16 flags)
190{
191 struct kfd_dev *dev;
192
193 dev_warn_ratelimited(kfd_device,
194 "Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X",
195 pdev->bus->number,
196 PCI_SLOT(pdev->devfn),
197 PCI_FUNC(pdev->devfn),
198 pasid,
199 address,
200 flags);
201
202 dev = kfd_device_by_pci_dev(pdev);
203 if (!WARN_ON(!dev))
204 kfd_signal_iommu_event(dev, pasid, address,
205 flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);
206
207 return AMD_IOMMU_INV_PRI_RSP_INVALID;
208}
209
/*
 * Bind processes to the device that have been temporarily unbound
 * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
 *
 * Returns 0, or the first amd_iommu_bind_pasid() error, in which case
 * iteration stops and the caller tears the IOMMU device down.
 */
static int kfd_bind_processes_to_device(struct kfd_dev *kfd)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;
	int err = 0;

	/* SRCU read lock keeps the process table stable while we walk it */
	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		/* p->mutex serializes against concurrent bind/unbind */
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(kfd, p);

		/* Only rebind PDDs that suspend actually unbound */
		if (WARN_ON(!pdd) || pdd->bound != PDD_BOUND_SUSPENDED) {
			mutex_unlock(&p->mutex);
			continue;
		}

		err = amd_iommu_bind_pasid(kfd->pdev, p->pasid,
				p->lead_thread);
		if (err < 0) {
			pr_err("Unexpected pasid 0x%x binding failure\n",
					p->pasid);
			mutex_unlock(&p->mutex);
			break;
		}

		pdd->bound = PDD_BOUND;
		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return err;
}
249
/*
 * Mark currently bound processes as PDD_BOUND_SUSPENDED. These
 * processes will be restored to PDD_BOUND state in
 * kfd_bind_processes_to_device.
 *
 * Note: this only flips the bookkeeping state; the actual PASID unbind
 * happens implicitly when the caller frees the IOMMU device.
 */
static void kfd_unbind_processes_from_device(struct kfd_dev *kfd)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;

	/* SRCU read lock keeps the process table stable while we walk it */
	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(kfd, p);

		if (WARN_ON(!pdd)) {
			mutex_unlock(&p->mutex);
			continue;
		}

		/* Leave PDD_UNBOUND entries untouched */
		if (pdd->bound == PDD_BOUND)
			pdd->bound = PDD_BOUND_SUSPENDED;
		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);
}
279
280/** kfd_iommu_suspend - Prepare IOMMU for suspend
281 *
282 * This unbinds processes from the device and disables the IOMMU for
283 * the device.
284 */
285void kfd_iommu_suspend(struct kfd_dev *kfd)
286{
287 if (!kfd->device_info->needs_iommu_device)
288 return;
289
290 kfd_unbind_processes_from_device(kfd);
291
292 amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
293 amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
294 amd_iommu_free_device(kfd->pdev);
295}
296
297/** kfd_iommu_resume - Restore IOMMU after resume
298 *
299 * This reinitializes the IOMMU for the device and re-binds previously
300 * suspended processes to the device.
301 */
302int kfd_iommu_resume(struct kfd_dev *kfd)
303{
304 unsigned int pasid_limit;
305 int err;
306
307 if (!kfd->device_info->needs_iommu_device)
308 return 0;
309
310 pasid_limit = kfd_get_pasid_limit();
311
312 err = amd_iommu_init_device(kfd->pdev, pasid_limit);
313 if (err)
314 return -ENXIO;
315
316 amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
317 iommu_pasid_shutdown_callback);
318 amd_iommu_set_invalid_ppr_cb(kfd->pdev,
319 iommu_invalid_ppr_cb);
320
321 err = kfd_bind_processes_to_device(kfd);
322 if (err) {
323 amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
324 amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
325 amd_iommu_free_device(kfd->pdev);
326 return err;
327 }
328
329 return 0;
330}
331
/* IOMMU performance-counter interface exported by the AMD IOMMU driver.
 * NOTE(review): declared here rather than via a header — presumably no
 * public header existed for these at the time; verify against amd-iommu.h.
 */
extern bool amd_iommu_pc_supported(void);
extern u8 amd_iommu_pc_get_max_banks(u16 devid);
extern u8 amd_iommu_pc_get_max_counters(u16 devid);
335
336/** kfd_iommu_add_perf_counters - Add IOMMU performance counters to topology
337 */
338int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
339{
340 struct kfd_perf_properties *props;
341
342 if (!(kdev->node_props.capability & HSA_CAP_ATS_PRESENT))
343 return 0;
344
345 if (!amd_iommu_pc_supported())
346 return 0;
347
348 props = kfd_alloc_struct(props);
349 if (!props)
350 return -ENOMEM;
351 strcpy(props->block_name, "iommu");
352 props->max_concurrent = amd_iommu_pc_get_max_banks(0) *
353 amd_iommu_pc_get_max_counters(0); /* assume one iommu */
354 list_add_tail(&props->list, &kdev->perf_props);
355
356 return 0;
357}
1// SPDX-License-Identifier: GPL-2.0 OR MIT
2/*
3 * Copyright 2018-2022 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24#include <linux/kconfig.h>
25
26#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
27
28#include <linux/printk.h>
29#include <linux/device.h>
30#include <linux/slab.h>
31#include <linux/pci.h>
32#include <linux/amd-iommu.h>
33#include "kfd_priv.h"
34#include "kfd_topology.h"
35#include "kfd_iommu.h"
36
/* A device is usable by KFD through IOMMUv2 only if the IOMMU driver
 * reports all three of ATS, PRI and PASID support for it.
 */
static const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP |
					AMD_IOMMU_DEVICE_FLAG_PRI_SUP |
					AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
40
41/** kfd_iommu_check_device - Check whether IOMMU is available for device
42 */
43int kfd_iommu_check_device(struct kfd_dev *kfd)
44{
45 struct amd_iommu_device_info iommu_info;
46 int err;
47
48 if (!kfd->use_iommu_v2)
49 return -ENODEV;
50
51 iommu_info.flags = 0;
52 err = amd_iommu_device_info(kfd->adev->pdev, &iommu_info);
53 if (err)
54 return err;
55
56 if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags)
57 return -ENODEV;
58
59 return 0;
60}
61
62/** kfd_iommu_device_init - Initialize IOMMU for device
63 */
64int kfd_iommu_device_init(struct kfd_dev *kfd)
65{
66 struct amd_iommu_device_info iommu_info;
67 unsigned int pasid_limit;
68 int err;
69
70 if (!kfd->use_iommu_v2)
71 return 0;
72
73 iommu_info.flags = 0;
74 err = amd_iommu_device_info(kfd->adev->pdev, &iommu_info);
75 if (err < 0) {
76 dev_err(kfd_device,
77 "error getting iommu info. is the iommu enabled?\n");
78 return -ENODEV;
79 }
80
81 if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
82 dev_err(kfd_device,
83 "error required iommu flags ats %i, pri %i, pasid %i\n",
84 (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
85 (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
86 (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP)
87 != 0);
88 return -ENODEV;
89 }
90
91 pasid_limit = min_t(unsigned int,
92 (unsigned int)(1 << kfd->device_info.max_pasid_bits),
93 iommu_info.max_pasids);
94
95 if (!kfd_set_pasid_limit(pasid_limit)) {
96 dev_err(kfd_device, "error setting pasid limit\n");
97 return -EBUSY;
98 }
99
100 return 0;
101}
102
103/** kfd_iommu_bind_process_to_device - Have the IOMMU bind a process
104 *
105 * Binds the given process to the given device using its PASID. This
106 * enables IOMMUv2 address translation for the process on the device.
107 *
108 * This function assumes that the process mutex is held.
109 */
110int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd)
111{
112 struct kfd_dev *dev = pdd->dev;
113 struct kfd_process *p = pdd->process;
114 int err;
115
116 if (!dev->use_iommu_v2 || pdd->bound == PDD_BOUND)
117 return 0;
118
119 if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
120 pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n");
121 return -EINVAL;
122 }
123
124 err = amd_iommu_bind_pasid(dev->adev->pdev, p->pasid, p->lead_thread);
125 if (!err)
126 pdd->bound = PDD_BOUND;
127
128 return err;
129}
130
131/** kfd_iommu_unbind_process - Unbind process from all devices
132 *
133 * This removes all IOMMU device bindings of the process. To be used
134 * before process termination.
135 */
136void kfd_iommu_unbind_process(struct kfd_process *p)
137{
138 int i;
139
140 for (i = 0; i < p->n_pdds; i++)
141 if (p->pdds[i]->bound == PDD_BOUND)
142 amd_iommu_unbind_pasid(p->pdds[i]->dev->adev->pdev,
143 p->pasid);
144}
145
146/* Callback for process shutdown invoked by the IOMMU driver */
147static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, u32 pasid)
148{
149 struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);
150 struct kfd_process *p;
151 struct kfd_process_device *pdd;
152
153 if (!dev)
154 return;
155
156 /*
157 * Look for the process that matches the pasid. If there is no such
158 * process, we either released it in amdkfd's own notifier, or there
159 * is a bug. Unfortunately, there is no way to tell...
160 */
161 p = kfd_lookup_process_by_pasid(pasid);
162 if (!p)
163 return;
164
165 pr_debug("Unbinding process 0x%x from IOMMU\n", pasid);
166
167 mutex_lock(&p->mutex);
168
169 pdd = kfd_get_process_device_data(dev, p);
170 if (pdd)
171 /* For GPU relying on IOMMU, we need to dequeue here
172 * when PASID is still bound.
173 */
174 kfd_process_dequeue_from_device(pdd);
175
176 mutex_unlock(&p->mutex);
177
178 kfd_unref_process(p);
179}
180
181/* This function called by IOMMU driver on PPR failure */
182static int iommu_invalid_ppr_cb(struct pci_dev *pdev, u32 pasid,
183 unsigned long address, u16 flags)
184{
185 struct kfd_dev *dev;
186
187 dev_warn_ratelimited(kfd_device,
188 "Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X",
189 pdev->bus->number,
190 PCI_SLOT(pdev->devfn),
191 PCI_FUNC(pdev->devfn),
192 pasid,
193 address,
194 flags);
195
196 dev = kfd_device_by_pci_dev(pdev);
197 if (!WARN_ON(!dev))
198 kfd_signal_iommu_event(dev, pasid, address,
199 flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);
200
201 return AMD_IOMMU_INV_PRI_RSP_INVALID;
202}
203
/*
 * Bind processes to the device that have been temporarily unbound
 * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
 *
 * Returns 0, or the first amd_iommu_bind_pasid() error, in which case
 * iteration stops and the caller tears the IOMMU device down.
 */
static int kfd_bind_processes_to_device(struct kfd_dev *kfd)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;
	int err = 0;

	/* SRCU read lock keeps the process table stable while we walk it */
	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		/* p->mutex serializes against concurrent bind/unbind */
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(kfd, p);

		/* Only rebind PDDs that suspend actually unbound */
		if (WARN_ON(!pdd) || pdd->bound != PDD_BOUND_SUSPENDED) {
			mutex_unlock(&p->mutex);
			continue;
		}

		err = amd_iommu_bind_pasid(kfd->adev->pdev, p->pasid,
				p->lead_thread);
		if (err < 0) {
			pr_err("Unexpected pasid 0x%x binding failure\n",
					p->pasid);
			mutex_unlock(&p->mutex);
			break;
		}

		pdd->bound = PDD_BOUND;
		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return err;
}
243
/*
 * Mark currently bound processes as PDD_BOUND_SUSPENDED. These
 * processes will be restored to PDD_BOUND state in
 * kfd_bind_processes_to_device.
 *
 * Note: this only flips the bookkeeping state; the actual PASID unbind
 * happens implicitly when the caller frees the IOMMU device.
 */
static void kfd_unbind_processes_from_device(struct kfd_dev *kfd)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;

	/* SRCU read lock keeps the process table stable while we walk it */
	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(kfd, p);

		if (WARN_ON(!pdd)) {
			mutex_unlock(&p->mutex);
			continue;
		}

		/* Leave PDD_UNBOUND entries untouched */
		if (pdd->bound == PDD_BOUND)
			pdd->bound = PDD_BOUND_SUSPENDED;
		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);
}
273
274/** kfd_iommu_suspend - Prepare IOMMU for suspend
275 *
276 * This unbinds processes from the device and disables the IOMMU for
277 * the device.
278 */
279void kfd_iommu_suspend(struct kfd_dev *kfd)
280{
281 if (!kfd->use_iommu_v2)
282 return;
283
284 kfd_unbind_processes_from_device(kfd);
285
286 amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL);
287 amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL);
288 amd_iommu_free_device(kfd->adev->pdev);
289}
290
291/** kfd_iommu_resume - Restore IOMMU after resume
292 *
293 * This reinitializes the IOMMU for the device and re-binds previously
294 * suspended processes to the device.
295 */
296int kfd_iommu_resume(struct kfd_dev *kfd)
297{
298 unsigned int pasid_limit;
299 int err;
300
301 if (!kfd->use_iommu_v2)
302 return 0;
303
304 pasid_limit = kfd_get_pasid_limit();
305
306 err = amd_iommu_init_device(kfd->adev->pdev, pasid_limit);
307 if (err)
308 return -ENXIO;
309
310 amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev,
311 iommu_pasid_shutdown_callback);
312 amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev,
313 iommu_invalid_ppr_cb);
314
315 err = kfd_bind_processes_to_device(kfd);
316 if (err) {
317 amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL);
318 amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL);
319 amd_iommu_free_device(kfd->adev->pdev);
320 return err;
321 }
322
323 return 0;
324}
325
326/** kfd_iommu_add_perf_counters - Add IOMMU performance counters to topology
327 */
328int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
329{
330 struct kfd_perf_properties *props;
331
332 if (!(kdev->node_props.capability & HSA_CAP_ATS_PRESENT))
333 return 0;
334
335 if (!amd_iommu_pc_supported())
336 return 0;
337
338 props = kfd_alloc_struct(props);
339 if (!props)
340 return -ENOMEM;
341 strcpy(props->block_name, "iommu");
342 props->max_concurrent = amd_iommu_pc_get_max_banks(0) *
343 amd_iommu_pc_get_max_counters(0); /* assume one iommu */
344 list_add_tail(&props->list, &kdev->perf_props);
345
346 return 0;
347}
348
349#endif