// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for s390 PCI devices
 *
 * Copyright IBM Corp. 2015
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iommu-helper.h>
#include <linux/sizes.h>
#include <asm/pci_dma.h>

/*
 * Physically contiguous memory regions can be mapped with 4 KiB alignment,
 * so we allow all page sizes that are a power-of-two multiple of 4 KiB (no
 * special large page support so far).
 */
#define S390_IOMMU_PGSIZES	(~0xFFFUL)

static const struct iommu_ops s390_iommu_ops;

struct s390_domain {
	struct iommu_domain	domain;
	struct list_head	devices;
	unsigned long		*dma_table;
	spinlock_t		dma_table_lock;
	spinlock_t		list_lock;
};

struct s390_domain_device {
	struct list_head	list;
	struct zpci_dev		*zdev;
};

static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct s390_domain, domain);
}

static bool s390_iommu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true;
	default:
		return false;
	}
}

static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
{
	struct s390_domain *s390_domain;

	if (domain_type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
	if (!s390_domain)
		return NULL;

	s390_domain->dma_table = dma_alloc_cpu_table();
	if (!s390_domain->dma_table) {
		kfree(s390_domain);
		return NULL;
	}

	spin_lock_init(&s390_domain->dma_table_lock);
	spin_lock_init(&s390_domain->list_lock);
	INIT_LIST_HEAD(&s390_domain->devices);

	return &s390_domain->domain;
}

static void s390_domain_free(struct iommu_domain *domain)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);

	dma_cleanup_tables(s390_domain->dma_table);
	kfree(s390_domain);
}

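/*
 * Attach a zPCI device to an IOMMU domain. The first device attached
 * defines the domain's DMA aperture; any further device must advertise
 * identical DMA range limits. On failure the device is restored to its
 * default DMA translation setup.
 */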
static int s390_iommu_attach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct s390_domain_device *domain_device;
	unsigned long flags;
	int rc;

	if (!zdev)
		return -ENODEV;

	domain_device = kzalloc(sizeof(*domain_device), GFP_KERNEL);
	if (!domain_device)
		return -ENOMEM;

	if (zdev->dma_table)
		zpci_dma_exit_device(zdev);

	zdev->dma_table = s390_domain->dma_table;
	rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
				(u64) zdev->dma_table);
	if (rc)
		goto out_restore;

	spin_lock_irqsave(&s390_domain->list_lock, flags);
	/* First device defines the DMA range limits */
	if (list_empty(&s390_domain->devices)) {
		domain->geometry.aperture_start = zdev->start_dma;
		domain->geometry.aperture_end = zdev->end_dma;
		domain->geometry.force_aperture = true;
	/* Allow only devices with identical DMA range limits */
	} else if (domain->geometry.aperture_start != zdev->start_dma ||
		   domain->geometry.aperture_end != zdev->end_dma) {
		rc = -EINVAL;
		spin_unlock_irqrestore(&s390_domain->list_lock, flags);
		goto out_restore;
	}
	domain_device->zdev = zdev;
	zdev->s390_domain = s390_domain;
	list_add(&domain_device->list, &s390_domain->devices);
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);

	return 0;

out_restore:
	zpci_dma_init_device(zdev);
	kfree(domain_device);

	return rc;
}

static void s390_iommu_detach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct s390_domain_device *domain_device, *tmp;
	unsigned long flags;
	int found = 0;

	if (!zdev)
		return;

	spin_lock_irqsave(&s390_domain->list_lock, flags);
	list_for_each_entry_safe(domain_device, tmp, &s390_domain->devices,
				 list) {
		if (domain_device->zdev == zdev) {
			list_del(&domain_device->list);
			kfree(domain_device);
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);

	if (found) {
		zdev->s390_domain = NULL;
		zpci_unregister_ioat(zdev, 0);
		zpci_dma_init_device(zdev);
	}
}

static struct iommu_device *s390_iommu_probe_device(struct device *dev)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);

	return &zdev->iommu_dev;
}

static void s390_iommu_release_device(struct device *dev)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct iommu_domain *domain;

	/*
	 * This is a workaround for a scenario where the IOMMU API common code
	 * "forgets" to call the detach_dev callback: after binding a device
	 * to vfio-pci and completing the VFIO_SET_IOMMU ioctl (which triggers
	 * attach_dev), removing the device via
	 * "echo 1 > /sys/bus/pci/devices/.../remove" won't trigger detach_dev;
	 * only release_device will be called via the BUS_NOTIFY_REMOVED_DEVICE
	 * notifier.
	 *
	 * So let's call detach_dev from here if it hasn't been called before.
	 */
	if (zdev && zdev->s390_domain) {
		domain = iommu_get_domain_for_dev(dev);
		if (domain)
			s390_iommu_detach_device(domain, dev);
	}
}

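/*
 * Update the CPU-visible DMA translation table for the given IOVA range,
 * then refresh the translations on all devices attached to the domain.
 * If anything fails while establishing valid entries, the entries written
 * so far are rolled back to the invalid state.
 */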
static int s390_iommu_update_trans(struct s390_domain *s390_domain,
				   unsigned long pa, dma_addr_t dma_addr,
				   size_t size, int flags)
{
	struct s390_domain_device *domain_device;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	dma_addr_t start_dma_addr = dma_addr;
	unsigned long irq_flags, nr_pages, i;
	unsigned long *entry;
	int rc = 0;

	if (dma_addr < s390_domain->domain.geometry.aperture_start ||
	    dma_addr + size > s390_domain->domain.geometry.aperture_end)
		return -EINVAL;

	nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	if (!nr_pages)
		return 0;

	spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags);
	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
		if (!entry) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	spin_lock(&s390_domain->list_lock);
	list_for_each_entry(domain_device, &s390_domain->devices, list) {
		rc = zpci_refresh_trans((u64) domain_device->zdev->fh << 32,
					start_dma_addr, nr_pages * PAGE_SIZE);
		if (rc)
			break;
	}
	spin_unlock(&s390_domain->list_lock);

undo_cpu_trans:
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
		flags = ZPCI_PTE_INVALID;
		while (i-- > 0) {
			page_addr -= PAGE_SIZE;
			dma_addr -= PAGE_SIZE;
			entry = dma_walk_cpu_trans(s390_domain->dma_table,
						   dma_addr);
			if (!entry)
				break;
			dma_update_cpu_trans(entry, page_addr, flags);
		}
	}
	spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags);

	return rc;
}

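/*
 * Map a physically contiguous range into the domain. Write-only mappings
 * are rejected; read-only mappings are expressed by setting the
 * protection bit in the translation entries.
 */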
static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	int flags = ZPCI_PTE_VALID, rc = 0;

	if (!(prot & IOMMU_READ))
		return -EINVAL;

	if (!(prot & IOMMU_WRITE))
		flags |= ZPCI_TABLE_PROTECTED;

	rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,
				     size, flags);

	return rc;
}

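/*
 * Resolve an IOVA to a physical address by walking the region, segment
 * and page tables under the dma_table lock. Returns 0 if no valid
 * translation exists.
 */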
static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	unsigned long *sto, *pto, *rto, flags;
	unsigned int rtx, sx, px;
	phys_addr_t phys = 0;

	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;

	rtx = calc_rtx(iova);
	sx = calc_sx(iova);
	px = calc_px(iova);
	rto = s390_domain->dma_table;

	spin_lock_irqsave(&s390_domain->dma_table_lock, flags);
	if (rto && reg_entry_isvalid(rto[rtx])) {
		sto = get_rt_sto(rto[rtx]);
		if (sto && reg_entry_isvalid(sto[sx])) {
			pto = get_st_pto(sto[sx]);
			if (pto && pt_entry_isvalid(pto[px]))
				phys = pto[px] & ZPCI_PTE_ADDR_MASK;
		}
	}
	spin_unlock_irqrestore(&s390_domain->dma_table_lock, flags);

	return phys;
}

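/*
 * Unmap a range: first resolve the IOVA to verify that a mapping exists,
 * then invalidate the covered translation entries. Returns the unmapped
 * size, or 0 on failure.
 */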
static size_t s390_iommu_unmap(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *gather)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	int flags = ZPCI_PTE_INVALID;
	phys_addr_t paddr;
	int rc;

	paddr = s390_iommu_iova_to_phys(domain, iova);
	if (!paddr)
		return 0;

	rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,
				     size, flags);
	if (rc)
		return 0;

	return size;
}

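/*
 * Set up the IOMMU instance for a zPCI device: add it to sysfs and
 * register it with the IOMMU core.
 */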
int zpci_init_iommu(struct zpci_dev *zdev)
{
	int rc = 0;

	rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL,
				    "s390-iommu.%08x", zdev->fid);
	if (rc)
		goto out_err;

	iommu_device_set_ops(&zdev->iommu_dev, &s390_iommu_ops);

	rc = iommu_device_register(&zdev->iommu_dev);
	if (rc)
		goto out_sysfs;

	return 0;

out_sysfs:
	iommu_device_sysfs_remove(&zdev->iommu_dev);

out_err:
	return rc;
}

void zpci_destroy_iommu(struct zpci_dev *zdev)
{
	iommu_device_unregister(&zdev->iommu_dev);
	iommu_device_sysfs_remove(&zdev->iommu_dev);
}

static const struct iommu_ops s390_iommu_ops = {
	.capable = s390_iommu_capable,
	.domain_alloc = s390_domain_alloc,
	.domain_free = s390_domain_free,
	.attach_dev = s390_iommu_attach_device,
	.detach_dev = s390_iommu_detach_device,
	.map = s390_iommu_map,
	.unmap = s390_iommu_unmap,
	.iova_to_phys = s390_iommu_iova_to_phys,
	.probe_device = s390_iommu_probe_device,
	.release_device = s390_iommu_release_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = S390_IOMMU_PGSIZES,
};

static int __init s390_iommu_init(void)
{
	return bus_set_iommu(&pci_bus_type, &s390_iommu_ops);
}
subsys_initcall(s390_iommu_init);
// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for s390 PCI devices
 *
 * Copyright IBM Corp. 2015
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iommu-helper.h>
#include <linux/sizes.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <asm/pci_dma.h>

static const struct iommu_ops s390_iommu_ops;

struct s390_domain {
	struct iommu_domain	domain;
	struct list_head	devices;
	unsigned long		*dma_table;
	spinlock_t		list_lock;
	struct rcu_head		rcu;
};

static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct s390_domain, domain);
}

static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true;
	default:
		return false;
	}
}

static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
{
	struct s390_domain *s390_domain;

	if (domain_type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
	if (!s390_domain)
		return NULL;

	s390_domain->dma_table = dma_alloc_cpu_table();
	if (!s390_domain->dma_table) {
		kfree(s390_domain);
		return NULL;
	}
	s390_domain->domain.geometry.force_aperture = true;
	s390_domain->domain.geometry.aperture_start = 0;
	s390_domain->domain.geometry.aperture_end = ZPCI_TABLE_SIZE_RT - 1;

	spin_lock_init(&s390_domain->list_lock);
	INIT_LIST_HEAD_RCU(&s390_domain->devices);

	return &s390_domain->domain;
}

static void s390_iommu_rcu_free_domain(struct rcu_head *head)
{
	struct s390_domain *s390_domain = container_of(head, struct s390_domain, rcu);

	dma_cleanup_tables(s390_domain->dma_table);
	kfree(s390_domain);
}

static void s390_domain_free(struct iommu_domain *domain)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);

	rcu_read_lock();
	WARN_ON(!list_empty(&s390_domain->devices));
	rcu_read_unlock();

	call_rcu(&s390_domain->rcu, s390_iommu_rcu_free_domain);
}

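/*
 * Remove the device from its current domain and clear its DMA
 * translation state. Callers are responsible for re-establishing a
 * usable translation setup afterwards if one is needed.
 */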
static void __s390_iommu_detach_device(struct zpci_dev *zdev)
{
	struct s390_domain *s390_domain = zdev->s390_domain;
	unsigned long flags;

	if (!s390_domain)
		return;

	spin_lock_irqsave(&s390_domain->list_lock, flags);
	list_del_rcu(&zdev->iommu_list);
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);

	zpci_unregister_ioat(zdev, 0);
	zdev->s390_domain = NULL;
	zdev->dma_table = NULL;
}

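/*
 * Attach a zPCI device to an IOMMU domain: detach it from any previous
 * domain or its default DMA translation setup, register the domain's DMA
 * table with the hardware, and add the device to the domain's RCU device
 * list.
 */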
static int s390_iommu_attach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev = to_zpci_dev(dev);
	unsigned long flags;
	u8 status;
	int cc;

	if (!zdev)
		return -ENODEV;

	if (WARN_ON(domain->geometry.aperture_start > zdev->end_dma ||
		    domain->geometry.aperture_end < zdev->start_dma))
		return -EINVAL;

	if (zdev->s390_domain)
		__s390_iommu_detach_device(zdev);
	else if (zdev->dma_table)
		zpci_dma_exit_device(zdev);

	cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
				virt_to_phys(s390_domain->dma_table), &status);
	/*
	 * If the device is undergoing error recovery the reset code
	 * will re-establish the new domain.
	 */
	if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
		return -EIO;

	zdev->dma_table = s390_domain->dma_table;
	zdev->s390_domain = s390_domain;

	spin_lock_irqsave(&s390_domain->list_lock, flags);
	list_add_rcu(&zdev->iommu_list, &s390_domain->devices);
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);

	return 0;
}

static void s390_iommu_detach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);

	WARN_ON(zdev->s390_domain != to_s390_domain(domain));

	__s390_iommu_detach_device(zdev);
	zpci_dma_init_device(zdev);
}

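/*
 * Report the IOVA ranges outside the device's usable DMA window
 * [start_dma, end_dma] as reserved regions.
 */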
static void s390_iommu_get_resv_regions(struct device *dev,
					struct list_head *list)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct iommu_resv_region *region;

	if (zdev->start_dma) {
		region = iommu_alloc_resv_region(0, zdev->start_dma, 0,
						 IOMMU_RESV_RESERVED, GFP_KERNEL);
		if (!region)
			return;
		list_add_tail(&region->list, list);
	}

	if (zdev->end_dma < ZPCI_TABLE_SIZE_RT - 1) {
		region = iommu_alloc_resv_region(zdev->end_dma + 1,
						 ZPCI_TABLE_SIZE_RT - zdev->end_dma - 1,
						 0, IOMMU_RESV_RESERVED, GFP_KERNEL);
		if (!region)
			return;
		list_add_tail(&region->list, list);
	}
}

static struct iommu_device *s390_iommu_probe_device(struct device *dev)
{
	struct zpci_dev *zdev;

	if (!dev_is_pci(dev))
		return ERR_PTR(-ENODEV);

	zdev = to_zpci_dev(dev);

	if (zdev->start_dma > zdev->end_dma ||
	    zdev->start_dma > ZPCI_TABLE_SIZE_RT - 1)
		return ERR_PTR(-EINVAL);

	if (zdev->end_dma > ZPCI_TABLE_SIZE_RT - 1)
		zdev->end_dma = ZPCI_TABLE_SIZE_RT - 1;

	return &zdev->iommu_dev;
}

static void s390_iommu_release_device(struct device *dev)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);

	/*
	 * release_device is expected to detach any domain currently attached
	 * to the device, but keep it attached to other devices in the group.
	 */
	if (zdev)
		__s390_iommu_detach_device(zdev);
}

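/*
 * Flush the entire DMA aperture of every device attached to the domain.
 * The device list is traversed under RCU, so devices may be attached or
 * detached concurrently.
 */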
static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
				   zdev->end_dma - zdev->start_dma + 1);
	}
	rcu_read_unlock();
}

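/*
 * Flush only the range collected in the gather structure on all devices
 * attached to the domain.
 */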
static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = gather->end - gather->start + 1;
	struct zpci_dev *zdev;

	/* If nothing was ever added to the gather there is nothing to flush */
	if (!gather->end)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		zpci_refresh_trans((u64)zdev->fh << 32, gather->start,
				   size);
	}
	rcu_read_unlock();
}

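/*
 * After mapping, refresh the translations only on devices that require
 * an explicit TLB refresh for newly established (previously invalid)
 * translations, as indicated by zdev->tlb_refresh.
 */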
static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
				      unsigned long iova, size_t size)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		if (!zdev->tlb_refresh)
			continue;
		zpci_refresh_trans((u64)zdev->fh << 32,
				   iova, size);
	}
	rcu_read_unlock();
}

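/*
 * Walk the DMA table and set up translation entries covering nr_pages
 * starting at dma_addr. If table allocation fails partway through, all
 * entries written so far are invalidated again.
 */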
static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
				     phys_addr_t pa, dma_addr_t dma_addr,
				     unsigned long nr_pages, int flags)
{
	phys_addr_t page_addr = pa & PAGE_MASK;
	unsigned long *entry;
	unsigned long i;
	int rc;

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
		if (unlikely(!entry)) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	return 0;

undo_cpu_trans:
	while (i-- > 0) {
		dma_addr -= PAGE_SIZE;
		entry = dma_walk_cpu_trans(s390_domain->dma_table,
					   dma_addr);
		if (!entry)
			break;
		dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
	}

	return rc;
}

static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain,
				       dma_addr_t dma_addr, unsigned long nr_pages)
{
	unsigned long *entry;
	unsigned long i;
	int rc = 0;

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
		if (unlikely(!entry)) {
			rc = -EINVAL;
			break;
		}
		dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
		dma_addr += PAGE_SIZE;
	}

	return rc;
}

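/*
 * Map pgcount pages of size pgsize. Only the base 4K page size is
 * supported; write-only mappings are rejected and read-only mappings
 * use the protection bit in the translation entries.
 */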
static int s390_iommu_map_pages(struct iommu_domain *domain,
				unsigned long iova, phys_addr_t paddr,
				size_t pgsize, size_t pgcount,
				int prot, gfp_t gfp, size_t *mapped)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = pgcount << __ffs(pgsize);
	int flags = ZPCI_PTE_VALID, rc = 0;

	if (pgsize != SZ_4K)
		return -EINVAL;

	if (iova < s390_domain->domain.geometry.aperture_start ||
	    (iova + size - 1) > s390_domain->domain.geometry.aperture_end)
		return -EINVAL;

	if (!IS_ALIGNED(iova | paddr, pgsize))
		return -EINVAL;

	if (!(prot & IOMMU_READ))
		return -EINVAL;

	if (!(prot & IOMMU_WRITE))
		flags |= ZPCI_TABLE_PROTECTED;

	rc = s390_iommu_validate_trans(s390_domain, paddr, iova,
				       pgcount, flags);
	if (!rc)
		*mapped = size;

	return rc;
}

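/*
 * Resolve an IOVA to a physical address by walking the region, segment
 * and page tables. The walk is lockless; READ_ONCE() guards against
 * concurrent updates of the table entries.
 */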
static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	unsigned long *rto, *sto, *pto;
	unsigned long ste, pte, rte;
	unsigned int rtx, sx, px;
	phys_addr_t phys = 0;

	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;

	rtx = calc_rtx(iova);
	sx = calc_sx(iova);
	px = calc_px(iova);
	rto = s390_domain->dma_table;

	rte = READ_ONCE(rto[rtx]);
	if (reg_entry_isvalid(rte)) {
		sto = get_rt_sto(rte);
		ste = READ_ONCE(sto[sx]);
		if (reg_entry_isvalid(ste)) {
			pto = get_st_pto(ste);
			pte = READ_ONCE(pto[px]);
			if (pt_entry_isvalid(pte))
				phys = pte & ZPCI_PTE_ADDR_MASK;
		}
	}

	return phys;
}

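/*
 * Invalidate the translation entries for the given range and record the
 * range in the gather structure so the IOTLB flush can be deferred to
 * iotlb_sync.
 */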
static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
				     unsigned long iova,
				     size_t pgsize, size_t pgcount,
				     struct iommu_iotlb_gather *gather)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = pgcount << __ffs(pgsize);
	int rc;

	if (WARN_ON(iova < s390_domain->domain.geometry.aperture_start ||
		    (iova + size - 1) > s390_domain->domain.geometry.aperture_end))
		return 0;

	rc = s390_iommu_invalidate_trans(s390_domain, iova, pgcount);
	if (rc)
		return 0;

	iommu_iotlb_gather_add_range(gather, iova, size);

	return size;
}

int zpci_init_iommu(struct zpci_dev *zdev)
{
	int rc = 0;

	rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL,
				    "s390-iommu.%08x", zdev->fid);
	if (rc)
		goto out_err;

	rc = iommu_device_register(&zdev->iommu_dev, &s390_iommu_ops, NULL);
	if (rc)
		goto out_sysfs;

	return 0;

out_sysfs:
	iommu_device_sysfs_remove(&zdev->iommu_dev);

out_err:
	return rc;
}

void zpci_destroy_iommu(struct zpci_dev *zdev)
{
	iommu_device_unregister(&zdev->iommu_dev);
	iommu_device_sysfs_remove(&zdev->iommu_dev);
}

static const struct iommu_ops s390_iommu_ops = {
	.capable = s390_iommu_capable,
	.domain_alloc = s390_domain_alloc,
	.probe_device = s390_iommu_probe_device,
	.release_device = s390_iommu_release_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = SZ_4K,
	.get_resv_regions = s390_iommu_get_resv_regions,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = s390_iommu_attach_device,
		.detach_dev = s390_iommu_detach_device,
		.map_pages = s390_iommu_map_pages,
		.unmap_pages = s390_iommu_unmap_pages,
		.flush_iotlb_all = s390_iommu_flush_iotlb_all,
		.iotlb_sync = s390_iommu_iotlb_sync,
		.iotlb_sync_map = s390_iommu_iotlb_sync_map,
		.iova_to_phys = s390_iommu_iova_to_phys,
		.free = s390_domain_free,
	}
};