// SPDX-License-Identifier: GPL-2.0
/*
 * nested.c - nested mode translation support
 *
 * Copyright (C) 2023 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 *         Jacob Pan <jacob.jun.pan@linux.intel.com>
 *         Yi Liu <yi.l.liu@intel.com>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>

#include "iommu.h"
#include "pasid.h"

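/*
 * Attach a nested (stage-1) domain to @dev for requests without a PASID.
 * The stage-2 parent must be compatible with @dev's IOMMU; the domain is
 * then bound to the IOMMU unit, a cache tag is assigned for IOMMU_NO_PASID,
 * and a nested PASID table entry is installed. Failures unwind in reverse
 * order.
 */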
static int intel_nested_attach_dev(struct iommu_domain *domain,
				   struct device *dev)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu = info->iommu;
	unsigned long flags;
	int ret = 0;

	if (info->domain)
		device_block_translation(dev);

	if (iommu->agaw < dmar_domain->s2_domain->agaw) {
		dev_err_ratelimited(dev, "Adjusted guest address width not compatible\n");
		return -ENODEV;
	}

	/*
	 * A stage-1 domain cannot work alone; it is nested on an s2_domain.
	 * The s2_domain will be used in nested translation, hence we need
	 * to ensure the s2_domain is compatible with this IOMMU.
	 */
	ret = paging_domain_compatible(&dmar_domain->s2_domain->domain, dev);
	if (ret) {
		dev_err_ratelimited(dev, "s2 domain is not compatible\n");
		return ret;
	}

	ret = domain_attach_iommu(dmar_domain, iommu);
	if (ret) {
		dev_err_ratelimited(dev, "Failed to attach domain to iommu\n");
		return ret;
	}

	ret = cache_tag_assign_domain(dmar_domain, dev, IOMMU_NO_PASID);
	if (ret)
		goto detach_iommu;

	ret = intel_pasid_setup_nested(iommu, dev,
				       IOMMU_NO_PASID, dmar_domain);
	if (ret)
		goto unassign_tag;

	info->domain = dmar_domain;
	spin_lock_irqsave(&dmar_domain->lock, flags);
	list_add(&info->link, &dmar_domain->devices);
	spin_unlock_irqrestore(&dmar_domain->lock, flags);

	return 0;
unassign_tag:
	cache_tag_unassign_domain(dmar_domain, dev, IOMMU_NO_PASID);
detach_iommu:
	domain_detach_iommu(dmar_domain, iommu);

	return ret;
}

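/*
 * Free a nested domain: unlink it from its stage-2 parent's s1_domains
 * list under s1_lock, then release the QI batch buffer and the domain
 * itself.
 */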
static void intel_nested_domain_free(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct dmar_domain *s2_domain = dmar_domain->s2_domain;

	spin_lock(&s2_domain->s1_lock);
	list_del(&dmar_domain->s2_link);
	spin_unlock(&s2_domain->s1_lock);
	kfree(dmar_domain->qi_batch);
	kfree(dmar_domain);
}

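/*
 * Handle IOMMU_HWPT_INVALIDATE requests from user space for a nested
 * domain. Each entry describes a stage-1 range to flush; npages == U64_MAX
 * (with addr == 0) requests a flush of the whole address space.
 * array->entry_num is updated to the number of entries actually handled so
 * user space can tell where a partial failure occurred.
 */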
static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
					      struct iommu_user_data_array *array)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct iommu_hwpt_vtd_s1_invalidate inv_entry;
	u32 index, processed = 0;
	int ret = 0;

	if (array->type != IOMMU_HWPT_INVALIDATE_DATA_VTD_S1) {
		ret = -EINVAL;
		goto out;
	}

	for (index = 0; index < array->entry_num; index++) {
		ret = iommu_copy_struct_from_user_array(&inv_entry, array,
							IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
							index, __reserved);
		if (ret)
			break;

		if ((inv_entry.flags & ~IOMMU_VTD_INV_FLAGS_LEAF) ||
		    inv_entry.__reserved) {
			ret = -EOPNOTSUPP;
			break;
		}

		if (!IS_ALIGNED(inv_entry.addr, VTD_PAGE_SIZE) ||
		    ((inv_entry.npages == U64_MAX) && inv_entry.addr)) {
			ret = -EINVAL;
			break;
		}

		cache_tag_flush_range(dmar_domain, inv_entry.addr,
				      inv_entry.addr + nrpages_to_size(inv_entry.npages) - 1,
				      inv_entry.flags & IOMMU_VTD_INV_FLAGS_LEAF);
		processed++;
	}

out:
	array->entry_num = processed;
	return ret;
}

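/*
 * Install or replace the nested PASID table entry for @pasid on @dev:
 * a fresh setup when there is no @old domain, otherwise a replacement
 * that passes the old domain ID so the previous entry can be torn down
 * correctly.
 */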
static int domain_setup_nested(struct intel_iommu *iommu,
			       struct dmar_domain *domain,
			       struct device *dev, ioasid_t pasid,
			       struct iommu_domain *old)
{
	if (!old)
		return intel_pasid_setup_nested(iommu, dev, pasid, domain);
	return intel_pasid_replace_nested(iommu, dev, pasid,
					  iommu_domain_did(old, iommu),
					  domain);
}

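/*
 * Attach the nested domain to @dev for a specific @pasid, optionally
 * replacing @old. Rejects devices without PASID support and devices whose
 * context entries were copied from a previous kernel, verifies the stage-2
 * parent against this IOMMU, then tracks the PASID and programs the nested
 * PASID table entry.
 */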
static int intel_nested_set_dev_pasid(struct iommu_domain *domain,
				      struct device *dev, ioasid_t pasid,
				      struct iommu_domain *old)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu = info->iommu;
	struct dev_pasid_info *dev_pasid;
	int ret;

	if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev))
		return -EOPNOTSUPP;

	if (context_copied(iommu, info->bus, info->devfn))
		return -EBUSY;

	ret = paging_domain_compatible(&dmar_domain->s2_domain->domain, dev);
	if (ret)
		return ret;

	dev_pasid = domain_add_dev_pasid(domain, dev, pasid);
	if (IS_ERR(dev_pasid))
		return PTR_ERR(dev_pasid);

	ret = domain_setup_nested(iommu, dmar_domain, dev, pasid, old);
	if (ret)
		goto out_remove_dev_pasid;

	domain_remove_dev_pasid(old, dev, pasid);

	return 0;

out_remove_dev_pasid:
	domain_remove_dev_pasid(domain, dev, pasid);
	return ret;
}

static const struct iommu_domain_ops intel_nested_domain_ops = {
	.attach_dev = intel_nested_attach_dev,
	.set_dev_pasid = intel_nested_set_dev_pasid,
	.free = intel_nested_domain_free,
	.cache_invalidate_user = intel_nested_cache_invalidate_user,
};

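/*
 * Allocate a nested (stage-1) domain on top of @parent. @parent must be a
 * paging domain created as a nested parent, and @user_data carries the
 * guest's stage-1 configuration (struct iommu_hwpt_vtd_s1). The new domain
 * is linked into the parent's s1_domains list so stage-2 changes can be
 * propagated to it.
 */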
struct iommu_domain *
intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
				u32 flags,
				const struct iommu_user_data *user_data)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct dmar_domain *s2_domain = to_dmar_domain(parent);
	struct intel_iommu *iommu = info->iommu;
	struct iommu_hwpt_vtd_s1 vtd;
	struct dmar_domain *domain;
	int ret;

	if (!nested_supported(iommu) || flags)
		return ERR_PTR(-EOPNOTSUPP);

	/* Must be nested domain */
	if (user_data->type != IOMMU_HWPT_DATA_VTD_S1)
		return ERR_PTR(-EOPNOTSUPP);
	if (parent->ops != intel_iommu_ops.default_domain_ops ||
	    !s2_domain->nested_parent)
		return ERR_PTR(-EINVAL);

	ret = iommu_copy_struct_from_user(&vtd, user_data,
					  IOMMU_HWPT_DATA_VTD_S1, __reserved);
	if (ret)
		return ERR_PTR(ret);

	domain = kzalloc(sizeof(*domain), GFP_KERNEL_ACCOUNT);
	if (!domain)
		return ERR_PTR(-ENOMEM);

	domain->use_first_level = true;
	domain->s2_domain = s2_domain;
	domain->s1_cfg = vtd;
	domain->domain.ops = &intel_nested_domain_ops;
	domain->domain.type = IOMMU_DOMAIN_NESTED;
	INIT_LIST_HEAD(&domain->devices);
	INIT_LIST_HEAD(&domain->dev_pasids);
	INIT_LIST_HEAD(&domain->cache_tags);
	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->cache_lock);
	xa_init(&domain->iommu_array);

	spin_lock(&s2_domain->s1_lock);
	list_add(&domain->s2_link, &s2_domain->s1_domains);
	spin_unlock(&s2_domain->s1_lock);

	return &domain->domain;
}