1/*
2 * This file manages the translation entries for the IBM Calgary IOMMU.
3 *
4 * Derived from arch/powerpc/platforms/pseries/iommu.c
5 *
6 * Copyright (C) IBM Corporation, 2006
7 *
8 * Author: Jon Mason <jdmason@us.ibm.com>
9 * Author: Muli Ben-Yehuda <muli@il.ibm.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 */
25
26#include <linux/types.h>
27#include <linux/slab.h>
28#include <linux/mm.h>
29#include <linux/spinlock.h>
30#include <linux/string.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/bootmem.h>
34#include <asm/tce.h>
35#include <asm/calgary.h>
36#include <asm/proto.h>
37#include <asm/cacheflush.h>
38
39/* flush a tce at 'tceaddr' to main memory */
40static inline void flush_tce(void* tceaddr)
41{
42 /* a single tce can't cross a cache line */
43 if (cpu_has_clflush)
44 clflush(tceaddr);
45 else
46 wbinvd();
47}
48
49void tce_build(struct iommu_table *tbl, unsigned long index,
50 unsigned int npages, unsigned long uaddr, int direction)
51{
52 u64* tp;
53 u64 t;
54 u64 rpn;
55
56 t = (1 << TCE_READ_SHIFT);
57 if (direction != DMA_TO_DEVICE)
58 t |= (1 << TCE_WRITE_SHIFT);
59
60 tp = ((u64*)tbl->it_base) + index;
61
62 while (npages--) {
63 rpn = (virt_to_bus((void*)uaddr)) >> PAGE_SHIFT;
64 t &= ~TCE_RPN_MASK;
65 t |= (rpn << TCE_RPN_SHIFT);
66
67 *tp = cpu_to_be64(t);
68 flush_tce(tp);
69
70 uaddr += PAGE_SIZE;
71 tp++;
72 }
73}
74
75void tce_free(struct iommu_table *tbl, long index, unsigned int npages)
76{
77 u64* tp;
78
79 tp = ((u64*)tbl->it_base) + index;
80
81 while (npages--) {
82 *tp = cpu_to_be64(0);
83 flush_tce(tp);
84 tp++;
85 }
86}
87
/*
 * table_size_to_number_of_entries - convert a table order to an
 * entry count.
 *
 * 'size' is the order of the table (0-7); the smallest table holds
 * 8K entries (1 << 13) and each higher order doubles the count.
 */
static inline unsigned int table_size_to_number_of_entries(unsigned char size)
{
	return 1U << (size + 13);
}
97
98static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
99{
100 unsigned int bitmapsz;
101 unsigned long bmppages;
102 int ret;
103
104 tbl->it_busno = dev->bus->number;
105
106 /* set the tce table size - measured in entries */
107 tbl->it_size = table_size_to_number_of_entries(specified_table_size);
108
109 /*
110 * number of bytes needed for the bitmap size in number of
111 * entries; we need one bit per entry
112 */
113 bitmapsz = tbl->it_size / BITS_PER_BYTE;
114 bmppages = __get_free_pages(GFP_KERNEL, get_order(bitmapsz));
115 if (!bmppages) {
116 printk(KERN_ERR "Calgary: cannot allocate bitmap\n");
117 ret = -ENOMEM;
118 goto done;
119 }
120
121 tbl->it_map = (unsigned long*)bmppages;
122
123 memset(tbl->it_map, 0, bitmapsz);
124
125 tbl->it_hint = 0;
126
127 spin_lock_init(&tbl->it_lock);
128
129 return 0;
130
131done:
132 return ret;
133}
134
/*
 * build_tce_table - allocate and attach an iommu_table to dev's bus
 *
 * Allocates the per-bus iommu_table, initializes its software state via
 * tce_table_setparms(), records the bridge BAR base 'bbar' and publishes
 * the table through set_pci_iommu().  Returns 0 on success or a negative
 * errno on failure.
 */
int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar)
{
	struct iommu_table *tbl;
	int ret;

	/* a bus must not already have an IOMMU table attached */
	if (pci_iommu(dev->bus)) {
		printk(KERN_ERR "Calgary: dev %p has sysdata->iommu %p\n",
		       dev, pci_iommu(dev->bus));
		BUG();
	}

	tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
	if (!tbl) {
		printk(KERN_ERR "Calgary: error allocating iommu_table\n");
		ret = -ENOMEM;
		goto done;
	}

	ret = tce_table_setparms(dev, tbl);
	if (ret)
		goto free_tbl;

	tbl->bbar = bbar;

	set_pci_iommu(dev->bus, tbl);

	return 0;

free_tbl:
	kfree(tbl);
done:
	return ret;
}
168
/*
 * alloc_tce_table - early boot allocation of one TCE table
 *
 * The byte size is derived from the global 'specified_table_size'.
 * The table is allocated from low memory, aligned to its own size
 * (second argument to __alloc_bootmem_low), so it occupies one
 * naturally aligned region.  Returns the table's virtual address.
 */
void * __init alloc_tce_table(void)
{
	unsigned int size;

	size = table_size_to_number_of_entries(specified_table_size);
	size *= TCE_ENTRY_SIZE;

	return __alloc_bootmem_low(size, size, 0);
}
178
/*
 * free_tce_table - return a boot-time TCE table to the bootmem allocator
 *
 * Recomputes the table size the same way alloc_tce_table() did and
 * frees the region.  A NULL 'tbl' is silently ignored.
 */
void __init free_tce_table(void *tbl)
{
	unsigned int size;

	if (!tbl)
		return;

	size = table_size_to_number_of_entries(specified_table_size);
	size *= TCE_ENTRY_SIZE;

	/* free_bootmem takes the physical address, hence __pa() */
	free_bootmem(__pa(tbl), size);
}
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * This file manages the translation entries for the IBM Calgary IOMMU.
4 *
5 * Derived from arch/powerpc/platforms/pseries/iommu.c
6 *
7 * Copyright (C) IBM Corporation, 2006
8 *
9 * Author: Jon Mason <jdmason@us.ibm.com>
10 * Author: Muli Ben-Yehuda <muli@il.ibm.com>
11 */
12
13#include <linux/types.h>
14#include <linux/slab.h>
15#include <linux/mm.h>
16#include <linux/spinlock.h>
17#include <linux/string.h>
18#include <linux/pci.h>
19#include <linux/dma-mapping.h>
20#include <linux/memblock.h>
21#include <asm/tce.h>
22#include <asm/calgary.h>
23#include <asm/proto.h>
24#include <asm/cacheflush.h>
25
/*
 * flush a tce at 'tceaddr' to main memory, so the IOMMU (which reads
 * translations from RAM) observes the updated entry
 */
static inline void flush_tce(void* tceaddr)
{
	/* a single tce can't cross a cache line */
	if (boot_cpu_has(X86_FEATURE_CLFLUSH))
		clflush(tceaddr);
	else
		wbinvd();	/* no clflush: write back the whole cache */
}
35
/*
 * tce_build - install 'npages' translation entries starting at 'index'
 *
 * Each entry maps one page of the kernel virtual range starting at
 * 'uaddr'.  Entries are device-readable; they are also device-writable
 * unless 'direction' is DMA_TO_DEVICE.
 */
void tce_build(struct iommu_table *tbl, unsigned long index,
	unsigned int npages, unsigned long uaddr, int direction)
{
	u64* tp;
	u64 t;
	u64 rpn;

	/* build the permission bits once; the RPN varies per page */
	t = (1 << TCE_READ_SHIFT);
	if (direction != DMA_TO_DEVICE)
		t |= (1 << TCE_WRITE_SHIFT);

	tp = ((u64*)tbl->it_base) + index;

	while (npages--) {
		/* real page number of the next page backing the mapping */
		rpn = (virt_to_bus((void*)uaddr)) >> PAGE_SHIFT;
		t &= ~TCE_RPN_MASK;
		t |= (rpn << TCE_RPN_SHIFT);

		/* TCEs are big-endian; write, then flush to memory */
		*tp = cpu_to_be64(t);
		flush_tce(tp);

		uaddr += PAGE_SIZE;
		tp++;
	}
}
61
/*
 * tce_free - invalidate 'npages' translation entries starting at
 * 'index' by zeroing each one.
 */
void tce_free(struct iommu_table *tbl, long index, unsigned int npages)
{
	u64* tp;

	tp = ((u64*)tbl->it_base) + index;

	while (npages--) {
		/* zero the entry, then push it out so the IOMMU sees it */
		*tp = cpu_to_be64(0);
		flush_tce(tp);
		tp++;
	}
}
74
/*
 * table_size_to_number_of_entries - convert a table order to an
 * entry count.
 *
 * 'size' is the table order (0-7).  Order 0 means 8K entries
 * (1 << 13); each higher order doubles the count.
 */
static inline unsigned int table_size_to_number_of_entries(unsigned char size)
{
	return 1U << (13 + size);
}
84
85static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
86{
87 unsigned int bitmapsz;
88 unsigned long bmppages;
89 int ret;
90
91 tbl->it_busno = dev->bus->number;
92
93 /* set the tce table size - measured in entries */
94 tbl->it_size = table_size_to_number_of_entries(specified_table_size);
95
96 /*
97 * number of bytes needed for the bitmap size in number of
98 * entries; we need one bit per entry
99 */
100 bitmapsz = tbl->it_size / BITS_PER_BYTE;
101 bmppages = __get_free_pages(GFP_KERNEL, get_order(bitmapsz));
102 if (!bmppages) {
103 printk(KERN_ERR "Calgary: cannot allocate bitmap\n");
104 ret = -ENOMEM;
105 goto done;
106 }
107
108 tbl->it_map = (unsigned long*)bmppages;
109
110 memset(tbl->it_map, 0, bitmapsz);
111
112 tbl->it_hint = 0;
113
114 spin_lock_init(&tbl->it_lock);
115
116 return 0;
117
118done:
119 return ret;
120}
121
122int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar)
123{
124 struct iommu_table *tbl;
125 int ret;
126
127 if (pci_iommu(dev->bus)) {
128 printk(KERN_ERR "Calgary: dev %p has sysdata->iommu %p\n",
129 dev, pci_iommu(dev->bus));
130 BUG();
131 }
132
133 tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
134 if (!tbl) {
135 printk(KERN_ERR "Calgary: error allocating iommu_table\n");
136 ret = -ENOMEM;
137 goto done;
138 }
139
140 ret = tce_table_setparms(dev, tbl);
141 if (ret)
142 goto free_tbl;
143
144 tbl->bbar = bbar;
145
146 set_pci_iommu(dev->bus, tbl);
147
148 return 0;
149
150free_tbl:
151 kfree(tbl);
152done:
153 return ret;
154}
155
/*
 * alloc_tce_table - early boot allocation of one TCE table
 *
 * The byte size is derived from the global 'specified_table_size'.
 * memblock_alloc_low(size, size) allocates from low memory with the
 * table aligned to its own size, i.e. one naturally aligned region.
 * Returns the table's virtual address.
 */
void * __init alloc_tce_table(void)
{
	unsigned int size;

	size = table_size_to_number_of_entries(specified_table_size);
	size *= TCE_ENTRY_SIZE;

	return memblock_alloc_low(size, size);
}
165
/*
 * free_tce_table - give a boot-time TCE table back to memblock
 *
 * Recomputes the table size the same way alloc_tce_table() did and
 * frees the region.  A NULL 'tbl' is silently ignored.
 */
void __init free_tce_table(void *tbl)
{
	unsigned int size;

	if (!tbl)
		return;

	size = table_size_to_number_of_entries(specified_table_size);
	size *= TCE_ENTRY_SIZE;

	/*
	 * NOTE(review): passes the physical address via __pa(), matching
	 * the memblock_free(phys, size) signature used before v5.17 —
	 * verify against the target kernel's memblock API.
	 */
	memblock_free(__pa(tbl), size);
}