/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/prefetch.h>

/**
 * iommu_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
 * @ioc: The I/O Controller.
 * @startsg: The scatter/gather list of coalesced chunks.
 * @nents: The number of entries in the scatter/gather list.
 * @hint: The DMA Hint.
 * @iommu_io_pdir_entry: The IOMMU provider's callback that writes one
 *	I/O Pdir entry per IO page.
 *
 * This function inserts the coalesced scatter/gather list chunks into the
 * I/O Controller's I/O Pdir.
 */
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
		unsigned long hint,
		void (*iommu_io_pdir_entry)(__le64 *, space_t, unsigned long,
					    unsigned long))
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	unsigned int n_mappings = 0;
	unsigned long dma_offset = 0, dma_len = 0;
	__le64 *pdirp = NULL;

	/* Horrible hack. For efficiency's sake, dma_sg starts one
	 * entry below the true start (it is immediately incremented
	 * in the loop). */
	dma_sg--;

	while (nents-- > 0) {
		unsigned long vaddr;
		long size;

		DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents,
			   (unsigned long)sg_dma_address(startsg),
			   sg_virt(startsg), startsg->length);

		/*
		** Look for the start of a new DMA stream
		*/
		if (sg_dma_address(startsg) & PIDE_FLAG) {
			u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;
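			/* pide was packed by iommu_coalesce_chunks(): the
			 * pdir index lives in the upper bits (pide >>
			 * IOVP_SHIFT) and the byte offset into the first
			 * IO page in the low bits (pide & ~IOVP_MASK). */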

			BUG_ON(pdirp && (dma_len != sg_dma_len(dma_sg)));

			dma_sg++;

			dma_len = sg_dma_len(startsg);
			sg_dma_len(startsg) = 0;
			dma_offset = (unsigned long) pide & ~IOVP_MASK;
			n_mappings++;
#if defined(ZX1_SUPPORT)
			/* Pluto IOMMU IO Virt Address is not zero based */
			sg_dma_address(dma_sg) = pide | ioc->ibase;
#else
			/* SBA, ccio, and dino are zero based.
			 * Trying to save a few CPU cycles for most users.
			 */
			sg_dma_address(dma_sg) = pide;
#endif
			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
			prefetchw(pdirp);
		}

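		/* Every DMA stream opens with a PIDE_FLAG entry, so pdirp
		 * must have been set by the time we get here. */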
		BUG_ON(pdirp == NULL);

		vaddr = (unsigned long)sg_virt(startsg);
		sg_dma_len(dma_sg) += startsg->length;
		size = startsg->length + dma_offset;
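		/* Only the first IO page of a stream carries an offset;
		 * clear it for the entries that follow. */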
		dma_offset = 0;
#ifdef IOMMU_MAP_STATS
		ioc->msg_pages += startsg->length >> IOVP_SHIFT;
#endif
		do {
			iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
					    vaddr, hint);
			vaddr += IOVP_SIZE;
			size -= IOVP_SIZE;
			pdirp++;
		} while (unlikely(size > 0));
		startsg++;
	}
	return n_mappings;
}

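/*
 * Each provider passes its own iommu_io_pdir_entry() callback, which
 * turns one kernel virtual address into one little-endian I/O Pdir
 * entry per IO page. A minimal sketch with a hypothetical entry
 * encoding (real callbacks such as ccio_io_pdir_entry() in ccio-dma.c
 * also encode the space id and coherence hints, and flush the cache
 * line holding the entry):
 *
 *	static void example_io_pdir_entry(__le64 *pdir_ptr, space_t sid,
 *					  unsigned long vba,
 *					  unsigned long hints)
 *	{
 *		u64 pa = virt_to_phys((void *)vba);
 *
 *		*pdir_ptr = cpu_to_le64((pa & IOVP_MASK) | hints);
 *	}
 */
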
/*
** First pass is to walk the SG list and determine where the breaks are
** in the DMA stream. Allocates PDIR entries but does not fill them.
** Returns the number of DMA chunks.
**
** Doing the fill separate from the coalescing/allocation keeps the
** code simpler. Future enhancement could make one pass through
** the sglist do both. A sketch of how callers pair the two passes
** follows iommu_coalesce_chunks() below.
*/

static inline unsigned int
iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
		struct scatterlist *startsg, int nents,
		int (*iommu_alloc_range)(struct ioc *, struct device *, size_t))
{
	struct scatterlist *contig_sg;	/* contig chunk head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	unsigned int n_mappings = 0;
	unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
					(unsigned)DMA_CHUNK_SIZE);
	unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;

	if (max_seg_boundary)	/* check if the addition above didn't overflow */
		max_seg_size = min(max_seg_size, max_seg_boundary);

	while (nents > 0) {

		/*
		** Prepare for first/next DMA stream
		*/
		contig_sg = startsg;
		dma_len = startsg->length;
		dma_offset = startsg->offset;

		/* PARANOID: clear entries */
		sg_dma_address(startsg) = 0;
		sg_dma_len(startsg) = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while (--nents > 0) {
			unsigned long prev_end, sg_start;

			prev_end = (unsigned long)sg_virt(startsg) +
				startsg->length;

			startsg++;
			sg_start = (unsigned long)sg_virt(startsg);

			/* PARANOID: clear entries */
			sg_dma_address(startsg) = 0;
			sg_dma_len(startsg) = 0;

			/*
			** First make sure current dma stream won't
			** exceed max_seg_size if we coalesce the
			** next entry.
			*/
			if (unlikely(ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) >
				     max_seg_size))
				break;

			/*
			 * Next see if we can append the next chunk: it must
			 * start at the exact address where the previous entry
			 * ended, and that shared boundary must be page-aligned
			 * (the previous chunk ends on a page boundary and the
			 * next begins on one).
			 */
			if (unlikely((prev_end != sg_start) ||
				((prev_end | sg_start) & ~PAGE_MASK)))
				break;

			dma_len += startsg->length;
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		sg_dma_len(contig_sg) = dma_len;
		dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
		sg_dma_address(contig_sg) =
			PIDE_FLAG
			| (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT)
			| dma_offset;
		n_mappings++;
	}

	return n_mappings;
}

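/*
 * Usage sketch: a provider's map_sg() pairs the two passes, first
 * coalescing and allocating pdir space, then filling the entries.
 * Roughly, following the pattern in ccio-dma.c and sba_iommu.c
 * (locking and DMA hint setup omitted):
 *
 *	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents,
 *					  ccio_alloc_range);
 *	filled = iommu_fill_pdir(ioc, sglist, nents, hint,
 *				 ccio_io_pdir_entry);
 *	BUG_ON(coalesced != filled);
 *
 * Afterwards sg_dma_address()/sg_dma_len() are valid for the first
 * "filled" entries of the list.
 */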