/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/prefetch.h>

/**
 * iommu_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
 * @ioc: The I/O Controller.
 * @startsg: The scatter/gather list of coalesced chunks.
 * @nents: The number of entries in the scatter/gather list.
 * @hint: The DMA Hint.
 *
 * This function inserts the coalesced scatter/gather list chunks into the
 * I/O Controller's I/O Pdir.
 */
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
		unsigned long hint,
		void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long,
					    unsigned long))
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	unsigned int n_mappings = 0;
	unsigned long dma_offset = 0, dma_len = 0;
	u64 *pdirp = NULL;

	/* Horrible hack. For efficiency's sake, dma_sg starts one
	 * entry below the true start (it is immediately incremented
	 * in the loop). */
	dma_sg--;
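	/* This relies on every list handed to this routine beginning with a
	 * PIDE_FLAG entry (iommu_coalesce_chunks() marks each chunk head),
	 * so the first loop iteration increments dma_sg back to the true
	 * start before it is ever dereferenced. */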

	while (nents-- > 0) {
		unsigned long vaddr;
		long size;

		DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n", nents,
			   (unsigned long)sg_dma_address(startsg),
			   sg_dma_len(startsg),
			   sg_virt(startsg), startsg->length);

		/*
		** Look for the start of a new DMA stream
		*/

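		/* PIDE_FLAG marks an entry that iommu_coalesce_chunks()
		 * chose as the head of a coalesced chunk; the remaining bits
		 * hold the allocated Pdir index << IOVP_SHIFT plus the byte
		 * offset into the first IO page. */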
		if (sg_dma_address(startsg) & PIDE_FLAG) {
			u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;

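			/* When a new stream starts, the previous stream (if
			 * any) must have been filled to exactly the length
			 * the coalescing pass recorded for it. */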
			BUG_ON(pdirp && (dma_len != sg_dma_len(dma_sg)));

			dma_sg++;

			dma_len = sg_dma_len(startsg);
			sg_dma_len(startsg) = 0;
			dma_offset = (unsigned long) pide & ~IOVP_MASK;
			n_mappings++;
#if defined(ZX1_SUPPORT)
			/* Pluto IOMMU IO Virt Address is not zero based */
			sg_dma_address(dma_sg) = pide | ioc->ibase;
#else
			/* SBA, ccio, and dino are zero based.
			 * Trying to save a few CPU cycles for most users.
			 */
			sg_dma_address(dma_sg) = pide;
#endif
			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
			prefetchw(pdirp);
		}

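		/* pdirp was set when this stream's PIDE_FLAG head entry was
		 * seen; the BUG_ON below guards that assumption. */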
		BUG_ON(pdirp == NULL);

		vaddr = (unsigned long)sg_virt(startsg);
		sg_dma_len(dma_sg) += startsg->length;
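		/* Fold the intra-page offset into the first entry's size so
		 * the fill loop below also covers the partial leading IO
		 * page; the offset applies only once per stream. */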
		size = startsg->length + dma_offset;
		dma_offset = 0;
#ifdef IOMMU_MAP_STATS
		ioc->msg_pages += startsg->length >> IOVP_SHIFT;
#endif
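		/* Write one Pdir entry for each IO page spanned by this
		 * scatterlist entry. */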
		do {
			iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
					    vaddr, hint);
			vaddr += IOVP_SIZE;
			size -= IOVP_SIZE;
			pdirp++;
		} while (unlikely(size > 0));
		startsg++;
	}
	return n_mappings;
}


/*
** First pass is to walk the SG list and determine where the breaks are
** in the DMA stream. Allocates PDIR entries but does not fill them.
** Returns the number of DMA chunks.
**
** Doing the fill separate from the coalescing/allocation keeps the
** code simpler. Future enhancement could make one pass through
** the sglist do both.
*/

static inline unsigned int
iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
		      struct scatterlist *startsg, int nents,
		      int (*iommu_alloc_range)(struct ioc *, struct device *, size_t))
{
	struct scatterlist *contig_sg;	   /* contig chunk head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	unsigned int n_mappings = 0;
	unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
					(unsigned)DMA_CHUNK_SIZE);
	unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;
	if (max_seg_boundary)	/* check if the addition above didn't overflow */
		max_seg_size = min(max_seg_size, max_seg_boundary);
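	/* Respect the device's segment-boundary constraint as well as its
	 * maximum segment size: no coalesced stream is allowed to grow
	 * beyond dma_get_seg_boundary(dev) + 1 bytes. */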

	while (nents > 0) {

		/*
		** Prepare for first/next DMA stream
		*/
		contig_sg = startsg;
		dma_len = startsg->length;
		dma_offset = startsg->offset;

		/* PARANOID: clear entries */
		sg_dma_address(startsg) = 0;
		sg_dma_len(startsg) = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while (--nents > 0) {
			unsigned long prev_end, sg_start;

			prev_end = (unsigned long)sg_virt(startsg) +
				   startsg->length;

			startsg++;
			sg_start = (unsigned long)sg_virt(startsg);

			/* PARANOID: clear entries */
			sg_dma_address(startsg) = 0;
			sg_dma_len(startsg) = 0;

			/*
			** First make sure the current dma stream won't
			** exceed max_seg_size if we coalesce the
			** next entry.
			*/
			if (unlikely(ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) >
				     max_seg_size))
				break;

			/*
			 * Next see if we can append the next chunk: it must
			 * start at the address where the previous entry
			 * ended, and that junction must fall on a page
			 * boundary (the previous entry ends on one page and
			 * the next begins on another).
			 */
			if (unlikely((prev_end != sg_start) ||
				     ((prev_end | sg_start) & ~PAGE_MASK)))
				break;

			dma_len += startsg->length;
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		sg_dma_len(contig_sg) = dma_len;
		dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
		sg_dma_address(contig_sg) =
			PIDE_FLAG
			| (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT)
			| dma_offset;
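		/* The head entry now encodes everything the fill pass needs:
		 * PIDE_FLAG, the Pdir index of the allocated range, and the
		 * byte offset into its first IO page. iommu_fill_pdir()
		 * decodes exactly this layout. */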
		n_mappings++;
	}

	return n_mappings;
}