1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * OMAP Crypto driver common support routines.
4 *
5 * Copyright (c) 2017 Texas Instruments Incorporated
6 * Tero Kristo <t-kristo@ti.com>
7 */
8
9#include <linux/module.h>
10#include <linux/kernel.h>
11#include <linux/scatterlist.h>
12#include <crypto/scatterwalk.h>
13
14#include "omap-crypto.h"
15
16static int omap_crypto_copy_sg_lists(int total, int bs,
17 struct scatterlist **sg,
18 struct scatterlist *new_sg, u16 flags)
19{
20 int n = sg_nents(*sg);
21 struct scatterlist *tmp;
22
23 if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY)) {
24 new_sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
25 if (!new_sg)
26 return -ENOMEM;
27
28 sg_init_table(new_sg, n);
29 }
30
31 tmp = new_sg;
32
33 while (*sg && total) {
34 int len = (*sg)->length;
35
36 if (total < len)
37 len = total;
38
39 if (len > 0) {
40 total -= len;
41 sg_set_page(tmp, sg_page(*sg), len, (*sg)->offset);
42 if (total <= 0)
43 sg_mark_end(tmp);
44 tmp = sg_next(tmp);
45 }
46
47 *sg = sg_next(*sg);
48 }
49
50 *sg = new_sg;
51
52 return 0;
53}
54
55static int omap_crypto_copy_sgs(int total, int bs, struct scatterlist **sg,
56 struct scatterlist *new_sg, u16 flags)
57{
58 void *buf;
59 int pages;
60 int new_len;
61
62 new_len = ALIGN(total, bs);
63 pages = get_order(new_len);
64
65 buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
66 if (!buf) {
67 pr_err("%s: Couldn't allocate pages for unaligned cases.\n",
68 __func__);
69 return -ENOMEM;
70 }
71
72 if (flags & OMAP_CRYPTO_COPY_DATA) {
73 scatterwalk_map_and_copy(buf, *sg, 0, total, 0);
74 if (flags & OMAP_CRYPTO_ZERO_BUF)
75 memset(buf + total, 0, new_len - total);
76 }
77
78 if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY))
79 sg_init_table(new_sg, 1);
80
81 sg_set_buf(new_sg, buf, new_len);
82
83 *sg = new_sg;
84
85 return 0;
86}
87
88static int omap_crypto_check_sg(struct scatterlist *sg, int total, int bs,
89 u16 flags)
90{
91 int len = 0;
92 int num_sg = 0;
93
94 if (!IS_ALIGNED(total, bs))
95 return OMAP_CRYPTO_NOT_ALIGNED;
96
97 while (sg) {
98 num_sg++;
99
100 if (!IS_ALIGNED(sg->offset, 4))
101 return OMAP_CRYPTO_NOT_ALIGNED;
102 if (!IS_ALIGNED(sg->length, bs))
103 return OMAP_CRYPTO_NOT_ALIGNED;
104#ifdef CONFIG_ZONE_DMA
105 if (page_zonenum(sg_page(sg)) != ZONE_DMA)
106 return OMAP_CRYPTO_NOT_ALIGNED;
107#endif
108
109 len += sg->length;
110 sg = sg_next(sg);
111
112 if (len >= total)
113 break;
114 }
115
116 if ((flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY) && num_sg > 1)
117 return OMAP_CRYPTO_NOT_ALIGNED;
118
119 if (len != total)
120 return OMAP_CRYPTO_BAD_DATA_LENGTH;
121
122 return 0;
123}
124
125int omap_crypto_align_sg(struct scatterlist **sg, int total, int bs,
126 struct scatterlist *new_sg, u16 flags,
127 u8 flags_shift, unsigned long *dd_flags)
128{
129 int ret;
130
131 *dd_flags &= ~(OMAP_CRYPTO_COPY_MASK << flags_shift);
132
133 if (flags & OMAP_CRYPTO_FORCE_COPY)
134 ret = OMAP_CRYPTO_NOT_ALIGNED;
135 else
136 ret = omap_crypto_check_sg(*sg, total, bs, flags);
137
138 if (ret == OMAP_CRYPTO_NOT_ALIGNED) {
139 ret = omap_crypto_copy_sgs(total, bs, sg, new_sg, flags);
140 if (ret)
141 return ret;
142 *dd_flags |= OMAP_CRYPTO_DATA_COPIED << flags_shift;
143 } else if (ret == OMAP_CRYPTO_BAD_DATA_LENGTH) {
144 ret = omap_crypto_copy_sg_lists(total, bs, sg, new_sg, flags);
145 if (ret)
146 return ret;
147 if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY))
148 *dd_flags |= OMAP_CRYPTO_SG_COPIED << flags_shift;
149 } else if (flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY) {
150 sg_set_buf(new_sg, sg_virt(*sg), (*sg)->length);
151 }
152
153 return 0;
154}
155EXPORT_SYMBOL_GPL(omap_crypto_align_sg);
156
157static void omap_crypto_copy_data(struct scatterlist *src,
158 struct scatterlist *dst,
159 int offset, int len)
160{
161 int amt;
162 void *srcb, *dstb;
163 int srco = 0, dsto = offset;
164
165 while (src && dst && len) {
166 if (srco >= src->length) {
167 srco -= src->length;
168 src = sg_next(src);
169 continue;
170 }
171
172 if (dsto >= dst->length) {
173 dsto -= dst->length;
174 dst = sg_next(dst);
175 continue;
176 }
177
178 amt = min(src->length - srco, dst->length - dsto);
179 amt = min(len, amt);
180
181 srcb = kmap_atomic(sg_page(src)) + srco + src->offset;
182 dstb = kmap_atomic(sg_page(dst)) + dsto + dst->offset;
183
184 memcpy(dstb, srcb, amt);
185
186 flush_dcache_page(sg_page(dst));
187
188 kunmap_atomic(srcb);
189 kunmap_atomic(dstb);
190
191 srco += amt;
192 dsto += amt;
193 len -= amt;
194 }
195}
196
197void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
198 int offset, int len, u8 flags_shift,
199 unsigned long flags)
200{
201 void *buf;
202 int pages;
203
204 flags >>= flags_shift;
205 flags &= OMAP_CRYPTO_COPY_MASK;
206
207 if (!flags)
208 return;
209
210 buf = sg_virt(sg);
211 pages = get_order(len);
212
213 if (orig && (flags & OMAP_CRYPTO_DATA_COPIED))
214 omap_crypto_copy_data(sg, orig, offset, len);
215
216 if (flags & OMAP_CRYPTO_DATA_COPIED)
217 free_pages((unsigned long)buf, pages);
218 else if (flags & OMAP_CRYPTO_SG_COPIED)
219 kfree(sg);
220}
221EXPORT_SYMBOL_GPL(omap_crypto_cleanup);
222
223MODULE_DESCRIPTION("OMAP crypto support library.");
224MODULE_LICENSE("GPL v2");
225MODULE_AUTHOR("Tero Kristo <t-kristo@ti.com>");
1/*
2 * OMAP Crypto driver common support routines.
3 *
4 * Copyright (c) 2017 Texas Instruments Incorporated
5 * Tero Kristo <t-kristo@ti.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as published
9 * by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/scatterlist.h>
15#include <crypto/scatterwalk.h>
16
17#include "omap-crypto.h"
18
19static int omap_crypto_copy_sg_lists(int total, int bs,
20 struct scatterlist **sg,
21 struct scatterlist *new_sg, u16 flags)
22{
23 int n = sg_nents(*sg);
24 struct scatterlist *tmp;
25
26 if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY)) {
27 new_sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
28 if (!new_sg)
29 return -ENOMEM;
30
31 sg_init_table(new_sg, n);
32 }
33
34 tmp = new_sg;
35
36 while (*sg && total) {
37 int len = (*sg)->length;
38
39 if (total < len)
40 len = total;
41
42 if (len > 0) {
43 total -= len;
44 sg_set_page(tmp, sg_page(*sg), len, (*sg)->offset);
45 if (total <= 0)
46 sg_mark_end(tmp);
47 tmp = sg_next(tmp);
48 }
49
50 *sg = sg_next(*sg);
51 }
52
53 *sg = new_sg;
54
55 return 0;
56}
57
58static int omap_crypto_copy_sgs(int total, int bs, struct scatterlist **sg,
59 struct scatterlist *new_sg, u16 flags)
60{
61 void *buf;
62 int pages;
63 int new_len;
64
65 new_len = ALIGN(total, bs);
66 pages = get_order(new_len);
67
68 buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
69 if (!buf) {
70 pr_err("%s: Couldn't allocate pages for unaligned cases.\n",
71 __func__);
72 return -ENOMEM;
73 }
74
75 if (flags & OMAP_CRYPTO_COPY_DATA) {
76 scatterwalk_map_and_copy(buf, *sg, 0, total, 0);
77 if (flags & OMAP_CRYPTO_ZERO_BUF)
78 memset(buf + total, 0, new_len - total);
79 }
80
81 if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY))
82 sg_init_table(new_sg, 1);
83
84 sg_set_buf(new_sg, buf, new_len);
85
86 *sg = new_sg;
87
88 return 0;
89}
90
91static int omap_crypto_check_sg(struct scatterlist *sg, int total, int bs,
92 u16 flags)
93{
94 int len = 0;
95 int num_sg = 0;
96
97 if (!IS_ALIGNED(total, bs))
98 return OMAP_CRYPTO_NOT_ALIGNED;
99
100 while (sg) {
101 num_sg++;
102
103 if (!IS_ALIGNED(sg->offset, 4))
104 return OMAP_CRYPTO_NOT_ALIGNED;
105 if (!IS_ALIGNED(sg->length, bs))
106 return OMAP_CRYPTO_NOT_ALIGNED;
107#ifdef CONFIG_ZONE_DMA
108 if (page_zonenum(sg_page(sg)) != ZONE_DMA)
109 return OMAP_CRYPTO_NOT_ALIGNED;
110#endif
111
112 len += sg->length;
113 sg = sg_next(sg);
114
115 if (len >= total)
116 break;
117 }
118
119 if ((flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY) && num_sg > 1)
120 return OMAP_CRYPTO_NOT_ALIGNED;
121
122 if (len != total)
123 return OMAP_CRYPTO_BAD_DATA_LENGTH;
124
125 return 0;
126}
127
128int omap_crypto_align_sg(struct scatterlist **sg, int total, int bs,
129 struct scatterlist *new_sg, u16 flags,
130 u8 flags_shift, unsigned long *dd_flags)
131{
132 int ret;
133
134 *dd_flags &= ~(OMAP_CRYPTO_COPY_MASK << flags_shift);
135
136 if (flags & OMAP_CRYPTO_FORCE_COPY)
137 ret = OMAP_CRYPTO_NOT_ALIGNED;
138 else
139 ret = omap_crypto_check_sg(*sg, total, bs, flags);
140
141 if (ret == OMAP_CRYPTO_NOT_ALIGNED) {
142 ret = omap_crypto_copy_sgs(total, bs, sg, new_sg, flags);
143 if (ret)
144 return ret;
145 *dd_flags |= OMAP_CRYPTO_DATA_COPIED << flags_shift;
146 } else if (ret == OMAP_CRYPTO_BAD_DATA_LENGTH) {
147 ret = omap_crypto_copy_sg_lists(total, bs, sg, new_sg, flags);
148 if (ret)
149 return ret;
150 if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY))
151 *dd_flags |= OMAP_CRYPTO_SG_COPIED << flags_shift;
152 } else if (flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY) {
153 sg_set_buf(new_sg, sg_virt(*sg), (*sg)->length);
154 }
155
156 return 0;
157}
158EXPORT_SYMBOL_GPL(omap_crypto_align_sg);
159
160void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
161 int offset, int len, u8 flags_shift,
162 unsigned long flags)
163{
164 void *buf;
165 int pages;
166
167 flags >>= flags_shift;
168 flags &= OMAP_CRYPTO_COPY_MASK;
169
170 if (!flags)
171 return;
172
173 buf = sg_virt(sg);
174 pages = get_order(len);
175
176 if (orig && (flags & OMAP_CRYPTO_COPY_MASK))
177 scatterwalk_map_and_copy(buf, orig, offset, len, 1);
178
179 if (flags & OMAP_CRYPTO_DATA_COPIED)
180 free_pages((unsigned long)buf, pages);
181 else if (flags & OMAP_CRYPTO_SG_COPIED)
182 kfree(sg);
183}
184EXPORT_SYMBOL_GPL(omap_crypto_cleanup);
185
186MODULE_DESCRIPTION("OMAP crypto support library.");
187MODULE_LICENSE("GPL v2");
188MODULE_AUTHOR("Tero Kristo <t-kristo@ti.com>");