/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _TLS_OFFLOAD_H
#define _TLS_OFFLOAD_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

#include <net/net_namespace.h>
#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
#include <uapi/linux/tls.h>

struct tls_rec;

/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE ((size_t)1 << 14)

#define TLS_HEADER_SIZE 5
#define TLS_NONCE_OFFSET TLS_HEADER_SIZE
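
/* Layout sketch of a TLS 1.2 AES-GCM record on the wire (illustrative
 * comment, not derived from this header alone):
 *
 *	byte  0		content type (0x17 for application data)
 *	bytes 1-2	legacy protocol version
 *	bytes 3-4	ciphertext length, big endian
 *	bytes 5-12	explicit nonce, starting at TLS_NONCE_OFFSET
 *
 * TLS_NONCE_OFFSET equals TLS_HEADER_SIZE because the explicit nonce
 * immediately follows the 5-byte record header.
 */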

#define TLS_CRYPTO_INFO_READY(info) ((info)->cipher_type)

#define TLS_AAD_SPACE_SIZE 13

#define TLS_MAX_IV_SIZE 16
#define TLS_MAX_SALT_SIZE 4
#define TLS_TAG_SIZE 16
#define TLS_MAX_REC_SEQ_SIZE 8
#define TLS_MAX_AAD_SIZE TLS_AAD_SPACE_SIZE

/* For CCM mode, the full 16 bytes of the IV are made up of four fields:
 *
 * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
 *
 * The width of the 'length' field is encoded in 'b0' as (length width - 1),
 * hence b0 contains (3 - 1) = 2.
 */
#define TLS_AES_CCM_IV_B0_BYTE 2
#define TLS_SM4_CCM_IV_B0_BYTE 2

enum {
	TLS_BASE,
	TLS_SW,
	TLS_HW,
	TLS_HW_RECORD,
	TLS_NUM_CONFIG,
};

struct tx_work {
	struct delayed_work work;
	struct sock *sk;
};

struct tls_sw_context_tx {
	struct crypto_aead *aead_send;
	struct crypto_wait async_wait;
	struct tx_work tx_work;
	struct tls_rec *open_rec;
	struct list_head tx_list;
	atomic_t encrypt_pending;
	u8 async_capable:1;

#define BIT_TX_SCHEDULED	0
#define BIT_TX_CLOSING		1
	unsigned long tx_bitmask;
};
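
/* Illustrative use of tx_bitmask (a sketch of how the software TX path
 * can schedule its worker; not itself part of this header):
 *
 *	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
 *
 *	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
 *		schedule_delayed_work(&ctx->tx_work.work, 0);
 *
 * The bit stays set while the work is pending, so at most one instance
 * of tx_work is scheduled at a time.
 */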

struct tls_strparser {
	struct sock *sk;

	u32 mark : 8;
	u32 stopped : 1;
	u32 copy_mode : 1;
	u32 mixed_decrypted : 1;

	bool msg_ready;

	struct strp_msg stm;

	struct sk_buff *anchor;
	struct work_struct work;
};

struct tls_sw_context_rx {
	struct crypto_aead *aead_recv;
	struct crypto_wait async_wait;
	struct sk_buff_head rx_list;	/* list of decrypted 'data' records */
	void (*saved_data_ready)(struct sock *sk);

	u8 reader_present;
	u8 async_capable:1;
	u8 zc_capable:1;
	u8 reader_contended:1;

	struct tls_strparser strp;

	atomic_t decrypt_pending;
	struct sk_buff_head async_hold;
	struct wait_queue_head wq;
};

struct tls_record_info {
	struct list_head list;
	u32 end_seq;
	int len;
	int num_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

#define TLS_DRIVER_STATE_SIZE_TX	16
struct tls_offload_context_tx {
	struct crypto_aead *aead_send;
	spinlock_t lock;	/* protects records list */
	struct list_head records_list;
	struct tls_record_info *open_record;
	struct tls_record_info *retransmit_hint;
	u64 hint_record_sn;
	u64 unacked_record_sn;

	struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
	void (*sk_destruct)(struct sock *sk);
	struct work_struct destruct_work;
	struct tls_context *ctx;
	/* The TLS layer reserves room for driver-specific state here.
	 * Currently the belief is that there is not enough driver-specific
	 * state to justify another layer of indirection.
	 */
	u8 driver_state[TLS_DRIVER_STATE_SIZE_TX] __aligned(8);
};

enum tls_context_flags {
	/* tls_device_down was called after the netdev went down, device state
	 * was released, and kTLS works in software, even though rx_conf is
	 * still TLS_HW (needed for transition).
	 */
	TLS_RX_DEV_DEGRADED = 0,
	/* Unlike RX, where resync is driven entirely by the core, in TX only
	 * the driver knows when things went out of sync, so we need the flag
	 * to be atomic.
	 */
	TLS_TX_SYNC_SCHED = 1,
	/* tls_dev_del was called for the RX side, device state was released,
	 * but tls_ctx->netdev might still be kept, because TX-side driver
	 * resources might not be released yet. Used to prevent a second
	 * tls_dev_del call in tls_device_down if it happens simultaneously.
	 */
	TLS_RX_DEV_CLOSED = 2,
};

struct cipher_context {
	char iv[TLS_MAX_IV_SIZE + TLS_MAX_SALT_SIZE];
	char rec_seq[TLS_MAX_REC_SEQ_SIZE];
};

union tls_crypto_context {
	struct tls_crypto_info info;
	union {
		struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
		struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
		struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
		struct tls12_crypto_info_sm4_gcm sm4_gcm;
		struct tls12_crypto_info_sm4_ccm sm4_ccm;
	};
};

struct tls_prot_info {
	u16 version;
	u16 cipher_type;
	u16 prepend_size;
	u16 tag_size;
	u16 overhead_size;
	u16 iv_size;
	u16 salt_size;
	u16 rec_seq_size;
	u16 aad_size;
	u16 tail_size;
};
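
/* Example values (illustrative, assuming TLS 1.2 with AES-GCM-128):
 * iv_size = 8 (explicit nonce), salt_size = 4 (implicit nonce),
 * tag_size = 16, rec_seq_size = 8, aad_size = TLS_AAD_SPACE_SIZE,
 * prepend_size = TLS_HEADER_SIZE + iv_size = 13,
 * overhead_size = prepend_size + tag_size = 29, tail_size = 0.
 */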

struct tls_context {
	/* read-only cache line */
	struct tls_prot_info prot_info;

	u8 tx_conf:3;
	u8 rx_conf:3;
	u8 zerocopy_sendfile:1;
	u8 rx_no_pad:1;

	int (*push_pending_record)(struct sock *sk, int flags);
	void (*sk_write_space)(struct sock *sk);

	void *priv_ctx_tx;
	void *priv_ctx_rx;

	struct net_device __rcu *netdev;

	/* rw cache line */
	struct cipher_context tx;
	struct cipher_context rx;

	struct scatterlist *partially_sent_record;
	u16 partially_sent_offset;

	bool splicing_pages;
	bool pending_open_record_frags;

	struct mutex tx_lock;	/* protects partially_sent_* fields and
				 * per-type TX fields
				 */
	unsigned long flags;

	/* cache cold stuff */
	struct proto *sk_proto;
	struct sock *sk;

	void (*sk_destruct)(struct sock *sk);

	union tls_crypto_context crypto_send;
	union tls_crypto_context crypto_recv;

	struct list_head list;
	refcount_t refcount;
	struct rcu_head rcu;
};

enum tls_offload_ctx_dir {
	TLS_OFFLOAD_CTX_DIR_RX,
	TLS_OFFLOAD_CTX_DIR_TX,
};

struct tlsdev_ops {
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
	int (*tls_dev_resync)(struct net_device *netdev,
			      struct sock *sk, u32 seq, u8 *rcd_sn,
			      enum tls_offload_ctx_dir direction);
};
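
/* How a NIC driver typically hooks in (illustrative sketch; the foo_*
 * names are hypothetical, only tlsdev_ops and the netdev feature flags
 * come from the kernel):
 *
 *	static const struct tlsdev_ops foo_tlsdev_ops = {
 *		.tls_dev_add	= foo_ktls_add,
 *		.tls_dev_del	= foo_ktls_del,
 *		.tls_dev_resync	= foo_ktls_resync,
 *	};
 *
 *	netdev->tlsdev_ops = &foo_tlsdev_ops;
 *	netdev->features |= NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX;
 */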

enum tls_offload_sync_type {
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
	TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2,
};

#define TLS_DEVICE_RESYNC_NH_START_IVAL	2
#define TLS_DEVICE_RESYNC_NH_MAX_IVAL	128

#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX	13
struct tls_offload_resync_async {
	atomic64_t req;
	u16 loglen;
	u16 rcd_delta;
	u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
};

#define TLS_DRIVER_STATE_SIZE_RX	8
struct tls_offload_context_rx {
	/* sw must be the first member of tls_offload_context_rx */
	struct tls_sw_context_rx sw;
	enum tls_offload_sync_type resync_type;
	/* this member is set regardless of resync_type, to avoid branches */
	u8 resync_nh_reset:1;
	/* CORE_NEXT_HINT-only member, but use the hole here */
	u8 resync_nh_do_now:1;
	union {
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
		struct {
			atomic64_t resync_req;
		};
		/* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
		struct {
			u32 decrypted_failed;
			u32 decrypted_tgt;
		} resync_nh;
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */
		struct {
			struct tls_offload_resync_async *resync_async;
		};
	};
	/* The TLS layer reserves room for driver-specific state here.
	 * Currently the belief is that there is not enough driver-specific
	 * state to justify another layer of indirection.
	 */
	u8 driver_state[TLS_DRIVER_STATE_SIZE_RX] __aligned(8);
};

struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn);
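
/* Sketch of how a TX-offload driver looks up the record that covers a
 * (possibly retransmitted) TCP sequence number. The driver code here is
 * hypothetical, but the locking rule comes from the 'lock' comment above:
 *
 *	struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx);
 *	struct tls_record_info *record;
 *	unsigned long flags;
 *	u64 record_sn;
 *
 *	spin_lock_irqsave(&tx_ctx->lock, flags);
 *	record = tls_get_record(tx_ctx, tcp_seq, &record_sn);
 *	if (record)
 *		... program the HW from record_sn and record->frags ...
 *	spin_unlock_irqrestore(&tx_ctx->lock, flags);
 */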

static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
{
	return rec->len == 0;
}

static inline u32 tls_record_start_seq(struct tls_record_info *rec)
{
	return rec->end_seq - rec->len;
}

struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
		      struct sk_buff *skb);
struct sk_buff *
tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
			 struct sk_buff *skb);

static inline bool tls_is_skb_tx_device_offloaded(const struct sk_buff *skb)
{
#ifdef CONFIG_TLS_DEVICE
	struct sock *sk = skb->sk;

	return sk && sk_fullsock(sk) &&
	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
	       &tls_validate_xmit_skb);
#else
	return false;
#endif
}
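
/* Typical call site in a driver's xmit path (illustrative; foo_ktls_xmit
 * is a hypothetical helper):
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		if (tls_is_skb_tx_device_offloaded(skb))
 *			return foo_ktls_xmit(dev, skb);
 *		...
 *	}
 */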

static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	/* Use RCU on icsk_ulp_data only for sock diag code,
	 * TLS data path doesn't need rcu_dereference().
	 */
	return (__force void *)icsk->icsk_ulp_data;
}

static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
	struct tls_context *ctx;

	if (!sk_is_inet(sk) || !inet_test_bit(IS_ICSK, sk))
		return false;

	ctx = tls_get_ctx(sk);
	if (!ctx)
		return false;
	return !!tls_sw_ctx_tx(ctx);
}

static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
{
	struct tls_context *ctx;

	if (!sk_is_inet(sk) || !inet_test_bit(IS_ICSK, sk))
		return false;

	ctx = tls_get_ctx(sk);
	if (!ctx)
		return false;
	return !!tls_sw_ctx_rx(ctx);
}

static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
				     enum tls_offload_ctx_dir direction)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return tls_offload_ctx_tx(tls_ctx)->driver_state;
	else
		return tls_offload_ctx_rx(tls_ctx)->driver_state;
}

static inline void *
tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
{
	return __tls_driver_ctx(tls_get_ctx(sk), direction);
}
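
/* A driver usually overlays its own small struct on driver_state
 * (illustrative; struct foo_ktls_state is hypothetical):
 *
 *	struct foo_ktls_state {
 *		u32 hw_stream_id;
 *	};
 *
 *	BUILD_BUG_ON(sizeof(struct foo_ktls_state) >
 *		     TLS_DRIVER_STATE_SIZE_TX);
 *
 *	struct foo_ktls_state *state =
 *		tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_TX);
 *	state->hw_stream_id = id;
 */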

#define RESYNC_REQ BIT(0)
#define RESYNC_REQ_ASYNC BIT(1)
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}
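
/* resync_req packs the whole request into one atomic64: the TCP sequence
 * number of the suspected record header in bits 63..32 and RESYNC_REQ in
 * bit 0. Because the value is set and consumed atomically, a driver can
 * post a request from any context without extra locking.
 */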

/* Log all TLS record header TCP sequences in [seq, seq+len] */
static inline void
tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
		     ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
	rx_ctx->resync_async->loglen = 0;
	rx_ctx->resync_async->rcd_delta = 0;
}

static inline void
tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req,
		     ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}

static inline void
tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_offload_ctx_rx(tls_ctx)->resync_type = type;
}

/* Driver's seq tracking has to be disabled until resync succeeds */
static inline bool tls_offload_tx_resync_pending(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	bool ret;

	ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
	smp_mb__after_atomic();
	return ret;
}

struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);

#ifdef CONFIG_TLS_DEVICE
void tls_device_sk_destruct(struct sock *sk);
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);

static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
{
	if (!sk_fullsock(sk) ||
	    smp_load_acquire(&sk->sk_destruct) != tls_device_sk_destruct)
		return false;
	return tls_get_ctx(sk)->rx_conf == TLS_HW;
}
#endif
#endif /* _TLS_OFFLOAD_H */
/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
34#ifndef _TLS_OFFLOAD_H
35#define _TLS_OFFLOAD_H
36
37#include <linux/types.h>
38#include <asm/byteorder.h>
39#include <linux/crypto.h>
40#include <linux/socket.h>
41#include <linux/tcp.h>
42#include <net/tcp.h>
43#include <net/strparser.h>
44
45#include <uapi/linux/tls.h>
46
47
/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE ((size_t)1 << 14)

#define TLS_HEADER_SIZE 5
#define TLS_NONCE_OFFSET TLS_HEADER_SIZE

#define TLS_CRYPTO_INFO_READY(info) ((info)->cipher_type)

#define TLS_RECORD_TYPE_DATA 0x17

#define TLS_AAD_SPACE_SIZE 13
#define TLS_DEVICE_NAME_MAX 32

/*
 * This structure defines the routines for an Inline TLS driver.
 * The following routines are optional and filled with a
 * null pointer if not defined.
 *
 * @name: the name of the registered Inline TLS device
 * @dev_list: Inline TLS device list
 * int (*feature)(struct tls_device *device);
 *     Called to return the Inline TLS driver's capabilities.
 *
 * int (*hash)(struct tls_device *device, struct sock *sk);
 *     Sets up the Inline TLS driver for a listening socket and programs
 *     device-specific functionality as required.
 *
 * void (*unhash)(struct tls_device *device, struct sock *sk);
 *     Cleans up the listen state set by the Inline TLS driver.
 */
struct tls_device {
	char name[TLS_DEVICE_NAME_MAX];
	struct list_head dev_list;
	int (*feature)(struct tls_device *device);
	int (*hash)(struct tls_device *device, struct sock *sk);
	void (*unhash)(struct tls_device *device, struct sock *sk);
};

struct tls_sw_context {
	struct crypto_aead *aead_send;
	struct crypto_aead *aead_recv;
	struct crypto_wait async_wait;

	/* Receive context */
	struct strparser strp;
	void (*saved_data_ready)(struct sock *sk);
	unsigned int (*sk_poll)(struct file *file, struct socket *sock,
				struct poll_table_struct *wait);
	struct sk_buff *recv_pkt;
	u8 control;
	bool decrypted;

	char rx_aad_ciphertext[TLS_AAD_SPACE_SIZE];
	char rx_aad_plaintext[TLS_AAD_SPACE_SIZE];

	/* Sending context */
	char aad_space[TLS_AAD_SPACE_SIZE];

	unsigned int sg_plaintext_size;
	int sg_plaintext_num_elem;
	struct scatterlist sg_plaintext_data[MAX_SKB_FRAGS];

	unsigned int sg_encrypted_size;
	int sg_encrypted_num_elem;
	struct scatterlist sg_encrypted_data[MAX_SKB_FRAGS];

	/* AAD | sg_plaintext_data | sg_tag */
	struct scatterlist sg_aead_in[2];
	/* AAD | sg_encrypted_data (data contains overhead for hdr, iv and tag) */
	struct scatterlist sg_aead_out[2];
};

enum {
	TLS_PENDING_CLOSED_RECORD
};

struct cipher_context {
	u16 prepend_size;
	u16 tag_size;
	u16 overhead_size;
	u16 iv_size;
	char *iv;
	u16 rec_seq_size;
	char *rec_seq;
};

struct tls_context {
	union {
		struct tls_crypto_info crypto_send;
		struct tls12_crypto_info_aes_gcm_128 crypto_send_aes_gcm_128;
	};
	union {
		struct tls_crypto_info crypto_recv;
		struct tls12_crypto_info_aes_gcm_128 crypto_recv_aes_gcm_128;
	};

	void *priv_ctx;

	u8 conf:3;

	struct cipher_context tx;
	struct cipher_context rx;

	struct scatterlist *partially_sent_record;
	u16 partially_sent_offset;
	unsigned long flags;
	bool in_tcp_sendpages;

	u16 pending_open_record_frags;
	int (*push_pending_record)(struct sock *sk, int flags);

	void (*sk_write_space)(struct sock *sk);
	void (*sk_proto_close)(struct sock *sk, long timeout);

	int (*setsockopt)(struct sock *sk, int level,
			  int optname, char __user *optval,
			  unsigned int optlen);
	int (*getsockopt)(struct sock *sk, int level,
			  int optname, char __user *optval,
			  int __user *optlen);
	int (*hash)(struct sock *sk);
	void (*unhash)(struct sock *sk);
};

int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
		 int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
		  unsigned int optlen);

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags);
void tls_sw_close(struct sock *sk, long timeout);
void tls_sw_free_resources(struct sock *sk);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		   int nonblock, int flags, int *addr_len);
unsigned int tls_sw_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags);

void tls_sk_destruct(struct sock *sk, struct tls_context *ctx);
void tls_icsk_clean_acked(struct sock *sk);

int tls_push_sg(struct sock *sk, struct tls_context *ctx,
		struct scatterlist *sg, u16 first_offset,
		int flags);
int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx,
				   int flags, long *timeo);

static inline bool tls_is_pending_closed_record(struct tls_context *ctx)
{
	return test_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
}

static inline int tls_complete_pending_work(struct sock *sk,
					    struct tls_context *ctx,
					    int flags, long *timeo)
{
	int rc = 0;

	if (unlikely(sk->sk_write_pending))
		rc = wait_on_pending_writer(sk, timeo);

	if (!rc && tls_is_pending_closed_record(ctx))
		rc = tls_push_pending_closed_record(sk, ctx, flags, timeo);

	return rc;
}

static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
{
	return !!ctx->partially_sent_record;
}

static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
{
	return tls_ctx->pending_open_record_frags;
}

static inline void tls_err_abort(struct sock *sk, int err)
{
	sk->sk_err = err;
	sk->sk_error_report(sk);
}

static inline bool tls_bigint_increment(unsigned char *seq, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		++seq[i];
		if (seq[i] != 0)
			break;
	}

	return (i == -1);
}
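
/* tls_bigint_increment() treats seq as a big-endian counter and returns
 * true only on wraparound, i.e. when every byte was 0xff. For example
 * (illustrative values):
 *
 *	u8 seq[2] = { 0x00, 0xff };
 *	tls_bigint_increment(seq, 2);	// seq = { 0x01, 0x00 }, returns false
 *
 *	u8 top[2] = { 0xff, 0xff };
 *	tls_bigint_increment(top, 2);	// top = { 0x00, 0x00 }, returns true
 */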

static inline void tls_advance_record_sn(struct sock *sk,
					 struct cipher_context *ctx)
{
	if (tls_bigint_increment(ctx->rec_seq, ctx->rec_seq_size))
		tls_err_abort(sk, EBADMSG);
	tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			     ctx->iv_size);
}

static inline void tls_fill_prepend(struct tls_context *ctx,
				    char *buf,
				    size_t plaintext_len,
				    unsigned char record_type)
{
	size_t pkt_len, iv_size = ctx->tx.iv_size;

	pkt_len = plaintext_len + iv_size + ctx->tx.tag_size;

	/* we cover the explicit nonce here as well, so buf must be at least
	 * TLS_HEADER_SIZE + the explicit nonce size
	 */
	buf[0] = record_type;
	buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.version);
	buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.version);
	/* we can use IV for nonce explicit according to spec */
	buf[3] = pkt_len >> 8;
	buf[4] = pkt_len & 0xFF;
	memcpy(buf + TLS_NONCE_OFFSET,
	       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size);
}
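
/* Resulting prepend for TLS 1.2 AES-GCM-128 (illustrative byte layout,
 * 13 bytes total: 5-byte header plus 8-byte explicit nonce):
 *
 *	buf[0]		record type, e.g. TLS_RECORD_TYPE_DATA (0x17)
 *	buf[1..2]	protocol version (both bytes 0x03 for TLS 1.2)
 *	buf[3..4]	pkt_len = plaintext + nonce (8) + tag (16), big endian
 *	buf[5..12]	explicit nonce taken from ctx->tx.iv past the salt
 */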

static inline void tls_make_aad(char *buf,
				size_t size,
				char *record_sequence,
				int record_sequence_size,
				unsigned char record_type)
{
	memcpy(buf, record_sequence, record_sequence_size);

	buf[8] = record_type;
	buf[9] = TLS_1_2_VERSION_MAJOR;
	buf[10] = TLS_1_2_VERSION_MINOR;
	buf[11] = size >> 8;
	buf[12] = size & 0xFF;
}
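
/* The 13 bytes written here match TLS_AAD_SPACE_SIZE and follow the
 * TLS 1.2 AEAD additional-data layout (illustrative summary):
 *
 *	buf[0..7]	record sequence number
 *	buf[8]		record type
 *	buf[9..10]	version, 0x03 0x03 for TLS 1.2
 *	buf[11..12]	plaintext length, big endian
 */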

static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ulp_data;
}

static inline struct tls_sw_context *tls_sw_ctx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context *)tls_ctx->priv_ctx;
}

static inline struct tls_offload_context *tls_offload_ctx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context *)tls_ctx->priv_ctx;
}

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type);
void tls_register_device(struct tls_device *device);
void tls_unregister_device(struct tls_device *device);
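
/* Illustrative registration from an Inline TLS driver (the foo_* names
 * are hypothetical; only the tls_device hooks come from this header):
 *
 *	static struct tls_device foo_tls_dev = {
 *		.name    = "foo",
 *		.feature = foo_tls_feature,
 *		.hash    = foo_tls_hash,
 *		.unhash  = foo_tls_unhash,
 *	};
 *
 *	tls_register_device(&foo_tls_dev);
 *	...
 *	tls_unregister_device(&foo_tls_dev);
 */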

#endif /* _TLS_OFFLOAD_H */