/* net/core/tso.c — TCP/UDP segmentation-offload helper library */
1#include <linux/export.h>
2#include <linux/if_vlan.h>
3#include <net/ip.h>
4#include <net/tso.h>
5#include <asm/unaligned.h>
6
7/* Calculate expected number of TX descriptors */
8int tso_count_descs(struct sk_buff *skb)
9{
10 /* The Marvell Way */
11 return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
12}
13EXPORT_SYMBOL(tso_count_descs);
14
15void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
16 int size, bool is_last)
17{
18 struct tcphdr *tcph;
19 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
20 int mac_hdr_len = skb_network_offset(skb);
21
22 memcpy(hdr, skb->data, hdr_len);
23 if (!tso->ipv6) {
24 struct iphdr *iph = (void *)(hdr + mac_hdr_len);
25
26 iph->id = htons(tso->ip_id);
27 iph->tot_len = htons(size + hdr_len - mac_hdr_len);
28 tso->ip_id++;
29 } else {
30 struct ipv6hdr *iph = (void *)(hdr + mac_hdr_len);
31
32 iph->payload_len = htons(size + tcp_hdrlen(skb));
33 }
34 tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
35 put_unaligned_be32(tso->tcp_seq, &tcph->seq);
36
37 if (!is_last) {
38 /* Clear all special flags for not last packet */
39 tcph->psh = 0;
40 tcph->fin = 0;
41 tcph->rst = 0;
42 }
43}
44EXPORT_SYMBOL(tso_build_hdr);
45
46void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size)
47{
48 tso->tcp_seq += size;
49 tso->size -= size;
50 tso->data += size;
51
52 if ((tso->size == 0) &&
53 (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
54 skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
55
56 /* Move to next segment */
57 tso->size = frag->size;
58 tso->data = page_address(frag->page.p) + frag->page_offset;
59 tso->next_frag_idx++;
60 }
61}
62EXPORT_SYMBOL(tso_build_data);
63
64void tso_start(struct sk_buff *skb, struct tso_t *tso)
65{
66 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
67
68 tso->ip_id = ntohs(ip_hdr(skb)->id);
69 tso->tcp_seq = ntohl(tcp_hdr(skb)->seq);
70 tso->next_frag_idx = 0;
71 tso->ipv6 = vlan_get_protocol(skb) == htons(ETH_P_IPV6);
72
73 /* Build first data */
74 tso->size = skb_headlen(skb) - hdr_len;
75 tso->data = skb->data + hdr_len;
76 if ((tso->size == 0) &&
77 (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
78 skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
79
80 /* Move to next segment */
81 tso->size = frag->size;
82 tso->data = page_address(frag->page.p) + frag->page_offset;
83 tso->next_frag_idx++;
84 }
85}
86EXPORT_SYMBOL(tso_start);
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/export.h>
3#include <linux/if_vlan.h>
4#include <net/ip.h>
5#include <net/tso.h>
6#include <asm/unaligned.h>
7
8/* Calculate expected number of TX descriptors */
9int tso_count_descs(const struct sk_buff *skb)
10{
11 /* The Marvell Way */
12 return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
13}
14EXPORT_SYMBOL(tso_count_descs);
15
/* Build the replicated headers for one segment into @hdr.
 *
 * Copies the skb's MAC/IP/L4 headers and patches the per-segment fields:
 * IP ID and total length (IPv4) or payload length (IPv6), then either the
 * TCP sequence number and flags, or the UDP datagram length. @size is the
 * payload byte count of this segment; @is_last marks the final segment.
 * tso->tlen was set by tso_start() to the L4 header length.
 */
void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso,
		   int size, bool is_last)
{
	int hdr_len = skb_transport_offset(skb) + tso->tlen;
	int mac_hdr_len = skb_network_offset(skb);

	memcpy(hdr, skb->data, hdr_len);
	if (!tso->ipv6) {
		struct iphdr *iph = (void *)(hdr + mac_hdr_len);

		/* Each IPv4 segment gets its own incrementing IP ID */
		iph->id = htons(tso->ip_id);
		iph->tot_len = htons(size + hdr_len - mac_hdr_len);
		tso->ip_id++;
	} else {
		struct ipv6hdr *iph = (void *)(hdr + mac_hdr_len);

		/* IPv6 payload length excludes the IPv6 header itself */
		iph->payload_len = htons(size + tso->tlen);
	}
	hdr += skb_transport_offset(skb);
	/* tlen == sizeof(struct udphdr) is the UDP marker chosen by
	 * tso_start(); a TCP header is always longer than a UDP one.
	 */
	if (tso->tlen != sizeof(struct udphdr)) {
		struct tcphdr *tcph = (struct tcphdr *)hdr;

		put_unaligned_be32(tso->tcp_seq, &tcph->seq);

		if (!is_last) {
			/* Clear all special flags for not last packet */
			tcph->psh = 0;
			tcph->fin = 0;
			tcph->rst = 0;
		}
	} else {
		struct udphdr *uh = (struct udphdr *)hdr;

		/* UDP: only the datagram length needs fixing up */
		uh->len = htons(sizeof(*uh) + size);
	}
}
EXPORT_SYMBOL(tso_build_hdr);
53
54void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size)
55{
56 tso->tcp_seq += size; /* not worth avoiding this operation for UDP */
57 tso->size -= size;
58 tso->data += size;
59
60 if ((tso->size == 0) &&
61 (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
62 skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
63
64 /* Move to next segment */
65 tso->size = skb_frag_size(frag);
66 tso->data = skb_frag_address(frag);
67 tso->next_frag_idx++;
68 }
69}
70EXPORT_SYMBOL(tso_build_data);
71
72int tso_start(struct sk_buff *skb, struct tso_t *tso)
73{
74 int tlen = skb_is_gso_tcp(skb) ? tcp_hdrlen(skb) : sizeof(struct udphdr);
75 int hdr_len = skb_transport_offset(skb) + tlen;
76
77 tso->tlen = tlen;
78 tso->ip_id = ntohs(ip_hdr(skb)->id);
79 tso->tcp_seq = (tlen != sizeof(struct udphdr)) ? ntohl(tcp_hdr(skb)->seq) : 0;
80 tso->next_frag_idx = 0;
81 tso->ipv6 = vlan_get_protocol(skb) == htons(ETH_P_IPV6);
82
83 /* Build first data */
84 tso->size = skb_headlen(skb) - hdr_len;
85 tso->data = skb->data + hdr_len;
86 if ((tso->size == 0) &&
87 (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
88 skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
89
90 /* Move to next segment */
91 tso->size = skb_frag_size(frag);
92 tso->data = skb_frag_address(frag);
93 tso->next_frag_idx++;
94 }
95 return hdr_len;
96}
97EXPORT_SYMBOL(tso_start);