// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/tcp.h>
#include <net/protocol.h>

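/* Walk the segment chain and move the software timestamp request to
 * the segment whose slice of the sequence space contains ts_seq.
 * seq starts at the first segment's sequence number and advances by
 * mss per segment.  Illustrative numbers: with seq = 1000, mss = 1000
 * and ts_seq = 2500, the second segment (covering 2000..2999) gets
 * SKBTX_SW_TSTAMP and tskey set.
 */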
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

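/* IPv4 entry point for TCP GSO.  Sanity-checks the gso_type and the
 * TCP header, rebuilds the pseudo-header checksum if the stack has
 * not already prepared the skb for checksum offload, then hands off
 * to the protocol-independent tcp_gso_segment().
 */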
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up the checksum pseudo header; we usually expect
		 * the stack to have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

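/* Split a TCP GSO packet into MSS-sized segments.  Beyond calling
 * skb_segment(), this fixes up each segment's TCP header (seq, flags,
 * checksum) and transfers socket ownership so that TCP Small Queues
 * accounting keeps working across segmentation.
 */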
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;
	__wsum delta;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = ~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only the first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only require splitting
	 * the frame into an MSS multiple and possibly a remainder; both
	 * cases return a GSO skb.  So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

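	/* Compute a one's complement checksum delta that turns the big
	 * packet's checksum into a per-segment one: oldlen is the one's
	 * complement negation of the old transport length, and
	 * thlen + mss is each new segment's TCP length.  Folding this
	 * delta into the old checksum swaps the length term without
	 * re-summing any payload bytes.
	 */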
	delta = (__force __wsum)htonl(oldlen + thlen + mss);

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));

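	/* Fix up all segments except the last: each carries exactly mss
	 * payload bytes and therefore shares the same checksum
	 * (newcheck).  FIN and PSH may survive only on the final
	 * segment, CWR only on the first, and each header gets a
	 * sequence number mss bytes after the previous one.
	 */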
	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack will be called when the last frag
	 * is freed at TX completion, and not right now, when gso_skb is
	 * freed by the GSO engine.
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test().
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

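	/* The last segment may be shorter than mss, so recompute its
	 * checksum delta from the actual transport length: the linear
	 * bytes between the transport header and the tail, plus any
	 * paged data accounted in data_len.
	 */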
	delta = (__force __wsum)htonl(oldlen +
				      (skb_tail_pointer(skb) -
				       skb_transport_header(skb)) +
				      skb->data_len);
	th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

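/* Core TCP GRO receive path.  Looks for a packet of the same flow on
 * the GRO list, compares the TCP headers, and either merges the new
 * skb into the matching one or flags a flush so both are delivered
 * up the stack separately.
 */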
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header(skb, hlen, off);
	if (unlikely(!th))
		goto out;

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (!skb_gro_may_pull(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

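	/* Flow matching: the source and destination ports are adjacent
	 * 16-bit fields in the TCP header, so a single 32-bit load of
	 * &th->source compares both at once.  A mismatch marks the
	 * candidate as a different flow.
	 */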
	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}
	p = NULL;
	goto out_check_final;

found:
	/* Include the IP ID check below from the innermost IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on
	 * whether we continue this flow as an atomic flow with a fixed
	 * ID or if we use an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

	/* If skb is a GRO packet, make sure its gso_size matches the
	 * prior packet's mss.  If it is a single frame, do not
	 * aggregate it if its length is bigger than our mss.
	 */
	if (unlikely(skb_is_gso(skb)))
		flush |= (mss != skb_shinfo(skb)->gso_size);
	else
		flush |= (len - 1) >= mss;

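	/* In-order check: the aggregated packet's next expected sequence
	 * number (th2->seq plus the bytes already merged) must equal the
	 * new segment's sequence number, otherwise force a flush.
	 */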
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
	flush |= p->decrypted ^ skb->decrypted;
#endif

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	/* Force a flush if last segment is smaller than mss. */
	if (unlikely(skb_is_gso(skb)))
		flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
	else
		flush = len < mss;

	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

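/* Finish off an aggregated packet before it is handed to the stack:
 * make it look like a regular GSO packet by setting up the checksum
 * offload fields and deriving gso_segs from the GRO segment count.
 */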
void tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct skb_shared_info *shinfo;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	shinfo = skb_shinfo(skb);
	shinfo->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		shinfo->gso_type |= SKB_GSO_TCP_ECN;
}
EXPORT_SYMBOL(tcp_gro_complete);

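/* IPv4 GRO receive hook.  Validates the TCP checksum against the IPv4
 * pseudo-header before aggregation; on failure the packet is flagged
 * for flush and left to the regular receive path.
 */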
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

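/* IPv4 GRO complete hook.  Seeds th->check with the pseudo-header
 * checksum for the merged length (the remainder is handled via
 * CHECKSUM_PARTIAL) and marks the packet as TCPv4 GSO, with a fixed
 * IP ID if the flow was detected as atomic.
 */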
INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);

	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4 |
			(NAPI_GRO_CB(skb)->is_atomic * SKB_GSO_TCP_FIXEDID);

	tcp_gro_complete(skb);
	return 0;
}

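/* Register the TCPv4 offload callbacks.  The net_offload struct lives
 * in net_hotdata, which groups frequently accessed networking state;
 * inet_add_offload() hooks it into the IPv4 protocol offload table
 * for IPPROTO_TCP.
 */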
int __init tcpv4_offload_init(void)
{
	net_hotdata.tcpv4_offload = (struct net_offload) {
		.callbacks = {
			.gso_segment	= tcp4_gso_segment,
			.gro_receive	= tcp4_gro_receive,
			.gro_complete	= tcp4_gro_complete,
		},
	};
	return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);
}