mirror of
https://gitee.com/bianbu-linux/linux-6.6
synced 2025-04-24 14:07:52 -04:00
commit 89add40066f9ed9abe5f7f886fe5789ff7e0c50e upstream.

Tighten csum_start and csum_offset checks in virtio_net_hdr_to_skb for
GSO packets.

The function already checks that a checksum requested with
VIRTIO_NET_HDR_F_NEEDS_CSUM is in skb linear. But for GSO packets this
might not hold for segs after segmentation.

Syzkaller demonstrated reaching this warning in skb_checksum_help

        offset = skb_checksum_start_offset(skb);
        ret = -EINVAL;
        if (WARN_ON_ONCE(offset >= skb_headlen(skb)))

by injecting a TSO packet:

WARNING: CPU: 1 PID: 3539 at net/core/dev.c:3284 skb_checksum_help+0x3d0/0x5b0
 ip_do_fragment+0x209/0x1b20 net/ipv4/ip_output.c:774
 ip_finish_output_gso net/ipv4/ip_output.c:279 [inline]
 __ip_finish_output+0x2bd/0x4b0 net/ipv4/ip_output.c:301
 iptunnel_xmit+0x50c/0x930 net/ipv4/ip_tunnel_core.c:82
 ip_tunnel_xmit+0x2296/0x2c70 net/ipv4/ip_tunnel.c:813
 __gre_xmit net/ipv4/ip_gre.c:469 [inline]
 ipgre_xmit+0x759/0xa60 net/ipv4/ip_gre.c:661
 __netdev_start_xmit include/linux/netdevice.h:4850 [inline]
 netdev_start_xmit include/linux/netdevice.h:4864 [inline]
 xmit_one net/core/dev.c:3595 [inline]
 dev_hard_start_xmit+0x261/0x8c0 net/core/dev.c:3611
 __dev_queue_xmit+0x1b97/0x3c90 net/core/dev.c:4261
 packet_snd net/packet/af_packet.c:3073 [inline]

The geometry of the bad input packet at tcp_gso_segment:

[   52.003050][ T8403] skb len=12202 headroom=244 headlen=12093 tailroom=0
[   52.003050][ T8403] mac=(168,24) mac_len=24 net=(192,52) trans=244
[   52.003050][ T8403] shinfo(txflags=0 nr_frags=1 gso(size=1552 type=3 segs=0))
[   52.003050][ T8403] csum(0x60000c7 start=199 offset=1536 ip_summed=3 complete_sw=0 valid=0 level=0)

Mitigate with stricter input validation.

csum_offset: for GSO packets, deduce the correct value from gso_type.
This is already done for USO. Extend it to TSO. Let UFO be:
udp[46]_ufo_fragment ignores these fields and always computes the
checksum in software.

csum_start: finding the real offset requires parsing to the transport
header. Do not add a parser; use the existing segmentation parsing.
Thanks to SKB_GSO_DODGY, that also catches bad packets that are hw
offloaded. Again test both TSO and USO. Do not test UFO for the above
reason, and do not test UDP tunnel offload.

GSO packets are almost always CHECKSUM_PARTIAL. USO packets may be
CHECKSUM_NONE since commit 10154dbded6d6 ("udp: Allow GSO transmit
from devices with no checksum offload"), but even then these fields
are initialized correctly in udp4_hwcsum/udp6_hwcsum_outgoing, so
there is no need to test for ip_summed == CHECKSUM_PARTIAL first.

This revises an existing fix mentioned in the Fixes tag, which broke
small packets with GSO offload, as detected by kselftests.

Link: https://syzkaller.appspot.com/bug?extid=e1db31216c789f552871
Link: https://lore.kernel.org/netdev/20240723223109.2196886-1-kuba@kernel.org
Fixes: e269d79c7d35 ("net: missing check virtio")
Cc: stable@vger.kernel.org
Signed-off-by: Willem de Bruijn <willemb@google.com>
Link: https://patch.msgid.link/20240729201108.1615114-1-willemdebruijn.kernel@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
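As a concrete illustration of the csum_offset half of the mitigation, here is
a minimal sketch in the spirit of virtio_net_hdr_to_skb(). The identifiers are
real kernel symbols, but the masking of SKB_GSO_DODGY and the surrounding
control flow are assumptions of this sketch, not the verbatim upstream diff:

        /* Sketch: a GSO packet that requests checksum offload must point
         * csum_offset at the checksum field implied by its gso_type.
         * UFO is exempt, since udp[46]_ufo_fragment ignores these fields
         * and always computes the checksum in software.
         */
        switch (skb_shinfo(skb)->gso_type & ~(SKB_GSO_TCP_ECN | SKB_GSO_DODGY)) {
        case SKB_GSO_TCPV4:
        case SKB_GSO_TCPV6:
                if (skb->csum_offset != offsetof(struct tcphdr, check))
                        return -EINVAL;
                break;
        case SKB_GSO_UDP_L4:
                if (skb->csum_offset != offsetof(struct udphdr, check))
                        return -EINVAL;
                break;
        }

The csum_start half needs no parser here: segmentation, which is forced for
untrusted packets via SKB_GSO_DODGY, reparses to the transport header, so
tcp_gso_segment() in the file below rejects a bad csum_start when it compares
skb_checksum_start() against skb_transport_header().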
361 lines
8.6 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      IPV4 GSO/GRO offload support
 *      Linux INET implementation
 *
 *      TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/tcp.h>
#include <net/protocol.h>

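/* Transfer a software tx-timestamp request from the original GSO skb to
 * its segments: walk the segment list and mark the first segment whose
 * sequence range [seq, seq + mss) covers the requested byte ts_seq.
 */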
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
                           unsigned int seq, unsigned int mss)
{
        while (skb) {
                if (before(ts_seq, seq + mss)) {
                        skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
                        skb_shinfo(skb)->tskey = ts_seq;
                        return;
                }

                skb = skb->next;
                seq += mss;
        }
}

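/* IPv4 entry point for TCP GSO: sanity-check the gso_type and, in the
 * unusual non-CHECKSUM_PARTIAL case, rebuild the pseudo-header checksum
 * before handing off to the protocol-independent tcp_gso_segment().
 */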
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
                return ERR_PTR(-EINVAL);

        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                return ERR_PTR(-EINVAL);

        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
                const struct iphdr *iph = ip_hdr(skb);
                struct tcphdr *th = tcp_hdr(skb);

                /* Set up checksum pseudo header, usually expect stack to
                 * have done this already.
                 */

                th->check = 0;
                skb->ip_summed = CHECKSUM_PARTIAL;
                __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
        }

        return tcp_gso_segment(skb, features);
}

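/* Split a TCP GSO skb into MSS-sized segments, fixing up the TCP header
 * (seq, flags, checksum) of every segment. Shared by IPv4 and IPv6, and
 * also run for untrusted (SKB_GSO_DODGY) packets, where it doubles as
 * input validation.
 */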
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                                netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int sum_truesize = 0;
        struct tcphdr *th;
        unsigned int thlen;
        unsigned int seq;
        unsigned int oldlen;
        unsigned int mss;
        struct sk_buff *gso_skb = skb;
        __sum16 newcheck;
        bool ooo_okay, copy_destructor;
        __wsum delta;

        th = tcp_hdr(skb);
        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;

        if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb)))
                goto out;

        if (!pskb_may_pull(skb, thlen))
                goto out;

        /* ~(old total length): folded into the checksum deltas below to
         * subtract the old length from the one's complement sum.
         */
        oldlen = ~skb->len;
        __skb_pull(skb, thlen);

        mss = skb_shinfo(skb)->gso_size;
        if (unlikely(skb->len <= mss))
                goto out;

        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
                /* Packet is from an untrusted source, reset gso_segs. */

                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

                segs = NULL;
                goto out;
        }

        copy_destructor = gso_skb->destructor == tcp_wfree;
        ooo_okay = gso_skb->ooo_okay;
        /* All segments but the first should have ooo_okay cleared */
        skb->ooo_okay = 0;

        segs = skb_segment(skb, features);
        if (IS_ERR(segs))
                goto out;

        /* Only first segment might have ooo_okay set */
        segs->ooo_okay = ooo_okay;

        /* GSO partial and frag_list segmentation only requires splitting
         * the frame into an MSS multiple and possibly a remainder, both
         * cases return a GSO skb. So update the mss now.
         */
        if (skb_is_gso(segs))
                mss *= skb_shinfo(segs)->gso_segs;

        /* Pseudo-header TCP length changes from the old total to
         * thlen + mss for every full-sized segment.
         */
        delta = (__force __wsum)htonl(oldlen + thlen + mss);

        skb = segs;
        th = tcp_hdr(skb);
        seq = ntohl(th->seq);

        if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
                tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

        newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));

        while (skb->next) {
                th->fin = th->psh = 0;
                th->check = newcheck;

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        gso_reset_checksum(skb, ~th->check);
                else
                        th->check = gso_make_checksum(skb, ~th->check);

                seq += mss;
                if (copy_destructor) {
                        skb->destructor = gso_skb->destructor;
                        skb->sk = gso_skb->sk;
                        sum_truesize += skb->truesize;
                }
                skb = skb->next;
                th = tcp_hdr(skb);

                th->seq = htonl(seq);
                th->cwr = 0;
        }

        /* Following permits TCP Small Queues to work well with GSO :
         * The callback to TCP stack will be called at the time last frag
         * is freed at TX completion, and not right now when gso_skb
         * is freed by GSO engine
         */
        if (copy_destructor) {
                int delta;

                swap(gso_skb->sk, skb->sk);
                swap(gso_skb->destructor, skb->destructor);
                sum_truesize += skb->truesize;
                delta = sum_truesize - gso_skb->truesize;
                /* In some pathological cases, delta can be negative.
                 * We need to either use refcount_add() or refcount_sub_and_test()
                 */
                if (likely(delta >= 0))
                        refcount_add(delta, &skb->sk->sk_wmem_alloc);
                else
                        WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
        }

        /* The last segment may be shorter than mss: recompute its checksum
         * with its actual TCP length (header plus linear and paged payload).
         */
        delta = (__force __wsum)htonl(oldlen +
                                      (skb_tail_pointer(skb) -
                                       skb_transport_header(skb)) +
                                      skb->data_len);
        th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                gso_reset_checksum(skb, ~th->check);
        else
                th->check = gso_make_checksum(skb, ~th->check);
out:
        return segs;
}

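/* Core TCP GRO matching: look for a held packet of the same flow on @head
 * and try to coalesce @skb onto it. Returns the held packet when it must
 * be flushed up the stack, or NULL to keep aggregating.
 */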
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
        struct sk_buff *pp = NULL;
        struct sk_buff *p;
        struct tcphdr *th;
        struct tcphdr *th2;
        unsigned int len;
        unsigned int thlen;
        __be32 flags;
        unsigned int mss = 1;
        unsigned int hlen;
        unsigned int off;
        int flush = 1;
        int i;

        off = skb_gro_offset(skb);
        hlen = off + sizeof(*th);
        th = skb_gro_header(skb, hlen, off);
        if (unlikely(!th))
                goto out;

        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;

        hlen = off + thlen;
        if (skb_gro_header_hard(skb, hlen)) {
                th = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!th))
                        goto out;
        }

        skb_gro_pull(skb, thlen);

        len = skb_gro_len(skb);
        flags = tcp_flag_word(th);

        list_for_each_entry(p, head, list) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                th2 = tcp_hdr(p);

                if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                goto found;
        }
        p = NULL;
        goto out_check_final;

found:
        /* Include the IP ID check below from the innermost IP hdr */
        flush = NAPI_GRO_CB(p)->flush;
        flush |= (__force int)(flags & TCP_FLAG_CWR);
        flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
                  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
        flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
        for (i = sizeof(*th); i < thlen; i += 4)
                flush |= *(u32 *)((u8 *)th + i) ^
                         *(u32 *)((u8 *)th2 + i);

        /* When we receive our second frame we can make a decision on if we
         * continue this flow as an atomic flow with a fixed ID or if we use
         * an incrementing ID.
         */
        if (NAPI_GRO_CB(p)->flush_id != 1 ||
            NAPI_GRO_CB(p)->count != 1 ||
            !NAPI_GRO_CB(p)->is_atomic)
                flush |= NAPI_GRO_CB(p)->flush_id;
        else
                NAPI_GRO_CB(p)->is_atomic = false;

        mss = skb_shinfo(p)->gso_size;

        /* If skb is a GRO packet, make sure its gso_size matches prior packet mss.
         * If it is a single frame, do not aggregate it if its length
         * is bigger than our mss.
         */
        if (unlikely(skb_is_gso(skb)))
                flush |= (mss != skb_shinfo(skb)->gso_size);
        else
                flush |= (len - 1) >= mss;

        flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
        flush |= p->decrypted ^ skb->decrypted;
#endif

        if (flush || skb_gro_receive(p, skb)) {
                mss = 1;
                goto out_check_final;
        }

        tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
        /* Force a flush if last segment is smaller than mss. */
        if (unlikely(skb_is_gso(skb)))
                flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
        else
                flush = len < mss;

        flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
                                        TCP_FLAG_RST | TCP_FLAG_SYN |
                                        TCP_FLAG_FIN));

        if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
                pp = p;

out:
        NAPI_GRO_CB(skb)->flush |= (flush != 0);

        return pp;
}

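/* Finish a merged GRO packet: restore CHECKSUM_PARTIAL offload state,
 * record the number of coalesced segments in gso_segs, and propagate
 * CWR into gso_type so the packet can be resegmented faithfully later,
 * for example when forwarded.
 */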
void tcp_gro_complete(struct sk_buff *skb)
{
        struct tcphdr *th = tcp_hdr(skb);

        skb->csum_start = (unsigned char *)th - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);
        skb->ip_summed = CHECKSUM_PARTIAL;

        skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

        if (th->cwr)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

        if (skb->encapsulation)
                skb->inner_transport_header = skb->transport_header;
}
EXPORT_SYMBOL(tcp_gro_complete);

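/* IPv4 GRO entry point: verify the TCP checksum against the IPv4
 * pseudo-header before attempting to coalesce.
 */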
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
        /* Don't bother verifying checksum if we're going to flush anyway. */
        if (!NAPI_GRO_CB(skb)->flush &&
            skb_gro_checksum_validate(skb, IPPROTO_TCP,
                                      inet_gro_compute_pseudo)) {
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }

        return tcp_gro_receive(head, skb);
}

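/* Invoked when the aggregate is handed to the stack: prime th->check
 * with the IPv4 pseudo-header checksum and tag the skb as TCPv4 GSO so
 * it can be offloaded or resegmented downstream.
 */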
INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
        const struct iphdr *iph = ip_hdr(skb);
        struct tcphdr *th = tcp_hdr(skb);

        th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
                                  iph->daddr, 0);
        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

        if (NAPI_GRO_CB(skb)->is_atomic)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

        tcp_gro_complete(skb);
        return 0;
}

static const struct net_offload tcpv4_offload = {
        .callbacks = {
                .gso_segment    = tcp4_gso_segment,
                .gro_receive    = tcp4_gro_receive,
                .gro_complete   = tcp4_gro_complete,
        },
};

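/* Called from inet_init() at boot to hook the callbacks above into the
 * IPv4 offload path for IPPROTO_TCP.
 */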
int __init tcpv4_offload_init(void)
{
        return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}