Networking fixes for 6.5-rc4, including fixes from can, netfilter

Merge tag 'net-6.5-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
"Including fixes from can, netfilter.

 Current release - regressions:

  - core: fix splice_to_socket() for O_NONBLOCK socket

  - af_unix: fix fortify_panic() in unix_bind_bsd()

  - can: raw: fix lockdep issue in raw_release()

 Previous releases - regressions:

  - tcp: reduce chance of collisions in inet6_hashfn()

  - netfilter: skip immediate deactivate in _PREPARE_ERROR

  - tipc: stop tipc crypto on failure in tipc_node_create

  - eth: igc: fix kernel panic during ndo_tx_timeout callback

  - eth: iavf: fix potential deadlock on allocation failure

 Previous releases - always broken:

  - ipv6: fix bug where deleting a mngtmpaddr can create a new
    temporary address

  - eth: ice: fix memory management in ice_ethtool_fdir.c

  - eth: hns3: fix the imp capability bit cannot exceed 32 bits issue

  - eth: vxlan: calculate correct header length for GPE

  - eth: stmmac: apply redundant write work around on 4.xx too"

Signed-off-by: Paolo Abeni <pabeni@redhat.com>

* tag 'net-6.5-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (49 commits)
  tipc: stop tipc crypto on failure in tipc_node_create
  af_unix: Terminate sun_path when bind()ing pathname socket.
  tipc: check return value of pskb_trim()
  benet: fix return value check in be_lancer_xmit_workarounds()
  virtio-net: fix race between set queues and probe
  net/sched: mqprio: Add length check for TCA_MQPRIO_{MAX/MIN}_RATE64
  splice, net: Fix splice_to_socket() for O_NONBLOCK socket
  net: fec: tx processing does not call XDP APIs if budget is 0
  mptcp: more accurate NL event generation
  selftests: mptcp: join: only check for ip6tables if needed
  tools: ynl-gen: fix parse multi-attr enum attribute
  tools: ynl-gen: fix enum index in _decode_enum(..)
  netfilter: nf_tables: disallow rule addition to bound chain via NFTA_RULE_CHAIN_ID
  netfilter: nf_tables: skip immediate deactivate in _PREPARE_ERROR
  netfilter: nft_set_rbtree: fix overlap expiration walk
  igc: Fix Kernel Panic during ndo_tx_timeout callback
  net: dsa: qca8k: fix mdb add/del case with 0 VID
  net: dsa: qca8k: fix broken search_and_del
  net: dsa: qca8k: fix search_and_insert wrong handling of new rule
  net: dsa: qca8k: enable use_single_write for qca8xxx
  ...

This commit is contained in commit 57012c5753.
49 changed files with 523 additions and 202 deletions.
@@ -65,15 +65,16 @@ argument - drivers can process completions for any number of Tx
 packets but should only process up to ``budget`` number of
 Rx packets. Rx processing is usually much more expensive.

-In other words, it is recommended to ignore the budget argument when
-performing TX buffer reclamation to ensure that the reclamation is not
-arbitrarily bounded; however, it is required to honor the budget argument
-for RX processing.
+In other words for Rx processing the ``budget`` argument limits how many
+packets driver can process in a single poll. Rx specific APIs like page
+pool or XDP cannot be used at all when ``budget`` is 0.
+skb Tx processing should happen regardless of the ``budget``, but if
+the argument is 0 driver cannot call any XDP (or page pool) APIs.

 .. warning::

-   The ``budget`` argument may be 0 if core tries to only process Tx completions
-   and no Rx packets.
+   The ``budget`` argument may be 0 if core tries to only process
+   skb Tx completions and no Rx or XDP packets.

 The poll method returns the amount of work done. If the driver still
 has outstanding work to do (e.g. ``budget`` was exhausted)
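To make the documentation change above concrete, here is a minimal sketch (not code from this diff) of a NAPI poll callback that follows the stated ``budget`` rules; struct my_priv, my_clean_tx(), my_clean_rx() and my_enable_irqs() are hypothetical driver names, while napi_complete_done() is the real core API.

#include <linux/netdevice.h>

struct my_priv {
        struct napi_struct napi;
        /* ... hypothetical driver state ... */
};

static void my_clean_tx(struct my_priv *priv, int budget); /* hypothetical */
static int my_clean_rx(struct my_priv *priv, int budget);  /* hypothetical */
static void my_enable_irqs(struct my_priv *priv);          /* hypothetical */

static int my_poll(struct napi_struct *napi, int budget)
{
        struct my_priv *priv = container_of(napi, struct my_priv, napi);
        int work_done = 0;

        /* skb Tx completion runs even when budget is 0, but it must not
         * touch XDP or page pool APIs in that case (the caller may be in
         * IRQ context, e.g. netpoll).
         */
        my_clean_tx(priv, budget);

        /* Rx (and any XDP work) is bounded by budget and skipped at 0 */
        if (likely(budget))
                work_done = my_clean_rx(priv, budget);

        if (work_done < budget && napi_complete_done(napi, work_done))
                my_enable_irqs(priv);

        return work_done;
}
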
@@ -1508,6 +1508,11 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
         memcpy(bond_dev->broadcast, slave_dev->broadcast,
                 slave_dev->addr_len);

+        if (slave_dev->flags & IFF_POINTOPOINT) {
+                bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+                bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
+        }
 }

 /* On bonding slaves other than the currently active slave, suppress

@@ -1030,6 +1030,8 @@ static int gs_can_close(struct net_device *netdev)
         usb_kill_anchored_urbs(&dev->tx_submitted);
         atomic_set(&dev->active_tx_urbs, 0);

+        dev->can.state = CAN_STATE_STOPPED;
+
         /* reset the device */
         rc = gs_cmd_reset(dev);
         if (rc < 0)
@@ -576,8 +576,11 @@ static struct regmap_config qca8k_regmap_config = {
         .rd_table = &qca8k_readable_table,
         .disable_locking = true, /* Locking is handled by qca8k read/write */
         .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
-        .max_raw_read = 32, /* mgmt eth can read/write up to 8 registers at time */
-        .max_raw_write = 32,
+        .max_raw_read = 32, /* mgmt eth can read up to 8 registers at time */
+        /* ATU regs suffer from a bug where some data are not correctly
+         * written. Disable bulk write to correctly write ATU entry.
+         */
+        .use_single_write = true,
 };

 static int

@@ -244,7 +244,7 @@ void qca8k_fdb_flush(struct qca8k_priv *priv)
 }

 static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
-                                       const u8 *mac, u16 vid)
+                                       const u8 *mac, u16 vid, u8 aging)
 {
         struct qca8k_fdb fdb = { 0 };
         int ret;

@@ -261,10 +261,12 @@ static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
                 goto exit;

         /* Rule exist. Delete first */
-        if (!fdb.aging) {
+        if (fdb.aging) {
                 ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
                 if (ret)
                         goto exit;
+        } else {
+                fdb.aging = aging;
         }

         /* Add port to fdb portmask */

@@ -291,6 +293,10 @@ static int qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
         if (ret < 0)
                 goto exit;

+        ret = qca8k_fdb_read(priv, &fdb);
+        if (ret < 0)
+                goto exit;
+
         /* Rule doesn't exist. Why delete? */
         if (!fdb.aging) {
                 ret = -EINVAL;

@@ -810,7 +816,11 @@ int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
         const u8 *addr = mdb->addr;
         u16 vid = mdb->vid;

-        return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
+        if (!vid)
+                vid = QCA8K_PORT_VID_DEF;
+
+        return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid,
+                                           QCA8K_ATU_STATUS_STATIC);
 }

 int qca8k_port_mdb_del(struct dsa_switch *ds, int port,

@@ -821,6 +831,9 @@ int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
         const u8 *addr = mdb->addr;
         u16 vid = mdb->vid;

+        if (!vid)
+                vid = QCA8K_PORT_VID_DEF;
+
         return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
 }

@@ -2094,8 +2094,11 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
                         real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
                                         + ntohs(ip_hdr(skb)->tot_len));

-                        if (real_len < skb->len)
-                                pskb_trim(skb, real_len);
+                        if (real_len < skb->len) {
+                                err = pskb_trim(skb, real_len);
+                                if (err)
+                                        return err;
+                        }

                         hdr_len = skb_tcp_all_headers(skb);
                         if (unlikely(skb->len == hdr_len)) {

@@ -1641,8 +1641,11 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
                         real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
                                         + ntohs(ip_hdr(skb)->tot_len));

-                        if (real_len < skb->len)
-                                pskb_trim(skb, real_len);
+                        if (real_len < skb->len) {
+                                err = pskb_trim(skb, real_len);
+                                if (err)
+                                        return err;
+                        }

                         hdr_len = skb_tcp_all_headers(skb);
                         if (unlikely(skb->len == hdr_len)) {

@@ -2113,8 +2113,11 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,

                 real_len = (((unsigned char *)iph - skb->data) +
                         ntohs(iph->tot_len));
-                if (real_len < skb->len)
-                        pskb_trim(skb, real_len);
+                if (real_len < skb->len) {
+                        err = pskb_trim(skb, real_len);
+                        if (err)
+                                return err;
+                }
                 hdr_len = skb_tcp_all_headers(skb);
                 if (skb->len == hdr_len) {
                         iph->check = 0;

@@ -1138,7 +1138,8 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
             (lancer_chip(adapter) || BE3_chip(adapter) ||
              skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) {
                 ip = (struct iphdr *)ip_hdr(skb);
-                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
+                if (unlikely(pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len))))
+                        goto tx_drop;
         }

         /* If vlan tag is already inlined in the packet, skip HW VLAN
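The four hunks above apply the same pattern: pskb_trim() can fail (it returns a negative errno when trimming a nonlinear skb forces a reallocation), so its return value must be checked instead of ignored. A generic sketch of the checked form; my_xmit_prep() is a hypothetical wrapper, not code from this diff.

#include <linux/skbuff.h>

static int my_xmit_prep(struct sk_buff *skb, unsigned int real_len)
{
        int err;

        if (real_len < skb->len) {
                err = pskb_trim(skb, real_len); /* may fail on nonlinear skbs */
                if (err)
                        return err;             /* propagate instead of ignoring */
        }
        return 0;
}
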
@@ -1372,7 +1372,7 @@ fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
 }

 static void
-fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
+fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 {
         struct fec_enet_private *fep;
         struct xdp_frame *xdpf;

@@ -1416,6 +1416,14 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
                         if (!skb)
                                 goto tx_buf_done;
                 } else {
+                        /* Tx processing cannot call any XDP (or page pool) APIs if
+                         * the "budget" is 0. Because NAPI is called with budget of
+                         * 0 (such as netpoll) indicates we may be in an IRQ context,
+                         * however, we can't use the page pool from IRQ context.
+                         */
+                        if (unlikely(!budget))
+                                break;
+
                         xdpf = txq->tx_buf[index].xdp;
                         if (bdp->cbd_bufaddr)
                                 dma_unmap_single(&fep->pdev->dev,

@@ -1508,14 +1516,14 @@ tx_buf_done:
                 writel(0, txq->bd.reg_desc_active);
 }

-static void fec_enet_tx(struct net_device *ndev)
+static void fec_enet_tx(struct net_device *ndev, int budget)
 {
         struct fec_enet_private *fep = netdev_priv(ndev);
         int i;

         /* Make sure that AVB queues are processed first. */
         for (i = fep->num_tx_queues - 1; i >= 0; i--)
-                fec_enet_tx_queue(ndev, i);
+                fec_enet_tx_queue(ndev, i, budget);
 }

 static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,

@@ -1858,7 +1866,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)

         do {
                 done += fec_enet_rx(ndev, budget - done);
-                fec_enet_tx(ndev);
+                fec_enet_tx(ndev, budget);
         } while ((done < budget) && fec_enet_collect_events(fep));

         if (done < budget) {

@@ -3916,6 +3924,8 @@ static int fec_enet_xdp_xmit(struct net_device *dev,

         __netif_tx_lock(nq, cpu);

+        /* Avoid tx timeout as XDP shares the queue with kernel stack */
+        txq_trans_cond_update(nq);
         for (i = 0; i < num_frames; i++) {
                 if (fec_enet_txq_xmit_frame(fep, txq, frames[i]) < 0)
                         break;
@@ -31,6 +31,7 @@
 #include <linux/pci.h>
 #include <linux/pkt_sched.h>
 #include <linux/types.h>
+#include <linux/bitmap.h>
 #include <net/pkt_cls.h>
 #include <net/pkt_sched.h>

@@ -101,6 +102,7 @@ enum HNAE3_DEV_CAP_BITS {
         HNAE3_DEV_SUPPORT_FEC_STATS_B,
         HNAE3_DEV_SUPPORT_LANE_NUM_B,
         HNAE3_DEV_SUPPORT_WOL_B,
+        HNAE3_DEV_SUPPORT_TM_FLUSH_B,
 };

 #define hnae3_ae_dev_fd_supported(ae_dev) \

@@ -172,6 +174,9 @@ enum HNAE3_DEV_CAP_BITS {
 #define hnae3_ae_dev_wol_supported(ae_dev) \
         test_bit(HNAE3_DEV_SUPPORT_WOL_B, (ae_dev)->caps)

+#define hnae3_ae_dev_tm_flush_supported(hdev) \
+        test_bit(HNAE3_DEV_SUPPORT_TM_FLUSH_B, (hdev)->ae_dev->caps)
+
 enum HNAE3_PF_CAP_BITS {
         HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
 };

@@ -407,7 +412,7 @@ struct hnae3_ae_dev {
         unsigned long hw_err_reset_req;
         struct hnae3_dev_specs dev_specs;
         u32 dev_version;
-        unsigned long caps[BITS_TO_LONGS(HNAE3_DEV_CAPS_MAX_NUM)];
+        DECLARE_BITMAP(caps, HNAE3_DEV_CAPS_MAX_NUM);
         void *priv;
 };

@@ -156,6 +156,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
         {HCLGE_COMM_CAP_FEC_STATS_B, HNAE3_DEV_SUPPORT_FEC_STATS_B},
         {HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B},
         {HCLGE_COMM_CAP_WOL_B, HNAE3_DEV_SUPPORT_WOL_B},
+        {HCLGE_COMM_CAP_TM_FLUSH_B, HNAE3_DEV_SUPPORT_TM_FLUSH_B},
 };

 static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {

@@ -171,6 +172,20 @@ static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
         {HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
 };

+static void
+hclge_comm_capability_to_bitmap(unsigned long *bitmap, __le32 *caps)
+{
+        const unsigned int words = HCLGE_COMM_QUERY_CAP_LENGTH;
+        u32 val[HCLGE_COMM_QUERY_CAP_LENGTH];
+        unsigned int i;
+
+        for (i = 0; i < words; i++)
+                val[i] = __le32_to_cpu(caps[i]);
+
+        bitmap_from_arr32(bitmap, val,
+                          HCLGE_COMM_QUERY_CAP_LENGTH * BITS_PER_TYPE(u32));
+}
+
 static void
 hclge_comm_parse_capability(struct hnae3_ae_dev *ae_dev, bool is_pf,
                             struct hclge_comm_query_version_cmd *cmd)

@@ -179,11 +194,12 @@ hclge_comm_parse_capability(struct hnae3_ae_dev *ae_dev, bool is_pf,
                 is_pf ? hclge_pf_cmd_caps : hclge_vf_cmd_caps;
         u32 size = is_pf ? ARRAY_SIZE(hclge_pf_cmd_caps) :
                                 ARRAY_SIZE(hclge_vf_cmd_caps);
-        u32 caps, i;
+        DECLARE_BITMAP(caps, HCLGE_COMM_QUERY_CAP_LENGTH * BITS_PER_TYPE(u32));
+        u32 i;

-        caps = __le32_to_cpu(cmd->caps[0]);
+        hclge_comm_capability_to_bitmap(caps, cmd->caps);
         for (i = 0; i < size; i++)
-                if (hnae3_get_bit(caps, caps_map[i].imp_bit))
+                if (test_bit(caps_map[i].imp_bit, caps))
                         set_bit(caps_map[i].local_bit, ae_dev->caps);
 }

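The hns3 hunks above replace single-u32 capability parsing with a proper bitmap so that IMP capability bits above 31 (such as the new TM flush bit at position 31) work on both 32-bit and 64-bit kernels. bitmap_from_arr32() copies an array of u32 words into an unsigned long bitmap portably. A standalone sketch (array contents are made up for illustration):

#include <linux/bitmap.h>

static void demo_caps_to_bitmap(void)
{
        u32 words[3] = { 0x80000001, 0x0, 0x4 }; /* example data only */
        DECLARE_BITMAP(caps, 3 * BITS_PER_TYPE(u32));

        bitmap_from_arr32(caps, words, 3 * BITS_PER_TYPE(u32));

        /* bits 0 and 31 come from words[0], bit 66 from words[2] */
        if (test_bit(31, caps))
                pr_info("bit 31 set\n");
}
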
@@ -153,6 +153,7 @@ enum hclge_opcode_type {
         HCLGE_OPC_TM_INTERNAL_STS = 0x0850,
         HCLGE_OPC_TM_INTERNAL_CNT = 0x0851,
         HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852,
+        HCLGE_OPC_TM_FLUSH = 0x0872,

         /* Packet buffer allocate commands */
         HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,

@@ -349,6 +350,7 @@ enum HCLGE_COMM_CAP_BITS {
         HCLGE_COMM_CAP_FEC_STATS_B = 25,
         HCLGE_COMM_CAP_LANE_NUM_B = 27,
         HCLGE_COMM_CAP_WOL_B = 28,
+        HCLGE_COMM_CAP_TM_FLUSH_B = 31,
 };

 enum HCLGE_COMM_API_CAP_BITS {

@@ -411,6 +411,9 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
         }, {
                 .name = "support wake on lan",
                 .cap_bit = HNAE3_DEV_SUPPORT_WOL_B,
+        }, {
+                .name = "support tm flush",
+                .cap_bit = HNAE3_DEV_SUPPORT_TM_FLUSH_B,
         }
 };

@@ -52,7 +52,10 @@ static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,

         for (i = 0; i < HNAE3_MAX_TC; i++) {
                 ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
-                ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
+                if (i < hdev->tm_info.num_tc)
+                        ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
+                else
+                        ets->tc_tx_bw[i] = 0;

                 if (hdev->tm_info.tc_info[i].tc_sch_mode ==
                     HCLGE_SCH_MODE_SP)

@@ -123,7 +126,8 @@ static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets,
 }

 static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
-                                       struct ieee_ets *ets, bool *changed)
+                                       struct ieee_ets *ets, bool *changed,
+                                       u8 tc_num)
 {
         bool has_ets_tc = false;
         u32 total_ets_bw = 0;

@@ -137,6 +141,13 @@ static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
                         *changed = true;
                         break;
                 case IEEE_8021QAZ_TSA_ETS:
+                        if (i >= tc_num) {
+                                dev_err(&hdev->pdev->dev,
+                                        "tc%u is disabled, cannot set ets bw\n",
+                                        i);
+                                return -EINVAL;
+                        }
+
                         /* The hardware will switch to sp mode if bandwidth is
                          * 0, so limit ets bandwidth must be greater than 0.
                          */

@@ -176,7 +187,7 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
         if (ret)
                 return ret;

-        ret = hclge_ets_sch_mode_validate(hdev, ets, changed);
+        ret = hclge_ets_sch_mode_validate(hdev, ets, changed, tc_num);
         if (ret)
                 return ret;

@@ -216,6 +227,10 @@ static int hclge_notify_down_uinit(struct hclge_dev *hdev)
         if (ret)
                 return ret;

+        ret = hclge_tm_flush_cfg(hdev, true);
+        if (ret)
+                return ret;
+
         return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
 }

@@ -227,6 +242,10 @@ static int hclge_notify_init_up(struct hclge_dev *hdev)
         if (ret)
                 return ret;

+        ret = hclge_tm_flush_cfg(hdev, false);
+        if (ret)
+                return ret;
+
         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
 }

@@ -313,6 +332,7 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
         struct net_device *netdev = h->kinfo.netdev;
         struct hclge_dev *hdev = vport->back;
         u8 i, j, pfc_map, *prio_tc;
+        int last_bad_ret = 0;
         int ret;

         if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))

@@ -350,13 +370,28 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
         if (ret)
                 return ret;

-        ret = hclge_buffer_alloc(hdev);
-        if (ret) {
-                hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+        ret = hclge_tm_flush_cfg(hdev, true);
+        if (ret)
                 return ret;
-        }

-        return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+        /* No matter whether the following operations are performed
+         * successfully or not, disabling the tm flush and notify
+         * the network status to up are necessary.
+         * Do not return immediately.
+         */
+        ret = hclge_buffer_alloc(hdev);
+        if (ret)
+                last_bad_ret = ret;
+
+        ret = hclge_tm_flush_cfg(hdev, false);
+        if (ret)
+                last_bad_ret = ret;
+
+        ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+        if (ret)
+                last_bad_ret = ret;
+
+        return last_bad_ret;
 }

 static int hclge_ieee_setapp(struct hnae3_handle *h, struct dcb_app *app)
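The hunk above uses a "record the last failure, but still run every remaining step" pattern, as its comment explains. A generic sketch of that pattern, with step1() and step2() as hypothetical helpers, not functions from this diff:

/* Generic sketch of the "continue cleanup, return last error" pattern
 * used in hclge_ieee_setpfc() above; the step helpers are hypothetical.
 */
extern int step1(void); /* hypothetical */
extern int step2(void); /* hypothetical */

static int teardown_all(void)
{
        int last_bad_ret = 0;
        int ret;

        ret = step1();          /* must run even if later steps fail */
        if (ret)
                last_bad_ret = ret;

        ret = step2();          /* must also always run */
        if (ret)
                last_bad_ret = ret;

        return last_bad_ret;    /* 0 only if every step succeeded */
}
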
@@ -693,8 +693,7 @@ static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
         for (i = 0; i < HNAE3_MAX_TC; i++) {
                 sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
                 pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n",
-                                 i, sch_mode_str,
-                                 hdev->tm_info.pg_info[0].tc_dwrr[i]);
+                                 i, sch_mode_str, ets_weight->tc_weight[i]);
         }

         return 0;

@@ -785,6 +785,7 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
 static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
 {
 #define BW_PERCENT 100
+#define DEFAULT_BW_WEIGHT 1

         u8 i;

@@ -806,7 +807,7 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
                 for (k = 0; k < hdev->tm_info.num_tc; k++)
                         hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
                 for (; k < HNAE3_MAX_TC; k++)
-                        hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
+                        hdev->tm_info.pg_info[i].tc_dwrr[k] = DEFAULT_BW_WEIGHT;
         }
 }

@@ -1484,7 +1485,11 @@ int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
                 return ret;

         /* Cfg schd mode for each level schd */
-        return hclge_tm_schd_mode_hw(hdev);
+        ret = hclge_tm_schd_mode_hw(hdev);
+        if (ret)
+                return ret;
+
+        return hclge_tm_flush_cfg(hdev, false);
 }

 static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)

@@ -2113,3 +2118,28 @@ int hclge_tm_get_port_shaper(struct hclge_dev *hdev,

         return 0;
 }
+
+int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable)
+{
+        struct hclge_desc desc;
+        int ret;
+
+        if (!hnae3_ae_dev_tm_flush_supported(hdev))
+                return 0;
+
+        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_FLUSH, false);
+
+        desc.data[0] = cpu_to_le32(enable ? HCLGE_TM_FLUSH_EN_MSK : 0);
+
+        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+        if (ret) {
+                dev_err(&hdev->pdev->dev,
+                        "failed to config tm flush, ret = %d\n", ret);
+                return ret;
+        }
+
+        if (enable)
+                msleep(HCLGE_TM_FLUSH_TIME_MS);
+
+        return ret;
+}
@@ -33,6 +33,9 @@ enum hclge_opcode_type;
 #define HCLGE_DSCP_MAP_TC_BD_NUM 2
 #define HCLGE_DSCP_TC_SHIFT(n) (((n) & 1) * 4)

+#define HCLGE_TM_FLUSH_TIME_MS 10
+#define HCLGE_TM_FLUSH_EN_MSK BIT(0)
+
 struct hclge_pg_to_pri_link_cmd {
         u8 pg_id;
         u8 rsvd1[3];

@@ -272,4 +275,5 @@ int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
                              struct hclge_tm_shaper_para *para);
 int hclge_up_to_tc_map(struct hclge_dev *hdev);
 int hclge_dscp_to_tc_map(struct hclge_dev *hdev);
+int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable);
 #endif
@@ -1839,7 +1839,7 @@ void i40e_dbg_pf_exit(struct i40e_pf *pf)
 void i40e_dbg_init(void)
 {
         i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
-        if (!i40e_dbg_root)
+        if (IS_ERR(i40e_dbg_root))
                 pr_info("init of debugfs failed\n");
 }

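The i40e hunk reflects the debugfs API contract: debugfs_create_dir() does not return NULL on failure, it returns an ERR_PTR (for example when debugfs is disabled), so a NULL check is dead code. A minimal sketch of the correct check; "mydrv" is a placeholder name.

#include <linux/debugfs.h>
#include <linux/err.h>

static struct dentry *mydrv_dbg_root;

static void mydrv_dbg_init(void)
{
        mydrv_dbg_root = debugfs_create_dir("mydrv", NULL);
        if (IS_ERR(mydrv_dbg_root)) /* never NULL on failure */
                pr_info("debugfs init failed\n");
}
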
@@ -3250,9 +3250,6 @@ static void iavf_adminq_task(struct work_struct *work)
         u32 val, oldval;
         u16 pending;

-        if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
-                goto out;
-
         if (!mutex_trylock(&adapter->crit_lock)) {
                 if (adapter->state == __IAVF_REMOVE)
                         return;

@@ -3261,10 +3258,13 @@ static void iavf_adminq_task(struct work_struct *work)
                 goto out;
         }

+        if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
+                goto unlock;
+
         event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
         event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
         if (!event.msg_buf)
-                goto out;
+                goto unlock;

         do {
                 ret = iavf_clean_arq_element(hw, &event, &pending);

@@ -3279,7 +3279,6 @@ static void iavf_adminq_task(struct work_struct *work)
                 if (pending != 0)
                         memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
         } while (pending);
-        mutex_unlock(&adapter->crit_lock);

         if (iavf_is_reset_in_progress(adapter))
                 goto freedom;

@@ -3323,6 +3322,8 @@ static void iavf_adminq_task(struct work_struct *work)

 freedom:
         kfree(event.msg_buf);
+unlock:
+        mutex_unlock(&adapter->crit_lock);
 out:
         /* re-enable Admin queue interrupt cause */
         iavf_misc_irq_enable(adapter);
@@ -1281,16 +1281,21 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
                                  ICE_FLOW_FLD_OFF_INVAL);
         }

-        /* add filter for outer headers */
         fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT);
+
+        assign_bit(fltr_idx, hw->fdir_perfect_fltr, perfect_filter);
+
+        /* add filter for outer headers */
         ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx,
                                         ICE_FD_HW_SEG_NON_TUN);
-        if (ret == -EEXIST)
-                /* Rule already exists, free memory and continue */
-                devm_kfree(dev, seg);
-        else if (ret)
+        if (ret == -EEXIST) {
+                /* Rule already exists, free memory and count as success */
+                ret = 0;
+                goto err_exit;
+        } else if (ret) {
                 /* could not write filter, free memory */
                 goto err_exit;
+        }

         /* make tunneled filter HW entries if possible */
         memcpy(&tun_seg[1], seg, sizeof(*seg));

@@ -1305,18 +1310,13 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
                 devm_kfree(dev, tun_seg);
         }

-        if (perfect_filter)
-                set_bit(fltr_idx, hw->fdir_perfect_fltr);
-        else
-                clear_bit(fltr_idx, hw->fdir_perfect_fltr);
-
         return ret;

 err_exit:
         devm_kfree(dev, tun_seg);
         devm_kfree(dev, seg);

-        return -EOPNOTSUPP;
+        return ret;
 }

 /**

@@ -1914,7 +1914,9 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
         input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;

         /* input struct is added to the HW filter list */
-        ice_fdir_update_list_entry(pf, input, fsp->location);
+        ret = ice_fdir_update_list_entry(pf, input, fsp->location);
+        if (ret)
+                goto release_lock;

         ret = ice_fdir_write_all_fltr(pf, input, true);
         if (ret)
@@ -316,6 +316,33 @@ static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
                         igc_clean_tx_ring(adapter->tx_ring[i]);
 }

+static void igc_disable_tx_ring_hw(struct igc_ring *ring)
+{
+        struct igc_hw *hw = &ring->q_vector->adapter->hw;
+        u8 idx = ring->reg_idx;
+        u32 txdctl;
+
+        txdctl = rd32(IGC_TXDCTL(idx));
+        txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
+        txdctl |= IGC_TXDCTL_SWFLUSH;
+        wr32(IGC_TXDCTL(idx), txdctl);
+}
+
+/**
+ * igc_disable_all_tx_rings_hw - Disable all transmit queue operation
+ * @adapter: board private structure
+ */
+static void igc_disable_all_tx_rings_hw(struct igc_adapter *adapter)
+{
+        int i;
+
+        for (i = 0; i < adapter->num_tx_queues; i++) {
+                struct igc_ring *tx_ring = adapter->tx_ring[i];
+
+                igc_disable_tx_ring_hw(tx_ring);
+        }
+}
+
 /**
  * igc_setup_tx_resources - allocate Tx resources (Descriptors)
  * @tx_ring: tx descriptor ring (for a specific queue) to setup

@@ -5058,6 +5085,7 @@ void igc_down(struct igc_adapter *adapter)
         /* clear VLAN promisc flag so VFTA will be updated if necessary */
         adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;

+        igc_disable_all_tx_rings_hw(adapter);
         igc_clean_all_tx_rings(adapter);
         igc_clean_all_rx_rings(adapter);
 }

@@ -7290,18 +7318,6 @@ void igc_enable_rx_ring(struct igc_ring *ring)
                 igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
 }

-static void igc_disable_tx_ring_hw(struct igc_ring *ring)
-{
-        struct igc_hw *hw = &ring->q_vector->adapter->hw;
-        u8 idx = ring->reg_idx;
-        u32 txdctl;
-
-        txdctl = rd32(IGC_TXDCTL(idx));
-        txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
-        txdctl |= IGC_TXDCTL_SWFLUSH;
-        wr32(IGC_TXDCTL(idx), txdctl);
-}
-
 void igc_disable_tx_ring(struct igc_ring *ring)
 {
         igc_disable_tx_ring_hw(ring);
@@ -8479,7 +8479,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
                 struct ixgbe_adapter *adapter = q_vector->adapter;

                 if (unlikely(skb_tail_pointer(skb) < hdr.network +
-                             VXLAN_HEADROOM))
+                             vxlan_headroom(0)))
                         return;

                 /* verify the port is recognized as VXLAN */
@@ -218,13 +218,54 @@ void npc_config_secret_key(struct rvu *rvu, int blkaddr)

 void npc_program_mkex_hash(struct rvu *rvu, int blkaddr)
 {
+        struct npc_mcam_kex_hash *mh = rvu->kpu.mkex_hash;
         struct hw_cap *hwcap = &rvu->hw->cap;
+        u8 intf, ld, hdr_offset, byte_len;
         struct rvu_hwinfo *hw = rvu->hw;
-        u8 intf;
+        u64 cfg;

+        /* Check if hardware supports hash extraction */
         if (!hwcap->npc_hash_extract)
                 return;

+        /* Check if IPv6 source/destination address
+         * should be hash enabled.
+         * Hashing reduces 128bit SIP/DIP fields to 32bit
+         * so that 224 bit X2 key can be used for IPv6 based filters as well,
+         * which in turn results in more number of MCAM entries available for
+         * use.
+         *
+         * Hashing of IPV6 SIP/DIP is enabled in below scenarios
+         * 1. If the silicon variant supports hashing feature
+         * 2. If the number of bytes of IP addr being extracted is 4 bytes ie
+         *    32bit. The assumption here is that if user wants 8bytes of LSB of
+         *    IP addr or full 16 bytes then his intention is not to use 32bit
+         *    hash.
+         */
+        for (intf = 0; intf < hw->npc_intfs; intf++) {
+                for (ld = 0; ld < NPC_MAX_LD; ld++) {
+                        cfg = rvu_read64(rvu, blkaddr,
+                                         NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf,
+                                                                       NPC_LID_LC,
+                                                                       NPC_LT_LC_IP6,
+                                                                       ld));
+                        hdr_offset = FIELD_GET(NPC_HDR_OFFSET, cfg);
+                        byte_len = FIELD_GET(NPC_BYTESM, cfg);
+                        /* Hashing of IPv6 source/destination address should be
+                         * enabled if,
+                         * hdr_offset == 8 (offset of source IPv6 address) or
+                         * hdr_offset == 24 (offset of destination IPv6)
+                         * address) and the number of byte to be
+                         * extracted is 4. As per hardware configuration
+                         * byte_len should be == actual byte_len - 1.
+                         * Hence byte_len is checked against 3 but nor 4.
+                         */
+                        if ((hdr_offset == 8 || hdr_offset == 24) && byte_len == 3)
+                                mh->lid_lt_ld_hash_en[intf][NPC_LID_LC][NPC_LT_LC_IP6][ld] = true;
+                }
+        }
+
+        /* Update hash configuration if the field is hash enabled */
         for (intf = 0; intf < hw->npc_intfs; intf++) {
                 npc_program_mkex_hash_rx(rvu, blkaddr, intf);
                 npc_program_mkex_hash_tx(rvu, blkaddr, intf);

@@ -70,8 +70,8 @@ static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = {
         [NIX_INTF_RX] = {
                 [NPC_LID_LC] = {
                         [NPC_LT_LC_IP6] = {
-                                true,
-                                true,
+                                false,
+                                false,
                         },
                 },
         },

@@ -79,8 +79,8 @@ static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = {
         [NIX_INTF_TX] = {
                 [NPC_LID_LC] = {
                         [NPC_LT_LC_IP6] = {
-                                true,
-                                true,
+                                false,
+                                false,
                         },
                 },
         },
@@ -240,13 +240,15 @@ void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
 void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable)
 {
         u32 value = readl(ioaddr + GMAC_CONFIG);
+        u32 old_val = value;

         if (enable)
                 value |= GMAC_CONFIG_RE | GMAC_CONFIG_TE;
         else
                 value &= ~(GMAC_CONFIG_TE | GMAC_CONFIG_RE);

-        writel(value, ioaddr + GMAC_CONFIG);
+        if (value != old_val)
+                writel(value, ioaddr + GMAC_CONFIG);
 }

 void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
@@ -273,16 +273,15 @@ static int ipa_filter_reset(struct ipa *ipa, bool modem)
         if (ret)
                 return ret;

+        ret = ipa_filter_reset_table(ipa, false, true, modem);
+        if (ret || !ipa_table_hash_support(ipa))
+                return ret;
+
         ret = ipa_filter_reset_table(ipa, true, false, modem);
         if (ret)
                 return ret;

-        ret = ipa_filter_reset_table(ipa, false, true, modem);
-        if (ret)
-                return ret;
-        ret = ipa_filter_reset_table(ipa, true, true, modem);
-
-        return ret;
+        return ipa_filter_reset_table(ipa, true, true, modem);
 }

 /* The AP routes and modem routes are each contiguous within the

@@ -291,12 +290,13 @@ static int ipa_filter_reset(struct ipa *ipa, bool modem)
  */
 static int ipa_route_reset(struct ipa *ipa, bool modem)
 {
+        bool hash_support = ipa_table_hash_support(ipa);
         u32 modem_route_count = ipa->modem_route_count;
         struct gsi_trans *trans;
         u16 first;
         u16 count;

-        trans = ipa_cmd_trans_alloc(ipa, 4);
+        trans = ipa_cmd_trans_alloc(ipa, hash_support ? 4 : 2);
         if (!trans) {
                 dev_err(&ipa->pdev->dev,
                         "no transaction for %s route reset\n",

@@ -313,10 +313,12 @@ static int ipa_route_reset(struct ipa *ipa, bool modem)
         }

         ipa_table_reset_add(trans, false, false, false, first, count);
-        ipa_table_reset_add(trans, false, true, false, first, count);
-
         ipa_table_reset_add(trans, false, false, true, first, count);
-        ipa_table_reset_add(trans, false, true, true, first, count);
+
+        if (hash_support) {
+                ipa_table_reset_add(trans, false, true, false, first, count);
+                ipa_table_reset_add(trans, false, true, true, first, count);
+        }

         gsi_trans_commit_wait(trans);

@@ -1746,6 +1746,7 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
         [IFLA_MACVLAN_MACADDR_COUNT] = { .type = NLA_U32 },
         [IFLA_MACVLAN_BC_QUEUE_LEN] = { .type = NLA_U32 },
         [IFLA_MACVLAN_BC_QUEUE_LEN_USED] = { .type = NLA_REJECT },
+        [IFLA_MACVLAN_BC_CUTOFF] = { .type = NLA_S32 },
 };

 int macvlan_link_register(struct rtnl_link_ops *ops)
@@ -328,6 +328,13 @@ static int mv3310_power_up(struct phy_device *phydev)
         ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
                                  MV_V2_PORT_CTRL_PWRDOWN);

+        /* Sometimes, the power down bit doesn't clear immediately, and
+         * a read of this register causes the bit not to clear. Delay
+         * 100us to allow the PHY to come out of power down mode before
+         * the next access.
+         */
+        udelay(100);
+
         if (phydev->drv->phy_id != MARVELL_PHY_ID_88X3310 ||
             priv->firmware_ver < 0x00030000)
                 return ret;
@@ -2135,6 +2135,15 @@ static void team_setup_by_port(struct net_device *dev,
         dev->mtu = port_dev->mtu;
         memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
         eth_hw_addr_inherit(dev, port_dev);
+
+        if (port_dev->flags & IFF_POINTOPOINT) {
+                dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+                dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
+        } else if ((port_dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) ==
+                   (IFF_BROADCAST | IFF_MULTICAST)) {
+                dev->flags |= (IFF_BROADCAST | IFF_MULTICAST);
+                dev->flags &= ~(IFF_POINTOPOINT | IFF_NOARP);
+        }
 }

 static int team_dev_type_check_change(struct net_device *dev,
@@ -4219,6 +4219,8 @@ static int virtnet_probe(struct virtio_device *vdev)
         if (vi->has_rss || vi->has_rss_hash_report)
                 virtnet_init_default_rss(vi);

+        _virtnet_set_queues(vi, vi->curr_queue_pairs);
+
         /* serialize netdev register + virtio_device_ready() with ndo_open() */
         rtnl_lock();

@@ -4257,8 +4259,6 @@ static int virtnet_probe(struct virtio_device *vdev)
                 goto free_unregister_netdev;
         }

-        virtnet_set_queues(vi, vi->curr_queue_pairs);
-
         /* Assume link up if device can't report link status,
            otherwise get link status from config. */
         netif_carrier_off(dev);
@ -623,6 +623,32 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static bool vxlan_parse_gpe_proto(struct vxlanhdr *hdr, __be16 *protocol)
|
||||||
|
{
|
||||||
|
struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)hdr;
|
||||||
|
|
||||||
|
/* Need to have Next Protocol set for interfaces in GPE mode. */
|
||||||
|
if (!gpe->np_applied)
|
||||||
|
return false;
|
||||||
|
/* "The initial version is 0. If a receiver does not support the
|
||||||
|
* version indicated it MUST drop the packet.
|
||||||
|
*/
|
||||||
|
if (gpe->version != 0)
|
||||||
|
return false;
|
||||||
|
/* "When the O bit is set to 1, the packet is an OAM packet and OAM
|
||||||
|
* processing MUST occur." However, we don't implement OAM
|
||||||
|
* processing, thus drop the packet.
|
||||||
|
*/
|
||||||
|
if (gpe->oam_flag)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
*protocol = tun_p_to_eth_p(gpe->next_protocol);
|
||||||
|
if (!*protocol)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
|
static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
|
||||||
unsigned int off,
|
unsigned int off,
|
||||||
struct vxlanhdr *vh, size_t hdrlen,
|
struct vxlanhdr *vh, size_t hdrlen,
|
||||||
|
@ -649,26 +675,24 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
|
||||||
return vh;
|
return vh;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct sk_buff *vxlan_gro_receive(struct sock *sk,
|
static struct vxlanhdr *vxlan_gro_prepare_receive(struct sock *sk,
|
||||||
struct list_head *head,
|
struct list_head *head,
|
||||||
struct sk_buff *skb)
|
struct sk_buff *skb,
|
||||||
|
struct gro_remcsum *grc)
|
||||||
{
|
{
|
||||||
struct sk_buff *pp = NULL;
|
|
||||||
struct sk_buff *p;
|
struct sk_buff *p;
|
||||||
struct vxlanhdr *vh, *vh2;
|
struct vxlanhdr *vh, *vh2;
|
||||||
unsigned int hlen, off_vx;
|
unsigned int hlen, off_vx;
|
||||||
int flush = 1;
|
|
||||||
struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
|
struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
|
||||||
__be32 flags;
|
__be32 flags;
|
||||||
struct gro_remcsum grc;
|
|
||||||
|
|
||||||
skb_gro_remcsum_init(&grc);
|
skb_gro_remcsum_init(grc);
|
||||||
|
|
||||||
off_vx = skb_gro_offset(skb);
|
off_vx = skb_gro_offset(skb);
|
||||||
hlen = off_vx + sizeof(*vh);
|
hlen = off_vx + sizeof(*vh);
|
||||||
vh = skb_gro_header(skb, hlen, off_vx);
|
vh = skb_gro_header(skb, hlen, off_vx);
|
||||||
if (unlikely(!vh))
|
if (unlikely(!vh))
|
||||||
goto out;
|
return NULL;
|
||||||
|
|
||||||
skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
|
skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
|
||||||
|
|
||||||
|
@ -676,12 +700,12 @@ static struct sk_buff *vxlan_gro_receive(struct sock *sk,
|
||||||
|
|
||||||
if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
|
if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
|
||||||
vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
|
vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
|
||||||
vh->vx_vni, &grc,
|
vh->vx_vni, grc,
|
||||||
!!(vs->flags &
|
!!(vs->flags &
|
||||||
VXLAN_F_REMCSUM_NOPARTIAL));
|
VXLAN_F_REMCSUM_NOPARTIAL));
|
||||||
|
|
||||||
if (!vh)
|
if (!vh)
|
||||||
goto out;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
|
skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
|
||||||
|
@ -698,12 +722,48 @@ static struct sk_buff *vxlan_gro_receive(struct sock *sk,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pp = call_gro_receive(eth_gro_receive, head, skb);
|
return vh;
|
||||||
flush = 0;
|
}
|
||||||
|
|
||||||
|
static struct sk_buff *vxlan_gro_receive(struct sock *sk,
|
||||||
|
struct list_head *head,
|
||||||
|
struct sk_buff *skb)
|
||||||
|
{
|
||||||
|
struct sk_buff *pp = NULL;
|
||||||
|
struct gro_remcsum grc;
|
||||||
|
int flush = 1;
|
||||||
|
|
||||||
|
if (vxlan_gro_prepare_receive(sk, head, skb, &grc)) {
|
||||||
|
pp = call_gro_receive(eth_gro_receive, head, skb);
|
||||||
|
flush = 0;
|
||||||
|
}
|
||||||
|
skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
|
||||||
|
return pp;
|
||||||
|
}
|
||||||
|
|
||||||
|
static struct sk_buff *vxlan_gpe_gro_receive(struct sock *sk,
|
||||||
|
struct list_head *head,
|
||||||
|
struct sk_buff *skb)
|
||||||
|
{
|
||||||
|
const struct packet_offload *ptype;
|
||||||
|
struct sk_buff *pp = NULL;
|
||||||
|
struct gro_remcsum grc;
|
||||||
|
struct vxlanhdr *vh;
|
||||||
|
__be16 protocol;
|
||||||
|
int flush = 1;
|
||||||
|
|
||||||
|
vh = vxlan_gro_prepare_receive(sk, head, skb, &grc);
|
||||||
|
if (vh) {
|
||||||
|
if (!vxlan_parse_gpe_proto(vh, &protocol))
|
||||||
|
goto out;
|
||||||
|
ptype = gro_find_receive_by_type(protocol);
|
||||||
|
if (!ptype)
|
||||||
|
goto out;
|
||||||
|
pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
|
||||||
|
flush = 0;
|
||||||
|
}
|
||||||
out:
|
out:
|
||||||
skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
|
skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
|
||||||
|
|
||||||
return pp;
|
return pp;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -715,6 +775,21 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
|
||||||
return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
|
return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int vxlan_gpe_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
|
||||||
|
{
|
||||||
|
struct vxlanhdr *vh = (struct vxlanhdr *)(skb->data + nhoff);
|
||||||
|
const struct packet_offload *ptype;
|
||||||
|
int err = -ENOSYS;
|
||||||
|
__be16 protocol;
|
||||||
|
|
||||||
|
if (!vxlan_parse_gpe_proto(vh, &protocol))
|
||||||
|
return err;
|
||||||
|
ptype = gro_find_complete_by_type(protocol);
|
||||||
|
if (ptype)
|
||||||
|
err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, const u8 *mac,
|
static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, const u8 *mac,
|
||||||
__u16 state, __be32 src_vni,
|
__u16 state, __be32 src_vni,
|
||||||
__u16 ndm_flags)
|
__u16 ndm_flags)
|
||||||
|
@@ -1525,35 +1600,6 @@ out:
 	unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
 }
 
-static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
-				__be16 *protocol,
-				struct sk_buff *skb, u32 vxflags)
-{
-	struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;
-
-	/* Need to have Next Protocol set for interfaces in GPE mode. */
-	if (!gpe->np_applied)
-		return false;
-	/* "The initial version is 0. If a receiver does not support the
-	 * version indicated it MUST drop the packet.
-	 */
-	if (gpe->version != 0)
-		return false;
-	/* "When the O bit is set to 1, the packet is an OAM packet and OAM
-	 * processing MUST occur." However, we don't implement OAM
-	 * processing, thus drop the packet.
-	 */
-	if (gpe->oam_flag)
-		return false;
-
-	*protocol = tun_p_to_eth_p(gpe->next_protocol);
-	if (!*protocol)
-		return false;
-
-	unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS;
-	return true;
-}
-
 static bool vxlan_set_mac(struct vxlan_dev *vxlan,
 			  struct vxlan_sock *vs,
 			  struct sk_buff *skb, __be32 vni)
@@ -1655,8 +1701,9 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
 	 * used by VXLAN extensions if explicitly requested.
 	 */
 	if (vs->flags & VXLAN_F_GPE) {
-		if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
+		if (!vxlan_parse_gpe_proto(&unparsed, &protocol))
 			goto drop;
+		unparsed.vx_flags &= ~VXLAN_GPE_USED_BITS;
 		raw_proto = true;
 	}
 
@@ -2516,7 +2563,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 		}
 
 		ndst = &rt->dst;
-		err = skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM,
+		err = skb_tunnel_check_pmtu(skb, ndst, vxlan_headroom(flags & VXLAN_F_GPE),
 					    netif_is_any_bridge_port(dev));
 		if (err < 0) {
 			goto tx_error;

@@ -2577,7 +2624,8 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 			goto out_unlock;
 		}
 
-		err = skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM,
+		err = skb_tunnel_check_pmtu(skb, ndst,
+					    vxlan_headroom((flags & VXLAN_F_GPE) | VXLAN_F_IPV6),
 					    netif_is_any_bridge_port(dev));
 		if (err < 0) {
 			goto tx_error;
@@ -2989,14 +3037,12 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
 	struct vxlan_rdst *dst = &vxlan->default_dst;
 	struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
 							 dst->remote_ifindex);
-	bool use_ipv6 = !!(vxlan->cfg.flags & VXLAN_F_IPV6);
 
 	/* This check is different than dev->max_mtu, because it looks at
 	 * the lowerdev->mtu, rather than the static dev->max_mtu
 	 */
 	if (lowerdev) {
-		int max_mtu = lowerdev->mtu -
-			      (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
+		int max_mtu = lowerdev->mtu - vxlan_headroom(vxlan->cfg.flags);
 		if (new_mtu > max_mtu)
 			return -EINVAL;
 	}
@@ -3379,8 +3425,13 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
 	tunnel_cfg.encap_rcv = vxlan_rcv;
 	tunnel_cfg.encap_err_lookup = vxlan_err_lookup;
 	tunnel_cfg.encap_destroy = NULL;
-	tunnel_cfg.gro_receive = vxlan_gro_receive;
-	tunnel_cfg.gro_complete = vxlan_gro_complete;
+	if (vs->flags & VXLAN_F_GPE) {
+		tunnel_cfg.gro_receive = vxlan_gpe_gro_receive;
+		tunnel_cfg.gro_complete = vxlan_gpe_gro_complete;
+	} else {
+		tunnel_cfg.gro_receive = vxlan_gro_receive;
+		tunnel_cfg.gro_complete = vxlan_gro_complete;
+	}
 
 	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
 
@@ -3644,11 +3695,11 @@ static void vxlan_config_apply(struct net_device *dev,
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct vxlan_rdst *dst = &vxlan->default_dst;
 	unsigned short needed_headroom = ETH_HLEN;
-	bool use_ipv6 = !!(conf->flags & VXLAN_F_IPV6);
 	int max_mtu = ETH_MAX_MTU;
+	u32 flags = conf->flags;
 
 	if (!changelink) {
-		if (conf->flags & VXLAN_F_GPE)
+		if (flags & VXLAN_F_GPE)
 			vxlan_raw_setup(dev);
 		else
 			vxlan_ether_setup(dev);

@@ -3673,8 +3724,7 @@ static void vxlan_config_apply(struct net_device *dev,
 
 		dev->needed_tailroom = lowerdev->needed_tailroom;
 
-		max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
-					   VXLAN_HEADROOM);
+		max_mtu = lowerdev->mtu - vxlan_headroom(flags);
 		if (max_mtu < ETH_MIN_MTU)
 			max_mtu = ETH_MIN_MTU;
 

@@ -3685,10 +3735,9 @@ static void vxlan_config_apply(struct net_device *dev,
 	if (dev->mtu > max_mtu)
 		dev->mtu = max_mtu;
 
-	if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
-		needed_headroom += VXLAN6_HEADROOM;
-	else
-		needed_headroom += VXLAN_HEADROOM;
+	if (flags & VXLAN_F_COLLECT_METADATA)
+		flags |= VXLAN_F_IPV6;
+	needed_headroom += vxlan_headroom(flags);
 	dev->needed_headroom = needed_headroom;
 
 	memcpy(&vxlan->cfg, conf, sizeof(*conf));
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -876,6 +876,8 @@ ssize_t splice_to_socket(struct pipe_inode_info *pipe, struct file *out,
 			msg.msg_flags |= MSG_MORE;
 		if (remain && pipe_occupancy(pipe->head, tail) > 0)
 			msg.msg_flags |= MSG_MORE;
+		if (out->f_flags & O_NONBLOCK)
+			msg.msg_flags |= MSG_DONTWAIT;
 
 		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, bvec, bc,
 			      len - remain);
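The splice fix maps the output file's O_NONBLOCK flag onto MSG_DONTWAIT for each sendmsg call, so a non-blocking sender gets EAGAIN instead of sleeping once the socket buffer fills. A user-space sketch of the triggering pattern, assuming a Linux host (the pipe/socketpair setup is only illustrative):

#define _GNU_SOURCE             /* for splice() */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

int main(void)
{
        int sv[2], p[2];
        char buf[4096];

        memset(buf, 'x', sizeof(buf));
        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) || pipe(p))
                return 1;
        /* mark the *socket* non-blocking; the pipe stays blocking */
        fcntl(sv[0], F_SETFL, fcntl(sv[0], F_GETFL) | O_NONBLOCK);

        for (;;) {
                if (write(p[1], buf, sizeof(buf)) < 0)
                        break;
                /* move pipe contents into the socket without copying */
                ssize_t n = splice(p[0], NULL, sv[0], NULL, sizeof(buf), 0);
                if (n < 0) {
                        /* fixed kernels: EAGAIN once sv[0]'s buffer is
                         * full; before the fix this splice() could block
                         * despite O_NONBLOCK
                         */
                        perror("splice");
                        break;
                }
        }
        return 0;
}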
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -752,12 +752,8 @@ static inline u32 ipv6_addr_hash(const struct in6_addr *a)
 /* more secured version of ipv6_addr_hash() */
 static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
 {
-	u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
-
-	return jhash_3words(v,
-			    (__force u32)a->s6_addr32[2],
-			    (__force u32)a->s6_addr32[3],
-			    initval);
+	return jhash2((__force const u32 *)a->s6_addr32,
+		      ARRAY_SIZE(a->s6_addr32), initval);
 }
 
 static inline bool ipv6_addr_loopback(const struct in6_addr *a)
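The weakness in the old helper is that XOR-folding the first two 32-bit words throws away entropy before jhash ever runs: any two addresses with the same fold and the same trailing words hash identically, regardless of the secret initval. A standalone demonstration of such a colliding pair (plain user-space C; the word values are illustrative, not real traffic):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* two distinct addresses whose first 64 bits XOR to the
         * same value and whose last 64 bits are equal
         */
        uint32_t a[4] = { 0x20010db8, 0x00000001, 0, 1 };
        uint32_t b[4] = { 0x20010db9, 0x00000000, 0, 1 };

        /* the old scheme hashed the tuple (w0 ^ w1, w2, w3) */
        printf("fold(a)=%08x fold(b)=%08x -> %s\n",
               a[0] ^ a[1], b[0] ^ b[1],
               (a[0] ^ a[1]) == (b[0] ^ b[1]) ?
               "identical hash input, guaranteed collision" : "different");
        return 0;
}

jhash2() over all four words keeps the full 128 bits distinct, so such pairs no longer collide by construction.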
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -386,10 +386,15 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
 	return features;
 }
 
-/* IP header + UDP + VXLAN + Ethernet header */
-#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
-/* IPv6 header + UDP + VXLAN + Ethernet header */
-#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
+static inline int vxlan_headroom(u32 flags)
+{
+	/* VXLAN: IP4/6 header + UDP + VXLAN + Ethernet header */
+	/* VXLAN-GPE: IP4/6 header + UDP + VXLAN */
+	return (flags & VXLAN_F_IPV6 ? sizeof(struct ipv6hdr) :
+				       sizeof(struct iphdr)) +
+	       sizeof(struct udphdr) + sizeof(struct vxlanhdr) +
+	       (flags & VXLAN_F_GPE ? 0 : ETH_HLEN);
+}
 
 static inline struct vxlanhdr *vxlan_hdr(struct sk_buff *skb)
 {
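The new helper replaces the two constants with arithmetic over the flags and, in particular, stops charging 14 bytes of inner Ethernet header to GPE tunnels, which carry the inner protocol directly in the VXLAN header. A standalone sketch of the resulting worst-case headroom per mode (user-space C with stand-in flag values; the header sizes are the standard fixed ones):

#include <stdio.h>

#define F_GPE  (1u << 0)        /* stand-in for VXLAN_F_GPE */
#define F_IPV6 (1u << 1)        /* stand-in for VXLAN_F_IPV6 */

static int headroom(unsigned int flags)
{
        return (flags & F_IPV6 ? 40 : 20) +  /* outer IPv6/IPv4 header */
               8 +                           /* UDP */
               8 +                           /* VXLAN */
               (flags & F_GPE ? 0 : 14);     /* inner Ethernet, non-GPE only */
}

int main(void)
{
        printf("v4:     %d\n", headroom(0));              /* 50 */
        printf("v6:     %d\n", headroom(F_IPV6));         /* 70 */
        printf("v4 GPE: %d\n", headroom(F_GPE));          /* 36 */
        printf("v6 GPE: %d\n", headroom(F_GPE | F_IPV6)); /* 56 */
        return 0;
}

The non-GPE values match the old VXLAN_HEADROOM (50) and VXLAN6_HEADROOM (70); the GPE values are 14 bytes smaller, which is exactly the correction the PMTU and MTU call sites above pick up.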
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -18,7 +18,11 @@ struct sockaddr_ll {
 	unsigned short	sll_hatype;
 	unsigned char	sll_pkttype;
 	unsigned char	sll_halen;
-	unsigned char	sll_addr[8];
+	union {
+		unsigned char	sll_addr[8];
+		/* Actual length is in sll_halen. */
+		__DECLARE_FLEX_ARRAY(unsigned char, sll_addr_flex);
+	};
 };
 
 /* Packet types */
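The union keeps the ABI layout byte-for-byte identical while giving the kernel a flexible-array view for hardware addresses longer than 8 bytes, since sll_halen can exceed sizeof(sll_addr) for some link types. Nothing changes for user space; a sketch of reading the address back, assuming a Linux host with CAP_NET_RAW and a "lo" interface:

#include <stdio.h>
#include <arpa/inet.h>          /* htons */
#include <net/if.h>             /* if_nametoindex */
#include <sys/socket.h>
#include <linux/if_ether.h>     /* ETH_P_ALL */
#include <linux/if_packet.h>    /* struct sockaddr_ll */

int main(void)
{
        struct sockaddr_ll sll = { .sll_family = AF_PACKET };
        socklen_t len = sizeof(sll);
        int i, fd;

        fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
        if (fd < 0) {
                perror("socket (needs CAP_NET_RAW)");
                return 1;
        }
        sll.sll_ifindex = if_nametoindex("lo");
        if (bind(fd, (struct sockaddr *)&sll, sizeof(sll)) < 0 ||
            getsockname(fd, (struct sockaddr *)&sll, &len) < 0) {
                perror("bind/getsockname");
                return 1;
        }
        /* only the first sll_halen bytes of sll_addr are valid */
        for (i = 0; i < sll.sll_halen; i++)
                printf("%02x%c", sll.sll_addr[i],
                       i + 1 < sll.sll_halen ? ':' : '\n');
        return 0;
}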
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -386,9 +386,9 @@ static int raw_release(struct socket *sock)
 	list_del(&ro->notifier);
 	spin_unlock(&raw_notifier_lock);
 
+	rtnl_lock();
 	lock_sock(sk);
 
-	rtnl_lock();
 	/* remove current filters & unregister */
 	if (ro->bound) {
 		if (ro->dev)

@@ -405,12 +405,13 @@ static int raw_release(struct socket *sock)
 	ro->dev = NULL;
 	ro->count = 0;
 	free_percpu(ro->uniq);
-	rtnl_unlock();
 
 	sock_orphan(sk);
 	sock->sk = NULL;
 
 	release_sock(sk);
+	rtnl_unlock();
 
 	sock_put(sk);
 
 	return 0;
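The reordering gives every path the same lock order: rtnl first, then the socket lock, released in the reverse order. A generic user-space illustration of the ABBA rule this enforces (pthread mutexes stand in for rtnl_lock()/lock_sock(); this is not CAN-specific code, build with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t A = PTHREAD_MUTEX_INITIALIZER; /* think: rtnl */
static pthread_mutex_t B = PTHREAD_MUTEX_INITIALIZER; /* think: sk lock */

static void *worker(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&A);         /* always A first ... */
        pthread_mutex_lock(&B);         /* ... then B */
        puts("consistent order: no deadlock");
        pthread_mutex_unlock(&B);
        pthread_mutex_unlock(&A);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        /* Both threads take A then B, so they serialize cleanly.
         * If one path instead took B then A (the pre-fix situation,
         * as lockdep reported), the two could deadlock.
         */
        pthread_create(&t1, NULL, worker, NULL);
        pthread_create(&t2, NULL, worker, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
}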
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2561,12 +2561,18 @@ static void manage_tempaddrs(struct inet6_dev *idev,
 			ipv6_ifa_notify(0, ift);
 	}
 
-	if ((create || list_empty(&idev->tempaddr_list)) &&
-	    idev->cnf.use_tempaddr > 0) {
+	/* Also create a temporary address if it's enabled but no temporary
+	 * address currently exists.
+	 * However, we get called with valid_lft == 0, prefered_lft == 0, create == false
+	 * as part of cleanup (ie. deleting the mngtmpaddr).
+	 * We don't want that to result in creating a new temporary ip address.
+	 */
+	if (list_empty(&idev->tempaddr_list) && (valid_lft || prefered_lft))
+		create = true;
+
+	if (create && idev->cnf.use_tempaddr > 0) {
 		/* When a new public address is created as described
 		 * in [ADDRCONF], also create a new temporary address.
-		 * Also create a temporary address if it's enabled but
-		 * no temporary address currently exists.
 		 */
 		read_unlock_bh(&idev->lock);
 		ipv6_create_tempaddr(ifp, false);
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -3723,10 +3723,9 @@ static int mptcp_listen(struct socket *sock, int backlog)
 	if (!err) {
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 		mptcp_copy_inaddrs(sk, ssock->sk);
+		mptcp_event_pm_listener(ssock->sk, MPTCP_EVENT_LISTENER_CREATED);
 	}
 
-	mptcp_event_pm_listener(ssock->sk, MPTCP_EVENT_LISTENER_CREATED);
-
 unlock:
 	release_sock(sk);
 	return err;
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3811,8 +3811,6 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
 			NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
 			return PTR_ERR(chain);
 		}
-		if (nft_chain_is_bound(chain))
-			return -EOPNOTSUPP;
 
 	} else if (nla[NFTA_RULE_CHAIN_ID]) {
 		chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID],

@@ -3825,6 +3823,9 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
 		return -EINVAL;
 	}
 
+	if (nft_chain_is_bound(chain))
+		return -EOPNOTSUPP;
+
 	if (nla[NFTA_RULE_HANDLE]) {
 		handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE]));
 		rule = __nft_rule_lookup(chain, handle);
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -125,15 +125,27 @@ static void nft_immediate_activate(const struct nft_ctx *ctx,
 	return nft_data_hold(&priv->data, nft_dreg_to_type(priv->dreg));
 }
 
+static void nft_immediate_chain_deactivate(const struct nft_ctx *ctx,
+					   struct nft_chain *chain,
+					   enum nft_trans_phase phase)
+{
+	struct nft_ctx chain_ctx;
+	struct nft_rule *rule;
+
+	chain_ctx = *ctx;
+	chain_ctx.chain = chain;
+
+	list_for_each_entry(rule, &chain->rules, list)
+		nft_rule_expr_deactivate(&chain_ctx, rule, phase);
+}
+
 static void nft_immediate_deactivate(const struct nft_ctx *ctx,
 				     const struct nft_expr *expr,
 				     enum nft_trans_phase phase)
 {
 	const struct nft_immediate_expr *priv = nft_expr_priv(expr);
 	const struct nft_data *data = &priv->data;
-	struct nft_ctx chain_ctx;
 	struct nft_chain *chain;
-	struct nft_rule *rule;
 
 	if (priv->dreg == NFT_REG_VERDICT) {
 		switch (data->verdict.code) {

@@ -143,20 +155,17 @@ static void nft_immediate_deactivate(const struct nft_ctx *ctx,
 			if (!nft_chain_binding(chain))
 				break;
 
-			chain_ctx = *ctx;
-			chain_ctx.chain = chain;
-
-			list_for_each_entry(rule, &chain->rules, list)
-				nft_rule_expr_deactivate(&chain_ctx, rule, phase);
-
 			switch (phase) {
 			case NFT_TRANS_PREPARE_ERROR:
 				nf_tables_unbind_chain(ctx, chain);
-				fallthrough;
+				nft_deactivate_next(ctx->net, chain);
+				break;
 			case NFT_TRANS_PREPARE:
+				nft_immediate_chain_deactivate(ctx, chain, phase);
 				nft_deactivate_next(ctx->net, chain);
 				break;
 			default:
+				nft_immediate_chain_deactivate(ctx, chain, phase);
 				nft_chain_del(chain);
 				chain->bound = false;
 				nft_use_dec(&chain->table->use);
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -217,29 +217,37 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
 
 static int nft_rbtree_gc_elem(const struct nft_set *__set,
 			      struct nft_rbtree *priv,
-			      struct nft_rbtree_elem *rbe)
+			      struct nft_rbtree_elem *rbe,
+			      u8 genmask)
 {
 	struct nft_set *set = (struct nft_set *)__set;
 	struct rb_node *prev = rb_prev(&rbe->node);
-	struct nft_rbtree_elem *rbe_prev = NULL;
+	struct nft_rbtree_elem *rbe_prev;
 	struct nft_set_gc_batch *gcb;
 
 	gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
 	if (!gcb)
 		return -ENOMEM;
 
-	/* search for expired end interval coming before this element. */
+	/* search for end interval coming before this element.
+	 * end intervals don't carry a timeout extension, they
+	 * are coupled with the interval start element.
+	 */
 	while (prev) {
 		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
-		if (nft_rbtree_interval_end(rbe_prev))
+		if (nft_rbtree_interval_end(rbe_prev) &&
+		    nft_set_elem_active(&rbe_prev->ext, genmask))
 			break;
 
 		prev = rb_prev(prev);
 	}
 
-	if (rbe_prev) {
+	if (prev) {
+		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
+
 		rb_erase(&rbe_prev->node, &priv->root);
 		atomic_dec(&set->nelems);
+		nft_set_gc_batch_add(gcb, rbe_prev);
 	}
 
 	rb_erase(&rbe->node, &priv->root);

@@ -321,7 +329,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 
 	/* perform garbage collection to avoid bogus overlap reports. */
 	if (nft_set_elem_expired(&rbe->ext)) {
-		err = nft_rbtree_gc_elem(set, priv, rbe);
+		err = nft_rbtree_gc_elem(set, priv, rbe, genmask);
 		if (err < 0)
 			return err;
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3601,7 +3601,7 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
 	if (dev) {
 		sll->sll_hatype = dev->type;
 		sll->sll_halen = dev->addr_len;
-		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
+		memcpy(sll->sll_addr_flex, dev->dev_addr, dev->addr_len);
 	} else {
 		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
 		sll->sll_halen = 0;
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -290,6 +290,13 @@ static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt,
 					    "Attribute type expected to be TCA_MQPRIO_MIN_RATE64");
 			return -EINVAL;
 		}
 
+		if (nla_len(attr) != sizeof(u64)) {
+			NL_SET_ERR_MSG_ATTR(extack, attr,
+					    "Attribute TCA_MQPRIO_MIN_RATE64 expected to have 8 bytes length");
+			return -EINVAL;
+		}
+
 		if (i >= qopt->num_tc)
 			break;
 		priv->min_rate[i] = nla_get_u64(attr);

@@ -312,6 +319,13 @@ static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt,
 					    "Attribute type expected to be TCA_MQPRIO_MAX_RATE64");
 			return -EINVAL;
 		}
 
+		if (nla_len(attr) != sizeof(u64)) {
+			NL_SET_ERR_MSG_ATTR(extack, attr,
+					    "Attribute TCA_MQPRIO_MAX_RATE64 expected to have 8 bytes length");
+			return -EINVAL;
+		}
+
 		if (i >= qopt->num_tc)
 			break;
 		priv->max_rate[i] = nla_get_u64(attr);
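Without the length check, a malformed netlink message could declare a rate attribute shorter than 8 bytes and nla_get_u64() would read past the attribute payload. A standalone sketch of the validation idea (the struct and helper are simplified stand-ins, not the kernel's nla_* API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_nlattr {
        uint16_t nla_len;       /* header (4 bytes) + payload length */
        uint16_t nla_type;
        unsigned char payload[8];
};

static int parse_rate(const struct fake_nlattr *attr, uint64_t *rate)
{
        /* reject truncated attributes, as the fix does */
        if (attr->nla_len - 4 != sizeof(uint64_t))
                return -1;
        memcpy(rate, attr->payload, sizeof(*rate));
        return 0;
}

int main(void)
{
        struct fake_nlattr ok  = { .nla_len = 4 + 8 };
        struct fake_nlattr bad = { .nla_len = 4 + 4 }; /* only 4 bytes sent */
        uint64_t rate;

        printf("8-byte attr: %s\n",
               parse_rate(&ok, &rate) ? "rejected" : "accepted");
        printf("4-byte attr: %s\n",
               parse_rate(&bad, &rate) ? "rejected" : "accepted");
        return 0;
}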
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -1960,7 +1960,8 @@ rcv:
 
 	skb_reset_network_header(*skb);
 	skb_pull(*skb, tipc_ehdr_size(ehdr));
-	pskb_trim(*skb, (*skb)->len - aead->authsize);
+	if (pskb_trim(*skb, (*skb)->len - aead->authsize))
+		goto free_skb;
 
 	/* Validate TIPCv2 message */
 	if (unlikely(!tipc_msg_validate(skb))) {
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -583,7 +583,7 @@ update:
 			      n->capabilities, &n->bc_entry.inputq1,
 			      &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
 		pr_warn("Broadcast rcv link creation failed, no memory\n");
-		kfree(n);
+		tipc_node_put(n);
 		n = NULL;
 		goto exit;
 	}
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -289,17 +289,29 @@ static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
 	return 0;
 }
 
-static void unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
+static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
 {
+	struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr;
+	short offset = offsetof(struct sockaddr_storage, __data);
+
+	BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path));
+
 	/* This may look like an off by one error but it is a bit more
 	 * subtle. 108 is the longest valid AF_UNIX path for a binding.
 	 * sun_path[108] doesn't as such exist. However in kernel space
 	 * we are guaranteed that it is a valid memory location in our
 	 * kernel address buffer because syscall functions always pass
 	 * a pointer of struct sockaddr_storage which has a bigger buffer
-	 * than 108.
+	 * than 108. Also, we must terminate sun_path for strlen() in
+	 * getname_kernel().
 	 */
-	((char *)sunaddr)[addr_len] = 0;
+	addr->__data[addr_len - offset] = 0;
+
+	/* Don't pass sunaddr->sun_path to strlen(). Otherwise, 108 will
+	 * cause panic if CONFIG_FORTIFY_SOURCE=y. Let __fortify_strlen()
+	 * know the actual buffer.
+	 */
+	return strlen(addr->__data) + offset + 1;
 }
 
 static void __unix_remove_socket(struct sock *sk)

@@ -1208,10 +1220,7 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
 	struct path parent;
 	int err;
 
-	unix_mkname_bsd(sunaddr, addr_len);
-	addr_len = strlen(sunaddr->sun_path) +
-		   offsetof(struct sockaddr_un, sun_path) + 1;
+	addr_len = unix_mkname_bsd(sunaddr, addr_len);
 
 	addr = unix_create_addr(sunaddr, addr_len);
 	if (!addr)
 		return -ENOMEM;
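The caller-visible case here is a bind() with a full 108-byte, non-NUL-terminated sun_path, which is legal at the socket API level; the kernel must NUL-terminate its own copy before taking strlen() of it. A user-space sketch of exactly that case (the path contents are arbitrary; run it somewhere writable):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

int main(void)
{
        struct sockaddr_un un = { .sun_family = AF_UNIX };
        char path[sizeof(un.sun_path) + 1];
        int fd = socket(AF_UNIX, SOCK_STREAM, 0);

        if (fd < 0)
                return 1;
        /* fill all 108 bytes of sun_path, no trailing NUL */
        memset(un.sun_path, 'x', sizeof(un.sun_path));
        memcpy(un.sun_path, "/tmp/", 5);

        if (bind(fd, (struct sockaddr *)&un, sizeof(un)) < 0)
                perror("bind");
        else
                puts("bound with a maximal, unterminated sun_path");
        /* on CONFIG_FORTIFY_SOURCE=y kernels before this fix, the
         * in-kernel strlen() on the unterminated copy could trip
         * fortify_panic() instead of returning
         */

        /* clean up: rebuild the name as a C string for unlink() */
        memcpy(path, un.sun_path, sizeof(un.sun_path));
        path[sizeof(un.sun_path)] = '\0';
        unlink(path);
        close(fd);
        return 0;
}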
--- a/tools/net/ynl/lib/ynl.py
+++ b/tools/net/ynl/lib/ynl.py
@@ -417,11 +417,10 @@ class YnlFamily(SpecFamily):
         pad = b'\x00' * ((4 - len(attr_payload) % 4) % 4)
         return struct.pack('HH', len(attr_payload) + 4, nl_type) + attr_payload + pad
 
-    def _decode_enum(self, rsp, attr_spec):
-        raw = rsp[attr_spec['name']]
+    def _decode_enum(self, raw, attr_spec):
         enum = self.consts[attr_spec['enum']]
-        i = attr_spec.get('value-start', 0)
         if 'enum-as-flags' in attr_spec and attr_spec['enum-as-flags']:
+            i = 0
             value = set()
             while raw:
                 if raw & 1:

@@ -429,8 +428,8 @@ class YnlFamily(SpecFamily):
                 raw >>= 1
                 i += 1
         else:
-            value = enum.entries_by_val[raw - i].name
-        rsp[attr_spec['name']] = value
+            value = enum.entries_by_val[raw].name
+        return value
 
     def _decode_binary(self, attr, attr_spec):
         if attr_spec.struct_name:

@@ -438,7 +437,7 @@ class YnlFamily(SpecFamily):
             decoded = attr.as_struct(members)
             for m in members:
                 if m.enum:
-                    self._decode_enum(decoded, m)
+                    decoded[m.name] = self._decode_enum(decoded[m.name], m)
         elif attr_spec.sub_type:
             decoded = attr.as_c_array(attr_spec.sub_type)
         else:

@@ -466,6 +465,9 @@ class YnlFamily(SpecFamily):
         else:
             raise Exception(f'Unknown {attr_spec["type"]} with name {attr_spec["name"]}')
 
+        if 'enum' in attr_spec:
+            decoded = self._decode_enum(decoded, attr_spec)
+
         if not attr_spec.is_multi:
             rsp[attr_spec['name']] = decoded
         elif attr_spec.name in rsp:

@@ -473,8 +475,6 @@ class YnlFamily(SpecFamily):
         else:
             rsp[attr_spec.name] = [decoded]
 
-        if 'enum' in attr_spec:
-            self._decode_enum(rsp, attr_spec)
         return rsp
 
     def _decode_extack_path(self, attrs, attr_set, offset, target):
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -162,9 +162,7 @@ check_tools()
 	elif ! iptables -V &> /dev/null; then
 		echo "SKIP: Could not run all tests without iptables tool"
 		exit $ksft_skip
-	fi
-
-	if ! ip6tables -V &> /dev/null; then
+	elif ! ip6tables -V &> /dev/null; then
 		echo "SKIP: Could not run all tests without ip6tables tool"
 		exit $ksft_skip
 	fi