Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1674 commits)
  qlcnic: adding co maintainer
  ixgbe: add support for active DA cables
  ixgbe: dcb, do not tag tc_prio_control frames
  ixgbe: fix ixgbe_tx_is_paused logic
  ixgbe: always enable vlan strip/insert when DCB is enabled
  ixgbe: remove some redundant code in setting FCoE FIP filter
  ixgbe: fix wrong offset to fc_frame_header in ixgbe_fcoe_ddp
  ixgbe: fix header len when unsplit packet overflows to data buffer
  ipv6: Never schedule DAD timer on dead address
  ipv6: Use POSTDAD state
  ipv6: Use state_lock to protect ifa state
  ipv6: Replace inet6_ifaddr->dead with state
  cxgb4: notify upper drivers if the device is already up when they load
  cxgb4: keep interrupts available when the ports are brought down
  cxgb4: fix initial addition of MAC address
  cnic: Return SPQ credit to bnx2x after ring setup and shutdown.
  cnic: Convert cnic_local_flags to atomic ops.
  can: Fix SJA1000 command register writes on SMP systems
  bridge: fix build for CONFIG_SYSFS disabled
  ARCNET: Limit com20020 PCI ID matches for SOHARD cards
  ...

Fix up various conflicts with pcmcia tree drivers/net/
{pcmcia/3c589_cs.c, wireless/orinoco/orinoco_cs.c and
wireless/orinoco/spectrum_cs.c} and feature removal
(Documentation/feature-removal-schedule.txt).

Also fix a non-content conflict due to pm_qos_requirement getting
renamed in the PM tree (now pm_qos_request) in net/mac80211/scan.c
commit f8965467f3
1455 changed files with 95973 additions and 48342 deletions
@@ -219,34 +219,6 @@ struct neighbour;
 struct neigh_parms;
 struct sk_buff;
 
-struct netif_rx_stats {
-	unsigned		total;
-	unsigned		dropped;
-	unsigned		time_squeeze;
-	unsigned		cpu_collision;
-};
-
-DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
-
-struct dev_addr_list {
-	struct dev_addr_list	*next;
-	u8			da_addr[MAX_ADDR_LEN];
-	u8			da_addrlen;
-	u8			da_synced;
-	int			da_users;
-	int			da_gusers;
-};
-
-/*
- *	We tag multicasts with these structures.
- */
-
-#define dev_mc_list	dev_addr_list
-#define dmi_addr	da_addr
-#define dmi_addrlen	da_addrlen
-#define dmi_users	da_users
-#define dmi_gusers	da_gusers
-
 struct netdev_hw_addr {
 	struct list_head	list;
 	unsigned char		addr[MAX_ADDR_LEN];
@@ -255,8 +227,10 @@ struct netdev_hw_addr {
 #define NETDEV_HW_ADDR_T_SAN		2
 #define NETDEV_HW_ADDR_T_SLAVE		3
 #define NETDEV_HW_ADDR_T_UNICAST	4
+#define NETDEV_HW_ADDR_T_MULTICAST	5
 	int			refcount;
 	bool			synced;
+	bool			global_use;
 	struct rcu_head		rcu_head;
 };
 
@@ -265,16 +239,20 @@ struct netdev_hw_addr_list {
 	int			count;
 };
 
-#define netdev_uc_count(dev) ((dev)->uc.count)
-#define netdev_uc_empty(dev) ((dev)->uc.count == 0)
+#define netdev_hw_addr_list_count(l) ((l)->count)
+#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
+#define netdev_hw_addr_list_for_each(ha, l) \
+	list_for_each_entry(ha, &(l)->list, list)
+
+#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
+#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
 #define netdev_for_each_uc_addr(ha, dev) \
-	list_for_each_entry(ha, &dev->uc.list, list)
+	netdev_hw_addr_list_for_each(ha, &(dev)->uc)
 
-#define netdev_mc_count(dev) ((dev)->mc_count)
-#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
-
-#define netdev_for_each_mc_addr(mclist, dev) \
-	for (mclist = dev->mc_list; mclist; mclist = mclist->next)
+#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
+#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
+#define netdev_for_each_mc_addr(ha, dev) \
+	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
 
 struct hh_cache {
 	struct hh_cache *hh_next;	/* Next entry	*/
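The practical effect of the macro rewrite above is that drivers now walk struct netdev_hw_addr entries for both unicast and multicast lists instead of the removed dev_mc_list/dmi_addr chain. A minimal sketch of a converted receive-filter callback, assuming a hypothetical example_set_rx_mode() driver method and a hypothetical write_mc_filter() hardware helper:

static void example_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	/* Old style, removed by this merge:
	 *	struct dev_addr_list *mclist;
	 *	for (mclist = dev->mc_list; mclist; mclist = mclist->next)
	 *		write_mc_filter(mclist->dmi_addr);
	 */
	netdev_for_each_mc_addr(ha, dev)
		write_mc_filter(ha->addr);	/* hypothetical hardware helper */
}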
@@ -531,6 +509,85 @@ struct netdev_queue {
 	unsigned long		tx_dropped;
 } ____cacheline_aligned_in_smp;
 
+#ifdef CONFIG_RPS
+/*
+ * This structure holds an RPS map which can be of variable length.  The
+ * map is an array of CPUs.
+ */
+struct rps_map {
+	unsigned int len;
+	struct rcu_head rcu;
+	u16 cpus[0];
+};
+#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
+
+/*
+ * The rps_dev_flow structure contains the mapping of a flow to a CPU and the
+ * tail pointer for that CPU's input queue at the time of last enqueue.
+ */
+struct rps_dev_flow {
+	u16 cpu;
+	u16 fill;
+	unsigned int last_qtail;
+};
+
+/*
+ * The rps_dev_flow_table structure contains a table of flow mappings.
+ */
+struct rps_dev_flow_table {
+	unsigned int mask;
+	struct rcu_head rcu;
+	struct work_struct free_work;
+	struct rps_dev_flow flows[0];
+};
+#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
+    (_num * sizeof(struct rps_dev_flow)))
+
+/*
+ * The rps_sock_flow_table contains mappings of flows to the last CPU
+ * on which they were processed by the application (set in recvmsg).
+ */
+struct rps_sock_flow_table {
+	unsigned int mask;
+	u16 ents[0];
+};
+#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
+    (_num * sizeof(u16)))
+
+#define RPS_NO_CPU 0xffff
+
+static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
+					u32 hash)
+{
+	if (table && hash) {
+		unsigned int cpu, index = hash & table->mask;
+
+		/* We only give a hint, preemption can change cpu under us */
+		cpu = raw_smp_processor_id();
+
+		if (table->ents[index] != cpu)
+			table->ents[index] = cpu;
+	}
+}
+
+static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
+				       u32 hash)
+{
+	if (table && hash)
+		table->ents[hash & table->mask] = RPS_NO_CPU;
+}
+
+extern struct rps_sock_flow_table *rps_sock_flow_table;
+
+/* This structure contains an instance of an RX queue. */
+struct netdev_rx_queue {
+	struct rps_map *rps_map;
+	struct rps_dev_flow_table *rps_flow_table;
+	struct kobject kobj;
+	struct netdev_rx_queue *first;
+	atomic_t count;
+} ____cacheline_aligned_in_smp;
+#endif /* CONFIG_RPS */
+
 /*
  * This structure defines the management hooks for network devices.
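Since struct rps_map ends in a zero-length array, a whole map is allocated as one block sized with RPS_MAP_SIZE(). A rough sketch of building such a map for a given CPU list, assuming a kernel-module context with <linux/slab.h> available (the helper name is illustrative; in the tree this work is done by the rx_queue sysfs store handler):

static struct rps_map *example_build_rps_map(const u16 *cpus, unsigned int n)
{
	struct rps_map *map;
	unsigned int i;

	/* one allocation covers the header plus n u16 CPU slots */
	map = kzalloc(RPS_MAP_SIZE(n), GFP_KERNEL);
	if (!map)
		return NULL;

	for (i = 0; i < n; i++)
		map->cpus[i] = cpus[i];	/* CPUs eligible to handle RX for this queue */
	map->len = n;

	return map;
}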
@@ -630,6 +687,9 @@ struct netdev_queue {
  * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
  * int (*ndo_get_vf_config)(struct net_device *dev,
  *			    int vf, struct ifla_vf_info *ivf);
+ * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
+ *			  struct nlattr *port[]);
+ * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
  */
 #define HAVE_NET_DEVICE_OPS
 struct net_device_ops {
@@ -668,6 +728,7 @@ struct net_device_ops {
 						       unsigned short vid);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	void			(*ndo_poll_controller)(struct net_device *dev);
+	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
 #endif
 	int			(*ndo_set_vf_mac)(struct net_device *dev,
 						  int queue, u8 *mac);
@@ -678,6 +739,11 @@ struct net_device_ops {
 	int			(*ndo_get_vf_config)(struct net_device *dev,
 						     int vf,
 						     struct ifla_vf_info *ivf);
+	int			(*ndo_set_vf_port)(struct net_device *dev,
+						   int vf,
+						   struct nlattr *port[]);
+	int			(*ndo_get_vf_port)(struct net_device *dev,
+						   int vf, struct sk_buff *skb);
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	int			(*ndo_fcoe_enable)(struct net_device *dev);
 	int			(*ndo_fcoe_disable)(struct net_device *dev);
@@ -768,6 +834,7 @@ struct net_device {
 #define NETIF_F_SCTP_CSUM	(1 << 25) /* SCTP checksum offload */
 #define NETIF_F_FCOE_MTU	(1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
 #define NETIF_F_NTUPLE		(1 << 27) /* N-tuple filters supported */
+#define NETIF_F_RXHASH		(1 << 28) /* Receive hashing offload */
 
 	/* Segmentation offload features */
 #define NETIF_F_GSO_SHIFT	16
@@ -824,7 +891,7 @@ struct net_device {
 	unsigned char		operstate; /* RFC2863 operstate */
 	unsigned char		link_mode; /* mapping policy to operstate */
 
-	unsigned		mtu;	/* interface MTU value		*/
+	unsigned int		mtu;	/* interface MTU value		*/
 	unsigned short		type;	/* interface hardware type	*/
 	unsigned short		hard_header_len;	/* hardware hdr length	*/
 
@@ -844,12 +911,10 @@ struct net_device {
 	unsigned char		addr_len;	/* hardware address length	*/
 	unsigned short		dev_id;		/* for shared network cards */
 
-	struct netdev_hw_addr_list	uc;	/* Secondary unicast
-						   mac addresses */
-	int			uc_promisc;
 	spinlock_t		addr_list_lock;
-	struct dev_addr_list	*mc_list;	/* Multicast mac addresses */
-	int			mc_count;	/* Number of installed mcasts	*/
+	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
+	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
+	int			uc_promisc;
 	unsigned int		promiscuity;
 	unsigned int		allmulti;
 
@@ -882,6 +947,15 @@ struct net_device {
 
 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
 
+#ifdef CONFIG_RPS
+	struct kset		*queues_kset;
+
+	struct netdev_rx_queue	*_rx;
+
+	/* Number of RX queues allocated at alloc_netdev_mq() time */
+	unsigned int		num_rx_queues;
+#endif
+
 	struct netdev_queue	rx_queue;
 
 	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
@@ -1310,19 +1384,44 @@ static inline int unregister_gifconf(unsigned int family)
 }
 
 /*
- * Incoming packets are placed on per-cpu queues so that
- * no locking is needed.
+ * Incoming packets are placed on per-cpu queues
  */
 struct softnet_data {
 	struct Qdisc		*output_queue;
-	struct sk_buff_head	input_pkt_queue;
+	struct Qdisc		**output_queue_tailp;
 	struct list_head	poll_list;
 	struct sk_buff		*completion_queue;
+	struct sk_buff_head	process_queue;
 
+	/* stats */
+	unsigned int		processed;
+	unsigned int		time_squeeze;
+	unsigned int		cpu_collision;
+	unsigned int		received_rps;
+
+#ifdef CONFIG_RPS
+	struct softnet_data	*rps_ipi_list;
+
+	/* Elements below can be accessed between CPUs for RPS */
+	struct call_single_data	csd ____cacheline_aligned_in_smp;
+	struct softnet_data	*rps_ipi_next;
+	unsigned int		cpu;
+	unsigned int		input_queue_head;
+#endif
+	unsigned		dropped;
+	struct sk_buff_head	input_pkt_queue;
 	struct napi_struct	backlog;
 };
 
-DECLARE_PER_CPU(struct softnet_data,softnet_data);
+static inline void input_queue_head_add(struct softnet_data *sd,
+					unsigned int len)
+{
+#ifdef CONFIG_RPS
+	sd->input_queue_head += len;
+#endif
+}
+
+DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 
 #define HAVE_NETIF_QUEUE
 
@@ -1949,6 +2048,22 @@ extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 extern int		register_netdev(struct net_device *dev);
 extern void		unregister_netdev(struct net_device *dev);
 
+/* General hardware address lists handling functions */
+extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
+				  struct netdev_hw_addr_list *from_list,
+				  int addr_len, unsigned char addr_type);
+extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
+				   struct netdev_hw_addr_list *from_list,
+				   int addr_len, unsigned char addr_type);
+extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+			  struct netdev_hw_addr_list *from_list,
+			  int addr_len);
+extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+			     struct netdev_hw_addr_list *from_list,
+			     int addr_len);
+extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
+extern void __hw_addr_init(struct netdev_hw_addr_list *list);
+
 /* Functions used for device addresses handling */
 extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
 			unsigned char addr_type);
@@ -1960,26 +2075,34 @@ extern int dev_addr_add_multiple(struct net_device *to_dev,
 extern int dev_addr_del_multiple(struct net_device *to_dev,
 				 struct net_device *from_dev,
 				 unsigned char addr_type);
 extern void dev_addr_flush(struct net_device *dev);
 extern int dev_addr_init(struct net_device *dev);
 
+/* Functions used for unicast addresses handling */
+extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
+extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
+extern int dev_uc_sync(struct net_device *to, struct net_device *from);
+extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
+extern void dev_uc_flush(struct net_device *dev);
+extern void dev_uc_init(struct net_device *dev);
+
+/* Functions used for multicast addresses handling */
+extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_sync(struct net_device *to, struct net_device *from);
+extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
+extern void dev_mc_flush(struct net_device *dev);
+extern void dev_mc_init(struct net_device *dev);
+
 /* Functions used for secondary unicast and multicast support */
 extern void		dev_set_rx_mode(struct net_device *dev);
 extern void		__dev_set_rx_mode(struct net_device *dev);
-extern int		dev_unicast_delete(struct net_device *dev, void *addr);
-extern int		dev_unicast_add(struct net_device *dev, void *addr);
-extern int		dev_unicast_sync(struct net_device *to, struct net_device *from);
-extern void		dev_unicast_unsync(struct net_device *to, struct net_device *from);
-extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
-extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
-extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
-extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
-extern int		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
-extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
-extern int		__dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
-extern void		__dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
 extern int		dev_set_promiscuity(struct net_device *dev, int inc);
 extern int		dev_set_allmulti(struct net_device *dev, int inc);
 extern void		netdev_state_change(struct net_device *dev);
-extern void		netdev_bonding_change(struct net_device *dev,
+extern int		netdev_bonding_change(struct net_device *dev,
 					      unsigned long event);
 extern void		netdev_features_change(struct net_device *dev);
 /* Load a device via the kmod */
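The new dev_uc_*/dev_mc_* helpers drop the explicit address length and "newonly"/"all" flags of the removed calls: entries are always dev->addr_len bytes, and globally tracked references use the separate _global variants. A rough before/after sketch for code joining and leaving a hardware multicast group (illustrative only, mc_addr is a placeholder buffer, not taken from a particular caller):

	/* before this merge */
	dev_mc_add(dev, mc_addr, dev->addr_len, 0);
	dev_mc_delete(dev, mc_addr, dev->addr_len, 0);

	/* after this merge */
	dev_mc_add(dev, mc_addr);
	dev_mc_del(dev, mc_addr);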
@@ -1989,6 +2112,7 @@ extern const struct net_device_stats *dev_get_stats(struct net_device *dev);
 extern void		dev_txq_stats_fold(const struct net_device *dev, struct net_device_stats *stats);
 
 extern int		netdev_max_backlog;
+extern int		netdev_tstamp_prequeue;
 extern int		weight_p;
 extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
 extern int		skb_checksum_help(struct sk_buff *skb);
@@ -2049,54 +2173,14 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
 	dev->gso_max_size = size;
 }
 
-static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
-					      struct net_device *master)
-{
-	if (skb->pkt_type == PACKET_HOST) {
-		u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
-
-		memcpy(dest, master->dev_addr, ETH_ALEN);
-	}
-}
+extern int __skb_bond_should_drop(struct sk_buff *skb,
+				  struct net_device *master);
 
 /* On bonding slaves other than the currently active slave, suppress
  * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
  * ARP on active-backup slaves with arp_validate enabled.
  */
 static inline int skb_bond_should_drop(struct sk_buff *skb,
 				       struct net_device *master)
 {
-	if (master) {
-		struct net_device *dev = skb->dev;
-
-		if (master->priv_flags & IFF_MASTER_ARPMON)
-			dev->last_rx = jiffies;
-
-		if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
-			/* Do address unmangle. The local destination address
-			 * will be always the one master has. Provides the right
-			 * functionality in a bridge.
-			 */
-			skb_bond_set_mac_by_master(skb, master);
-		}
-
-		if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
-			if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
-			    skb->protocol == __cpu_to_be16(ETH_P_ARP))
-				return 0;
-
-			if (master->priv_flags & IFF_MASTER_ALB) {
-				if (skb->pkt_type != PACKET_BROADCAST &&
-				    skb->pkt_type != PACKET_MULTICAST)
-					return 0;
-			}
-			if (master->priv_flags & IFF_MASTER_8023AD &&
-			    skb->protocol == __cpu_to_be16(ETH_P_SLOW))
-				return 0;
-
-			return 1;
-		}
-	}
+	if (master)
+		return __skb_bond_should_drop(skb, master);
 	return 0;
 }