netpoll: more efficient locking
Callers of netpoll_poll_lock() own NAPI_STATE_SCHED.
Callers of netpoll_poll_unlock() have BH blocked between
NAPI_STATE_SCHED being cleared and poll_lock being released.
We can avoid the spinlock, which is uncontended anyway, and use cmpxchg()
on poll_owner, which we need to set in any case.
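For illustration only, here is a minimal userspace sketch of the same
ownership scheme using C11 atomics in place of the kernel's cmpxchg()
and smp_store_release() helpers. poll_gate, gate_lock() and gate_unlock()
are hypothetical names, and sched_yield() stands in for cpu_relax():

	#include <stdatomic.h>
	#include <sched.h>

	struct poll_gate {
		atomic_int owner;	/* -1: unowned; otherwise the owner's id */
	};

	/* Acquire: spin until we swap -1 -> id, with acquire ordering. */
	static void gate_lock(struct poll_gate *g, int id)
	{
		int expected = -1;

		while (!atomic_compare_exchange_weak_explicit(&g->owner,
							      &expected, id,
							      memory_order_acquire,
							      memory_order_relaxed)) {
			expected = -1;	/* CAS wrote the observed value; reset */
			sched_yield();	/* userspace stand-in for cpu_relax() */
		}
	}

	/* Release: plain store with release ordering, like smp_store_release(). */
	static void gate_unlock(struct poll_gate *g)
	{
		atomic_store_explicit(&g->owner, -1, memory_order_release);
	}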
This removes a possible lockdep violation after the cited commit,
since sk_busy_loop() re-enables BH before calling busy_poll_stop().
Fixes: 217f697436 ("net: busy-poll: allow preemption in sk_busy_loop()")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1629dd4f76
commit 89c4b442b7

4 changed files with 10 additions and 11 deletions
include/linux/netpoll.h:

@@ -78,8 +78,11 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
 	struct net_device *dev = napi->dev;
 
 	if (dev && dev->npinfo) {
-		spin_lock(&napi->poll_lock);
-		napi->poll_owner = smp_processor_id();
+		int owner = smp_processor_id();
+
+		while (cmpxchg(&napi->poll_owner, -1, owner) != -1)
+			cpu_relax();
+
 		return napi;
 	}
 	return NULL;
@@ -89,10 +92,8 @@ static inline void netpoll_poll_unlock(void *have)
 {
 	struct napi_struct *napi = have;
 
-	if (napi) {
-		napi->poll_owner = -1;
-		spin_unlock(&napi->poll_lock);
-	}
+	if (napi)
+		smp_store_release(&napi->poll_owner, -1);
 }
 
 static inline bool netpoll_tx_running(struct net_device *dev)
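For context, callers pair the two helpers around the NAPI poll callback.
The following is a simplified sketch of the poll loop in net/core/dev.c,
with bookkeeping elided; napi_poll_sketch is an illustrative name, not the
kernel function:

	/* Runs with BH disabled and with NAPI_STATE_SCHED already owned
	 * by this CPU, per the preconditions in the commit message.
	 */
	static int napi_poll_sketch(struct napi_struct *n, int weight)
	{
		void *have;
		int work;

		have = netpoll_poll_lock(n);	/* spins on cmpxchg(&n->poll_owner, -1, cpu) */
		work = n->poll(n, weight);	/* driver's poll callback */
		netpoll_poll_unlock(have);	/* smp_store_release(&n->poll_owner, -1) */

		return work;
	}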