Diffstat (limited to 'net/core')
-rw-r--r--	net/core/skbuff.c	15
1 file changed, 8 insertions, 7 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f34372666e67..88b5530f9c46 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -280,17 +280,18 @@ EXPORT_SYMBOL(__netdev_alloc_frag_align);
  */
 static u32 skbuff_cache_size __read_mostly;
 
-static struct sk_buff *napi_skb_cache_get(void)
+static struct sk_buff *napi_skb_cache_get(bool alloc)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 	struct sk_buff *skb;
 
 	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
 	if (unlikely(!nc->skb_count)) {
-		nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
-						      GFP_ATOMIC | __GFP_NOWARN,
-						      NAPI_SKB_CACHE_BULK,
-						      nc->skb_cache);
+		if (alloc)
+			nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
+							      GFP_ATOMIC | __GFP_NOWARN,
+							      NAPI_SKB_CACHE_BULK,
+							      nc->skb_cache);
 		if (unlikely(!nc->skb_count)) {
 			local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
 			return NULL;
@@ -530,7 +531,7 @@ static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
 {
 	struct sk_buff *skb;
 
-	skb = napi_skb_cache_get();
+	skb = napi_skb_cache_get(true);
 	if (unlikely(!skb))
 		return NULL;
 
@@ -659,7 +660,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	/* Get the HEAD */
 	if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
 	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
-		skb = napi_skb_cache_get();
+		skb = napi_skb_cache_get(true);
 	else
 		skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
 	if (unlikely(!skb))
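In this patch, napi_skb_cache_get() gains a bool alloc argument: when the per-CPU NAPI skb cache is empty, the bulk refill via kmem_cache_alloc_bulk() is attempted only if alloc is true; otherwise the function drops the lock and returns NULL. Both existing callers, __napi_build_skb() and __alloc_skb(), pass true, so their behaviour is unchanged. The following is a minimal userspace sketch of that control flow only; the skb_cache structure and the refill_cache() helper are hypothetical stand-ins, not the kernel's napi_alloc_cache or kmem_cache API.

/*
 * Userspace sketch of the napi_skb_cache_get(bool alloc) semantics added
 * by this patch. Only the control flow mirrors the kernel change: with
 * alloc == false an empty cache is not refilled and the caller gets NULL;
 * with alloc == true the refill path is tried first. All names below are
 * hypothetical illustrations.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define CACHE_BULK 16

struct skb_cache {
	void *objs[CACHE_BULK];
	unsigned int count;
};

/* Hypothetical stand-in for kmem_cache_alloc_bulk(): pretend to refill. */
static unsigned int refill_cache(struct skb_cache *c)
{
	for (unsigned int i = 0; i < CACHE_BULK; i++)
		c->objs[i] = &c->objs[i];	/* dummy objects */
	return CACHE_BULK;
}

static void *cache_get(struct skb_cache *c, bool alloc)
{
	if (!c->count) {
		if (alloc)
			c->count = refill_cache(c);
		if (!c->count)
			return NULL;	/* empty and refill not permitted (or it failed) */
	}
	return c->objs[--c->count];
}

int main(void)
{
	struct skb_cache c = { .count = 0 };

	/* alloc == false: the empty cache yields NULL, no refill attempted. */
	printf("get(false) -> %p\n", cache_get(&c, false));
	/* alloc == true: the refill path runs and an object is returned. */
	printf("get(true)  -> %p\n", cache_get(&c, true));
	return 0;
}

Built with a C99 compiler, the first call typically prints a null pointer (the empty cache may not be refilled), while the second triggers the refill path and returns an object, matching the two behaviours the new flag selects between.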