Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	44
1 file changed, 36 insertions(+), 8 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 074abe8e79f8..0237a329d4e5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5540,6 +5540,9 @@ int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
  *
  * The gfp parameter is meant only to specify __GFP_ZERO or __GFP_ACCOUNT
  * memcg charging is forced over limit if necessary, to avoid failure.
+ *
+ * It is possible that the allocation comes from kfence and then the sheaf
+ * size is not decreased.
  */
 void *
 kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *s, gfp_t gfp,
@@ -5551,7 +5554,10 @@ kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *s, gfp_t gfp,
 	if (sheaf->size == 0)
 		goto out;
 
-	ret = sheaf->objects[--sheaf->size];
+	ret = kfence_alloc(s, s->object_size, gfp);
+
+	if (likely(!ret))
+		ret = sheaf->objects[--sheaf->size];
 
 	init = slab_want_init_on_alloc(gfp, s);
 
@@ -7399,14 +7405,8 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	local_lock_irqsave(&s->cpu_slab->lock, irqflags);
 
 	for (i = 0; i < size; i++) {
-		void *object = kfence_alloc(s, s->object_size, flags);
-
-		if (unlikely(object)) {
-			p[i] = object;
-			continue;
-		}
+		void *object = c->freelist;
 
-		object = c->freelist;
 		if (unlikely(!object)) {
 			/*
 			 * We may have removed an object from c->freelist using
@@ -7487,6 +7487,7 @@ int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
 			  void **p)
 {
 	unsigned int i = 0;
+	void *kfence_obj;
 
 	if (!size)
 		return 0;
@@ -7495,6 +7496,20 @@ int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
 	if (unlikely(!s))
 		return 0;
 
+	/*
+	 * To make things simpler, assume at most one kfence-allocated
+	 * object per bulk allocation and choose its index randomly.
+	 */
+	kfence_obj = kfence_alloc(s, s->object_size, flags);
+
+	if (unlikely(kfence_obj)) {
+		if (unlikely(size == 1)) {
+			p[0] = kfence_obj;
+			goto out;
+		}
+		size--;
+	}
+
 	if (s->cpu_sheaves)
 		i = alloc_from_pcs_bulk(s, size, p);
 
@@ -7506,10 +7521,23 @@ int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
 		if (unlikely(__kmem_cache_alloc_bulk(s, flags, size - i, p + i) == 0)) {
 			if (i > 0)
 				__kmem_cache_free_bulk(s, i, p);
+			if (kfence_obj)
+				__kfence_free(kfence_obj);
 			return 0;
 		}
 	}
 
+	if (unlikely(kfence_obj)) {
+		int idx = get_random_u32_below(size + 1);
+
+		if (idx != size)
+			p[size] = p[idx];
+		p[idx] = kfence_obj;
+
+		size++;
+	}
+
+out:
 	/*
 	 * memcg and kmem_cache debug support and memory initialization.
 	 * Done outside of the IRQ disabled fastpath loop.
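The random-placement hunk at the end is the subtle part: after one object has been reserved for kfence, the bulk path allocates size - 1 objects as usual, then splices the kfence object into the result array at a uniformly random index so it is not always the last element returned, restoring the caller's requested count. Below is a minimal userspace sketch of just that splice; the insert_at_random_idx() helper is hypothetical, and rand() % (size + 1) merely stands in for the kernel's get_random_u32_below(size + 1).

/*
 * Sketch only: mirrors the p[size] = p[idx]; p[idx] = kfence_obj;
 * swap from kmem_cache_alloc_bulk_noprof() in the diff above.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/*
 * Hypothetical helper: insert obj into p[0..size] at a random index,
 * moving the displaced entry into the new last slot.
 */
static void insert_at_random_idx(void **p, size_t size, void *obj)
{
	/*
	 * rand() % (size + 1) stands in for get_random_u32_below(size + 1);
	 * modulo bias does not matter for a demo.
	 */
	size_t idx = (size_t)rand() % (size + 1);

	if (idx != size)
		p[size] = p[idx];	/* displaced object becomes the last entry */
	p[idx] = obj;			/* obj lands at the random index */
}

int main(void)
{
	void *p[4] = { "obj0", "obj1", "obj2", NULL };

	srand((unsigned)time(NULL));
	insert_at_random_idx(p, 3, "kfence");

	for (size_t i = 0; i < 4; i++)
		printf("p[%zu] = %s\n", i, (char *)p[i]);
	return 0;
}

Because idx is uniform over [0, size], the kfence-guarded object is equally likely to occupy any of the size + 1 returned slots, so callers cannot systematically receive (or avoid) the kfence object at a fixed position in the bulk result.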