58 files changed, 195 insertions, 190 deletions
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 9106ceac323c..7d2f93dc1e91 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -704,7 +704,7 @@ static inline void arc_slc_enable(void)
 void flush_dcache_folio(struct folio *folio)
 {
- clear_bit(PG_dc_clean, &folio->flags);
+ clear_bit(PG_dc_clean, &folio->flags.f);
 return;
 }
 EXPORT_SYMBOL(flush_dcache_folio);
@@ -889,8 +889,8 @@ void copy_user_highpage(struct page *to, struct page *from,
 copy_page(kto, kfrom);
- clear_bit(PG_dc_clean, &dst->flags);
- clear_bit(PG_dc_clean, &src->flags);
+ clear_bit(PG_dc_clean, &dst->flags.f);
+ clear_bit(PG_dc_clean, &src->flags.f);
 kunmap_atomic(kto);
 kunmap_atomic(kfrom);
@@ -900,7 +900,7 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
 {
 struct folio *folio = page_folio(page);
 clear_page(to);
- clear_bit(PG_dc_clean, &folio->flags);
+ clear_bit(PG_dc_clean, &folio->flags.f);
 }
 EXPORT_SYMBOL(clear_user_page);
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index cae4a7aae0ed..ed6915ba76ec 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -488,7 +488,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
 */
 if (vma->vm_flags & VM_EXEC) {
 struct folio *folio = page_folio(page);
- int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
+ int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags.f);
 if (dirty) {
 unsigned long offset = offset_in_folio(folio, paddr);
 nr = folio_nr_pages(folio);
diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h
index b766c4b373f6..700055b1ccb3 100644
--- a/arch/arm/include/asm/hugetlb.h
+++ b/arch/arm/include/asm/hugetlb.h
@@ -17,7 +17,7 @@
 static inline void arch_clear_hugetlb_flags(struct folio *folio)
 {
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
 }
 #define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 7ddd82b9fe8b..ed843bb22020 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -67,7 +67,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 struct folio *src = page_folio(from);
 void *kto = kmap_atomic(to);
- if (!test_and_set_bit(PG_dcache_clean, &src->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &src->flags.f))
 __flush_dcache_folio(folio_flush_mapping(src), src);
 raw_spin_lock(&minicache_lock);
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index a1a71f36d850..0710dba5c0bf 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -73,7 +73,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 unsigned int offset = CACHE_COLOUR(vaddr);
 unsigned long kfrom, kto;
- if (!test_and_set_bit(PG_dcache_clean, &src->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &src->flags.f))
 __flush_dcache_folio(folio_flush_mapping(src), src);
 /* FIXME: not highmem safe */
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index f1e29d3e8193..e16af68d709f 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -87,7 +87,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 struct folio *src = page_folio(from);
 void *kto = kmap_atomic(to);
- if (!test_and_set_bit(PG_dcache_clean, &src->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &src->flags.f))
 __flush_dcache_folio(folio_flush_mapping(src), src);
 raw_spin_lock(&minicache_lock);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 88c2d68a69c9..08641a936394 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -718,7 +718,7 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 if (size < sz)
 break;
 if (!offset)
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
 offset = 0;
 size -= sz;
 if (!size)
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 39fd5df73317..91e488767783 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -203,7 +203,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
 folio = page_folio(pfn_to_page(pfn));
 mapping = folio_flush_mapping(folio);
- if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
 __flush_dcache_folio(mapping, folio);
 if (mapping) {
 if (cache_is_vivt())
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 5219158d54cf..19470d938b23 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -304,7 +304,7 @@ void __sync_icache_dcache(pte_t pteval)
 else
 mapping = NULL;
- if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
 __flush_dcache_folio(mapping, folio);
 if (pte_exec(pteval))
@@ -343,8 +343,8 @@ void flush_dcache_folio(struct folio *folio)
 return;
 if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
- if (test_bit(PG_dcache_clean, &folio->flags))
- clear_bit(PG_dcache_clean, &folio->flags);
+ if (test_bit(PG_dcache_clean, &folio->flags.f))
+ clear_bit(PG_dcache_clean, &folio->flags.f);
 return;
 }
@@ -352,14 +352,14 @@ void flush_dcache_folio(struct folio *folio)
 if (!cache_ops_need_broadcast() &&
 mapping && !folio_mapped(folio))
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
 else {
 __flush_dcache_folio(mapping, folio);
 if (mapping && cache_is_vivt())
 __flush_dcache_aliases(mapping, folio);
 else if (mapping)
 __flush_icache_all();
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
 }
 }
 EXPORT_SYMBOL(flush_dcache_folio);
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 2a8155c4a882..44c1f757bfcf 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -21,12 +21,12 @@ extern bool arch_hugetlb_migration_supported(struct hstate *h);
 static inline void arch_clear_hugetlb_flags(struct folio *folio)
 {
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
 #ifdef CONFIG_ARM64_MTE
 if (system_supports_mte()) {
- clear_bit(PG_mte_tagged, &folio->flags);
- clear_bit(PG_mte_lock, &folio->flags);
+ clear_bit(PG_mte_tagged, &folio->flags.f);
+ clear_bit(PG_mte_lock, &folio->flags.f);
 }
 #endif
 }
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 6567df8ec8ca..3b5069f4683d 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -48,12 +48,12 @@ static inline void set_page_mte_tagged(struct page *page)
 * before the page flags update.
 */
 smp_wmb();
- set_bit(PG_mte_tagged, &page->flags);
+ set_bit(PG_mte_tagged, &page->flags.f);
 }
 static inline bool page_mte_tagged(struct page *page)
 {
- bool ret = test_bit(PG_mte_tagged, &page->flags);
+ bool ret = test_bit(PG_mte_tagged, &page->flags.f);
 VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
@@ -82,7 +82,7 @@ static inline bool try_page_mte_tagging(struct page *page)
 {
 VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
- if (!test_and_set_bit(PG_mte_lock, &page->flags))
+ if (!test_and_set_bit(PG_mte_lock, &page->flags.f))
 return true;
 /*
@@ -90,7 +90,7 @@ static inline bool try_page_mte_tagging(struct page *page)
 * already. Check if the PG_mte_tagged flag has been set or wait
 * otherwise.
 */
- smp_cond_load_acquire(&page->flags, VAL & (1UL << PG_mte_tagged));
+ smp_cond_load_acquire(&page->flags.f, VAL & (1UL << PG_mte_tagged));
 return false;
 }
@@ -173,13 +173,13 @@ static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)
 * before the folio flags update.
 */
 smp_wmb();
- set_bit(PG_mte_tagged, &folio->flags);
+ set_bit(PG_mte_tagged, &folio->flags.f);
 }
 static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
 {
- bool ret = test_bit(PG_mte_tagged, &folio->flags);
+ bool ret = test_bit(PG_mte_tagged, &folio->flags.f);
 VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
@@ -196,7 +196,7 @@ static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
 {
 VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
- if (!test_and_set_bit(PG_mte_lock, &folio->flags))
+ if (!test_and_set_bit(PG_mte_lock, &folio->flags.f))
 return true;
 /*
@@ -204,7 +204,7 @@ static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
 * already. Check if the PG_mte_tagged flag has been set or wait
 * otherwise.
 */
- smp_cond_load_acquire(&folio->flags, VAL & (1UL << PG_mte_tagged));
+ smp_cond_load_acquire(&folio->flags.f, VAL & (1UL << PG_mte_tagged));
 return false;
 }
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 013eead9b695..fbf08b543c3f 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -53,11 +53,11 @@ void __sync_icache_dcache(pte_t pte)
 {
 struct folio *folio = page_folio(pte_page(pte));
- if (!test_bit(PG_dcache_clean, &folio->flags)) {
+ if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
 sync_icache_aliases((unsigned long)folio_address(folio),
 (unsigned long)folio_address(folio) + folio_size(folio));
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
 }
 }
 EXPORT_SYMBOL_GPL(__sync_icache_dcache);
@@ -69,8 +69,8 @@ EXPORT_SYMBOL_GPL(__sync_icache_dcache);
 */
 void flush_dcache_folio(struct folio *folio)
 {
- if (test_bit(PG_dcache_clean, &folio->flags))
- clear_bit(PG_dcache_clean, &folio->flags);
+ if (test_bit(PG_dcache_clean, &folio->flags.f))
+ clear_bit(PG_dcache_clean, &folio->flags.f);
 }
 EXPORT_SYMBOL(flush_dcache_folio);
diff --git a/arch/csky/abiv1/cacheflush.c b/arch/csky/abiv1/cacheflush.c
index 171e8fb32285..4bc0aad3cf8a 100644
--- a/arch/csky/abiv1/cacheflush.c
+++ b/arch/csky/abiv1/cacheflush.c
@@ -25,12 +25,12 @@ void flush_dcache_folio(struct folio *folio)
 mapping = folio_flush_mapping(folio);
 if (mapping && !folio_mapped(folio))
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
 else {
 dcache_wbinv_all();
 if (mapping)
 icache_inv_all();
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
 }
 }
 EXPORT_SYMBOL(flush_dcache_folio);
@@ -56,7 +56,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
 return;
 folio = page_folio(pfn_to_page(pfn));
- if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
 dcache_wbinv_all();
 if (folio_flush_mapping(folio)) {
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index 1f14132b3fc9..5d283ef89d90 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -37,11 +37,11 @@
 #define PG_dcache_dirty PG_arch_1
 #define folio_test_dcache_dirty(folio) \
- test_bit(PG_dcache_dirty, &(folio)->flags)
+ test_bit(PG_dcache_dirty, &(folio)->flags.f)
 #define folio_set_dcache_dirty(folio) \
- set_bit(PG_dcache_dirty, &(folio)->flags)
+ set_bit(PG_dcache_dirty, &(folio)->flags.f)
 #define folio_clear_dcache_dirty(folio) \
- clear_bit(PG_dcache_dirty, &(folio)->flags)
+ clear_bit(PG_dcache_dirty, &(folio)->flags.f)
 extern void (*flush_cache_all)(void);
 extern void (*__flush_cache_all)(void);
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c
index 0ee9c5f02e08..8321182eb927 100644
--- a/arch/nios2/mm/cacheflush.c
+++ b/arch/nios2/mm/cacheflush.c
@@ -187,7 +187,7 @@ void flush_dcache_folio(struct folio *folio)
 /* Flush this page if there are aliases. */
 if (mapping && !mapping_mapped(mapping)) {
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
 } else {
 __flush_dcache_folio(folio);
 if (mapping) {
@@ -195,7 +195,7 @@ void flush_dcache_folio(struct folio *folio)
 flush_aliases(mapping, folio);
 flush_icache_range(start, start + folio_size(folio));
 }
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
 }
 }
 EXPORT_SYMBOL(flush_dcache_folio);
@@ -227,7 +227,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
 return;
 folio = page_folio(pfn_to_page(pfn));
- if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
 __flush_dcache_folio(folio);
 mapping = folio_flush_mapping(folio);
diff --git a/arch/openrisc/include/asm/cacheflush.h b/arch/openrisc/include/asm/cacheflush.h
index 0e60af486ec1..cd8f971c0fec 100644
--- a/arch/openrisc/include/asm/cacheflush.h
+++ b/arch/openrisc/include/asm/cacheflush.h
@@ -75,7 +75,7 @@ static inline void sync_icache_dcache(struct page *page)
 static inline void flush_dcache_folio(struct folio *folio)
 {
- clear_bit(PG_dc_clean, &folio->flags);
+ clear_bit(PG_dc_clean, &folio->flags.f);
 }
 #define flush_dcache_folio flush_dcache_folio
diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c
index 0f265b8e73ec..f33df46dae4e 100644
--- a/arch/openrisc/mm/cache.c
+++ b/arch/openrisc/mm/cache.c
@@ -83,7 +83,7 @@ void update_cache(struct vm_area_struct *vma, unsigned long address,
 {
 unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT;
 struct folio *folio = page_folio(pfn_to_page(pfn));
- int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
+ int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags.f);
 /*
 * Since icaches do not snoop for updated data on OpenRISC, we
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 37ca484cc495..4c5240d3a3c7 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -122,10 +122,10 @@ void __update_cache(pte_t pte)
 pfn = folio_pfn(folio);
 nr = folio_nr_pages(folio);
 if (folio_flush_mapping(folio) &&
- test_bit(PG_dcache_dirty, &folio->flags)) {
+ test_bit(PG_dcache_dirty, &folio->flags.f)) {
 while (nr--)
 flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
- clear_bit(PG_dcache_dirty, &folio->flags);
+ clear_bit(PG_dcache_dirty, &folio->flags.f);
 } else if (parisc_requires_coherency())
 while (nr--)
 flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
@@ -481,7 +481,7 @@ void flush_dcache_folio(struct folio *folio)
 pgoff_t pgoff;
 if (mapping && !mapping_mapped(mapping)) {
- set_bit(PG_dcache_dirty, &folio->flags);
+ set_bit(PG_dcache_dirty, &folio->flags.f);
 return;
 }
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index f2656774aaa9..1fea42928f64 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -40,8 +40,8 @@ static inline void flush_dcache_folio(struct folio *folio)
 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
 return;
 /* avoid an atomic op if possible */
- if (test_bit(PG_dcache_clean, &folio->flags))
- clear_bit(PG_dcache_clean, &folio->flags);
+ if (test_bit(PG_dcache_clean, &folio->flags.f))
+ clear_bit(PG_dcache_clean, &folio->flags.f);
 }
 #define flush_dcache_folio flush_dcache_folio
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index ca3829d47ab7..0953f2daa466 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -939,9 +939,9 @@ static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
 /* Clear i-cache for new pages */
 folio = page_folio(pfn_to_page(pfn));
- if (!test_bit(PG_dcache_clean, &folio->flags)) {
+ if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
 flush_dcache_icache_folio(folio);
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
 }
 }
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 4693c464fc5a..3aee3af614af 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1562,11 +1562,11 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
 folio = page_folio(pte_page(pte));
 /* page is dirty */
- if (!test_bit(PG_dcache_clean, &folio->flags) &&
+ if (!test_bit(PG_dcache_clean, &folio->flags.f) &&
 !folio_test_reserved(folio)) {
 if (trap == INTERRUPT_INST_STORAGE) {
 flush_dcache_icache_folio(folio);
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
 } else
 pp |= HPTE_R_N;
 }
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index dfaa9fd86f7e..56d7e8960e77 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -87,9 +87,9 @@ static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr)
 struct folio *folio = maybe_pte_to_folio(pte);
 if (!folio)
 return pte;
- if (!test_bit(PG_dcache_clean, &folio->flags)) {
+ if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
 flush_dcache_icache_folio(folio);
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
 }
 }
 return pte;
@@ -127,13 +127,13 @@ static inline pte_t set_pte_filter(pte_t pte, unsigned long addr)
 return pte;
 /* If the page clean, we move on */
- if (test_bit(PG_dcache_clean, &folio->flags))
+ if (test_bit(PG_dcache_clean, &folio->flags.f))
 return pte;
 /* If it's an exec fault, we flush the cache and make it clean */
 if (is_exec_fault()) {
 flush_dcache_icache_folio(folio);
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
 return pte;
 }
@@ -175,12 +175,12 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
 goto bail;
 /* If the page is already clean, we move on */
- if (test_bit(PG_dcache_clean, &folio->flags))
+ if (test_bit(PG_dcache_clean, &folio->flags.f))
 goto bail;
 /* Clean the page and set PG_dcache_clean */
 flush_dcache_icache_folio(folio);
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
 bail:
 return pte_mkexec(pte);
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 6086b38d5427..0092513c3376 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -23,8 +23,8 @@ static inline void local_flush_icache_range(unsigned long start,
 static inline void flush_dcache_folio(struct folio *folio)
 {
- if (test_bit(PG_dcache_clean, &folio->flags))
- clear_bit(PG_dcache_clean, &folio->flags);
+ if (test_bit(PG_dcache_clean, &folio->flags.f))
+ clear_bit(PG_dcache_clean, &folio->flags.f);
 }
 #define flush_dcache_folio flush_dcache_folio
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h
index 446126497768..0872d43fc0c0 100644
--- a/arch/riscv/include/asm/hugetlb.h
+++ b/arch/riscv/include/asm/hugetlb.h
@@ -7,7 +7,7 @@
 static inline void arch_clear_hugetlb_flags(struct folio *folio)
 {
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
 }
 #define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 4ca5aafce22e..d83a612464f6 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -101,9 +101,9 @@ void flush_icache_pte(struct mm_struct *mm, pte_t pte)
 {
 struct folio *folio = page_folio(pte_page(pte));
- if (!test_bit(PG_dcache_clean, &folio->flags)) {
+ if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
 flush_icache_mm(mm, false);
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
 }
 }
 #endif /* CONFIG_MMU */
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 931fcc413598..69131736daaa 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -39,7 +39,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 static inline void arch_clear_hugetlb_flags(struct folio *folio)
 {
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
 }
 #define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 47f574cd1728..93b2a01bae40 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -144,7 +144,7 @@ int uv_destroy_folio(struct folio *folio)
 folio_get(folio);
 rc = uv_destroy(folio_to_phys(folio));
 if (!rc)
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
 folio_put(folio);
 return rc;
 }
@@ -193,7 +193,7 @@ int uv_convert_from_secure_folio(struct folio *folio)
 folio_get(folio);
 rc = uv_convert_from_secure(folio_to_phys(folio));
 if (!rc)
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
 folio_put(folio);
 return rc;
 }
@@ -289,7 +289,7 @@ static int __make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
 expected = expected_folio_refs(folio) + 1;
 if (!folio_ref_freeze(folio, expected))
 return -EBUSY;
- set_bit(PG_arch_1, &folio->flags);
+ set_bit(PG_arch_1, &folio->flags.f);
 /*
 * If the UVC does not succeed or fail immediately, we don't want to
 * loop for long, or we might get stall notifications.
@@ -483,18 +483,18 @@ int arch_make_folio_accessible(struct folio *folio)
 * convert_to_secure.
 * As secure pages are never large folios, both variants can co-exists.
 */
- if (!test_bit(PG_arch_1, &folio->flags))
+ if (!test_bit(PG_arch_1, &folio->flags.f))
 return 0;
 rc = uv_pin_shared(folio_to_phys(folio));
 if (!rc) {
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
 return 0;
 }
 rc = uv_convert_from_secure(folio_to_phys(folio));
 if (!rc) {
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
 return 0;
 }
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index c7defe4ed1f6..8ff6bba107e8 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2272,7 +2272,7 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
 start = pmd_val(*pmd) & HPAGE_MASK;
 end = start + HPAGE_SIZE;
 __storage_key_init_range(start, end);
- set_bit(PG_arch_1, &folio->flags);
+ set_bit(PG_arch_1, &folio->flags.f);
 cond_resched();
 return 0;
 }
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index e88c02c9e642..72e8fa136af5 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -155,7 +155,7 @@ static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
 paddr = rste & PMD_MASK;
 }
- if (!test_and_set_bit(PG_arch_1, &folio->flags))
+ if (!test_and_set_bit(PG_arch_1, &folio->flags.f))
 __storage_key_init_range(paddr, paddr + size);
 }
diff --git a/arch/sh/include/asm/hugetlb.h b/arch/sh/include/asm/hugetlb.h
index 4a92e6e4d627..974512f359f0 100644
--- a/arch/sh/include/asm/hugetlb.h
+++ b/arch/sh/include/asm/hugetlb.h
@@ -14,7 +14,7 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
 static inline void arch_clear_hugetlb_flags(struct folio *folio)
 {
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
 }
 #define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 46393b00137e..83fb34b39ca7 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -114,7 +114,7 @@ static void sh4_flush_dcache_folio(void *arg)
 struct address_space *mapping = folio_flush_mapping(folio);
 if (mapping && !mapping_mapped(mapping))
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
 else
 #endif
 {
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index b509a407588f..71f8be9fc8e0 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -138,7 +138,7 @@ static void sh7705_flush_dcache_folio(void *arg)
 struct address_space *mapping = folio_flush_mapping(folio);
 if (mapping && !mapping_mapped(mapping))
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
 else {
 unsigned long pfn = folio_pfn(folio);
 unsigned int i, nr = folio_nr_pages(folio);
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 6ebdeaff3021..c3f028bed049 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -64,14 +64,14 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 struct folio *folio = page_folio(page);
 if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
- test_bit(PG_dcache_clean, &folio->flags)) {
+ test_bit(PG_dcache_clean, &folio->flags.f)) {
 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 memcpy(vto, src, len);
 kunmap_coherent(vto);
 } else {
 memcpy(dst, src, len);
 if (boot_cpu_data.dcache.n_aliases)
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
 }
 if (vma->vm_flags & VM_EXEC)
@@ -85,14 +85,14 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 struct folio *folio = page_folio(page);
 if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
- test_bit(PG_dcache_clean, &folio->flags)) {
+ test_bit(PG_dcache_clean, &folio->flags.f)) {
 void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 memcpy(dst, vfrom, len);
 kunmap_coherent(vfrom);
 } else {
 memcpy(dst, src, len);
 if (boot_cpu_data.dcache.n_aliases)
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
 }
 }
@@ -105,7 +105,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 vto = kmap_atomic(to);
 if (boot_cpu_data.dcache.n_aliases && folio_mapped(src) &&
- test_bit(PG_dcache_clean, &src->flags)) {
+ test_bit(PG_dcache_clean, &src->flags.f)) {
 vfrom = kmap_coherent(from, vaddr);
 copy_page(vto, vfrom);
 kunmap_coherent(vfrom);
@@ -148,7 +148,7 @@ void __update_cache(struct vm_area_struct *vma,
 if (pfn_valid(pfn)) {
 struct folio *folio = page_folio(pfn_to_page(pfn));
- int dirty = !test_and_set_bit(PG_dcache_clean, &folio->flags);
+ int dirty = !test_and_set_bit(PG_dcache_clean, &folio->flags.f);
 if (dirty)
 __flush_purge_region(folio_address(folio), folio_size(folio));
@@ -162,7 +162,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 if (pages_do_alias(addr, vmaddr)) {
 if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
- test_bit(PG_dcache_clean, &folio->flags)) {
+ test_bit(PG_dcache_clean, &folio->flags.f)) {
 void *kaddr;
 kaddr = kmap_coherent(page, vmaddr);
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
index fa50e8f6e7a9..c9f32d5a54b8 100644
--- a/arch/sh/mm/kmap.c
+++ b/arch/sh/mm/kmap.c
@@ -31,7 +31,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
 enum fixed_addresses idx;
 unsigned long vaddr;
- BUG_ON(!test_bit(PG_dcache_clean, &folio->flags));
+ BUG_ON(!test_bit(PG_dcache_clean, &folio->flags.f));
 preempt_disable();
 pagefault_disable();
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 7ed58bf3aaca..df9f7c444c39 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -224,7 +224,7 @@ inline void flush_dcache_folio_impl(struct folio *folio)
 ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
 #define dcache_dirty_cpu(folio) \
- (((folio)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
+ (((folio)->flags.f >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
 static inline void set_dcache_dirty(struct folio *folio, int this_cpu)
 {
@@ -243,7 +243,7 @@ static inline void set_dcache_dirty(struct folio *folio, int this_cpu)
 "bne,pn %%xcc, 1b\n\t"
 " nop"
 : /* no outputs */
- : "r" (mask), "r" (non_cpu_bits), "r" (&folio->flags)
+ : "r" (mask), "r" (non_cpu_bits), "r" (&folio->flags.f)
 : "g1", "g7");
 }
@@ -265,7 +265,7 @@ static inline void clear_dcache_dirty_cpu(struct folio *folio, unsigned long cpu
 " nop\n"
 "2:"
 : /* no outputs */
- : "r" (cpu), "r" (mask), "r" (&folio->flags),
+ : "r" (cpu), "r" (mask), "r" (&folio->flags.f),
 "i" (PG_dcache_cpu_mask), "i" (PG_dcache_cpu_shift)
 : "g1", "g7");
@@ -292,7 +292,7 @@ static void flush_dcache(unsigned long pfn)
 struct folio *folio = page_folio(page);
 unsigned long pg_flags;
- pg_flags = folio->flags;
+ pg_flags = folio->flags.f;
 if (pg_flags & (1UL << PG_dcache_dirty)) {
 int cpu = ((pg_flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask);
@@ -480,7 +480,7 @@ void flush_dcache_folio(struct folio *folio)
 mapping = folio_flush_mapping(folio);
 if (mapping && !mapping_mapped(mapping)) {
- bool dirty = test_bit(PG_dcache_dirty, &folio->flags);
+ bool dirty = test_bit(PG_dcache_dirty, &folio->flags.f);
 if (dirty) {
 int dirty_cpu = dcache_dirty_cpu(folio);
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index c09284302dd3..b68200a0e0c6 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -126,7 +126,7 @@ __setup("debugpat", pat_debug_setup);
 static inline enum page_cache_mode get_page_memtype(struct page *pg)
 {
- unsigned long pg_flags = pg->flags & _PGMT_MASK;
+ unsigned long pg_flags = pg->flags.f & _PGMT_MASK;
 if (pg_flags == _PGMT_WB)
 return _PAGE_CACHE_MODE_WB;
@@ -161,10 +161,10 @@ static inline void set_page_memtype(struct page *pg,
 break;
 }
- old_flags = READ_ONCE(pg->flags);
+ old_flags = READ_ONCE(pg->flags.f);
 do {
 new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
- } while (!try_cmpxchg(&pg->flags, &old_flags, new_flags));
+ } while (!try_cmpxchg(&pg->flags.f, &old_flags, new_flags));
 }
 #else
 static inline enum page_cache_mode get_page_memtype(struct page *pg)
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 23be0e7516ce..5354df52d61f 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -134,8 +134,8 @@ void flush_dcache_folio(struct folio *folio)
 */
 if (mapping && !mapping_mapped(mapping)) {
- if (!test_bit(PG_arch_1, &folio->flags))
- set_bit(PG_arch_1, &folio->flags);
+ if (!test_bit(PG_arch_1, &folio->flags.f))
+ set_bit(PG_arch_1, &folio->flags.f);
 return;
 } else {
@@ -232,7 +232,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
 #if (DCACHE_WAY_SIZE > PAGE_SIZE)
- if (!folio_test_reserved(folio) && test_bit(PG_arch_1, &folio->flags)) {
+ if (!folio_test_reserved(folio) && test_bit(PG_arch_1, &folio->flags.f)) {
 unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
 unsigned long tmp;
@@ -247,10 +247,10 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
 }
 preempt_enable();
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
 }
 #else
- if (!folio_test_reserved(folio) && !test_bit(PG_arch_1, &folio->flags)
+ if (!folio_test_reserved(folio) && !test_bit(PG_arch_1, &folio->flags.f)
 && (vma->vm_flags & VM_EXEC) != 0) {
 for (i = 0; i < nr; i++) {
 void *paddr = kmap_local_folio(folio, i * PAGE_SIZE);
@@ -258,7 +258,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
 __invalidate_icache_page((unsigned long)paddr);
 kunmap_local(paddr);
 }
- set_bit(PG_arch_1, &folio->flags);
+ set_bit(PG_arch_1, &folio->flags.f);
 }
 #endif
 }
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index e80cd8f2c049..8a89f0aa1d4d 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -935,7 +935,7 @@ static int fuse_check_folio(struct folio *folio)
 {
 if (folio_mapped(folio) ||
 folio->mapping != NULL ||
- (folio->flags & PAGE_FLAGS_CHECK_AT_PREP &
+ (folio->flags.f & PAGE_FLAGS_CHECK_AT_PREP &
 ~(1 << PG_locked |
 1 << PG_referenced |
 1 << PG_lru |
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index fe0faad4892f..0c0a80b3baca 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -40,7 +40,7 @@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
 "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
 "state 0x%lx\n",
 bh, (unsigned long long)bh->b_blocknr, bh->b_state,
- bh->b_folio->mapping, bh->b_folio->flags);
+ bh->b_folio->mapping, bh->b_folio->flags.f);
 fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
 gl->gl_name.ln_type, gl->gl_name.ln_number,
 gfs2_glock2aspace(gl));
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index dd3dff95cb24..b697f3c259ef 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -230,7 +230,7 @@ static int jffs2_write_begin(const struct kiocb *iocb,
 goto release_sem;
 }
 }
- jffs2_dbg(1, "end write_begin(). folio->flags %lx\n", folio->flags);
+ jffs2_dbg(1, "end write_begin(). folio->flags %lx\n", folio->flags.f);
 release_sem:
 mutex_unlock(&c->alloc_sem);
@@ -259,7 +259,7 @@ static int jffs2_write_end(const struct kiocb *iocb,
 jffs2_dbg(1, "%s(): ino #%lu, page at 0x%llx, range %d-%d, flags %lx\n",
 __func__, inode->i_ino, folio_pos(folio),
- start, end, folio->flags);
+ start, end, folio->flags.f);
 /* We need to avoid deadlock with page_cache_read() in
 jffs2_garbage_collect_pass(). So the folio must be
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 806b056d2260..56c4da417b6a 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -167,7 +167,7 @@ void nilfs_folio_bug(struct folio *folio)
 printk(KERN_CRIT "NILFS_FOLIO_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
 "mapping=%p ino=%lu\n",
 folio, folio_ref_count(folio),
- (unsigned long long)folio->index, folio->flags, m, ino);
+ (unsigned long long)folio->index, folio->flags.f, m, ino);
 head = folio_buffers(folio);
 if (head) {
diff --git a/fs/proc/page.c b/fs/proc/page.c
index ba3568e97fd1..771e0b6bc630 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -163,7 +163,7 @@ u64 stable_page_flags(const struct page *page)
 snapshot_page(&ps, page);
 folio = &ps.folio_snapshot;
- k = folio->flags;
+ k = folio->flags.f;
 mapping = (unsigned long)folio->mapping;
 is_anon = mapping & FOLIO_MAPPING_ANON;
@@ -238,7 +238,7 @@ u64 stable_page_flags(const struct page *page)
 if (u & (1 << KPF_HUGE))
 u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
 else
- u |= kpf_copy_bit(ps.page_snapshot.flags, KPF_HWPOISON, PG_hwpoison);
+ u |= kpf_copy_bit(ps.page_snapshot.flags.f, KPF_HWPOISON, PG_hwpoison);
 #endif
 u |= kpf_copy_bit(k, KPF_RESERVED, PG_reserved);
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index e75a6cec67be..ca41ce8208c4 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -107,7 +107,7 @@ static int do_readpage(struct folio *folio)
 size_t offset = 0;
 dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
- inode->i_ino, folio->index, i_size, folio->flags);
+ inode->i_ino, folio->index, i_size, folio->flags.f);
 ubifs_assert(c, !folio_test_checked(folio));
 ubifs_assert(c, !folio->private);
@@ -600,7 +600,7 @@ static int populate_page(struct ubifs_info *c, struct folio *folio,
 pgoff_t end_index;
 dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
- inode->i_ino, folio->index, i_size, folio->flags);
+ inode->i_ino, folio->index, i_size, folio->flags.f);
 end_index = (i_size - 1) >> PAGE_SHIFT;
 if (!i_size || folio->index > end_index) {
@@ -988,7 +988,7 @@ static int ubifs_writepage(struct folio *folio, struct writeback_control *wbc)
 int err, len = folio_size(folio);
 dbg_gen("ino %lu, pg %lu, pg flags %#lx",
- inode->i_ino, folio->index, folio->flags);
+ inode->i_ino, folio->index, folio->flags.f);
 ubifs_assert(c, folio->private != NULL);
 /* Is the folio fully outside @i_size? (truncate in progress) */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b61e2d4858cf..da562f23f50c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1024,7 +1024,7 @@ static inline unsigned int compound_order(struct page *page)
 {
 struct folio *folio = (struct folio *)page;
- if (!test_bit(PG_head, &folio->flags))
+ if (!test_bit(PG_head, &folio->flags.f))
 return 0;
 return folio_large_order(folio);
 }
@@ -1554,7 +1554,7 @@ static inline bool is_nommu_shared_mapping(vm_flags_t flags)
 */
 static inline int page_zone_id(struct page *page)
 {
- return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
+ return (page->flags.f >> ZONEID_PGSHIFT) & ZONEID_MASK;
 }
 #ifdef NODE_NOT_IN_PAGE_FLAGS
@@ -1562,7 +1562,7 @@ int page_to_nid(const struct page *page);
 #else
 static inline int page_to_nid(const struct page *page)
 {
- return (PF_POISONED_CHECK(page)->flags >> NODES_PGSHIFT) & NODES_MASK;
+ return (PF_POISONED_CHECK(page)->flags.f >> NODES_PGSHIFT) & NODES_MASK;
 }
 #endif
@@ -1637,14 +1637,14 @@ static inline void page_cpupid_reset_last(struct page *page)
 #else
 static inline int folio_last_cpupid(struct folio *folio)
 {
- return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
+ return (folio->flags.f >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
 }
 int folio_xchg_last_cpupid(struct folio *folio, int cpupid);
 static inline void page_cpupid_reset_last(struct page *page)
 {
- page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
+ page->flags.f |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
 }
 #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
@@ -1740,7 +1740,7 @@ static inline u8 page_kasan_tag(const struct page *page)
 u8 tag = KASAN_TAG_KERNEL;
 if (kasan_enabled()) {
- tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+ tag = (page->flags.f >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
 tag ^= 0xff;
 }
@@ -1755,12 +1755,12 @@ static inline void page_kasan_tag_set(struct page *page, u8 tag)
 return;
 tag ^= 0xff;
- old_flags = READ_ONCE(page->flags);
+ old_flags = READ_ONCE(page->flags.f);
 do {
 flags = old_flags;
 flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
 flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
- } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
+ } while (unlikely(!try_cmpxchg(&page->flags.f, &old_flags, flags)));
 }
 static inline void page_kasan_tag_reset(struct page *page)
@@ -1804,13 +1804,13 @@ static inline pg_data_t *folio_pgdat(const struct folio *folio)
 #ifdef SECTION_IN_PAGE_FLAGS
 static inline void set_page_section(struct page *page, unsigned long section)
 {
- page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
- page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
+ page->flags.f &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
+ page->flags.f |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
 }
 static inline unsigned long page_to_section(const struct page *page)
 {
- return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
+ return (page->flags.f >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
 }
 #endif
@@ -2015,14 +2015,14 @@ static inline bool folio_is_longterm_pinnable(struct folio *folio)
 static inline void set_page_zone(struct page *page, enum zone_type zone)
 {
- page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
- page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
+ page->flags.f &= ~(ZONES_MASK << ZONES_PGSHIFT);
+ page->flags.f |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
 }
 static inline void set_page_node(struct page *page, unsigned long node)
 {
- page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
- page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
+ page->flags.f &= ~(NODES_MASK << NODES_PGSHIFT);
+ page->flags.f |= (node & NODES_MASK) << NODES_PGSHIFT;
 }
 static inline void set_page_links(struct page *page, enum zone_type zone,
@@ -2064,7 +2064,7 @@ static inline long compound_nr(struct page *page)
 {
 struct folio *folio = (struct folio *)page;
- if (!test_bit(PG_head, &folio->flags))
+ if (!test_bit(PG_head, &folio->flags.f))
 return 1;
 return folio_large_nr_pages(folio);
 }
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 89b518ff097e..150302b4a905 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -143,7 +143,7 @@ static inline int lru_tier_from_refs(int refs, bool workingset)
 static inline int folio_lru_refs(struct folio *folio)
 {
- unsigned long flags = READ_ONCE(folio->flags);
+ unsigned long flags = READ_ONCE(folio->flags.f);
 if (!(flags & BIT(PG_referenced)))
 return 0;
@@ -156,7 +156,7 @@ static inline int folio_lru_refs(struct folio *folio)
 static inline int folio_lru_gen(struct folio *folio)
 {
- unsigned long flags = READ_ONCE(folio->flags);
+ unsigned long flags = READ_ONCE(folio->flags.f);
 return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
 }
@@ -268,7 +268,7 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio,
 gen = lru_gen_from_seq(seq);
 flags = (gen + 1UL) << LRU_GEN_PGOFF;
 /* see the comment on MIN_NR_GENS about PG_active */
- set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);
+ set_mask_bits(&folio->flags.f, LRU_GEN_MASK | BIT(PG_active), flags);
 lru_gen_update_size(lruvec, folio, -1, gen);
 /* for folio_rotate_reclaimable() */
@@ -293,7 +293,7 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
 /* for folio_migrate_flags() */
 flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
- flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
+ flags = set_mask_bits(&folio->flags.f, LRU_GEN_MASK, flags);
 gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
 lru_gen_update_size(lruvec, folio, gen, -1);
@@ -304,9 +304,9 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
 static inline void folio_migrate_refs(struct folio *new, struct folio *old)
 {
- unsigned long refs = READ_ONCE(old->flags) & LRU_REFS_MASK;
+ unsigned long refs = READ_ONCE(old->flags.f) & LRU_REFS_MASK;
- set_mask_bits(&new->flags, LRU_REFS_MASK, refs);
+ set_mask_bits(&new->flags.f, LRU_REFS_MASK, refs);
 }
 #else /* !CONFIG_LRU_GEN */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d247da2fdb52..d934a3a5b443 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -34,6 +34,10 @@ struct address_space;
 struct futex_private_hash;
 struct mem_cgroup;
+typedef struct {
+ unsigned long f;
+} memdesc_flags_t;
+
 /*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
@@ -72,7 +76,7 @@ struct mem_cgroup;
 #endif
 struct page {
- unsigned long flags; /* Atomic flags, some possibly
+ memdesc_flags_t flags; /* Atomic flags, some possibly
 * updated asynchronously */
 /*
 * Five words (20/40 bytes) are available in this union.
@@ -383,7 +387,7 @@ struct folio {
 union {
 struct {
 /* public: */
- unsigned long flags;
+ memdesc_flags_t flags;
 union {
 struct list_head lru;
 /* private: avoid cluttering the output */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9d3ea9085556..990560cd99ee 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1186,7 +1186,7 @@ static inline bool zone_is_empty(struct zone *zone)
 static inline enum zone_type page_zonenum(const struct page *page)
 {
 ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
- return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
+ return (page->flags.f >> ZONES_PGSHIFT) & ZONES_MASK;
 }
 static inline enum zone_type folio_zonenum(const struct folio *folio)
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 8d3fa3a91ce4..d53a86e68c89 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -217,7 +217,7 @@ static __always_inline const struct page *page_fixed_fake_head(const struct page
 * cold cacheline in some cases.
 */
 if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
- test_bit(PG_head, &page->flags)) {
+ test_bit(PG_head, &page->flags.f)) {
 /*
 * We can safely access the field of the @page[1] with PG_head
 * because the @page is a compound page composed with at least
@@ -325,14 +325,14 @@ static __always_inline int PageTail(const struct page *page)
 static __always_inline int PageCompound(const struct page *page)
 {
- return test_bit(PG_head, &page->flags) ||
+ return test_bit(PG_head, &page->flags.f) ||
 READ_ONCE(page->compound_head) & 1;
 }
 #define PAGE_POISON_PATTERN -1l
 static inline int PagePoisoned(const struct page *page)
 {
- return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
+ return READ_ONCE(page->flags.f) == PAGE_POISON_PATTERN;
 }
 #ifdef CONFIG_DEBUG_VM
@@ -349,8 +349,8 @@ static const unsigned long *const_folio_flags(const struct folio *folio,
 const struct page *page = &folio->page;
 VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
- VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
- return &page[n].flags;
+ VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags.f), page);
+ return &page[n].flags.f;
 }
 static unsigned long *folio_flags(struct folio *folio, unsigned n)
@@ -358,8 +358,8 @@ static unsigned long *folio_flags(struct folio *folio, unsigned n)
 struct page *page = &folio->page;
 VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
- VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
- return &page[n].flags;
+ VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags.f), page);
+ return &page[n].flags.f;
 }
 /*
@@ -449,37 +449,37 @@ FOLIO_CLEAR_FLAG(name, page)
 #define TESTPAGEFLAG(uname, lname, policy) \
 FOLIO_TEST_FLAG(lname, FOLIO_##policy) \
 static __always_inline int Page##uname(const struct page *page) \
-{ return test_bit(PG_##lname, &policy(page, 0)->flags); }
+{ return test_bit(PG_##lname, &policy(page, 0)->flags.f); }
 #define SETPAGEFLAG(uname, lname, policy) \
 FOLIO_SET_FLAG(lname, FOLIO_##policy) \
 static __always_inline void SetPage##uname(struct page *page) \
-{ set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ set_bit(PG_##lname, &policy(page, 1)->flags.f); }
 #define CLEARPAGEFLAG(uname, lname, policy) \
 FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \
 static __always_inline void ClearPage##uname(struct page *page) \
-{ clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ clear_bit(PG_##lname, &policy(page, 1)->flags.f); }
 #define __SETPAGEFLAG(uname, lname, policy) \
 __FOLIO_SET_FLAG(lname, FOLIO_##policy) \
 static __always_inline void __SetPage##uname(struct page *page) \
-{ __set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ __set_bit(PG_##lname, &policy(page, 1)->flags.f); }
 #define __CLEARPAGEFLAG(uname, lname, policy) \
 __FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \
 static __always_inline void __ClearPage##uname(struct page *page) \
-{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ __clear_bit(PG_##lname, &policy(page, 1)->flags.f); }
 #define TESTSETFLAG(uname, lname, policy) \
 FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy) \
 static __always_inline int TestSetPage##uname(struct page *page) \
-{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags.f); }
 #define TESTCLEARFLAG(uname, lname, policy) \
 FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy) \
 static __always_inline int TestClearPage##uname(struct page *page) \
-{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags.f); }
 #define PAGEFLAG(uname, lname, policy) \
 TESTPAGEFLAG(uname, lname, policy) \
@@ -846,7 +846,7 @@ static __always_inline bool folio_test_head(const struct folio *folio)
 static __always_inline int PageHead(const struct page *page)
 {
 PF_POISONED_CHECK(page);
- return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
+ return test_bit(PG_head, &page->flags.f) && !page_is_fake_head(page);
 }
 __SETPAGEFLAG(Head, head, PF_ANY)
@@ -1170,28 +1170,28 @@ static __always_inline int PageAnonExclusive(const struct page *page)
 */
 if (PageHuge(page))
 page = compound_head(page);
- return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
+ return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
 }
 static __always_inline void SetPageAnonExclusive(struct page *page)
 {
 VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
 VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
- set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
+ set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
 }
 static __always_inline void ClearPageAnonExclusive(struct page *page)
 {
 VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
 VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
- clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
+ clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
 }
 static __always_inline void __ClearPageAnonExclusive(struct page *page)
 {
 VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
 VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
- __clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
+ __clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
 }
 #ifdef CONFIG_MMU
@@ -1241,7 +1241,7 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
 */
 static inline int folio_has_private(const struct folio *folio)
 {
- return !!(folio->flags & PAGE_FLAGS_PRIVATE);
+ return !!(folio->flags.f & PAGE_FLAGS_PRIVATE);
 }
 #undef PF_ANY
diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h
index 8a7f4f802c57..38a82d65e58e 100644
--- a/include/linux/pgalloc_tag.h
+++ b/include/linux/pgalloc_tag.h
@@ -107,7 +107,8 @@ static inline bool get_page_tag_ref(struct page *page, union codetag_ref *ref,
 if (static_key_enabled(&mem_profiling_compressed)) {
 pgalloc_tag_idx idx;
- idx = (page->flags >> alloc_tag_ref_offs) & alloc_tag_ref_mask;
+ idx = (page->flags.f >> alloc_tag_ref_offs) &
+ alloc_tag_ref_mask;
 idx_to_ref(idx, ref);
 handle->page = page;
 } else {
@@ -149,11 +150,11 @@ static inline void update_page_tag_ref(union pgtag_ref_handle handle, union code
 idx = (unsigned long)ref_to_idx(ref);
 idx = (idx & alloc_tag_ref_mask) << alloc_tag_ref_offs;
 do {
- old_flags = READ_ONCE(page->flags);
+ old_flags = READ_ONCE(page->flags.f);
 flags = old_flags;
 flags &= ~(alloc_tag_ref_mask << alloc_tag_ref_offs);
 flags |= idx;
- } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
+ } while (unlikely(!try_cmpxchg(&page->flags.f, &old_flags, flags)));
 } else {
 if (WARN_ON(!handle.ref || !ref))
 return;
diff --git a/include/trace/events/page_ref.h b/include/trace/events/page_ref.h
index fe33a255b7d0..ea6b5c4baf3d 100644
--- a/include/trace/events/page_ref.h
+++ b/include/trace/events/page_ref.h
@@ -28,7 +28,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_template,
 TP_fast_assign(
 __entry->pfn = page_to_pfn(page);
- __entry->flags = page->flags;
+ __entry->flags = page->flags.f;
 __entry->count = page_ref_count(page);
 __entry->mapcount = atomic_read(&page->_mapcount);
 __entry->mapping = page->mapping;
@@ -77,7 +77,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_and_test_template,
 TP_fast_assign(
 __entry->pfn = page_to_pfn(page);
- __entry->flags = page->flags;
+ __entry->flags = page->flags.f;
 __entry->count = page_ref_count(page);
 __entry->mapcount = atomic_read(&page->_mapcount);
 __entry->mapping = page->mapping;
diff --git a/mm/filemap.c b/mm/filemap.c
index 1a388b11cfa9..f3a6c24897f4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1140,10 +1140,10 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync,
 */
 flags = wait->flags;
 if (flags & WQ_FLAG_EXCLUSIVE) {
- if (test_bit(key->bit_nr, &key->folio->flags))
+ if (test_bit(key->bit_nr, &key->folio->flags.f))
 return -1;
 if (flags & WQ_FLAG_CUSTOM) {
- if (test_and_set_bit(key->bit_nr, &key->folio->flags))
+ if (test_and_set_bit(key->bit_nr, &key->folio->flags.f))
 return -1;
 flags |= WQ_FLAG_DONE;
 }
@@ -1226,9 +1226,9 @@ static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
 struct wait_queue_entry *wait)
 {
 if (wait->flags & WQ_FLAG_EXCLUSIVE) {
- if (test_and_set_bit(bit_nr, &folio->flags))
+ if (test_and_set_bit(bit_nr, &folio->flags.f))
 return false;
- } else if (test_bit(bit_nr, &folio->flags))
+ } else if (test_bit(bit_nr, &folio->flags.f))
 return false;
 wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d89992b65acc..aac5f0a2cb54 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3303,8 +3303,8 @@ static void __split_folio_to_order(struct folio *folio, int old_order,
 * unreferenced sub-pages of an anonymous THP: we can simply drop
 * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
 */
- new_folio->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
- new_folio->flags |= (folio->flags &
+ new_folio->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;
+ new_folio->flags.f |= (folio->flags.f &
 ((1L << PG_referenced) |
 (1L << PG_swapbacked) |
 (1L << PG_swapcache) |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index fc30ca4804bf..c15ffee7d32b 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1707,10 +1707,10 @@ static int identify_page_state(unsigned long pfn, struct page *p,
 * carried out only if the first check can't determine the page status.
 */
 for (ps = error_states;; ps++)
- if ((p->flags & ps->mask) == ps->res)
+ if ((p->flags.f & ps->mask) == ps->res)
 break;
- page_flags |= (p->flags & (1UL << PG_dirty));
+ page_flags |= (p->flags.f & (1UL << PG_dirty));
 if (!ps->mask)
 for (ps = error_states;; ps++)
@@ -2137,7 +2137,7 @@ retry:
 return action_result(pfn, MF_MSG_FREE_HUGE, res);
 }
- page_flags = folio->flags;
+ page_flags = folio->flags.f;
 if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
 folio_unlock(folio);
@@ -2398,7 +2398,7 @@ try_again:
 * folio_remove_rmap_*() in try_to_unmap_one(). So to determine page
 * status correctly, we save a copy of the page flags at this time.
 */
- page_flags = folio->flags;
+ page_flags = folio->flags.f;
 /*
 * __munlock_folio() may clear a writeback folio's LRU flag without
@@ -2744,13 +2744,13 @@ static int soft_offline_in_use_page(struct page *page)
 putback_movable_pages(&pagelist);
 pr_info("%#lx: %s migration failed %ld, type %pGp\n",
- pfn, msg_page[huge], ret, &page->flags);
+ pfn, msg_page[huge], ret, &page->flags.f);
 if (ret > 0)
 ret = -EBUSY;
 }
 } else {
 pr_info("%#lx: %s isolation failed, page count %d, type %pGp\n",
- pfn, msg_page[huge], page_count(page), &page->flags);
+ pfn, msg_page[huge], page_count(page), &page->flags.f);
 ret = -EBUSY;
 }
 return ret;
diff --git a/mm/mmzone.c b/mm/mmzone.c
index f9baa8882fbf..0c8f181d9d50 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -99,14 +99,14 @@ int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
 unsigned long old_flags, flags;
 int last_cpupid;
- old_flags = READ_ONCE(folio->flags);
+ old_flags = READ_ONCE(folio->flags.f);
 do {
 flags = old_flags;
 last_cpupid = (flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
 flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
 flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
- } while (unlikely(!try_cmpxchg(&folio->flags, &old_flags, flags)));
+ } while (unlikely(!try_cmpxchg(&folio->flags.f, &old_flags, flags)));
 return last_cpupid;
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2ee21e46f0fb..ca9e6b9633f7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -950,7 +950,7 @@ static inline void __free_one_page(struct page *page,
 bool to_tail;
 VM_BUG_ON(!zone_is_initialized(zone));
- VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
+ VM_BUG_ON_PAGE(page->flags.f & PAGE_FLAGS_CHECK_AT_PREP, page);
 VM_BUG_ON(migratetype == -1);
 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
@@ -1043,7 +1043,7 @@ static inline bool page_expected_state(struct page *page,
 page->memcg_data |
 #endif
 page_pool_page_is_pp(page) |
- (page->flags & check_flags)))
+ (page->flags.f & check_flags)))
 return false;
 return true;
@@ -1059,7 +1059,7 @@ static const char *page_bad_reason(struct page *page, unsigned long flags)
 bad_reason = "non-NULL mapping";
 if (unlikely(page_ref_count(page) != 0))
 bad_reason = "nonzero _refcount";
- if (unlikely(page->flags & flags)) {
+ if (unlikely(page->flags.f & flags)) {
 if (flags == PAGE_FLAGS_CHECK_AT_PREP)
 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
 else
@@ -1358,7 +1358,7 @@ __always_inline bool free_pages_prepare(struct page *page,
 int i;
 if (compound) {
- page[1].flags &= ~PAGE_FLAGS_SECOND;
+ page[1].flags.f &= ~PAGE_FLAGS_SECOND;
 #ifdef NR_PAGES_IN_LARGE_FOLIO
 folio->_nr_pages = 0;
 #endif
@@ -1372,7 +1372,7 @@ __always_inline bool free_pages_prepare(struct page *page,
 continue;
 }
 }
- (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+ (page + i)->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;
 }
 }
 if (folio_test_anon(folio)) {
@@ -1391,7 +1391,7 @@ __always_inline bool free_pages_prepare(struct page *page,
 }
 page_cpupid_reset_last(page);
- page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+ page->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;
 reset_page_owner(page, order);
 page_table_check_free(page, order);
 pgalloc_tag_sub(page, 1 << order);
diff --git a/mm/swap.c b/mm/swap.c
index cb164f9ef9e3..6dd22a904b37 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -387,14 +387,14 @@ static void __lru_cache_activate_folio(struct folio *folio)
 static void lru_gen_inc_refs(struct folio *folio)
 {
- unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
+ unsigned long new_flags, old_flags = READ_ONCE(folio->flags.f);
 if (folio_test_unevictable(folio))
 return;
 /* see the comment on LRU_REFS_FLAGS */
 if (!folio_test_referenced(folio)) {
- set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced));
+ set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced));
 return;
 }
@@ -406,7 +406,7 @@ static void lru_gen_inc_refs(struct folio *folio)
 }
 new_flags = old_flags + BIT(LRU_REFS_PGOFF);
- } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
+ } while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags));
 }
 static bool lru_gen_clear_refs(struct folio *folio)
@@ -418,7 +418,7 @@ static bool lru_gen_clear_refs(struct folio *folio)
 if (gen < 0)
 return true;
- set_mask_bits(&folio->flags, LRU_REFS_FLAGS | BIT(PG_workingset), 0);
+ set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS | BIT(PG_workingset), 0);
 lrugen = &folio_lruvec(folio)->lrugen;
 /* whether can do without shuffling under the LRU lock */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b9a1cfeb2ddf..e336577c4454 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -888,11 +888,11 @@ static bool lru_gen_set_refs(struct folio *folio)
 {
 /* see the comment on LRU_REFS_FLAGS */
 if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) {
- set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced));
+ set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced));
 return false;
 }
- set_mask_bits(&folio->flags, LRU_REFS_FLAGS, BIT(PG_workingset));
+ set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS, BIT(PG_workingset));
 return true;
 }
 #else
@@ -3257,13 +3257,13 @@ static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv)
 /* promote pages accessed through page tables */
 static int folio_update_gen(struct folio *folio, int gen)
 {
- unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
+ unsigned long new_flags, old_flags = READ_ONCE(folio->flags.f);
 VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
 /* see the comment on LRU_REFS_FLAGS */
 if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) {
- set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced));
+ set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced));
 return -1;
 }
@@ -3274,7 +3274,7 @@ static int folio_update_gen(struct folio *folio, int gen)
 new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_FLAGS);
 new_flags |= ((gen + 1UL) << LRU_GEN_PGOFF) | BIT(PG_workingset);
- } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
+ } while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags));
 return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
 }
@@ -3285,7 +3285,7 @@ static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclai
 int type = folio_is_file_lru(folio);
 struct lru_gen_folio *lrugen = &lruvec->lrugen;
 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
- unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
+ unsigned long new_flags, old_flags = READ_ONCE(folio->flags.f);
 VM_WARN_ON_ONCE_FOLIO(!(old_flags & LRU_GEN_MASK), folio);
@@ -3302,7 +3302,7 @@ static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclai
 /* for folio_end_writeback() */
 if (reclaiming)
 new_flags |= BIT(PG_reclaim);
- } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
+ } while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags));
 lru_gen_update_size(lruvec, folio, old_gen, new_gen);
@@ -4553,7 +4553,7 @@ static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct sca
 /* see the comment on LRU_REFS_FLAGS */
 if (!folio_test_referenced(folio))
- set_mask_bits(&folio->flags, LRU_REFS_MASK, 0);
+ set_mask_bits(&folio->flags.f, LRU_REFS_MASK, 0);
 /* for shrink_folio_list() */
 folio_clear_reclaim(folio);
@@ -4766,7 +4766,7 @@ retry:
 /* don't add rejected folios to the oldest generation */
 if (lru_gen_folio_seq(lruvec, folio, false) == min_seq[type])
- set_mask_bits(&folio->flags, LRU_REFS_FLAGS, BIT(PG_active));
+ set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS, BIT(PG_active));
 }
 spin_lock_irq(&lruvec->lru_lock);
diff --git a/mm/workingset.c b/mm/workingset.c
index 6e7f4cb1b9a7..68a76a91111f 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -318,7 +318,7 @@ static void lru_gen_refault(struct folio *folio, void *shadow)
 folio_set_workingset(folio);
 mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta);
 } else
- set_mask_bits(&folio->flags, LRU_REFS_MASK, (refs - 1UL) << LRU_REFS_PGOFF);
+ set_mask_bits(&folio->flags.f, LRU_REFS_MASK, (refs - 1UL) << LRU_REFS_PGOFF);
 unlock:
 rcu_read_unlock();
 }
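
The mechanical change above follows from the new `memdesc_flags_t` type added to include/linux/mm_types.h: `page->flags` and `folio->flags` become a one-member struct instead of a bare `unsigned long`, so every open-coded access now goes through the `.f` member while the flag accessors generated in page-flags.h keep their existing signatures. A minimal userspace sketch of the pattern is below; the `set_bit()`/`test_bit()` stand-ins and the trimmed-down `struct page` are illustrative only, not the kernel definitions.

```c
#include <stdio.h>

/* Illustrative stand-ins for the kernel's bitops, not the real implementations. */
static inline void set_bit(int nr, unsigned long *addr)        { *addr |= 1UL << nr; }
static inline int  test_bit(int nr, const unsigned long *addr) { return (*addr >> nr) & 1; }

/* The wrapper type introduced by the patch: a struct holding the single flags word. */
typedef struct {
	unsigned long f;
} memdesc_flags_t;

/* Trimmed-down page analogue for the sketch. */
struct page {
	memdesc_flags_t flags;	/* was: unsigned long flags; */
};

#define PG_dcache_clean 7	/* arbitrary bit number for the example */

int main(void)
{
	struct page page = { .flags = { 0 } };

	/* Call sites that used to pass '&page->flags' now pass '&page->flags.f'. */
	set_bit(PG_dcache_clean, &page.flags.f);
	printf("dcache clean: %d\n", test_bit(PG_dcache_clean, &page.flags.f));

	/* Whole-word reads and masks likewise gain the '.f' member. */
	printf("raw flags: %#lx\n", page.flags.f);
	return 0;
}
```

The wrapper adds no runtime cost; presumably the point is that any remaining code treating the flags word as a bare integer now fails to compile, so the conversion cannot silently miss a call site.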