mm, swap: use unified helper for swap cache look up
The swap cache lookup helper swap_cache_get_folio currently does readahead
updates as well, so callers that are not doing swapin from any VMA or
mapping are forced to reuse filemap helpers instead, and have to access
the swap cache space directly.

So decouple the readahead update from the swap cache lookup: move the
readahead update part into a standalone helper, let callers invoke the
readahead update helper only when they actually do readahead, and convert
all swap cache lookups to use swap_cache_get_folio.

After this commit, only three special cases still access the swap cache
space directly: huge memory splitting, migration, and shmem replacing,
because they need to lock the XArray. The following commits will wrap
their accesses to the swap cache too, with special helpers.

Worth noting, dropbehind is currently not supported for anon folios, so
we will never see a dropbehind folio in the swap cache; the unified
helper can be updated later to handle that.

While at it, add proper kerneldoc for the touched helpers.

No functional change.

Link: https://lkml.kernel.org/r/20250916160100.31545-3-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Barry Song <baohua@kernel.org>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Chris Li <chrisl@kernel.org>
Acked-by: Nhat Pham <nphamcs@gmail.com>
Suggested-by: Chris Li <chrisl@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: kernel test robot <oliver.sang@intel.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Yosry Ahmed <yosryahmed@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit f28124617f
parent 87cc51571a
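For readers following the conversion, here is a minimal sketch (not part of the patch; the wrapper name is made up for illustration) of the calling convention this commit establishes: lookup and readahead accounting are now two separate calls, so callers that do not perform swapin from a VMA simply omit the second one.

/* Illustrative only: how a swapin path combines the two new helpers. */
static struct folio *swapin_lookup_sketch(swp_entry_t entry,
                                          struct vm_area_struct *vma,
                                          unsigned long addr)
{
        /* Plain lookup: returns the folio with a reference held, or NULL. */
        struct folio *folio = swap_cache_get_folio(entry);

        /* Only swapin-from-VMA paths want readahead statistics updated. */
        if (folio)
                swap_update_readahead(folio, vma, addr);

        /*
         * Per the new kerneldoc, the caller must still lock the folio
         * and re-check that it matches @entry before using it.
         */
        return folio;
}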
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4660,9 +4660,11 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	if (unlikely(!si))
 		goto out;
 
-	folio = swap_cache_get_folio(entry, vma, vmf->address);
-	if (folio)
+	folio = swap_cache_get_folio(entry);
+	if (folio) {
+		swap_update_readahead(folio, vma, vmf->address);
 		page = folio_file_page(folio, swp_offset(entry));
+	}
 	swapcache = folio;
 
 	if (!folio) {
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -76,8 +76,7 @@ static unsigned char mincore_swap(swp_entry_t entry, bool shmem)
 		if (!si)
 			return 0;
 	}
-	folio = filemap_get_entry(swap_address_space(entry),
-				  swap_cache_index(entry));
+	folio = swap_cache_get_folio(entry);
 	if (shmem)
 		put_swap_device(si);
 	/* The swap cache space contains either folio, shadow or NULL */
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2317,7 +2317,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 	}
 
 	/* Look it up and read it in.. */
-	folio = swap_cache_get_folio(swap, NULL, 0);
+	folio = swap_cache_get_folio(swap);
 	if (!folio) {
 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
 			/* Direct swapin skipping swap cache & readahead */
@@ -2342,6 +2342,8 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 			count_vm_event(PGMAJFAULT);
 			count_memcg_event_mm(fault_mm, PGMAJFAULT);
 		}
+	} else {
+		swap_update_readahead(folio, NULL, 0);
 	}
 
 	if (order > folio_order(folio)) {
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -62,8 +62,7 @@ void delete_from_swap_cache(struct folio *folio);
 void clear_shadow_from_swap_cache(int type, unsigned long begin,
 				  unsigned long end);
 void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr);
-struct folio *swap_cache_get_folio(swp_entry_t entry,
-		struct vm_area_struct *vma, unsigned long addr);
+struct folio *swap_cache_get_folio(swp_entry_t entry);
 struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		struct vm_area_struct *vma, unsigned long addr,
 		struct swap_iocb **plug);
@@ -74,6 +73,8 @@ struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
 		struct mempolicy *mpol, pgoff_t ilx);
 struct folio *swapin_readahead(swp_entry_t entry, gfp_t flag,
 		struct vm_fault *vmf);
+void swap_update_readahead(struct folio *folio, struct vm_area_struct *vma,
+		unsigned long addr);
 
 static inline unsigned int folio_swap_flags(struct folio *folio)
 {
@@ -159,6 +160,11 @@ static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
 	return NULL;
 }
 
+static inline void swap_update_readahead(struct folio *folio,
+		struct vm_area_struct *vma, unsigned long addr)
+{
+}
+
 static inline int swap_writeout(struct folio *folio,
 		struct swap_iocb **swap_plug)
 {
@@ -169,8 +175,7 @@ static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entr
 {
 }
 
-static inline struct folio *swap_cache_get_folio(swp_entry_t entry,
-		struct vm_area_struct *vma, unsigned long addr)
+static inline struct folio *swap_cache_get_folio(swp_entry_t entry)
 {
 	return NULL;
 }
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -69,6 +69,27 @@ void show_swap_cache_info(void)
 	printk("Total swap = %lukB\n", K(total_swap_pages));
 }
 
+/**
+ * swap_cache_get_folio - Looks up a folio in the swap cache.
+ * @entry: swap entry used for the lookup.
+ *
+ * A found folio will be returned unlocked and with its refcount increased.
+ *
+ * Context: Caller must ensure @entry is valid and protect the swap device
+ * with reference count or locks.
+ * Return: Returns the found folio on success, NULL otherwise. The caller
+ * must lock and check if the folio still matches the swap entry before
+ * use.
+ */
+struct folio *swap_cache_get_folio(swp_entry_t entry)
+{
+	struct folio *folio = filemap_get_folio(swap_address_space(entry),
+						swap_cache_index(entry));
+	if (IS_ERR(folio))
+		return NULL;
+	return folio;
+}
+
 void *get_shadow_from_swap_cache(swp_entry_t entry)
 {
 	struct address_space *address_space = swap_address_space(entry);
@@ -272,55 +293,43 @@ static inline bool swap_use_vma_readahead(void)
 	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
 }
 
-/*
- * Lookup a swap entry in the swap cache. A found folio will be returned
- * unlocked and with its refcount incremented - we rely on the kernel
- * lock getting page table operations atomic even if we drop the folio
- * lock before returning.
- *
- * Caller must lock the swap device or hold a reference to keep it valid.
+/**
+ * swap_update_readahead - Update the readahead statistics of VMA or globally.
+ * @folio: the swap cache folio that just got hit.
+ * @vma: the VMA that should be updated, could be NULL for global update.
+ * @addr: the addr that triggered the swapin, ignored if @vma is NULL.
  */
-struct folio *swap_cache_get_folio(swp_entry_t entry,
-		struct vm_area_struct *vma, unsigned long addr)
+void swap_update_readahead(struct folio *folio, struct vm_area_struct *vma,
+			   unsigned long addr)
 {
-	struct folio *folio;
+	bool readahead, vma_ra = swap_use_vma_readahead();
 
-	folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry));
-	if (!IS_ERR(folio)) {
-		bool vma_ra = swap_use_vma_readahead();
-		bool readahead;
+	/*
+	 * At the moment, we don't support PG_readahead for anon THP
+	 * so let's bail out rather than confusing the readahead stat.
+	 */
+	if (unlikely(folio_test_large(folio)))
+		return;
 
-		/*
-		 * At the moment, we don't support PG_readahead for anon THP
-		 * so let's bail out rather than confusing the readahead stat.
-		 */
-		if (unlikely(folio_test_large(folio)))
-			return folio;
+	readahead = folio_test_clear_readahead(folio);
+	if (vma && vma_ra) {
+		unsigned long ra_val;
+		int win, hits;
 
-		readahead = folio_test_clear_readahead(folio);
-		if (vma && vma_ra) {
-			unsigned long ra_val;
-			int win, hits;
-
-			ra_val = GET_SWAP_RA_VAL(vma);
-			win = SWAP_RA_WIN(ra_val);
-			hits = SWAP_RA_HITS(ra_val);
-			if (readahead)
-				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
-			atomic_long_set(&vma->swap_readahead_info,
-					SWAP_RA_VAL(addr, win, hits));
-		}
-
-		if (readahead) {
-			count_vm_event(SWAP_RA_HIT);
-			if (!vma || !vma_ra)
-				atomic_inc(&swapin_readahead_hits);
-		}
-	} else {
-		folio = NULL;
+		ra_val = GET_SWAP_RA_VAL(vma);
+		win = SWAP_RA_WIN(ra_val);
+		hits = SWAP_RA_HITS(ra_val);
+		if (readahead)
+			hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
+		atomic_long_set(&vma->swap_readahead_info,
+				SWAP_RA_VAL(addr, win, hits));
 	}
 
-	return folio;
+	if (readahead) {
+		count_vm_event(SWAP_RA_HIT);
+		if (!vma || !vma_ra)
+			atomic_inc(&swapin_readahead_hits);
+	}
 }
 
 struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
@@ -336,14 +345,10 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	*new_page_allocated = false;
 	for (;;) {
 		int err;
-		/*
-		 * First check the swap cache. Since this is normally
-		 * called after swap_cache_get_folio() failed, re-calling
-		 * that would confuse statistics.
-		 */
-		folio = filemap_get_folio(swap_address_space(entry),
-					  swap_cache_index(entry));
-		if (!IS_ERR(folio))
+
+		/* Check the swap cache in case the folio is already there */
+		folio = swap_cache_get_folio(entry);
+		if (folio)
 			goto got_folio;
 
 		/*
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -213,15 +213,14 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
 				unsigned long offset, unsigned long flags)
 {
 	swp_entry_t entry = swp_entry(si->type, offset);
-	struct address_space *address_space = swap_address_space(entry);
 	struct swap_cluster_info *ci;
 	struct folio *folio;
 	int ret, nr_pages;
 	bool need_reclaim;
 
 again:
-	folio = filemap_get_folio(address_space, swap_cache_index(entry));
-	if (IS_ERR(folio))
+	folio = swap_cache_get_folio(entry);
+	if (!folio)
 		return 0;
 
 	nr_pages = folio_nr_pages(folio);
@@ -2131,7 +2130,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		pte_unmap(pte);
 		pte = NULL;
 
-		folio = swap_cache_get_folio(entry, vma, addr);
+		folio = swap_cache_get_folio(entry);
 		if (!folio) {
 			struct vm_fault vmf = {
 				.vma = vma,
@@ -2357,8 +2356,8 @@ retry:
 	       (i = find_next_to_unuse(si, i)) != 0) {
 
 		entry = swp_entry(type, i);
-		folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry));
-		if (IS_ERR(folio))
+		folio = swap_cache_get_folio(entry);
+		if (!folio)
 			continue;
 
 		/*
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -1489,9 +1489,8 @@ retry:
 	 * separately to allow proper handling.
 	 */
 	if (!src_folio)
-		folio = filemap_get_folio(swap_address_space(entry),
-					  swap_cache_index(entry));
-	if (!IS_ERR_OR_NULL(folio)) {
+		folio = swap_cache_get_folio(entry);
+	if (folio) {
 		if (folio_test_large(folio)) {
 			ret = -EBUSY;
 			folio_put(folio);