mm, swap: cleanup swap cache API and add kerneldoc
In preparation for replacing the swap cache backend with the swap table,
clean up and add proper kerneldoc for all swap cache APIs. Now all swap
cache APIs are well-defined with consistent names. No functional change;
only renaming and documentation.

Link: https://lkml.kernel.org/r/20250916160100.31545-9-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Acked-by: Chris Li <chrisl@kernel.org>
Reviewed-by: Barry Song <baohua@kernel.org>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
Suggested-by: Chris Li <chrisl@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: kernel test robot <oliver.sang@intel.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0fcf8ef4fd
commit fd8d4f862f
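For reference, the renames in this patch are mechanical:

    get_shadow_from_swap_cache()   -> swap_cache_get_shadow()
    add_to_swap_cache()            -> swap_cache_add_folio()
    delete_from_swap_cache()       -> swap_cache_del_folio()
    __delete_from_swap_cache()     -> __swap_cache_del_folio()
    clear_shadow_from_swap_cache() -> swap_cache_clear_shadow()

The only behavioral nit is in the !CONFIG_SWAP stubs, where the add helper
now returns -EINVAL instead of a bare -1.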
mm/filemap.c:
@@ -4525,7 +4525,7 @@ static void filemap_cachestat(struct address_space *mapping,
                                 * invalidation, so there might not be
                                 * a shadow in the swapcache (yet).
                                 */
-                                shadow = get_shadow_from_swap_cache(swp);
+                                shadow = swap_cache_get_shadow(swp);
                                 if (!shadow)
                                         goto resched;
                         }
mm/memory-failure.c:
@@ -1127,7 +1127,7 @@ static int me_swapcache_clean(struct page_state *ps, struct page *p)
         struct folio *folio = page_folio(p);
         int ret;
 
-        delete_from_swap_cache(folio);
+        swap_cache_del_folio(folio);
 
         ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_RECOVERED;
         folio_unlock(folio);
mm/memory.c:
@@ -4699,7 +4699,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 
                 memcg1_swapin(entry, nr_pages);
 
-                shadow = get_shadow_from_swap_cache(entry);
+                shadow = swap_cache_get_shadow(entry);
                 if (shadow)
                         workingset_refault(folio, shadow);
 
mm/shmem.c (10 changed lines):
@@ -1661,13 +1661,13 @@ try_split:
                 }
 
                 /*
-                 * The delete_from_swap_cache() below could be left for
+                 * The swap_cache_del_folio() below could be left for
                  * shrink_folio_list()'s folio_free_swap() to dispose of;
                  * but I'm a little nervous about letting this folio out of
                  * shmem_writeout() in a hybrid half-tmpfs-half-swap state
                  * e.g. folio_mapping(folio) might give an unexpected answer.
                  */
-                delete_from_swap_cache(folio);
+                swap_cache_del_folio(folio);
                 goto redirty;
         }
         if (nr_pages > 1)
@@ -2045,7 +2045,7 @@ retry:
         new->swap = entry;
 
         memcg1_swapin(entry, nr_pages);
-        shadow = get_shadow_from_swap_cache(entry);
+        shadow = swap_cache_get_shadow(entry);
         if (shadow)
                 workingset_refault(new, shadow);
         folio_add_lru(new);
@@ -2183,7 +2183,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
         nr_pages = folio_nr_pages(folio);
         folio_wait_writeback(folio);
         if (!skip_swapcache)
-                delete_from_swap_cache(folio);
+                swap_cache_del_folio(folio);
         /*
          * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
          * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
@@ -2422,7 +2422,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
                 folio->swap.val = 0;
                 swapcache_clear(si, swap, nr_pages);
         } else {
-                delete_from_swap_cache(folio);
+                swap_cache_del_folio(folio);
         }
         folio_mark_dirty(folio);
         swap_free_nr(swap, nr_pages);
mm/swap.h (50 changed lines):
@@ -167,17 +167,29 @@ static inline bool folio_matches_swap_entry(const struct folio *folio,
         return folio_entry.val == round_down(entry.val, nr_pages);
 }
 
-void show_swap_cache_info(void);
-void *get_shadow_from_swap_cache(swp_entry_t entry);
-int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
-                      gfp_t gfp, void **shadowp);
-void __delete_from_swap_cache(struct folio *folio,
-                              swp_entry_t entry, void *shadow);
-void delete_from_swap_cache(struct folio *folio);
-void clear_shadow_from_swap_cache(int type, unsigned long begin,
-                                  unsigned long end);
-void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr);
+/*
+ * All swap cache helpers below require the caller to ensure the swap entries
+ * used are valid and stabilize the device in one of the following ways:
+ * - Hold a reference by get_swap_device(): this ensures a single entry is
+ *   valid and increases the swap device's refcount.
+ * - Locking a folio in the swap cache: this ensures the folio's swap entries
+ *   are valid and pinned, and also implies a reference to the device.
+ * - Locking anything referencing the swap entry: e.g. the PTL that protects
+ *   swap entries in the page table, similar to locking the swap cache folio.
+ * - See the comment of get_swap_device() for more complex usage.
+ */
+struct folio *swap_cache_get_folio(swp_entry_t entry);
+void *swap_cache_get_shadow(swp_entry_t entry);
+int swap_cache_add_folio(struct folio *folio, swp_entry_t entry,
+                         gfp_t gfp, void **shadow);
+void swap_cache_del_folio(struct folio *folio);
+void __swap_cache_del_folio(struct folio *folio,
+                            swp_entry_t entry, void *shadow);
+void swap_cache_clear_shadow(int type, unsigned long begin,
+                             unsigned long end);
+
+void show_swap_cache_info(void);
+void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr);
 struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                                     struct vm_area_struct *vma, unsigned long addr,
                                     struct swap_iocb **plug);
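To make the stabilization contract above concrete, here is a minimal sketch
of the first option (pinning the device with get_swap_device()) around a raw
cache lookup. This is illustrative caller code assuming mm-internal headers;
lookup_stable() is a hypothetical name, not part of this patch:

        /* Hedged sketch only; not code from this patch. */
        static struct folio *lookup_stable(swp_entry_t entry)
        {
                struct swap_info_struct *si;
                struct folio *folio;

                si = get_swap_device(entry);    /* validates entry, pins the device */
                if (!si)
                        return NULL;            /* stale entry or device went away */
                folio = swap_cache_get_folio(entry); /* takes a folio reference */
                put_swap_device(si);
                if (!folio)
                        return NULL;

                folio_lock(folio);
                /* Re-check: the folio may have been replaced before we locked it. */
                if (!folio_matches_swap_entry(folio, entry)) {
                        folio_unlock(folio);
                        folio_put(folio);
                        return NULL;
                }
                return folio;   /* locked and referenced; pinned per the second rule */
        }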
@@ -305,28 +317,22 @@ static inline struct folio *swap_cache_get_folio(swp_entry_t entry)
         return NULL;
 }
 
-static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
+static inline void *swap_cache_get_shadow(swp_entry_t entry)
 {
         return NULL;
 }
 
-static inline int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
-                                    gfp_t gfp_mask, void **shadowp)
+static inline int swap_cache_add_folio(struct folio *folio, swp_entry_t entry,
+                                       gfp_t gfp, void **shadow)
 {
-        return -1;
+        return -EINVAL;
 }
 
-static inline void __delete_from_swap_cache(struct folio *folio,
-                                            swp_entry_t entry, void *shadow)
+static inline void swap_cache_del_folio(struct folio *folio)
 {
 }
 
-static inline void delete_from_swap_cache(struct folio *folio)
+static inline void __swap_cache_del_folio(struct folio *folio,
+                                          swp_entry_t entry, void *shadow)
 {
 }
 
-static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
-                                                unsigned long end)
-{
-}
-
mm/swap_state.c:
@@ -78,8 +78,8 @@ void show_swap_cache_info(void)
  * Context: Caller must ensure @entry is valid and protect the swap device
  * with reference count or locks.
  * Return: Returns the found folio on success, NULL otherwise. The caller
  * must lock and check if the folio still matches the swap entry before
- * use (e.g. with folio_matches_swap_entry).
+ * use (e.g., folio_matches_swap_entry).
  */
 struct folio *swap_cache_get_folio(swp_entry_t entry)
 {
@@ -90,7 +90,15 @@ struct folio *swap_cache_get_folio(swp_entry_t entry)
         return folio;
 }
 
-void *get_shadow_from_swap_cache(swp_entry_t entry)
+/**
+ * swap_cache_get_shadow - Looks up a shadow in the swap cache.
+ * @entry: swap entry used for the lookup.
+ *
+ * Context: Caller must ensure @entry is valid and protect the swap device
+ * with reference count or locks.
+ * Return: Returns either NULL or an XA_VALUE (shadow).
+ */
+void *swap_cache_get_shadow(swp_entry_t entry)
 {
         struct address_space *address_space = swap_address_space(entry);
         pgoff_t idx = swap_cache_index(entry);
@@ -102,12 +110,21 @@ void *get_shadow_from_swap_cache(swp_entry_t entry)
         return NULL;
 }
 
-/*
- * add_to_swap_cache resembles filemap_add_folio on swapper_space,
- * but sets SwapCache flag and 'swap' instead of mapping and index.
- */
-int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
-                      gfp_t gfp, void **shadowp)
+/**
+ * swap_cache_add_folio - Add a folio into the swap cache.
+ * @folio: The folio to be added.
+ * @entry: The swap entry corresponding to the folio.
+ * @gfp: gfp_mask for XArray node allocation.
+ * @shadowp: If a shadow is found, return the shadow.
+ *
+ * Context: Caller must ensure @entry is valid and protect the swap device
+ * with reference count or locks.
+ * The caller also needs to mark the corresponding swap_map slots with
+ * SWAP_HAS_CACHE to avoid race or conflict.
+ * Return: Returns 0 on success, error code otherwise.
+ */
+int swap_cache_add_folio(struct folio *folio, swp_entry_t entry,
+                         gfp_t gfp, void **shadowp)
 {
         struct address_space *address_space = swap_address_space(entry);
         pgoff_t idx = swap_cache_index(entry);
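The SWAP_HAS_CACHE rule in the kerneldoc above is easiest to see from the
caller's side. Below is a hedged sketch of the pattern __read_swap_cache_async()
uses (its hunk appears further down); cache_one_folio() is a hypothetical
helper name, not code from this patch:

        /* Illustrative only; modeled on __read_swap_cache_async() below. */
        static int cache_one_folio(struct folio *folio, swp_entry_t entry, gfp_t gfp)
        {
                void *shadow = NULL;
                int err;

                /* Reserve the slot: marks swap_map with SWAP_HAS_CACHE, fails on race. */
                err = swapcache_prepare(entry, 1);
                if (err)
                        return err;

                __folio_set_locked(folio);
                __folio_set_swapbacked(folio);

                /* May fail (-ENOMEM) if XArray node allocation fails. */
                err = swap_cache_add_folio(folio, entry, gfp & GFP_RECLAIM_MASK, &shadow);
                if (err) {
                        put_swap_folio(folio, entry);   /* drops SWAP_HAS_CACHE */
                        folio_unlock(folio);
                        return err;
                }

                if (shadow)
                        workingset_refault(folio, shadow);
                return 0;
        }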
@@ -155,12 +172,20 @@ unlock:
         return xas_error(&xas);
 }
 
-/*
- * This must be called only on folios that have
- * been verified to be in the swap cache.
+/**
+ * __swap_cache_del_folio - Removes a folio from the swap cache.
+ * @folio: The folio.
+ * @entry: The first swap entry that the folio corresponds to.
+ * @shadow: shadow value to be filled in the swap cache.
+ *
+ * Removes a folio from the swap cache and fills a shadow in place.
+ * This won't put the folio's refcount. The caller has to do that.
+ *
+ * Context: Caller must hold the xa_lock, ensure the folio is
+ * locked and in the swap cache, using the index of @entry.
  */
-void __delete_from_swap_cache(struct folio *folio,
-                              swp_entry_t entry, void *shadow)
+void __swap_cache_del_folio(struct folio *folio,
+                            swp_entry_t entry, void *shadow)
 {
         struct address_space *address_space = swap_address_space(entry);
         int i;
@@ -186,27 +211,40 @@ void __delete_from_swap_cache(struct folio *folio,
         __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
 }
 
-/*
- * This must be called only on folios that have
- * been verified to be in the swap cache and locked.
- * It will never put the folio into the free list,
- * the caller has a reference on the folio.
+/**
+ * swap_cache_del_folio - Removes a folio from the swap cache.
+ * @folio: The folio.
+ *
+ * Same as __swap_cache_del_folio, but handles lock and refcount. The
+ * caller must ensure the folio is either clean or has a swap count
+ * equal to zero, or it may cause data loss.
+ *
+ * Context: Caller must ensure the folio is locked and in the swap cache.
  */
-void delete_from_swap_cache(struct folio *folio)
+void swap_cache_del_folio(struct folio *folio)
 {
         swp_entry_t entry = folio->swap;
         struct address_space *address_space = swap_address_space(entry);
 
         xa_lock_irq(&address_space->i_pages);
-        __delete_from_swap_cache(folio, entry, NULL);
+        __swap_cache_del_folio(folio, entry, NULL);
         xa_unlock_irq(&address_space->i_pages);
 
         put_swap_folio(folio, entry);
         folio_ref_sub(folio, folio_nr_pages(folio));
 }
 
-void clear_shadow_from_swap_cache(int type, unsigned long begin,
-                                  unsigned long end)
+/**
+ * swap_cache_clear_shadow - Clears a set of shadows in the swap cache.
+ * @type: Indicates the swap device.
+ * @begin: Beginning offset of the range.
+ * @end: Ending offset of the range.
+ *
+ * Context: Caller must ensure the range is valid and hold a reference to
+ * the swap device.
+ */
+void swap_cache_clear_shadow(int type, unsigned long begin,
+                             unsigned long end)
 {
         unsigned long curr = begin;
         void *old;
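The clean-or-unswapped requirement in swap_cache_del_folio()'s kerneldoc is
exactly the dance folio_free_swap() performs (see its mm/swapfile.c hunk
below). A compressed, hedged sketch of that caller contract, assuming
swapfile.c-internal helpers such as folio_swapped(); drop_swap_copy() is a
hypothetical name:

        /* Illustrative only; mirrors folio_free_swap() as modified below. */
        static bool drop_swap_copy(struct folio *folio)
        {
                VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

                if (folio_swapped(folio))       /* swap count > 0: cache copy still needed */
                        return false;

                swap_cache_del_folio(folio);
                /* The folio is now the only copy of the data; keep it dirty. */
                folio_set_dirty(folio);
                return true;
        }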
@@ -393,7 +431,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                 goto put_and_return;
 
         /*
-         * We might race against __delete_from_swap_cache(), and
+         * We might race against __swap_cache_del_folio(), and
          * stumble across a swap_map entry whose SWAP_HAS_CACHE
          * has not yet been cleared. Or race against another
          * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
@@ -412,7 +450,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                 goto fail_unlock;
 
         /* May fail (-ENOMEM) if XArray node allocation failed. */
-        if (add_to_swap_cache(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
+        if (swap_cache_add_folio(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
                 goto fail_unlock;
 
         memcg1_swapin(entry, 1);
mm/swapfile.c:
@@ -267,7 +267,7 @@ again:
         if (!need_reclaim)
                 goto out_unlock;
 
-        delete_from_swap_cache(folio);
+        swap_cache_del_folio(folio);
         folio_set_dirty(folio);
         ret = nr_pages;
 out_unlock:
@@ -1124,7 +1124,7 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
                         swap_slot_free_notify(si->bdev, offset);
                 offset++;
         }
-        clear_shadow_from_swap_cache(si->type, begin, end);
+        swap_cache_clear_shadow(si->type, begin, end);
 
         /*
          * Make sure that try_to_unuse() observes si->inuse_pages reaching 0
@@ -1289,7 +1289,7 @@ int folio_alloc_swap(struct folio *folio, gfp_t gfp)
          * TODO: this could cause a theoretical memory reclaim
          * deadlock in the swap out path.
          */
-        if (add_to_swap_cache(folio, entry, gfp | __GFP_NOMEMALLOC, NULL))
+        if (swap_cache_add_folio(folio, entry, gfp | __GFP_NOMEMALLOC, NULL))
                 goto out_free;
 
         return 0;
@@ -1759,7 +1759,7 @@ bool folio_free_swap(struct folio *folio)
         if (folio_swapped(folio))
                 return false;
 
-        delete_from_swap_cache(folio);
+        swap_cache_del_folio(folio);
         folio_set_dirty(folio);
         return true;
 }
mm/vmscan.c:
@@ -776,7 +776,7 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
 
                 if (reclaimed && !mapping_exiting(mapping))
                         shadow = workingset_eviction(folio, target_memcg);
-                __delete_from_swap_cache(folio, swap, shadow);
+                __swap_cache_del_folio(folio, swap, shadow);
                 memcg1_swapout(folio, swap);
                 xa_unlock_irq(&mapping->i_pages);
                 put_swap_folio(folio, swap);
mm/zswap.c:
@@ -1069,7 +1069,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 
 out:
         if (ret && ret != -EEXIST) {
-                delete_from_swap_cache(folio);
+                swap_cache_del_folio(folio);
                 folio_unlock(folio);
         }
         folio_put(folio);