mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
synced 2026-04-18 03:23:53 -04:00
mm/shmem: never bypass the swap cache for SWP_SYNCHRONOUS_IO
Now the overhead of the swap cache is trivial to none, bypassing the swap cache is no longer a good optimization. We have removed the cache bypass swapin for anon memory, now do the same for shmem. Many helpers and functions can be dropped now. The performance may slightly drop because of the co-existence and double update of swap_map and swap table, and this problem will be improved very soon in later commits by dropping the swap_map update partially: Swapin of 24 GB file with tmpfs with transparent_hugepage_tmpfs=within_size and ZRAM, 3 test runs on my machine: Before: After this commit: After this series: 5.99s 6.29s 6.08s And later swap table phases will drop the swap_map completely to avoid overhead and reduce memory usage. Link: https://lkml.kernel.org/r/20251219195751.61328-1-ryncsn@gmail.com Signed-off-by: Kairui Song <kasong@tencent.com> Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com> Tested-by: Baolin Wang <baolin.wang@linux.alibaba.com> Cc: Baoquan He <bhe@redhat.com> Cc: Barry Song <baohua@kernel.org> Cc: Chris Li <chrisl@kernel.org> Cc: Nhat Pham <nphamcs@gmail.com> Cc: Rafael J. Wysocki (Intel) <rafael@kernel.org> Cc: Yosry Ahmed <yosry.ahmed@linux.dev> Cc: Deepanshu Kartikey <kartikey406@gmail.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
committed by
Andrew Morton
parent
4b34f1d82c
commit
c246d236b1
@@ -1614,22 +1614,6 @@ put_out:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
 * Drop the swap-cache (SWAP_HAS_CACHE) reference on @nr contiguous
 * swap entries starting at @entry.
 *
 * If the cache holds the only remaining reference to all @nr entries,
 * free them outright in one call; otherwise drop the SWAP_HAS_CACHE
 * count on each entry individually, all under a single cluster lock.
 *
 * NOTE(review): a single swap_cluster_lock() covers the whole range,
 * so callers are presumably expected to keep the entries within one
 * cluster (cf. the comment on swapcache_clear()) — confirm at call
 * sites.
 */
static void swap_entries_put_cache(struct swap_info_struct *si,
				   swp_entry_t entry, int nr)
{
	unsigned long offset = swp_offset(entry);
	struct swap_cluster_info *ci;

	ci = swap_cluster_lock(si, offset);
	if (swap_only_has_cache(si, offset, nr)) {
		/* Cache was the last reference: release all entries at once. */
		swap_entries_free(si, ci, entry, nr);
	} else {
		/* Some entries are still in use: drop only the cache bit. */
		for (int i = 0; i < nr; i++, entry.val++)
			swap_entry_put_locked(si, ci, entry, SWAP_HAS_CACHE);
	}
	swap_cluster_unlock(ci);
}
|
||||
|
||||
static bool swap_entries_put_map(struct swap_info_struct *si,
|
||||
swp_entry_t entry, int nr)
|
||||
{
|
||||
@@ -1765,13 +1749,21 @@ void swap_free_nr(swp_entry_t entry, int nr_pages)
|
||||
/*
 * put_swap_folio() - drop the swap-cache references held by @folio.
 * @folio: folio being removed from the swap cache.
 * @entry: first swap entry covered by the folio.
 *
 * Releases the SWAP_HAS_CACHE reference on every swap entry backing
 * @folio. If the cache held the last reference to all the entries,
 * they are freed outright; otherwise only the cache bit is dropped on
 * each entry, under a single cluster lock.
 *
 * NOTE(review): the scraped diff view showed both the old
 * swap_entries_put_cache() call and its inlined replacement; keeping
 * both would drop the cache reference twice (a double put), so only
 * the inlined post-commit form is retained here.
 */
void put_swap_folio(struct folio *folio, swp_entry_t entry)
{
	struct swap_info_struct *si;
	struct swap_cluster_info *ci;
	unsigned long offset = swp_offset(entry);
	int size = 1 << swap_entry_order(folio_order(folio));

	si = _swap_info_get(entry);
	if (!si)
		return;

	ci = swap_cluster_lock(si, offset);
	if (swap_only_has_cache(si, offset, size))
		/* Cache was the last reference: free the whole range. */
		swap_entries_free(si, ci, entry, size);
	else
		for (int i = 0; i < size; i++, entry.val++)
			swap_entry_put_locked(si, ci, entry, SWAP_HAS_CACHE);
	swap_cluster_unlock(ci);
}
|
||||
|
||||
int __swap_count(swp_entry_t entry)
|
||||
@@ -3784,15 +3776,6 @@ int swapcache_prepare(swp_entry_t entry, int nr)
|
||||
return __swap_duplicate(entry, SWAP_HAS_CACHE, nr);
|
||||
}
|
||||
|
||||
/*
 * swapcache_clear() - drop the swap-cache reference on @nr entries.
 *
 * Thin wrapper around swap_entries_put_cache(): releases the
 * SWAP_HAS_CACHE count on @nr contiguous entries starting at @entry,
 * freeing them if the cache held the last reference.
 *
 * Caller should ensure entries belong to the same folio so
 * the entries won't span cross cluster boundary.
 */
void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr)
{
	swap_entries_put_cache(si, entry, nr);
}
|
||||
|
||||
/*
|
||||
* add_swap_count_continuation - called when a swap count is duplicated
|
||||
* beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
|
||||
|
||||
Reference in New Issue
Block a user