mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git/
synced 2026-04-05 00:07:48 -04:00
fs: cosmetic fixes to lru handling
1. inode_bit_waitqueue() was somehow placed between __inode_add_lru() and inode_add_lru(); move it up.
2. Assert that ->i_lock is held in __inode_add_lru() instead of just claiming it is needed.
3. Rename __inode_add_lru() to __inode_lru_list_add() for consistency with itself (inode_lru_list_del()) and with the similar routines for sb and io list management.
4. Push the list-presence check into inode_lru_list_del(), just like the sb and io lists.

Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
Link: https://patch.msgid.link/20251029131428.654761-2-mjguzik@gmail.com
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Christian Brauner <brauner@kernel.org>
This commit is contained in:
Committed by: Christian Brauner
Parent: a27628f436
Commit: 4c6b40877b
@@ -256,7 +256,7 @@ void filemap_remove_folio(struct folio *folio)
 	__filemap_remove_folio(folio, NULL);
 	xa_unlock_irq(&mapping->i_pages);
 	if (mapping_shrinkable(mapping))
-		inode_add_lru(mapping->host);
+		inode_lru_list_add(mapping->host);
 	spin_unlock(&mapping->host->i_lock);

 	filemap_free_folio(mapping, folio);
@@ -335,7 +335,7 @@ void delete_from_page_cache_batch(struct address_space *mapping,
 	page_cache_delete_batch(mapping, fbatch);
 	xa_unlock_irq(&mapping->i_pages);
 	if (mapping_shrinkable(mapping))
-		inode_add_lru(mapping->host);
+		inode_lru_list_add(mapping->host);
 	spin_unlock(&mapping->host->i_lock);

 	for (i = 0; i < folio_batch_count(fbatch); i++)
Reference in New Issue
Block a user