mm: remove redundant __GFP_NOWARN
Commit 16f5dfbc85 ("gfp: include __GFP_NOWARN in GFP_NOWAIT") made
GFP_NOWAIT implicitly include __GFP_NOWARN.  Explicitly combining the
two (e.g. GFP_NOWAIT | __GFP_NOWARN) is therefore redundant, so clean
up these redundant flags in mm.
No functional changes.
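Since that change, __GFP_NOWARN is one of the bits that GFP_NOWAIT
expands to, so OR-ing it in again cannot alter the resulting mask.  A
minimal userspace sketch of that argument (the bit values below are
made up for illustration; only the relationship between the macros
mirrors the kernel's include/linux/gfp_types.h):

	#include <assert.h>
	#include <stdio.h>

	/* Illustrative stand-ins for the kernel's gfp bits. */
	#define __GFP_KSWAPD_RECLAIM	0x01u
	#define __GFP_NOWARN		0x02u

	/* Post-16f5dfbc85 shape: GFP_NOWAIT already includes __GFP_NOWARN. */
	#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM | __GFP_NOWARN)

	int main(void)
	{
		/* OR-ing in a bit that is already set is a no-op. */
		assert((GFP_NOWAIT | __GFP_NOWARN) == GFP_NOWAIT);
		printf("GFP_NOWAIT | __GFP_NOWARN == GFP_NOWAIT\n");
		return 0;
	}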
Link: https://lkml.kernel.org/r/20250812135225.274316-1-rongqianfeng@vivo.com
Signed-off-by: Qianfeng Rong <rongqianfeng@vivo.com>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Acked-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 8166353fb8
commit adf085ff0d
--- a/mm/damon/ops-common.c
+++ b/mm/damon/ops-common.c
@@ -303,7 +303,7 @@ static unsigned int __damon_migrate_folio_list(
 		 * instead of migrated.
 		 */
 		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
-			__GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT,
+			__GFP_NOMEMALLOC | GFP_NOWAIT,
 		.nid = target_nid,
 	};
 
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1961,7 +1961,7 @@ no_page:
 			gfp &= ~__GFP_FS;
 		if (fgp_flags & FGP_NOWAIT) {
 			gfp &= ~GFP_KERNEL;
-			gfp |= GFP_NOWAIT | __GFP_NOWARN;
+			gfp |= GFP_NOWAIT;
 		}
 		if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
 			fgp_flags |= FGP_LOCK;
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -32,7 +32,7 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
 	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
 		return false;
 
-	batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+	batch = (void *)__get_free_page(GFP_NOWAIT);
 	if (!batch)
 		return false;
 
@@ -364,7 +364,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 	struct mmu_table_batch **batch = &tlb->batch;
 
 	if (*batch == NULL) {
-		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT);
 		if (*batch == NULL) {
 			tlb_table_invalidate(tlb);
 			tlb_remove_table_one(table);
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -285,7 +285,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
 	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
 		struct anon_vma *anon_vma;
 
-		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
+		avc = anon_vma_chain_alloc(GFP_NOWAIT);
 		if (unlikely(!avc)) {
 			unlock_anon_vma_root(root);
 			root = NULL;
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -5196,7 +5196,7 @@ static void vmap_init_nodes(void)
 	int n = clamp_t(unsigned int, num_possible_cpus(), 1, 128);
 
 	if (n > 1) {
-		vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN);
+		vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT);
 		if (vn) {
 			/* Node partition is 16 pages. */
 			vmap_zone_size = (1 << 4) * PAGE_SIZE;