Recent changes require the raw folio flags to be accessed via ".f". The
merge commit that introduced this change adapted most architecture code
but overlooked csky's abiv2.
[rppt@kernel.org: add fix for arch/csky/abiv2/cacheflush.c]
Link: https://lkml.kernel.org/r/aPCE238oxAB9QcZa@kernel.org
Fixes: 53fbef56e0 ("mm: introduce memdesc_flags_t")
Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: Guo Ren <guoren@kernel.org>
Acked-by: Zi Yan <ziy@nvidia.com>
Cc: Guo Ren <guoren@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
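
For reference, the shape of the fix in the header below: with memdesc_flags_t,
the raw unsigned long flags word is reached through the struct member named f,
so the bit helpers take &folio->flags.f rather than &folio->flags. A minimal
sketch of the change (the removed lines are assumed pre-change code; the added
lines match flush_dcache_folio() in the file):

 static inline void flush_dcache_folio(struct folio *folio)
 {
-	if (test_bit(PG_dcache_clean, &folio->flags))
-		clear_bit(PG_dcache_clean, &folio->flags);
+	if (test_bit(PG_dcache_clean, &folio->flags.f))
+		clear_bit(PG_dcache_clean, &folio->flags.f);
 }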
62 lines
1.9 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ABI_CSKY_CACHEFLUSH_H
#define __ABI_CSKY_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>

/*
 * The cache doesn't need to be flushed when TLB entries change when
 * the cache is mapped to physical memory, not virtual memory
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)

#define PG_dcache_clean		PG_arch_1

static inline void flush_dcache_folio(struct folio *folio)
{
	if (test_bit(PG_dcache_clean, &folio->flags.f))
		clear_bit(PG_dcache_clean, &folio->flags.f);
}
#define flush_dcache_folio flush_dcache_folio

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#define flush_icache_range(start, end)		cache_wbinv_range(start, end)

void flush_icache_mm_range(struct mm_struct *mm,
			   unsigned long start, unsigned long end);
void flush_icache_deferred(struct mm_struct *mm);

#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vmap_early(start, end)	do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
	if (vma->vm_flags & VM_EXEC) { \
		dcache_wb_range((unsigned long)dst, \
				(unsigned long)dst + len); \
		flush_icache_mm_range(current->mm, \
				(unsigned long)dst, \
				(unsigned long)dst + len); \
	} \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif /* __ABI_CSKY_CACHEFLUSH_H */