s390 updates for 6.18 merge window
- Refactor SCLP memory hotplug code - Introduce common boot_panic() decompressor helper macro and use it to get rid of nearly few identical implementations - Take into account additional key generation flags and forward it to the ep11 implementation. With that allow users to modify the key generation process, e.g. provide valid combinations of XCP_BLOB_* flags - Replace kmalloc() + copy_from_user() with memdup_user_nul() in s390 debug facility and HMC driver - Add DAX support for DCSS memory block devices - Make the compiler statement attribute "assume" available with a new __assume macro - Rework ffs() and fls() family bitops functions, including source code improvements and generated code optimizations. Use the newly introduced __assume macro for that - Enable additional network features in default configurations - Use __GFP_ACCOUNT flag for user page table allocations to add missing kmemcg accounting - Add WQ_PERCPU flag to explicitly request the use of the per-CPU workqueue for 3590 tape driver - Switch power reading to the per-CPU and the Hiperdispatch to the default workqueue - Add memory allocation profiling hooks to allow better profiling data and the /proc/allocinfo output similar to other architectures -----BEGIN PGP SIGNATURE----- iI0EABYKADUWIQQrtrZiYVkVzKQcYivNdxKlNrRb8AUCaNpOnhccYWdvcmRlZXZA bGludXguaWJtLmNvbQAKCRDNdxKlNrRb8LJ0AP98TkWDCXLb02dNTST36dtoNaM+ I9HoosjbZIm8oHwhngD+JisTRWFogplXnE2z+JQrJJcshWvUpFDVtkk2pCSeOQM= =Cztn -----END PGP SIGNATURE----- Merge tag 's390-6.18-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux Pull s390 updates from Alexander Gordeev: - Refactor SCLP memory hotplug code - Introduce common boot_panic() decompressor helper macro and use it to get rid of nearly few identical implementations - Take into account additional key generation flags and forward it to the ep11 implementation. With that allow users to modify the key generation process, e.g. 
provide valid combinations of XCP_BLOB_* flags - Replace kmalloc() + copy_from_user() with memdup_user_nul() in s390 debug facility and HMC driver - Add DAX support for DCSS memory block devices - Make the compiler statement attribute "assume" available with a new __assume macro - Rework ffs() and fls() family bitops functions, including source code improvements and generated code optimizations. Use the newly introduced __assume macro for that - Enable additional network features in default configurations - Use __GFP_ACCOUNT flag for user page table allocations to add missing kmemcg accounting - Add WQ_PERCPU flag to explicitly request the use of the per-CPU workqueue for 3590 tape driver - Switch power reading to the per-CPU and the Hiperdispatch to the default workqueue - Add memory allocation profiling hooks to allow better profiling data and the /proc/allocinfo output similar to other architectures * tag 's390-6.18-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (21 commits) s390/mm: Add memory allocation profiling hooks s390: Replace use of system_wq with system_dfl_wq s390/diag324: Replace use of system_wq with system_percpu_wq s390/tape: Add WQ_PERCPU to alloc_workqueue users s390/bitops: Switch to generic ffs() if supported by compiler s390/bitops: Switch to generic fls(), fls64(), etc. 
s390/mm: Use __GFP_ACCOUNT for user page table allocations s390/configs: Enable additional network features s390/bitops: Cleanup __flogr() s390/bitops: Use __assume() for __flogr() inline assembly return value compiler_types: Add __assume macro s390/bitops: Limit return value range of __flogr() s390/dcssblk: Add DAX support s390/hmcdrv: Replace kmalloc() + copy_from_user() with memdup_user_nul() s390/debug: Replace kmalloc() + copy_from_user() with memdup_user_nul() s390/pkey: Forward keygenflags to ep11_unwrapkey s390/boot: Add common boot_panic() code s390/bitops: Optimize inlining s390/bitops: Slightly optimize ffs() and fls64() s390/sclp: Move memory hotplug code for better modularity ...
This commit is contained in:
commit
9cc220a422
|
@ -49,6 +49,13 @@ config KASAN_SHADOW_OFFSET
|
||||||
depends on KASAN
|
depends on KASAN
|
||||||
default 0x1C000000000000
|
default 0x1C000000000000
|
||||||
|
|
||||||
|
config CC_HAS_BUILTIN_FFS
|
||||||
|
def_bool !(CC_IS_GCC && GCC_VERSION < 160000)
|
||||||
|
help
|
||||||
|
GCC versions before 16.0.0 generate library calls to ffs()
|
||||||
|
for __builtin_ffs() even when __has_builtin(__builtin_ffs)
|
||||||
|
is true.
|
||||||
|
|
||||||
config CC_ASM_FLAG_OUTPUT_BROKEN
|
config CC_ASM_FLAG_OUTPUT_BROKEN
|
||||||
def_bool CC_IS_GCC && GCC_VERSION < 140200
|
def_bool CC_IS_GCC && GCC_VERSION < 140200
|
||||||
help
|
help
|
||||||
|
|
|
@ -10,6 +10,7 @@
|
||||||
|
|
||||||
#include <linux/printk.h>
|
#include <linux/printk.h>
|
||||||
#include <asm/physmem_info.h>
|
#include <asm/physmem_info.h>
|
||||||
|
#include <asm/stacktrace.h>
|
||||||
|
|
||||||
struct vmlinux_info {
|
struct vmlinux_info {
|
||||||
unsigned long entry;
|
unsigned long entry;
|
||||||
|
@ -89,6 +90,13 @@ void __noreturn jump_to_kernel(psw_t *psw);
|
||||||
#define boot_info(fmt, ...) boot_printk(KERN_INFO boot_fmt(fmt), ##__VA_ARGS__)
|
#define boot_info(fmt, ...) boot_printk(KERN_INFO boot_fmt(fmt), ##__VA_ARGS__)
|
||||||
#define boot_debug(fmt, ...) boot_printk(KERN_DEBUG boot_fmt(fmt), ##__VA_ARGS__)
|
#define boot_debug(fmt, ...) boot_printk(KERN_DEBUG boot_fmt(fmt), ##__VA_ARGS__)
|
||||||
|
|
||||||
|
#define boot_panic(...) do { \
|
||||||
|
boot_emerg(__VA_ARGS__); \
|
||||||
|
print_stacktrace(current_frame_address()); \
|
||||||
|
boot_emerg(" -- System halted\n"); \
|
||||||
|
disabled_wait(); \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
extern struct machine_info machine;
|
extern struct machine_info machine;
|
||||||
extern int boot_console_loglevel;
|
extern int boot_console_loglevel;
|
||||||
extern bool boot_ignore_loglevel;
|
extern bool boot_ignore_loglevel;
|
||||||
|
|
|
@ -68,9 +68,7 @@ static void decompress_error(char *m)
|
||||||
{
|
{
|
||||||
if (bootdebug)
|
if (bootdebug)
|
||||||
boot_rb_dump();
|
boot_rb_dump();
|
||||||
boot_emerg("Decompression error: %s\n", m);
|
boot_panic("Decompression error: %s\n", m);
|
||||||
boot_emerg(" -- System halted\n");
|
|
||||||
disabled_wait();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
unsigned long mem_safe_offset(void)
|
unsigned long mem_safe_offset(void)
|
||||||
|
|
|
@ -228,9 +228,7 @@ static void die_oom(unsigned long size, unsigned long align, unsigned long min,
|
||||||
boot_emerg("Usable online memory total: %lu Reserved: %lu Free: %lu\n",
|
boot_emerg("Usable online memory total: %lu Reserved: %lu Free: %lu\n",
|
||||||
total_mem, total_reserved_mem,
|
total_mem, total_reserved_mem,
|
||||||
total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
|
total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
|
||||||
print_stacktrace(current_frame_address());
|
boot_panic("Oom\n");
|
||||||
boot_emerg(" -- System halted\n");
|
|
||||||
disabled_wait();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void _physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
|
static void _physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
|
||||||
|
|
|
@ -44,13 +44,6 @@ u64 __bootdata_preserved(clock_comparator_max) = -1UL;
|
||||||
u64 __bootdata_preserved(stfle_fac_list[16]);
|
u64 __bootdata_preserved(stfle_fac_list[16]);
|
||||||
struct oldmem_data __bootdata_preserved(oldmem_data);
|
struct oldmem_data __bootdata_preserved(oldmem_data);
|
||||||
|
|
||||||
void error(char *x)
|
|
||||||
{
|
|
||||||
boot_emerg("%s\n", x);
|
|
||||||
boot_emerg(" -- System halted\n");
|
|
||||||
disabled_wait();
|
|
||||||
}
|
|
||||||
|
|
||||||
static char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
|
static char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
|
||||||
|
|
||||||
static void detect_machine_type(void)
|
static void detect_machine_type(void)
|
||||||
|
@ -220,10 +213,10 @@ static void rescue_initrd(unsigned long min, unsigned long max)
|
||||||
static void copy_bootdata(void)
|
static void copy_bootdata(void)
|
||||||
{
|
{
|
||||||
if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
|
if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
|
||||||
error(".boot.data section size mismatch");
|
boot_panic(".boot.data section size mismatch\n");
|
||||||
memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
|
memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
|
||||||
if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
|
if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
|
||||||
error(".boot.preserved.data section size mismatch");
|
boot_panic(".boot.preserved.data section size mismatch\n");
|
||||||
memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
|
memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -237,7 +230,7 @@ static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
|
||||||
for (reloc = (int *)__vmlinux_relocs_64_start; reloc < (int *)__vmlinux_relocs_64_end; reloc++) {
|
for (reloc = (int *)__vmlinux_relocs_64_start; reloc < (int *)__vmlinux_relocs_64_end; reloc++) {
|
||||||
loc = (long)*reloc + phys_offset;
|
loc = (long)*reloc + phys_offset;
|
||||||
if (loc < min_addr || loc > max_addr)
|
if (loc < min_addr || loc > max_addr)
|
||||||
error("64-bit relocation outside of kernel!\n");
|
boot_panic("64-bit relocation outside of kernel!\n");
|
||||||
*(u64 *)loc += offset;
|
*(u64 *)loc += offset;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -118,8 +118,13 @@ CONFIG_PACKET=y
|
||||||
CONFIG_PACKET_DIAG=m
|
CONFIG_PACKET_DIAG=m
|
||||||
CONFIG_UNIX=y
|
CONFIG_UNIX=y
|
||||||
CONFIG_UNIX_DIAG=m
|
CONFIG_UNIX_DIAG=m
|
||||||
|
CONFIG_TLS=m
|
||||||
|
CONFIG_TLS_DEVICE=y
|
||||||
|
CONFIG_TLS_TOE=y
|
||||||
CONFIG_XFRM_USER=m
|
CONFIG_XFRM_USER=m
|
||||||
CONFIG_NET_KEY=m
|
CONFIG_NET_KEY=m
|
||||||
|
CONFIG_XDP_SOCKETS=y
|
||||||
|
CONFIG_XDP_SOCKETS_DIAG=m
|
||||||
CONFIG_SMC_DIAG=m
|
CONFIG_SMC_DIAG=m
|
||||||
CONFIG_SMC_LO=y
|
CONFIG_SMC_LO=y
|
||||||
CONFIG_INET=y
|
CONFIG_INET=y
|
||||||
|
@ -542,6 +547,7 @@ CONFIG_NLMON=m
|
||||||
CONFIG_MLX4_EN=m
|
CONFIG_MLX4_EN=m
|
||||||
CONFIG_MLX5_CORE=m
|
CONFIG_MLX5_CORE=m
|
||||||
CONFIG_MLX5_CORE_EN=y
|
CONFIG_MLX5_CORE_EN=y
|
||||||
|
CONFIG_MLX5_SF=y
|
||||||
# CONFIG_NET_VENDOR_META is not set
|
# CONFIG_NET_VENDOR_META is not set
|
||||||
# CONFIG_NET_VENDOR_MICREL is not set
|
# CONFIG_NET_VENDOR_MICREL is not set
|
||||||
# CONFIG_NET_VENDOR_MICROCHIP is not set
|
# CONFIG_NET_VENDOR_MICROCHIP is not set
|
||||||
|
|
|
@ -109,8 +109,13 @@ CONFIG_PACKET=y
|
||||||
CONFIG_PACKET_DIAG=m
|
CONFIG_PACKET_DIAG=m
|
||||||
CONFIG_UNIX=y
|
CONFIG_UNIX=y
|
||||||
CONFIG_UNIX_DIAG=m
|
CONFIG_UNIX_DIAG=m
|
||||||
|
CONFIG_TLS=m
|
||||||
|
CONFIG_TLS_DEVICE=y
|
||||||
|
CONFIG_TLS_TOE=y
|
||||||
CONFIG_XFRM_USER=m
|
CONFIG_XFRM_USER=m
|
||||||
CONFIG_NET_KEY=m
|
CONFIG_NET_KEY=m
|
||||||
|
CONFIG_XDP_SOCKETS=y
|
||||||
|
CONFIG_XDP_SOCKETS_DIAG=m
|
||||||
CONFIG_SMC_DIAG=m
|
CONFIG_SMC_DIAG=m
|
||||||
CONFIG_SMC_LO=y
|
CONFIG_SMC_LO=y
|
||||||
CONFIG_INET=y
|
CONFIG_INET=y
|
||||||
|
@ -532,6 +537,7 @@ CONFIG_NLMON=m
|
||||||
CONFIG_MLX4_EN=m
|
CONFIG_MLX4_EN=m
|
||||||
CONFIG_MLX5_CORE=m
|
CONFIG_MLX5_CORE=m
|
||||||
CONFIG_MLX5_CORE_EN=y
|
CONFIG_MLX5_CORE_EN=y
|
||||||
|
CONFIG_MLX5_SF=y
|
||||||
# CONFIG_NET_VENDOR_META is not set
|
# CONFIG_NET_VENDOR_META is not set
|
||||||
# CONFIG_NET_VENDOR_MICREL is not set
|
# CONFIG_NET_VENDOR_MICREL is not set
|
||||||
# CONFIG_NET_VENDOR_MICROCHIP is not set
|
# CONFIG_NET_VENDOR_MICROCHIP is not set
|
||||||
|
|
|
@ -122,6 +122,8 @@ static inline bool test_bit_inv(unsigned long nr,
|
||||||
return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
|
return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifndef CONFIG_CC_HAS_BUILTIN_FFS
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* __flogr - find leftmost one
|
* __flogr - find leftmost one
|
||||||
* @word - The word to search
|
* @word - The word to search
|
||||||
|
@ -130,11 +132,12 @@ static inline bool test_bit_inv(unsigned long nr,
|
||||||
* where the most significant bit has bit number 0.
|
* where the most significant bit has bit number 0.
|
||||||
* If no bit is set this function returns 64.
|
* If no bit is set this function returns 64.
|
||||||
*/
|
*/
|
||||||
static inline unsigned char __flogr(unsigned long word)
|
static __always_inline __attribute_const__ unsigned long __flogr(unsigned long word)
|
||||||
{
|
{
|
||||||
if (__builtin_constant_p(word)) {
|
unsigned long bit;
|
||||||
unsigned long bit = 0;
|
|
||||||
|
|
||||||
|
if (__builtin_constant_p(word)) {
|
||||||
|
bit = 0;
|
||||||
if (!word)
|
if (!word)
|
||||||
return 64;
|
return 64;
|
||||||
if (!(word & 0xffffffff00000000UL)) {
|
if (!(word & 0xffffffff00000000UL)) {
|
||||||
|
@ -163,27 +166,22 @@ static inline unsigned char __flogr(unsigned long word)
|
||||||
}
|
}
|
||||||
return bit;
|
return bit;
|
||||||
} else {
|
} else {
|
||||||
union register_pair rp;
|
union register_pair rp __uninitialized;
|
||||||
|
|
||||||
rp.even = word;
|
rp.even = word;
|
||||||
asm volatile(
|
asm("flogr %[rp],%[rp]"
|
||||||
" flogr %[rp],%[rp]\n"
|
: [rp] "+d" (rp.pair) : : "cc");
|
||||||
: [rp] "+d" (rp.pair) : : "cc");
|
bit = rp.even;
|
||||||
return rp.even;
|
/*
|
||||||
|
* The result of the flogr instruction is a value in the range
|
||||||
|
* of 0..64. Let the compiler know that the AND operation can
|
||||||
|
* be optimized away.
|
||||||
|
*/
|
||||||
|
__assume(bit <= 64);
|
||||||
|
return bit & 127;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* __ffs - find first bit in word.
|
|
||||||
* @word: The word to search
|
|
||||||
*
|
|
||||||
* Undefined if no bit exists, so code should check against 0 first.
|
|
||||||
*/
|
|
||||||
static inline __attribute_const__ unsigned long __ffs(unsigned long word)
|
|
||||||
{
|
|
||||||
return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ffs - find first bit set
|
* ffs - find first bit set
|
||||||
* @word: the word to search
|
* @word: the word to search
|
||||||
|
@ -191,58 +189,26 @@ static inline __attribute_const__ unsigned long __ffs(unsigned long word)
|
||||||
* This is defined the same way as the libc and
|
* This is defined the same way as the libc and
|
||||||
* compiler builtin ffs routines (man ffs).
|
* compiler builtin ffs routines (man ffs).
|
||||||
*/
|
*/
|
||||||
static inline __attribute_const__ int ffs(int word)
|
static __always_inline __flatten __attribute_const__ int ffs(int word)
|
||||||
{
|
{
|
||||||
unsigned long mask = 2 * BITS_PER_LONG - 1;
|
|
||||||
unsigned int val = (unsigned int)word;
|
unsigned int val = (unsigned int)word;
|
||||||
|
|
||||||
return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask;
|
return BITS_PER_LONG - __flogr(-val & val);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
#else /* CONFIG_CC_HAS_BUILTIN_FFS */
|
||||||
* __fls - find last (most-significant) set bit in a long word
|
|
||||||
* @word: the word to search
|
|
||||||
*
|
|
||||||
* Undefined if no set bit exists, so code should check against 0 first.
|
|
||||||
*/
|
|
||||||
static inline __attribute_const__ unsigned long __fls(unsigned long word)
|
|
||||||
{
|
|
||||||
return __flogr(word) ^ (BITS_PER_LONG - 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
#include <asm-generic/bitops/builtin-ffs.h>
|
||||||
* fls64 - find last set bit in a 64-bit word
|
|
||||||
* @word: the word to search
|
|
||||||
*
|
|
||||||
* This is defined in a similar way as the libc and compiler builtin
|
|
||||||
* ffsll, but returns the position of the most significant set bit.
|
|
||||||
*
|
|
||||||
* fls64(value) returns 0 if value is 0 or the position of the last
|
|
||||||
* set bit if value is nonzero. The last (most significant) bit is
|
|
||||||
* at position 64.
|
|
||||||
*/
|
|
||||||
static inline __attribute_const__ int fls64(unsigned long word)
|
|
||||||
{
|
|
||||||
unsigned long mask = 2 * BITS_PER_LONG - 1;
|
|
||||||
|
|
||||||
return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask;
|
#endif /* CONFIG_CC_HAS_BUILTIN_FFS */
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* fls - find last (most-significant) bit set
|
|
||||||
* @word: the word to search
|
|
||||||
*
|
|
||||||
* This is defined the same way as ffs.
|
|
||||||
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
|
|
||||||
*/
|
|
||||||
static inline __attribute_const__ int fls(unsigned int word)
|
|
||||||
{
|
|
||||||
return fls64(word);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
#include <asm-generic/bitops/builtin-__ffs.h>
|
||||||
|
#include <asm-generic/bitops/ffz.h>
|
||||||
|
#include <asm-generic/bitops/builtin-__fls.h>
|
||||||
|
#include <asm-generic/bitops/builtin-fls.h>
|
||||||
|
#include <asm-generic/bitops/fls64.h>
|
||||||
#include <asm/arch_hweight.h>
|
#include <asm/arch_hweight.h>
|
||||||
#include <asm-generic/bitops/const_hweight.h>
|
#include <asm-generic/bitops/const_hweight.h>
|
||||||
#include <asm-generic/bitops/ffz.h>
|
|
||||||
#include <asm-generic/bitops/sched.h>
|
#include <asm-generic/bitops/sched.h>
|
||||||
#include <asm-generic/bitops/le.h>
|
#include <asm-generic/bitops/le.h>
|
||||||
#include <asm-generic/bitops/ext2-atomic-setbit.h>
|
#include <asm-generic/bitops/ext2-atomic-setbit.h>
|
||||||
|
|
|
@ -19,12 +19,16 @@
|
||||||
|
|
||||||
#define CRST_ALLOC_ORDER 2
|
#define CRST_ALLOC_ORDER 2
|
||||||
|
|
||||||
unsigned long *crst_table_alloc(struct mm_struct *);
|
unsigned long *crst_table_alloc_noprof(struct mm_struct *);
|
||||||
|
#define crst_table_alloc(...) alloc_hooks(crst_table_alloc_noprof(__VA_ARGS__))
|
||||||
void crst_table_free(struct mm_struct *, unsigned long *);
|
void crst_table_free(struct mm_struct *, unsigned long *);
|
||||||
|
|
||||||
unsigned long *page_table_alloc(struct mm_struct *);
|
unsigned long *page_table_alloc_noprof(struct mm_struct *);
|
||||||
struct ptdesc *page_table_alloc_pgste(struct mm_struct *mm);
|
#define page_table_alloc(...) alloc_hooks(page_table_alloc_noprof(__VA_ARGS__))
|
||||||
void page_table_free(struct mm_struct *, unsigned long *);
|
void page_table_free(struct mm_struct *, unsigned long *);
|
||||||
|
|
||||||
|
struct ptdesc *page_table_alloc_pgste_noprof(struct mm_struct *mm);
|
||||||
|
#define page_table_alloc_pgste(...) alloc_hooks(page_table_alloc_pgste_noprof(__VA_ARGS__))
|
||||||
void page_table_free_pgste(struct ptdesc *ptdesc);
|
void page_table_free_pgste(struct ptdesc *ptdesc);
|
||||||
|
|
||||||
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
|
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
|
||||||
|
@ -48,9 +52,9 @@ static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long
|
||||||
return addr;
|
return addr;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
|
static inline p4d_t *p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long address)
|
||||||
{
|
{
|
||||||
unsigned long *table = crst_table_alloc(mm);
|
unsigned long *table = crst_table_alloc_noprof(mm);
|
||||||
|
|
||||||
if (!table)
|
if (!table)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
@ -59,6 +63,7 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
|
||||||
|
|
||||||
return (p4d_t *) table;
|
return (p4d_t *) table;
|
||||||
}
|
}
|
||||||
|
#define p4d_alloc_one(...) alloc_hooks(p4d_alloc_one_noprof(__VA_ARGS__))
|
||||||
|
|
||||||
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
|
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
|
||||||
{
|
{
|
||||||
|
@ -69,9 +74,9 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
|
||||||
crst_table_free(mm, (unsigned long *) p4d);
|
crst_table_free(mm, (unsigned long *) p4d);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
|
static inline pud_t *pud_alloc_one_noprof(struct mm_struct *mm, unsigned long address)
|
||||||
{
|
{
|
||||||
unsigned long *table = crst_table_alloc(mm);
|
unsigned long *table = crst_table_alloc_noprof(mm);
|
||||||
|
|
||||||
if (!table)
|
if (!table)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
@ -80,6 +85,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
|
||||||
|
|
||||||
return (pud_t *) table;
|
return (pud_t *) table;
|
||||||
}
|
}
|
||||||
|
#define pud_alloc_one(...) alloc_hooks(pud_alloc_one_noprof(__VA_ARGS__))
|
||||||
|
|
||||||
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
|
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
|
||||||
{
|
{
|
||||||
|
@ -90,9 +96,9 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
|
||||||
crst_table_free(mm, (unsigned long *) pud);
|
crst_table_free(mm, (unsigned long *) pud);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
|
static inline pmd_t *pmd_alloc_one_noprof(struct mm_struct *mm, unsigned long vmaddr)
|
||||||
{
|
{
|
||||||
unsigned long *table = crst_table_alloc(mm);
|
unsigned long *table = crst_table_alloc_noprof(mm);
|
||||||
|
|
||||||
if (!table)
|
if (!table)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
@ -103,6 +109,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
|
||||||
}
|
}
|
||||||
return (pmd_t *) table;
|
return (pmd_t *) table;
|
||||||
}
|
}
|
||||||
|
#define pmd_alloc_one(...) alloc_hooks(pmd_alloc_one_noprof(__VA_ARGS__))
|
||||||
|
|
||||||
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
|
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
|
||||||
{
|
{
|
||||||
|
@ -127,9 +134,9 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
|
||||||
set_pud(pud, __pud(_REGION3_ENTRY | __pa(pmd)));
|
set_pud(pud, __pud(_REGION3_ENTRY | __pa(pmd)));
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
|
static inline pgd_t *pgd_alloc_noprof(struct mm_struct *mm)
|
||||||
{
|
{
|
||||||
unsigned long *table = crst_table_alloc(mm);
|
unsigned long *table = crst_table_alloc_noprof(mm);
|
||||||
|
|
||||||
if (!table)
|
if (!table)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
@ -137,6 +144,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
|
||||||
|
|
||||||
return (pgd_t *) table;
|
return (pgd_t *) table;
|
||||||
}
|
}
|
||||||
|
#define pgd_alloc(...) alloc_hooks(pgd_alloc_noprof(__VA_ARGS__))
|
||||||
|
|
||||||
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
|
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
|
||||||
{
|
{
|
||||||
|
|
|
@ -1416,18 +1416,12 @@ static inline char *debug_get_user_string(const char __user *user_buf,
|
||||||
{
|
{
|
||||||
char *buffer;
|
char *buffer;
|
||||||
|
|
||||||
buffer = kmalloc(user_len + 1, GFP_KERNEL);
|
buffer = memdup_user_nul(user_buf, user_len);
|
||||||
if (!buffer)
|
if (IS_ERR(buffer))
|
||||||
return ERR_PTR(-ENOMEM);
|
return buffer;
|
||||||
if (copy_from_user(buffer, user_buf, user_len) != 0) {
|
|
||||||
kfree(buffer);
|
|
||||||
return ERR_PTR(-EFAULT);
|
|
||||||
}
|
|
||||||
/* got the string, now strip linefeed. */
|
/* got the string, now strip linefeed. */
|
||||||
if (buffer[user_len - 1] == '\n')
|
if (buffer[user_len - 1] == '\n')
|
||||||
buffer[user_len - 1] = 0;
|
buffer[user_len - 1] = 0;
|
||||||
else
|
|
||||||
buffer[user_len] = 0;
|
|
||||||
return buffer;
|
return buffer;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -116,7 +116,7 @@ static void pibwork_handler(struct work_struct *work)
|
||||||
mutex_lock(&pibmutex);
|
mutex_lock(&pibmutex);
|
||||||
timedout = ktime_add_ns(data->expire, PIBWORK_DELAY);
|
timedout = ktime_add_ns(data->expire, PIBWORK_DELAY);
|
||||||
if (ktime_before(ktime_get(), timedout)) {
|
if (ktime_before(ktime_get(), timedout)) {
|
||||||
mod_delayed_work(system_wq, &pibwork, nsecs_to_jiffies(PIBWORK_DELAY));
|
mod_delayed_work(system_percpu_wq, &pibwork, nsecs_to_jiffies(PIBWORK_DELAY));
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
vfree(data->pib);
|
vfree(data->pib);
|
||||||
|
@ -174,7 +174,7 @@ long diag324_pibbuf(unsigned long arg)
|
||||||
pib_update(data);
|
pib_update(data);
|
||||||
data->sequence++;
|
data->sequence++;
|
||||||
data->expire = ktime_add_ns(ktime_get(), tod_to_ns(data->pib->intv));
|
data->expire = ktime_add_ns(ktime_get(), tod_to_ns(data->pib->intv));
|
||||||
mod_delayed_work(system_wq, &pibwork, nsecs_to_jiffies(PIBWORK_DELAY));
|
mod_delayed_work(system_percpu_wq, &pibwork, nsecs_to_jiffies(PIBWORK_DELAY));
|
||||||
first = false;
|
first = false;
|
||||||
}
|
}
|
||||||
rc = data->rc;
|
rc = data->rc;
|
||||||
|
|
|
@ -191,7 +191,7 @@ int hd_enable_hiperdispatch(void)
|
||||||
return 0;
|
return 0;
|
||||||
if (hd_online_cores <= hd_entitled_cores)
|
if (hd_online_cores <= hd_entitled_cores)
|
||||||
return 0;
|
return 0;
|
||||||
mod_delayed_work(system_wq, &hd_capacity_work, HD_DELAY_INTERVAL * hd_delay_factor);
|
mod_delayed_work(system_dfl_wq, &hd_capacity_work, HD_DELAY_INTERVAL * hd_delay_factor);
|
||||||
hd_update_capacities();
|
hd_update_capacities();
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
|
@ -14,11 +14,15 @@
|
||||||
#include <asm/pgalloc.h>
|
#include <asm/pgalloc.h>
|
||||||
#include <asm/tlbflush.h>
|
#include <asm/tlbflush.h>
|
||||||
|
|
||||||
unsigned long *crst_table_alloc(struct mm_struct *mm)
|
unsigned long *crst_table_alloc_noprof(struct mm_struct *mm)
|
||||||
{
|
{
|
||||||
struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
|
gfp_t gfp = GFP_KERNEL_ACCOUNT;
|
||||||
|
struct ptdesc *ptdesc;
|
||||||
unsigned long *table;
|
unsigned long *table;
|
||||||
|
|
||||||
|
if (mm == &init_mm)
|
||||||
|
gfp &= ~__GFP_ACCOUNT;
|
||||||
|
ptdesc = pagetable_alloc_noprof(gfp, CRST_ALLOC_ORDER);
|
||||||
if (!ptdesc)
|
if (!ptdesc)
|
||||||
return NULL;
|
return NULL;
|
||||||
table = ptdesc_to_virt(ptdesc);
|
table = ptdesc_to_virt(ptdesc);
|
||||||
|
@ -112,12 +116,12 @@ err_p4d:
|
||||||
|
|
||||||
#ifdef CONFIG_PGSTE
|
#ifdef CONFIG_PGSTE
|
||||||
|
|
||||||
struct ptdesc *page_table_alloc_pgste(struct mm_struct *mm)
|
struct ptdesc *page_table_alloc_pgste_noprof(struct mm_struct *mm)
|
||||||
{
|
{
|
||||||
struct ptdesc *ptdesc;
|
struct ptdesc *ptdesc;
|
||||||
u64 *table;
|
u64 *table;
|
||||||
|
|
||||||
ptdesc = pagetable_alloc(GFP_KERNEL, 0);
|
ptdesc = pagetable_alloc_noprof(GFP_KERNEL_ACCOUNT, 0);
|
||||||
if (ptdesc) {
|
if (ptdesc) {
|
||||||
table = (u64 *)ptdesc_to_virt(ptdesc);
|
table = (u64 *)ptdesc_to_virt(ptdesc);
|
||||||
__arch_set_page_dat(table, 1);
|
__arch_set_page_dat(table, 1);
|
||||||
|
@ -134,12 +138,15 @@ void page_table_free_pgste(struct ptdesc *ptdesc)
|
||||||
|
|
||||||
#endif /* CONFIG_PGSTE */
|
#endif /* CONFIG_PGSTE */
|
||||||
|
|
||||||
unsigned long *page_table_alloc(struct mm_struct *mm)
|
unsigned long *page_table_alloc_noprof(struct mm_struct *mm)
|
||||||
{
|
{
|
||||||
|
gfp_t gfp = GFP_KERNEL_ACCOUNT;
|
||||||
struct ptdesc *ptdesc;
|
struct ptdesc *ptdesc;
|
||||||
unsigned long *table;
|
unsigned long *table;
|
||||||
|
|
||||||
ptdesc = pagetable_alloc(GFP_KERNEL, 0);
|
if (mm == &init_mm)
|
||||||
|
gfp &= ~__GFP_ACCOUNT;
|
||||||
|
ptdesc = pagetable_alloc_noprof(gfp, 0);
|
||||||
if (!ptdesc)
|
if (!ptdesc)
|
||||||
return NULL;
|
return NULL;
|
||||||
if (!pagetable_pte_ctor(mm, ptdesc)) {
|
if (!pagetable_pte_ctor(mm, ptdesc)) {
|
||||||
|
|
|
@ -5,19 +5,11 @@ comment "S/390 block device drivers"
|
||||||
config DCSSBLK
|
config DCSSBLK
|
||||||
def_tristate m
|
def_tristate m
|
||||||
prompt "DCSSBLK support"
|
prompt "DCSSBLK support"
|
||||||
depends on S390 && BLOCK && (DAX || DAX=n)
|
depends on S390 && BLOCK && ZONE_DEVICE
|
||||||
|
select FS_DAX
|
||||||
help
|
help
|
||||||
Support for dcss block device
|
Support for dcss block device
|
||||||
|
|
||||||
config DCSSBLK_DAX
|
|
||||||
def_bool y
|
|
||||||
depends on DCSSBLK
|
|
||||||
# requires S390 ZONE_DEVICE support
|
|
||||||
depends on BROKEN
|
|
||||||
prompt "DCSSBLK DAX support"
|
|
||||||
help
|
|
||||||
Enable DAX operation for the dcss block device
|
|
||||||
|
|
||||||
config DASD
|
config DASD
|
||||||
def_tristate y
|
def_tristate y
|
||||||
prompt "Support for DASD devices"
|
prompt "Support for DASD devices"
|
||||||
|
|
|
@ -79,6 +79,8 @@ struct dcssblk_dev_info {
|
||||||
int num_of_segments;
|
int num_of_segments;
|
||||||
struct list_head seg_list;
|
struct list_head seg_list;
|
||||||
struct dax_device *dax_dev;
|
struct dax_device *dax_dev;
|
||||||
|
struct dev_pagemap pgmap;
|
||||||
|
void *pgmap_addr;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct segment_info {
|
struct segment_info {
|
||||||
|
@ -415,6 +417,8 @@ removeseg:
|
||||||
dax_remove_host(dev_info->gd);
|
dax_remove_host(dev_info->gd);
|
||||||
kill_dax(dev_info->dax_dev);
|
kill_dax(dev_info->dax_dev);
|
||||||
put_dax(dev_info->dax_dev);
|
put_dax(dev_info->dax_dev);
|
||||||
|
if (dev_info->pgmap_addr)
|
||||||
|
devm_memunmap_pages(&dev_info->dev, &dev_info->pgmap);
|
||||||
del_gendisk(dev_info->gd);
|
del_gendisk(dev_info->gd);
|
||||||
put_disk(dev_info->gd);
|
put_disk(dev_info->gd);
|
||||||
|
|
||||||
|
@ -537,9 +541,6 @@ static int dcssblk_setup_dax(struct dcssblk_dev_info *dev_info)
|
||||||
{
|
{
|
||||||
struct dax_device *dax_dev;
|
struct dax_device *dax_dev;
|
||||||
|
|
||||||
if (!IS_ENABLED(CONFIG_DCSSBLK_DAX))
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops);
|
dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops);
|
||||||
if (IS_ERR(dax_dev))
|
if (IS_ERR(dax_dev))
|
||||||
return PTR_ERR(dax_dev);
|
return PTR_ERR(dax_dev);
|
||||||
|
@ -562,6 +563,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
|
||||||
struct dcssblk_dev_info *dev_info;
|
struct dcssblk_dev_info *dev_info;
|
||||||
struct segment_info *seg_info, *temp;
|
struct segment_info *seg_info, *temp;
|
||||||
char *local_buf;
|
char *local_buf;
|
||||||
|
void *addr;
|
||||||
unsigned long seg_byte_size;
|
unsigned long seg_byte_size;
|
||||||
|
|
||||||
dev_info = NULL;
|
dev_info = NULL;
|
||||||
|
@ -687,9 +689,26 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
|
||||||
if (rc)
|
if (rc)
|
||||||
goto put_dev;
|
goto put_dev;
|
||||||
|
|
||||||
rc = dcssblk_setup_dax(dev_info);
|
if (!IS_ALIGNED(dev_info->start, SUBSECTION_SIZE) ||
|
||||||
if (rc)
|
!IS_ALIGNED(dev_info->end + 1, SUBSECTION_SIZE)) {
|
||||||
goto out_dax;
|
pr_info("DCSS %s is not aligned to %lu bytes, DAX support disabled\n",
|
||||||
|
local_buf, SUBSECTION_SIZE);
|
||||||
|
} else {
|
||||||
|
dev_info->pgmap.type = MEMORY_DEVICE_FS_DAX;
|
||||||
|
dev_info->pgmap.range.start = dev_info->start;
|
||||||
|
dev_info->pgmap.range.end = dev_info->end;
|
||||||
|
dev_info->pgmap.nr_range = 1;
|
||||||
|
addr = devm_memremap_pages(&dev_info->dev, &dev_info->pgmap);
|
||||||
|
if (IS_ERR(addr)) {
|
||||||
|
rc = PTR_ERR(addr);
|
||||||
|
goto put_dev;
|
||||||
|
}
|
||||||
|
dev_info->pgmap_addr = addr;
|
||||||
|
rc = dcssblk_setup_dax(dev_info);
|
||||||
|
if (rc)
|
||||||
|
goto out_dax;
|
||||||
|
pr_info("DAX support enabled for DCSS %s\n", local_buf);
|
||||||
|
}
|
||||||
|
|
||||||
get_device(&dev_info->dev);
|
get_device(&dev_info->dev);
|
||||||
rc = device_add_disk(&dev_info->dev, dev_info->gd, NULL);
|
rc = device_add_disk(&dev_info->dev, dev_info->gd, NULL);
|
||||||
|
@ -716,6 +735,8 @@ out_dax_host:
|
||||||
out_dax:
|
out_dax:
|
||||||
kill_dax(dev_info->dax_dev);
|
kill_dax(dev_info->dax_dev);
|
||||||
put_dax(dev_info->dax_dev);
|
put_dax(dev_info->dax_dev);
|
||||||
|
if (dev_info->pgmap_addr)
|
||||||
|
devm_memunmap_pages(&dev_info->dev, &dev_info->pgmap);
|
||||||
put_dev:
|
put_dev:
|
||||||
list_del(&dev_info->lh);
|
list_del(&dev_info->lh);
|
||||||
put_disk(dev_info->gd);
|
put_disk(dev_info->gd);
|
||||||
|
@ -801,6 +822,8 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
|
||||||
dax_remove_host(dev_info->gd);
|
dax_remove_host(dev_info->gd);
|
||||||
kill_dax(dev_info->dax_dev);
|
kill_dax(dev_info->dax_dev);
|
||||||
put_dax(dev_info->dax_dev);
|
put_dax(dev_info->dax_dev);
|
||||||
|
if (dev_info->pgmap_addr)
|
||||||
|
devm_memunmap_pages(&dev_info->dev, &dev_info->pgmap);
|
||||||
del_gendisk(dev_info->gd);
|
del_gendisk(dev_info->gd);
|
||||||
put_disk(dev_info->gd);
|
put_disk(dev_info->gd);
|
||||||
|
|
||||||
|
|
|
@ -21,6 +21,7 @@ obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
|
||||||
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
|
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
|
||||||
sclp_early.o sclp_early_core.o sclp_sd.o
|
sclp_early.o sclp_early_core.o sclp_sd.o
|
||||||
|
|
||||||
|
obj-$(CONFIG_MEMORY_HOTPLUG) += sclp_mem.o
|
||||||
obj-$(CONFIG_TN3270) += raw3270.o con3270.o
|
obj-$(CONFIG_TN3270) += raw3270.o con3270.o
|
||||||
obj-$(CONFIG_TN3270_FS) += fs3270.o
|
obj-$(CONFIG_TN3270_FS) += fs3270.o
|
||||||
|
|
||||||
|
|
|
@ -244,24 +244,17 @@ static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
|
||||||
size_t len, loff_t *pos)
|
size_t len, loff_t *pos)
|
||||||
{
|
{
|
||||||
ssize_t retlen;
|
ssize_t retlen;
|
||||||
|
void *pdata;
|
||||||
|
|
||||||
pr_debug("writing file '/dev/%pD' at pos. %lld with length %zd\n",
|
pr_debug("writing file '/dev/%pD' at pos. %lld with length %zd\n",
|
||||||
fp, (long long) *pos, len);
|
fp, (long long) *pos, len);
|
||||||
|
|
||||||
if (!fp->private_data) { /* first expect a cmd write */
|
if (!fp->private_data) { /* first expect a cmd write */
|
||||||
fp->private_data = kmalloc(len + 1, GFP_KERNEL);
|
pdata = memdup_user_nul(ubuf, len);
|
||||||
|
if (IS_ERR(pdata))
|
||||||
if (!fp->private_data)
|
return PTR_ERR(pdata);
|
||||||
return -ENOMEM;
|
fp->private_data = pdata;
|
||||||
|
return len;
|
||||||
if (!copy_from_user(fp->private_data, ubuf, len)) {
|
|
||||||
((char *)fp->private_data)[len] = '\0';
|
|
||||||
return len;
|
|
||||||
}
|
|
||||||
|
|
||||||
kfree(fp->private_data);
|
|
||||||
fp->private_data = NULL;
|
|
||||||
return -EFAULT;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
retlen = hmcdrv_dev_transfer((char *) fp->private_data,
|
retlen = hmcdrv_dev_transfer((char *) fp->private_data,
|
||||||
|
|
|
@ -8,31 +8,46 @@
|
||||||
#define KMSG_COMPONENT "sclp_cmd"
|
#define KMSG_COMPONENT "sclp_cmd"
|
||||||
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
|
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
|
||||||
|
|
||||||
#include <linux/cpufeature.h>
|
|
||||||
#include <linux/completion.h>
|
#include <linux/completion.h>
|
||||||
#include <linux/init.h>
|
|
||||||
#include <linux/errno.h>
|
|
||||||
#include <linux/err.h>
|
#include <linux/err.h>
|
||||||
|
#include <linux/errno.h>
|
||||||
|
#include <linux/init.h>
|
||||||
#include <linux/slab.h>
|
#include <linux/slab.h>
|
||||||
#include <linux/string.h>
|
#include <linux/string.h>
|
||||||
#include <linux/mm.h>
|
|
||||||
#include <linux/mmzone.h>
|
|
||||||
#include <linux/memory.h>
|
|
||||||
#include <linux/memory_hotplug.h>
|
|
||||||
#include <linux/module.h>
|
|
||||||
#include <asm/ctlreg.h>
|
|
||||||
#include <asm/chpid.h>
|
#include <asm/chpid.h>
|
||||||
#include <asm/setup.h>
|
#include <asm/ctlreg.h>
|
||||||
#include <asm/page.h>
|
|
||||||
#include <asm/sclp.h>
|
#include <asm/sclp.h>
|
||||||
#include <asm/numa.h>
|
|
||||||
#include <asm/facility.h>
|
|
||||||
#include <asm/page-states.h>
|
|
||||||
|
|
||||||
#include "sclp.h"
|
#include "sclp.h"
|
||||||
|
|
||||||
#define SCLP_CMDW_ASSIGN_STORAGE 0x000d0001
|
/* CPU configuration related functions */
|
||||||
#define SCLP_CMDW_UNASSIGN_STORAGE 0x000c0001
|
#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
|
||||||
|
#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
|
||||||
|
/* Channel path configuration related functions */
|
||||||
|
#define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001
|
||||||
|
#define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001
|
||||||
|
#define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001
|
||||||
|
|
||||||
|
struct cpu_configure_sccb {
|
||||||
|
struct sccb_header header;
|
||||||
|
} __packed __aligned(8);
|
||||||
|
|
||||||
|
struct chp_cfg_sccb {
|
||||||
|
struct sccb_header header;
|
||||||
|
u8 ccm;
|
||||||
|
u8 reserved[6];
|
||||||
|
u8 cssid;
|
||||||
|
} __packed;
|
||||||
|
|
||||||
|
struct chp_info_sccb {
|
||||||
|
struct sccb_header header;
|
||||||
|
u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
|
||||||
|
u8 standby[SCLP_CHP_INFO_MASK_SIZE];
|
||||||
|
u8 configured[SCLP_CHP_INFO_MASK_SIZE];
|
||||||
|
u8 ccm;
|
||||||
|
u8 reserved[6];
|
||||||
|
u8 cssid;
|
||||||
|
} __packed;
|
||||||
|
|
||||||
static void sclp_sync_callback(struct sclp_req *req, void *data)
|
static void sclp_sync_callback(struct sclp_req *req, void *data)
|
||||||
{
|
{
|
||||||
|
@ -64,13 +79,11 @@ int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
|
||||||
request->callback_data = &completion;
|
request->callback_data = &completion;
|
||||||
init_completion(&completion);
|
init_completion(&completion);
|
||||||
|
|
||||||
/* Perform sclp request. */
|
|
||||||
rc = sclp_add_request(request);
|
rc = sclp_add_request(request);
|
||||||
if (rc)
|
if (rc)
|
||||||
goto out;
|
goto out;
|
||||||
wait_for_completion(&completion);
|
wait_for_completion(&completion);
|
||||||
|
|
||||||
/* Check response. */
|
|
||||||
if (request->status != SCLP_REQ_DONE) {
|
if (request->status != SCLP_REQ_DONE) {
|
||||||
pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
|
pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
|
||||||
cmd, request->status);
|
cmd, request->status);
|
||||||
|
@ -81,22 +94,15 @@ out:
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* CPU configuration related functions.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
|
|
||||||
#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
|
|
||||||
|
|
||||||
int _sclp_get_core_info(struct sclp_core_info *info)
|
int _sclp_get_core_info(struct sclp_core_info *info)
|
||||||
{
|
{
|
||||||
int rc;
|
|
||||||
int length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
|
|
||||||
struct read_cpu_info_sccb *sccb;
|
struct read_cpu_info_sccb *sccb;
|
||||||
|
int rc, length;
|
||||||
|
|
||||||
if (!SCLP_HAS_CPU_INFO)
|
if (!SCLP_HAS_CPU_INFO)
|
||||||
return -EOPNOTSUPP;
|
return -EOPNOTSUPP;
|
||||||
|
|
||||||
|
length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
|
||||||
sccb = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA | __GFP_ZERO, get_order(length));
|
sccb = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA | __GFP_ZERO, get_order(length));
|
||||||
if (!sccb)
|
if (!sccb)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
@ -114,14 +120,10 @@ int _sclp_get_core_info(struct sclp_core_info *info)
|
||||||
}
|
}
|
||||||
sclp_fill_core_info(info, sccb);
|
sclp_fill_core_info(info, sccb);
|
||||||
out:
|
out:
|
||||||
free_pages((unsigned long) sccb, get_order(length));
|
free_pages((unsigned long)sccb, get_order(length));
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct cpu_configure_sccb {
|
|
||||||
struct sccb_header header;
|
|
||||||
} __attribute__((packed, aligned(8)));
|
|
||||||
|
|
||||||
static int do_core_configure(sclp_cmdw_t cmd)
|
static int do_core_configure(sclp_cmdw_t cmd)
|
||||||
{
|
{
|
||||||
struct cpu_configure_sccb *sccb;
|
struct cpu_configure_sccb *sccb;
|
||||||
|
@ -130,8 +132,8 @@ static int do_core_configure(sclp_cmdw_t cmd)
|
||||||
if (!SCLP_HAS_CPU_RECONFIG)
|
if (!SCLP_HAS_CPU_RECONFIG)
|
||||||
return -EOPNOTSUPP;
|
return -EOPNOTSUPP;
|
||||||
/*
|
/*
|
||||||
* This is not going to cross a page boundary since we force
|
* Use kmalloc to have a minimum alignment of 8 bytes and ensure sccb
|
||||||
* kmalloc to have a minimum alignment of 8 bytes on s390.
|
* is not going to cross a page boundary.
|
||||||
*/
|
*/
|
||||||
sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
|
sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
|
||||||
if (!sccb)
|
if (!sccb)
|
||||||
|
@ -165,394 +167,6 @@ int sclp_core_deconfigure(u8 core)
|
||||||
return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
|
return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_MEMORY_HOTPLUG
|
|
||||||
|
|
||||||
static DEFINE_MUTEX(sclp_mem_mutex);
|
|
||||||
static LIST_HEAD(sclp_mem_list);
|
|
||||||
static u8 sclp_max_storage_id;
|
|
||||||
static DECLARE_BITMAP(sclp_storage_ids, 256);
|
|
||||||
|
|
||||||
struct memory_increment {
|
|
||||||
struct list_head list;
|
|
||||||
u16 rn;
|
|
||||||
int standby;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct assign_storage_sccb {
|
|
||||||
struct sccb_header header;
|
|
||||||
u16 rn;
|
|
||||||
} __packed;
|
|
||||||
|
|
||||||
int arch_get_memory_phys_device(unsigned long start_pfn)
|
|
||||||
{
|
|
||||||
if (!sclp.rzm)
|
|
||||||
return 0;
|
|
||||||
return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
|
|
||||||
}
|
|
||||||
|
|
||||||
static unsigned long long rn2addr(u16 rn)
|
|
||||||
{
|
|
||||||
return (unsigned long long) (rn - 1) * sclp.rzm;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
|
|
||||||
{
|
|
||||||
struct assign_storage_sccb *sccb;
|
|
||||||
int rc;
|
|
||||||
|
|
||||||
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
|
|
||||||
if (!sccb)
|
|
||||||
return -ENOMEM;
|
|
||||||
sccb->header.length = PAGE_SIZE;
|
|
||||||
sccb->rn = rn;
|
|
||||||
rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
|
|
||||||
if (rc)
|
|
||||||
goto out;
|
|
||||||
switch (sccb->header.response_code) {
|
|
||||||
case 0x0020:
|
|
||||||
case 0x0120:
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
|
|
||||||
cmd, sccb->header.response_code, rn);
|
|
||||||
rc = -EIO;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
out:
|
|
||||||
free_page((unsigned long) sccb);
|
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int sclp_assign_storage(u16 rn)
|
|
||||||
{
|
|
||||||
unsigned long long start;
|
|
||||||
int rc;
|
|
||||||
|
|
||||||
rc = do_assign_storage(SCLP_CMDW_ASSIGN_STORAGE, rn);
|
|
||||||
if (rc)
|
|
||||||
return rc;
|
|
||||||
start = rn2addr(rn);
|
|
||||||
storage_key_init_range(start, start + sclp.rzm);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int sclp_unassign_storage(u16 rn)
|
|
||||||
{
|
|
||||||
return do_assign_storage(SCLP_CMDW_UNASSIGN_STORAGE, rn);
|
|
||||||
}
|
|
||||||
|
|
||||||
struct attach_storage_sccb {
|
|
||||||
struct sccb_header header;
|
|
||||||
u16 :16;
|
|
||||||
u16 assigned;
|
|
||||||
u32 :32;
|
|
||||||
u32 entries[];
|
|
||||||
} __packed;
|
|
||||||
|
|
||||||
static int sclp_attach_storage(u8 id)
|
|
||||||
{
|
|
||||||
struct attach_storage_sccb *sccb;
|
|
||||||
int rc;
|
|
||||||
int i;
|
|
||||||
|
|
||||||
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
|
|
||||||
if (!sccb)
|
|
||||||
return -ENOMEM;
|
|
||||||
sccb->header.length = PAGE_SIZE;
|
|
||||||
sccb->header.function_code = 0x40;
|
|
||||||
rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
|
|
||||||
SCLP_QUEUE_INTERVAL);
|
|
||||||
if (rc)
|
|
||||||
goto out;
|
|
||||||
switch (sccb->header.response_code) {
|
|
||||||
case 0x0020:
|
|
||||||
set_bit(id, sclp_storage_ids);
|
|
||||||
for (i = 0; i < sccb->assigned; i++) {
|
|
||||||
if (sccb->entries[i])
|
|
||||||
sclp_unassign_storage(sccb->entries[i] >> 16);
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
rc = -EIO;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
out:
|
|
||||||
free_page((unsigned long) sccb);
|
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int sclp_mem_change_state(unsigned long start, unsigned long size,
|
|
||||||
int online)
|
|
||||||
{
|
|
||||||
struct memory_increment *incr;
|
|
||||||
unsigned long long istart;
|
|
||||||
int rc = 0;
|
|
||||||
|
|
||||||
list_for_each_entry(incr, &sclp_mem_list, list) {
|
|
||||||
istart = rn2addr(incr->rn);
|
|
||||||
if (start + size - 1 < istart)
|
|
||||||
break;
|
|
||||||
if (start > istart + sclp.rzm - 1)
|
|
||||||
continue;
|
|
||||||
if (online)
|
|
||||||
rc |= sclp_assign_storage(incr->rn);
|
|
||||||
else
|
|
||||||
sclp_unassign_storage(incr->rn);
|
|
||||||
if (rc == 0)
|
|
||||||
incr->standby = online ? 0 : 1;
|
|
||||||
}
|
|
||||||
return rc ? -EIO : 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool contains_standby_increment(unsigned long start, unsigned long end)
|
|
||||||
{
|
|
||||||
struct memory_increment *incr;
|
|
||||||
unsigned long istart;
|
|
||||||
|
|
||||||
list_for_each_entry(incr, &sclp_mem_list, list) {
|
|
||||||
istart = rn2addr(incr->rn);
|
|
||||||
if (end - 1 < istart)
|
|
||||||
continue;
|
|
||||||
if (start > istart + sclp.rzm - 1)
|
|
||||||
continue;
|
|
||||||
if (incr->standby)
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int sclp_mem_notifier(struct notifier_block *nb,
|
|
||||||
unsigned long action, void *data)
|
|
||||||
{
|
|
||||||
unsigned long start, size;
|
|
||||||
struct memory_notify *arg;
|
|
||||||
unsigned char id;
|
|
||||||
int rc = 0;
|
|
||||||
|
|
||||||
arg = data;
|
|
||||||
start = arg->start_pfn << PAGE_SHIFT;
|
|
||||||
size = arg->nr_pages << PAGE_SHIFT;
|
|
||||||
mutex_lock(&sclp_mem_mutex);
|
|
||||||
for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
|
|
||||||
sclp_attach_storage(id);
|
|
||||||
switch (action) {
|
|
||||||
case MEM_GOING_OFFLINE:
|
|
||||||
/*
|
|
||||||
* We do not allow to set memory blocks offline that contain
|
|
||||||
* standby memory. This is done to simplify the "memory online"
|
|
||||||
* case.
|
|
||||||
*/
|
|
||||||
if (contains_standby_increment(start, start + size))
|
|
||||||
rc = -EPERM;
|
|
||||||
break;
|
|
||||||
case MEM_PREPARE_ONLINE:
|
|
||||||
/*
|
|
||||||
* Access the altmap_start_pfn and altmap_nr_pages fields
|
|
||||||
* within the struct memory_notify specifically when dealing
|
|
||||||
* with only MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE notifiers.
|
|
||||||
*
|
|
||||||
* When altmap is in use, take the specified memory range
|
|
||||||
* online, which includes the altmap.
|
|
||||||
*/
|
|
||||||
if (arg->altmap_nr_pages) {
|
|
||||||
start = PFN_PHYS(arg->altmap_start_pfn);
|
|
||||||
size += PFN_PHYS(arg->altmap_nr_pages);
|
|
||||||
}
|
|
||||||
rc = sclp_mem_change_state(start, size, 1);
|
|
||||||
if (rc || !arg->altmap_nr_pages)
|
|
||||||
break;
|
|
||||||
/*
|
|
||||||
* Set CMMA state to nodat here, since the struct page memory
|
|
||||||
* at the beginning of the memory block will not go through the
|
|
||||||
* buddy allocator later.
|
|
||||||
*/
|
|
||||||
__arch_set_page_nodat((void *)__va(start), arg->altmap_nr_pages);
|
|
||||||
break;
|
|
||||||
case MEM_FINISH_OFFLINE:
|
|
||||||
/*
|
|
||||||
* When altmap is in use, take the specified memory range
|
|
||||||
* offline, which includes the altmap.
|
|
||||||
*/
|
|
||||||
if (arg->altmap_nr_pages) {
|
|
||||||
start = PFN_PHYS(arg->altmap_start_pfn);
|
|
||||||
size += PFN_PHYS(arg->altmap_nr_pages);
|
|
||||||
}
|
|
||||||
sclp_mem_change_state(start, size, 0);
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
mutex_unlock(&sclp_mem_mutex);
|
|
||||||
return rc ? NOTIFY_BAD : NOTIFY_OK;
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct notifier_block sclp_mem_nb = {
|
|
||||||
.notifier_call = sclp_mem_notifier,
|
|
||||||
};
|
|
||||||
|
|
||||||
static void __init align_to_block_size(unsigned long long *start,
|
|
||||||
unsigned long long *size,
|
|
||||||
unsigned long long alignment)
|
|
||||||
{
|
|
||||||
unsigned long long start_align, size_align;
|
|
||||||
|
|
||||||
start_align = roundup(*start, alignment);
|
|
||||||
size_align = rounddown(*start + *size, alignment) - start_align;
|
|
||||||
|
|
||||||
pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
|
|
||||||
*start, size_align >> 20, *size >> 20);
|
|
||||||
*start = start_align;
|
|
||||||
*size = size_align;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void __init add_memory_merged(u16 rn)
|
|
||||||
{
|
|
||||||
unsigned long long start, size, addr, block_size;
|
|
||||||
static u16 first_rn, num;
|
|
||||||
|
|
||||||
if (rn && first_rn && (first_rn + num == rn)) {
|
|
||||||
num++;
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
if (!first_rn)
|
|
||||||
goto skip_add;
|
|
||||||
start = rn2addr(first_rn);
|
|
||||||
size = (unsigned long long) num * sclp.rzm;
|
|
||||||
if (start >= ident_map_size)
|
|
||||||
goto skip_add;
|
|
||||||
if (start + size > ident_map_size)
|
|
||||||
size = ident_map_size - start;
|
|
||||||
block_size = memory_block_size_bytes();
|
|
||||||
align_to_block_size(&start, &size, block_size);
|
|
||||||
if (!size)
|
|
||||||
goto skip_add;
|
|
||||||
for (addr = start; addr < start + size; addr += block_size)
|
|
||||||
add_memory(0, addr, block_size,
|
|
||||||
cpu_has_edat1() ?
|
|
||||||
MHP_MEMMAP_ON_MEMORY | MHP_OFFLINE_INACCESSIBLE : MHP_NONE);
|
|
||||||
skip_add:
|
|
||||||
first_rn = rn;
|
|
||||||
num = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void __init sclp_add_standby_memory(void)
|
|
||||||
{
|
|
||||||
struct memory_increment *incr;
|
|
||||||
|
|
||||||
list_for_each_entry(incr, &sclp_mem_list, list)
|
|
||||||
if (incr->standby)
|
|
||||||
add_memory_merged(incr->rn);
|
|
||||||
add_memory_merged(0);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void __init insert_increment(u16 rn, int standby, int assigned)
|
|
||||||
{
|
|
||||||
struct memory_increment *incr, *new_incr;
|
|
||||||
struct list_head *prev;
|
|
||||||
u16 last_rn;
|
|
||||||
|
|
||||||
new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
|
|
||||||
if (!new_incr)
|
|
||||||
return;
|
|
||||||
new_incr->rn = rn;
|
|
||||||
new_incr->standby = standby;
|
|
||||||
last_rn = 0;
|
|
||||||
prev = &sclp_mem_list;
|
|
||||||
list_for_each_entry(incr, &sclp_mem_list, list) {
|
|
||||||
if (assigned && incr->rn > rn)
|
|
||||||
break;
|
|
||||||
if (!assigned && incr->rn - last_rn > 1)
|
|
||||||
break;
|
|
||||||
last_rn = incr->rn;
|
|
||||||
prev = &incr->list;
|
|
||||||
}
|
|
||||||
if (!assigned)
|
|
||||||
new_incr->rn = last_rn + 1;
|
|
||||||
if (new_incr->rn > sclp.rnmax) {
|
|
||||||
kfree(new_incr);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
list_add(&new_incr->list, prev);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int __init sclp_detect_standby_memory(void)
|
|
||||||
{
|
|
||||||
struct read_storage_sccb *sccb;
|
|
||||||
int i, id, assigned, rc;
|
|
||||||
|
|
||||||
if (oldmem_data.start) /* No standby memory in kdump mode */
|
|
||||||
return 0;
|
|
||||||
if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
|
|
||||||
return 0;
|
|
||||||
rc = -ENOMEM;
|
|
||||||
sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
|
|
||||||
if (!sccb)
|
|
||||||
goto out;
|
|
||||||
assigned = 0;
|
|
||||||
for (id = 0; id <= sclp_max_storage_id; id++) {
|
|
||||||
memset(sccb, 0, PAGE_SIZE);
|
|
||||||
sccb->header.length = PAGE_SIZE;
|
|
||||||
rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb);
|
|
||||||
if (rc)
|
|
||||||
goto out;
|
|
||||||
switch (sccb->header.response_code) {
|
|
||||||
case 0x0010:
|
|
||||||
set_bit(id, sclp_storage_ids);
|
|
||||||
for (i = 0; i < sccb->assigned; i++) {
|
|
||||||
if (!sccb->entries[i])
|
|
||||||
continue;
|
|
||||||
assigned++;
|
|
||||||
insert_increment(sccb->entries[i] >> 16, 0, 1);
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
case 0x0310:
|
|
||||||
break;
|
|
||||||
case 0x0410:
|
|
||||||
for (i = 0; i < sccb->assigned; i++) {
|
|
||||||
if (!sccb->entries[i])
|
|
||||||
continue;
|
|
||||||
assigned++;
|
|
||||||
insert_increment(sccb->entries[i] >> 16, 1, 1);
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
rc = -EIO;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
if (!rc)
|
|
||||||
sclp_max_storage_id = sccb->max_id;
|
|
||||||
}
|
|
||||||
if (rc || list_empty(&sclp_mem_list))
|
|
||||||
goto out;
|
|
||||||
for (i = 1; i <= sclp.rnmax - assigned; i++)
|
|
||||||
insert_increment(0, 1, 0);
|
|
||||||
rc = register_memory_notifier(&sclp_mem_nb);
|
|
||||||
if (rc)
|
|
||||||
goto out;
|
|
||||||
sclp_add_standby_memory();
|
|
||||||
out:
|
|
||||||
free_page((unsigned long) sccb);
|
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
__initcall(sclp_detect_standby_memory);
|
|
||||||
|
|
||||||
#endif /* CONFIG_MEMORY_HOTPLUG */
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Channel path configuration related functions.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001
|
|
||||||
#define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001
|
|
||||||
#define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001
|
|
||||||
|
|
||||||
struct chp_cfg_sccb {
|
|
||||||
struct sccb_header header;
|
|
||||||
u8 ccm;
|
|
||||||
u8 reserved[6];
|
|
||||||
u8 cssid;
|
|
||||||
} __attribute__((packed));
|
|
||||||
|
|
||||||
static int do_chp_configure(sclp_cmdw_t cmd)
|
static int do_chp_configure(sclp_cmdw_t cmd)
|
||||||
{
|
{
|
||||||
struct chp_cfg_sccb *sccb;
|
struct chp_cfg_sccb *sccb;
|
||||||
|
@ -560,8 +174,7 @@ static int do_chp_configure(sclp_cmdw_t cmd)
|
||||||
|
|
||||||
if (!SCLP_HAS_CHP_RECONFIG)
|
if (!SCLP_HAS_CHP_RECONFIG)
|
||||||
return -EOPNOTSUPP;
|
return -EOPNOTSUPP;
|
||||||
/* Prepare sccb. */
|
sccb = (struct chp_cfg_sccb *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
|
||||||
sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
|
|
||||||
if (!sccb)
|
if (!sccb)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
sccb->header.length = sizeof(*sccb);
|
sccb->header.length = sizeof(*sccb);
|
||||||
|
@ -581,7 +194,7 @@ static int do_chp_configure(sclp_cmdw_t cmd)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
out:
|
out:
|
||||||
free_page((unsigned long) sccb);
|
free_page((unsigned long)sccb);
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -609,16 +222,6 @@ int sclp_chp_deconfigure(struct chp_id chpid)
|
||||||
return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
|
return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct chp_info_sccb {
|
|
||||||
struct sccb_header header;
|
|
||||||
u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
|
|
||||||
u8 standby[SCLP_CHP_INFO_MASK_SIZE];
|
|
||||||
u8 configured[SCLP_CHP_INFO_MASK_SIZE];
|
|
||||||
u8 ccm;
|
|
||||||
u8 reserved[6];
|
|
||||||
u8 cssid;
|
|
||||||
} __attribute__((packed));
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* sclp_chp_read_info - perform read channel-path information sclp command
|
* sclp_chp_read_info - perform read channel-path information sclp command
|
||||||
* @info: resulting channel-path information data
|
* @info: resulting channel-path information data
|
||||||
|
@ -634,8 +237,7 @@ int sclp_chp_read_info(struct sclp_chp_info *info)
|
||||||
|
|
||||||
if (!SCLP_HAS_CHP_INFO)
|
if (!SCLP_HAS_CHP_INFO)
|
||||||
return -EOPNOTSUPP;
|
return -EOPNOTSUPP;
|
||||||
/* Prepare sccb. */
|
sccb = (struct chp_info_sccb *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
|
||||||
sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
|
|
||||||
if (!sccb)
|
if (!sccb)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
sccb->header.length = sizeof(*sccb);
|
sccb->header.length = sizeof(*sccb);
|
||||||
|
@ -652,6 +254,6 @@ int sclp_chp_read_info(struct sclp_chp_info *info)
|
||||||
memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
|
memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
|
||||||
memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
|
memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
|
||||||
out:
|
out:
|
||||||
free_page((unsigned long) sccb);
|
free_page((unsigned long)sccb);
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,399 @@
|
||||||
|
// SPDX-License-Identifier: GPL-2.0
|
||||||
|
/*
|
||||||
|
* Memory hotplug support via sclp
|
||||||
|
*
|
||||||
|
* Copyright IBM Corp. 2025
|
||||||
|
*/
|
||||||
|
|
||||||
|
#define KMSG_COMPONENT "sclp_mem"
|
||||||
|
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
|
||||||
|
|
||||||
|
#include <linux/cpufeature.h>
|
||||||
|
#include <linux/err.h>
|
||||||
|
#include <linux/errno.h>
|
||||||
|
#include <linux/init.h>
|
||||||
|
#include <linux/memory.h>
|
||||||
|
#include <linux/memory_hotplug.h>
|
||||||
|
#include <linux/mm.h>
|
||||||
|
#include <linux/mmzone.h>
|
||||||
|
#include <linux/slab.h>
|
||||||
|
#include <asm/facility.h>
|
||||||
|
#include <asm/page.h>
|
||||||
|
#include <asm/page-states.h>
|
||||||
|
#include <asm/sclp.h>
|
||||||
|
|
||||||
|
#include "sclp.h"
|
||||||
|
|
||||||
|
#define SCLP_CMDW_ASSIGN_STORAGE 0x000d0001
|
||||||
|
#define SCLP_CMDW_UNASSIGN_STORAGE 0x000c0001
|
||||||
|
|
||||||
|
static DEFINE_MUTEX(sclp_mem_mutex);
|
||||||
|
static LIST_HEAD(sclp_mem_list);
|
||||||
|
static u8 sclp_max_storage_id;
|
||||||
|
static DECLARE_BITMAP(sclp_storage_ids, 256);
|
||||||
|
|
||||||
|
struct memory_increment {
|
||||||
|
struct list_head list;
|
||||||
|
u16 rn;
|
||||||
|
int standby;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct assign_storage_sccb {
|
||||||
|
struct sccb_header header;
|
||||||
|
u16 rn;
|
||||||
|
} __packed;
|
||||||
|
|
||||||
|
struct attach_storage_sccb {
|
||||||
|
struct sccb_header header;
|
||||||
|
u16 :16;
|
||||||
|
u16 assigned;
|
||||||
|
u32 :32;
|
||||||
|
u32 entries[];
|
||||||
|
} __packed;
|
||||||
|
|
||||||
|
int arch_get_memory_phys_device(unsigned long start_pfn)
|
||||||
|
{
|
||||||
|
if (!sclp.rzm)
|
||||||
|
return 0;
|
||||||
|
return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
|
||||||
|
}
|
||||||
|
|
||||||
|
static unsigned long rn2addr(u16 rn)
|
||||||
|
{
|
||||||
|
return (unsigned long)(rn - 1) * sclp.rzm;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
|
||||||
|
{
|
||||||
|
struct assign_storage_sccb *sccb;
|
||||||
|
int rc;
|
||||||
|
|
||||||
|
sccb = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
|
||||||
|
if (!sccb)
|
||||||
|
return -ENOMEM;
|
||||||
|
sccb->header.length = PAGE_SIZE;
|
||||||
|
sccb->rn = rn;
|
||||||
|
rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
|
||||||
|
if (rc)
|
||||||
|
goto out;
|
||||||
|
switch (sccb->header.response_code) {
|
||||||
|
case 0x0020:
|
||||||
|
case 0x0120:
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
|
||||||
|
cmd, sccb->header.response_code, rn);
|
||||||
|
rc = -EIO;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
out:
|
||||||
|
free_page((unsigned long)sccb);
|
||||||
|
return rc;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int sclp_assign_storage(u16 rn)
|
||||||
|
{
|
||||||
|
unsigned long start;
|
||||||
|
int rc;
|
||||||
|
|
||||||
|
rc = do_assign_storage(SCLP_CMDW_ASSIGN_STORAGE, rn);
|
||||||
|
if (rc)
|
||||||
|
return rc;
|
||||||
|
start = rn2addr(rn);
|
||||||
|
storage_key_init_range(start, start + sclp.rzm);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int sclp_unassign_storage(u16 rn)
|
||||||
|
{
|
||||||
|
return do_assign_storage(SCLP_CMDW_UNASSIGN_STORAGE, rn);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int sclp_attach_storage(u8 id)
|
||||||
|
{
|
||||||
|
struct attach_storage_sccb *sccb;
|
||||||
|
int rc, i;
|
||||||
|
|
||||||
|
sccb = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
|
||||||
|
if (!sccb)
|
||||||
|
return -ENOMEM;
|
||||||
|
sccb->header.length = PAGE_SIZE;
|
||||||
|
sccb->header.function_code = 0x40;
|
||||||
|
rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
|
||||||
|
SCLP_QUEUE_INTERVAL);
|
||||||
|
if (rc)
|
||||||
|
goto out;
|
||||||
|
switch (sccb->header.response_code) {
|
||||||
|
case 0x0020:
|
||||||
|
set_bit(id, sclp_storage_ids);
|
||||||
|
for (i = 0; i < sccb->assigned; i++) {
|
||||||
|
if (sccb->entries[i])
|
||||||
|
sclp_unassign_storage(sccb->entries[i] >> 16);
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
rc = -EIO;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
out:
|
||||||
|
free_page((unsigned long)sccb);
|
||||||
|
return rc;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int sclp_mem_change_state(unsigned long start, unsigned long size,
|
||||||
|
int online)
|
||||||
|
{
|
||||||
|
struct memory_increment *incr;
|
||||||
|
unsigned long istart;
|
||||||
|
int rc = 0;
|
||||||
|
|
||||||
|
list_for_each_entry(incr, &sclp_mem_list, list) {
|
||||||
|
istart = rn2addr(incr->rn);
|
||||||
|
if (start + size - 1 < istart)
|
||||||
|
break;
|
||||||
|
if (start > istart + sclp.rzm - 1)
|
||||||
|
continue;
|
||||||
|
if (online)
|
||||||
|
rc |= sclp_assign_storage(incr->rn);
|
||||||
|
else
|
||||||
|
sclp_unassign_storage(incr->rn);
|
||||||
|
if (rc == 0)
|
||||||
|
incr->standby = online ? 0 : 1;
|
||||||
|
}
|
||||||
|
return rc ? -EIO : 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool contains_standby_increment(unsigned long start, unsigned long end)
|
||||||
|
{
|
||||||
|
struct memory_increment *incr;
|
||||||
|
unsigned long istart;
|
||||||
|
|
||||||
|
list_for_each_entry(incr, &sclp_mem_list, list) {
|
||||||
|
istart = rn2addr(incr->rn);
|
||||||
|
if (end - 1 < istart)
|
||||||
|
continue;
|
||||||
|
if (start > istart + sclp.rzm - 1)
|
||||||
|
continue;
|
||||||
|
if (incr->standby)
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int sclp_mem_notifier(struct notifier_block *nb,
|
||||||
|
unsigned long action, void *data)
|
||||||
|
{
|
||||||
|
unsigned long start, size;
|
||||||
|
struct memory_notify *arg;
|
||||||
|
unsigned char id;
|
||||||
|
int rc = 0;
|
||||||
|
|
||||||
|
arg = data;
|
||||||
|
start = arg->start_pfn << PAGE_SHIFT;
|
||||||
|
size = arg->nr_pages << PAGE_SHIFT;
|
||||||
|
mutex_lock(&sclp_mem_mutex);
|
||||||
|
for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
|
||||||
|
sclp_attach_storage(id);
|
||||||
|
switch (action) {
|
||||||
|
case MEM_GOING_OFFLINE:
|
||||||
|
/*
|
||||||
|
* Do not allow to set memory blocks offline that contain
|
||||||
|
* standby memory. This is done to simplify the "memory online"
|
||||||
|
* case.
|
||||||
|
*/
|
||||||
|
if (contains_standby_increment(start, start + size))
|
||||||
|
rc = -EPERM;
|
||||||
|
break;
|
||||||
|
case MEM_PREPARE_ONLINE:
|
||||||
|
/*
|
||||||
|
* Access the altmap_start_pfn and altmap_nr_pages fields
|
||||||
|
* within the struct memory_notify specifically when dealing
|
||||||
|
* with only MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE notifiers.
|
||||||
|
*
|
||||||
|
* When altmap is in use, take the specified memory range
|
||||||
|
* online, which includes the altmap.
|
||||||
|
*/
|
||||||
|
if (arg->altmap_nr_pages) {
|
||||||
|
start = PFN_PHYS(arg->altmap_start_pfn);
|
||||||
|
size += PFN_PHYS(arg->altmap_nr_pages);
|
||||||
|
}
|
||||||
|
rc = sclp_mem_change_state(start, size, 1);
|
||||||
|
if (rc || !arg->altmap_nr_pages)
|
||||||
|
break;
|
||||||
|
/*
|
||||||
|
* Set CMMA state to nodat here, since the struct page memory
|
||||||
|
* at the beginning of the memory block will not go through the
|
||||||
|
* buddy allocator later.
|
||||||
|
*/
|
||||||
|
__arch_set_page_nodat((void *)__va(start), arg->altmap_nr_pages);
|
||||||
|
break;
|
||||||
|
case MEM_FINISH_OFFLINE:
|
||||||
|
/*
|
||||||
|
* When altmap is in use, take the specified memory range
|
||||||
|
* offline, which includes the altmap.
|
||||||
|
*/
|
||||||
|
if (arg->altmap_nr_pages) {
|
||||||
|
start = PFN_PHYS(arg->altmap_start_pfn);
|
||||||
|
size += PFN_PHYS(arg->altmap_nr_pages);
|
||||||
|
}
|
||||||
|
sclp_mem_change_state(start, size, 0);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
mutex_unlock(&sclp_mem_mutex);
|
||||||
|
return rc ? NOTIFY_BAD : NOTIFY_OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
 * Notifier block hooking sclp_mem_notifier() into the memory hotplug
 * notifier chain (handles MEM_GOING_OFFLINE, MEM_PREPARE_ONLINE and
 * MEM_FINISH_OFFLINE transitions).
 */
static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};
|
||||||
|
|
||||||
|
/*
 * Shrink [*start, *start + *size) to the largest sub-range whose bounds
 * are multiples of @alignment (the memory block size) and report how
 * much of the original range remains usable.
 */
static void __init align_to_block_size(unsigned long *start,
				       unsigned long *size,
				       unsigned long alignment)
{
	unsigned long aligned_start = roundup(*start, alignment);
	unsigned long aligned_size = rounddown(*start + *size, alignment) - aligned_start;

	pr_info("Standby memory at 0x%lx (%luM of %luM usable)\n",
		*start, aligned_size >> 20, *size >> 20);
	*start = aligned_start;
	*size = aligned_size;
}
|
||||||
|
|
||||||
|
/*
 * Merge consecutive storage increments into one contiguous range and add
 * it as hotpluggable memory. Static state (first_rn/num) accumulates a
 * run of adjacent increments across calls; calling with rn == 0 flushes
 * the pending run without starting a new one.
 */
static void __init add_memory_merged(u16 rn)
{
	unsigned long start, size, addr, block_size;
	static u16 first_rn, num;

	/* Adjacent to the current run: just extend it. */
	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	/* No run accumulated yet - start a new one below. */
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long)num * sclp.rzm;
	/* Clip the range to the identity-mapped address space. */
	if (start >= ident_map_size)
		goto skip_add;
	if (start + size > ident_map_size)
		size = ident_map_size - start;
	block_size = memory_block_size_bytes();
	align_to_block_size(&start, &size, block_size);
	if (!size)
		goto skip_add;
	/*
	 * Add one memory block at a time. With EDAT1 the memmap is placed
	 * in the hotplugged range itself (altmap) and the block stays
	 * inaccessible until onlined.
	 */
	for (addr = start; addr < start + size; addr += block_size) {
		add_memory(0, addr, block_size,
			   cpu_has_edat1() ?
			   MHP_MEMMAP_ON_MEMORY | MHP_OFFLINE_INACCESSIBLE : MHP_NONE);
	}
skip_add:
	first_rn = rn;
	num = 1;
}
|
||||||
|
|
||||||
|
/*
 * Walk the increment list and register every standby increment with
 * add_memory_merged(); the trailing call with rn == 0 flushes the last
 * pending merged range.
 */
static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}
|
||||||
|
|
||||||
|
/*
 * Insert a memory increment into sclp_mem_list, kept sorted by rn.
 * For @assigned increments the given @rn is used; for unassigned ones
 * (rn ignored) the first free rn after the last contiguous entry is
 * chosen. Allocation failure is silently ignored - the increment is
 * then simply not tracked.
 */
static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	/* Find the insertion point that keeps the list sorted by rn. */
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		/* Unassigned: stop at the first gap in the rn sequence. */
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	/* Drop increments beyond the maximum increment number. */
	if (new_incr->rn > sclp.rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}
|
||||||
|
|
||||||
|
/*
 * Read storage information from the SCLP, build the increment list and
 * register the memory hotplug notifier plus any detected standby memory.
 * Runs once at boot (see __initcall below). Returns 0 on success or when
 * standby memory handling does not apply, negative error code otherwise.
 */
static int __init sclp_detect_standby_memory(void)
{
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	/* No standby memory in kdump mode */
	if (oldmem_data.start)
		return 0;
	/*
	 * Required facility bits - NOTE(review): presumably the storage
	 * info/attach facilities; confirm against the SCLP documentation.
	 */
	if ((sclp.facilities & 0xe00000000000UL) != 0xe00000000000UL)
		return 0;
	rc = -ENOMEM;
	/* SCCB must be below 2GB, hence GFP_DMA. */
	sccb = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	/* sclp_max_storage_id may be raised inside the loop (see below). */
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			/* Entries reported as assigned increments. */
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			/* No entries for this id - nothing to record. */
			break;
		case 0x0410:
			/* Entries reported as standby increments. */
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	/* Create placeholders for the remaining unassigned increments. */
	for (i = 1; i <= sclp.rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	sclp_add_standby_memory();
out:
	free_page((unsigned long)sccb);
	return rc;
}
|
||||||
|
/* Run standby memory detection once during boot. */
__initcall(sclp_detect_standby_memory);
|
|
@ -1671,7 +1671,7 @@ tape_3590_init(void)
|
||||||
|
|
||||||
DBF_EVENT(3, "3590 init\n");
|
DBF_EVENT(3, "3590 init\n");
|
||||||
|
|
||||||
tape_3590_wq = alloc_workqueue("tape_3590", 0, 0);
|
tape_3590_wq = alloc_workqueue("tape_3590", WQ_PERCPU, 0);
|
||||||
if (!tape_3590_wq)
|
if (!tape_3590_wq)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
|
|
|
@ -1405,7 +1405,9 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
|
||||||
/* Step 3: import the encrypted key value as a new key */
|
/* Step 3: import the encrypted key value as a new key */
|
||||||
rc = ep11_unwrapkey(card, domain, kek, keklen,
|
rc = ep11_unwrapkey(card, domain, kek, keklen,
|
||||||
encbuf, encbuflen, 0, def_iv,
|
encbuf, encbuflen, 0, def_iv,
|
||||||
keybitsize, 0, keybuf, keybufsize, keytype, xflags);
|
keybitsize, keygenflags,
|
||||||
|
keybuf, keybufsize,
|
||||||
|
keytype, xflags);
|
||||||
if (rc) {
|
if (rc) {
|
||||||
ZCRYPT_DBF_ERR("%s importing key value as new key failed, rc=%d\n",
|
ZCRYPT_DBF_ERR("%s importing key value as new key failed, rc=%d\n",
|
||||||
__func__, rc);
|
__func__, rc);
|
||||||
|
|
|
@ -329,6 +329,29 @@ struct ftrace_likely_data {
|
||||||
#define __no_sanitize_or_inline __always_inline
|
#define __no_sanitize_or_inline __always_inline
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The assume attribute is used to indicate that a certain condition is
|
||||||
|
* assumed to be true. If this condition is violated at runtime, the behavior
|
||||||
|
* is undefined. Compilers may or may not use this indication to generate
|
||||||
|
* optimized code.
|
||||||
|
*
|
||||||
|
* Note that the clang documentation states that optimizers may react
|
||||||
|
* differently to this attribute, and this may even have a negative
|
||||||
|
* performance impact. Therefore this attribute should be used with care.
|
||||||
|
*
|
||||||
|
* Optional: only supported since gcc >= 13
|
||||||
|
* Optional: only supported since clang >= 19
|
||||||
|
*
|
||||||
|
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Statement-Attributes.html#index-assume-statement-attribute
|
||||||
|
* clang: https://clang.llvm.org/docs/AttributeReference.html#id13
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
#ifdef CONFIG_CC_HAS_ASSUME
|
||||||
|
# define __assume(expr) __attribute__((__assume__(expr)))
|
||||||
|
#else
|
||||||
|
# define __assume(expr)
|
||||||
|
#endif
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Optional: only supported since gcc >= 15
|
* Optional: only supported since gcc >= 15
|
||||||
* Optional: only supported since clang >= 18
|
* Optional: only supported since clang >= 18
|
||||||
|
|
10
init/Kconfig
10
init/Kconfig
|
@ -115,6 +115,16 @@ config TOOLS_SUPPORT_RELR
|
||||||
config CC_HAS_ASM_INLINE
|
config CC_HAS_ASM_INLINE
|
||||||
def_bool $(success,echo 'void foo(void) { asm inline (""); }' | $(CC) -x c - -c -o /dev/null)
|
def_bool $(success,echo 'void foo(void) { asm inline (""); }' | $(CC) -x c - -c -o /dev/null)
|
||||||
|
|
||||||
|
config CC_HAS_ASSUME
|
||||||
|
bool
|
||||||
|
# clang needs to be at least 19.1.0 since the meaning of the assume
|
||||||
|
# attribute changed:
|
||||||
|
# https://github.com/llvm/llvm-project/commit/c44fa3e8a9a44c2e9a575768a3c185354b9f6c17
|
||||||
|
default y if CC_IS_CLANG && CLANG_VERSION >= 190100
|
||||||
|
# supported since gcc 13.1.0
|
||||||
|
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106654
|
||||||
|
default y if CC_IS_GCC && GCC_VERSION >= 130100
|
||||||
|
|
||||||
config CC_HAS_NO_PROFILE_FN_ATTR
|
config CC_HAS_NO_PROFILE_FN_ATTR
|
||||||
def_bool $(success,echo '__attribute__((no_profile_instrument_function)) int x();' | $(CC) -x c - -c -o /dev/null -Werror)
|
def_bool $(success,echo '__attribute__((no_profile_instrument_function)) int x();' | $(CC) -x c - -c -o /dev/null -Werror)
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue