mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
synced 2026-05-02 18:17:50 -04:00
lib: mul_u64_u64_div_u64(): optimise multiply on 32bit x86
gcc generates horrid code for both ((u64)u32_a * u32_b) and (u64_a + u32_b). As well as the extra instructions it can generate a lot of spills to stack (including spills of constant zeros and even multiplies by constant zero). mul_u32_u32() already exists to optimise the multiply. Add a similar add_u64_u32() for the addition. Disable both for clang - it generates better code without them. Move the 64x64 => 128 multiply into a static inline helper function for code clarity. No need for the a/b_hi/lo variables, the implicit casts on the function calls do the work for us. Should have minimal effect on the generated code. Use mul_u32_u32() and add_u64_u32() in the 64x64 => 128 multiply in mul_u64_add_u64_div_u64(). Link: https://lkml.kernel.org/r/20251105201035.64043-8-david.laight.linux@gmail.com Signed-off-by: David Laight <david.laight.linux@gmail.com> Reviewed-by: Nicolas Pitre <npitre@baylibre.com> Cc: Biju Das <biju.das.jz@bp.renesas.com> Cc: Borislav Betkov <bp@alien8.de> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Li RongQing <lirongqing@baidu.com> Cc: Oleg Nesterov <oleg@redhat.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleinxer <tglx@linutronix.de> Cc: Uwe Kleine-König <u.kleine-koenig@baylibre.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
committed by
Andrew Morton
parent
f0bff2eb04
commit
630f96a687
@@ -186,33 +186,45 @@ EXPORT_SYMBOL(iter_div_u64_rem);
|
||||
#endif
|
||||
|
||||
#if !defined(mul_u64_add_u64_div_u64) || defined(test_mul_u64_add_u64_div_u64)
|
||||
u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d)
|
||||
{
|
||||
|
||||
/*
 * (u64)(u32)(a) * (u32)(b) + (u32)(c): the arguments are truncated to u32
 * by the helpers' prototypes, so callers can pass u64 values directly.
 * mul_u32_u32()/add_u64_u32() exist to work around poor gcc codegen on
 * 32bit x86 (see commit message above).
 */
#define mul_add(a, b, c) add_u64_u32(mul_u32_u32(a, b), c)
|
||||
|
||||
#if defined(__SIZEOF_INT128__) && !defined(test_mul_u64_add_u64_div_u64)
|
||||
|
||||
/*
 * Compute a * b + c as a 128-bit value.
 * Returns the high 64 bits; the low 64 bits are stored through @p_lo.
 * Uses the compiler's native 128-bit type (__SIZEOF_INT128__ checked above).
 */
static inline u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c)
{
	/*
	 * native 64x64=128 bits multiplication
	 * Cannot overflow: (2^64-1)^2 + (2^64-1) < 2^128.
	 */
	u128 prod = (u128)a * b + c;

	*p_lo = prod;		/* implicit truncation to the low 64 bits */
	return prod >> 64;
}
|
||||
|
||||
#else
|
||||
|
||||
/* perform a 64x64=128 bits multiplication manually */
|
||||
u32 a_lo = a, a_hi = a >> 32, b_lo = b, b_hi = b >> 32;
|
||||
static inline u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c)
|
||||
{
|
||||
/* perform a 64x64=128 bits multiplication in 32bit chunks */
|
||||
u64 x, y, z;
|
||||
|
||||
/* Since (x-1)(x-1) + 2(x-1) == x.x - 1 two u32 can be added to a u64 */
|
||||
x = (u64)a_lo * b_lo + (u32)c;
|
||||
y = (u64)a_lo * b_hi + (u32)(c >> 32);
|
||||
y += (u32)(x >> 32);
|
||||
z = (u64)a_hi * b_hi + (u32)(y >> 32);
|
||||
y = (u64)a_hi * b_lo + (u32)y;
|
||||
z += (u32)(y >> 32);
|
||||
x = (y << 32) + (u32)x;
|
||||
|
||||
u64 n_lo = x, n_hi = z;
|
||||
x = mul_add(a, b, c);
|
||||
y = mul_add(a, b >> 32, c >> 32);
|
||||
y = add_u64_u32(y, x >> 32);
|
||||
z = mul_add(a >> 32, b >> 32, y >> 32);
|
||||
y = mul_add(a >> 32, b, y);
|
||||
*p_lo = (y << 32) + (u32)x;
|
||||
return add_u64_u32(z, y >> 32);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d)
|
||||
{
|
||||
u64 n_lo, n_hi;
|
||||
|
||||
n_hi = mul_u64_u64_add_u64(&n_lo, a, b, c);
|
||||
|
||||
if (!n_hi)
|
||||
return div64_u64(n_lo, d);
|
||||
|
||||
|
||||
Reference in New Issue
Block a user