cse.c: Use HOST_WIDE_INT_M1 instead of ~(HOST_WIDE_INT) 0.

	* cse.c: Use HOST_WIDE_INT_M1 instead of ~(HOST_WIDE_INT) 0.
	* combine.c: Use HOST_WIDE_INT_M1U instead of
	~(unsigned HOST_WIDE_INT) 0.
	* double-int.h: Ditto.
	* dse.c: Ditto.
	* dwarf2asm.c: Ditto.
	* expmed.c: Ditto.
	* genmodes.c: Ditto.
	* match.pd: Ditto.
	* read-rtl.c: Ditto.
	* tree-ssa-loop-ivopts.c: Ditto.
	* tree-ssa-loop-prefetch.c: Ditto.
	* tree-vect-generic.c: Ditto.
	* tree-vect-patterns.c: Ditto.
	* tree.c: Ditto.

From-SVN: r238529
commit dd4786fe81
parent dbe9dfdd50
Author: Uros Bizjak <ubizjak@gmail.com>
Date:   2016-07-20 17:47:33 +02:00

15 changed files with 43 additions and 25 deletions
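For reference, HOST_WIDE_INT_M1 and HOST_WIDE_INT_M1U are the all-ones
constants from gcc/hwint.h. The following is a minimal standalone sketch,
not GCC's actual definitions (a 64-bit long long HOST_WIDE_INT is an
assumption here; the real macros are host-dependent), showing that the new
spellings are bit-for-bit identical to the expressions they replace:

    /* Minimal sketch; HOST_WIDE_INT as 64-bit long long is an assumption,
       the real definitions in gcc/hwint.h vary with the host.  */
    #include <stdio.h>

    #define HOST_WIDE_INT long long
    #define HOST_WIDE_INT_M1  ((HOST_WIDE_INT) -1)
    #define HOST_WIDE_INT_M1U ((unsigned HOST_WIDE_INT) -1)

    int
    main (void)
    {
      /* Same bit patterns as the expressions they replace.  */
      printf ("%d\n", HOST_WIDE_INT_M1 == ~(HOST_WIDE_INT) 0);           /* 1 */
      printf ("%d\n", HOST_WIDE_INT_M1U == ~(unsigned HOST_WIDE_INT) 0); /* 1 */
      return 0;
    }

The signed/unsigned split mirrors the replaced expressions: cse.c used the
signed ~(HOST_WIDE_INT) 0 and gets HOST_WIDE_INT_M1; every other file used
the unsigned form and gets HOST_WIDE_INT_M1U. The unsigned variant is the
one the mask-building sites below shift left, which keeps that arithmetic
well defined (left-shifting a negative signed value is undefined in C).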

diff --git a/gcc/ChangeLog b/gcc/ChangeLog

@@ -1,3 +1,21 @@
+2016-07-20  Uros Bizjak  <ubizjak@gmail.com>
+
+	* cse.c: Use HOST_WIDE_INT_M1 instead of ~(HOST_WIDE_INT) 0.
+	* combine.c: Use HOST_WIDE_INT_M1U instead of
+	~(unsigned HOST_WIDE_INT) 0.
+	* double-int.h: Ditto.
+	* dse.c: Ditto.
+	* dwarf2asm.c: Ditto.
+	* expmed.c: Ditto.
+	* genmodes.c: Ditto.
+	* match.pd: Ditto.
+	* read-rtl.c: Ditto.
+	* tree-ssa-loop-ivopts.c: Ditto.
+	* tree-ssa-loop-prefetch.c: Ditto.
+	* tree-vect-generic.c: Ditto.
+	* tree-vect-patterns.c: Ditto.
+	* tree.c: Ditto.
+
 2016-07-20  Georg-Johann Lay  <avr@gjlay.de>
 
 	* gcc/config/avr.c (avr_legitimize_address) [AVR_TINY]: Force

diff --git a/gcc/combine.c b/gcc/combine.c

@@ -1660,7 +1660,7 @@ update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
     }
 
   /* Don't call nonzero_bits if it cannot change anything.  */
-  if (rsp->nonzero_bits != ~(unsigned HOST_WIDE_INT) 0)
+  if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
     {
       bits = nonzero_bits (src, nonzero_bits_mode);
       if (reg_equal && bits)
@@ -6541,7 +6541,7 @@ simplify_set (rtx x)
 
   if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
     {
-      src = force_to_mode (src, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
+      src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
       SUBST (SET_SRC (x), src);
     }
 
@@ -7446,7 +7446,7 @@ make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
   else
     new_rtx = force_to_mode (inner, tmode,
                              len >= HOST_BITS_PER_WIDE_INT
-                             ? ~(unsigned HOST_WIDE_INT) 0
+                             ? HOST_WIDE_INT_M1U
                              : (HOST_WIDE_INT_1U << len) - 1,
                              0);
 
@@ -7635,7 +7635,7 @@ make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
       inner = force_to_mode (inner, wanted_inner_mode,
                             pos_rtx
                             || len + orig_pos >= HOST_BITS_PER_WIDE_INT
-                            ? ~(unsigned HOST_WIDE_INT) 0
+                            ? HOST_WIDE_INT_M1U
                            : (((HOST_WIDE_INT_1U << len) - 1)
                               << orig_pos),
                            0);
@@ -8110,7 +8110,7 @@ make_compound_operation (rtx x, enum rtx_code in_code)
          && subreg_lowpart_p (x))
        {
          rtx newer
-           = force_to_mode (tem, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
+           = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
 
          /* If we have something other than a SUBREG, we might have
             done an expansion, so rerun ourselves.  */
@@ -8390,7 +8390,7 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
      do not know, we need to assume that all bits up to the highest-order
      bit in MASK will be needed.  This is how we form such a mask.  */
   if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
-    fuller_mask = ~(unsigned HOST_WIDE_INT) 0;
+    fuller_mask = HOST_WIDE_INT_M1U;
   else
     fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
                   - 1);
@@ -8733,7 +8733,7 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
 
          if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
            {
-             nonzero = ~(unsigned HOST_WIDE_INT) 0;
+             nonzero = HOST_WIDE_INT_M1U;
 
              /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
                 is the number of bits a full-width mask would have set.
@@ -9496,7 +9496,7 @@ make_field_assignment (rtx x)
                               dest);
   src = force_to_mode (src, mode,
                       GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
-                      ? ~(unsigned HOST_WIDE_INT) 0
+                      ? HOST_WIDE_INT_M1U
                       : (HOST_WIDE_INT_1U << len) - 1,
                       0);

diff --git a/gcc/cse.c b/gcc/cse.c

@@ -4565,7 +4565,7 @@ cse_insn (rtx_insn *insn)
              else
                shift = INTVAL (pos);
              if (INTVAL (width) == HOST_BITS_PER_WIDE_INT)
-               mask = ~(HOST_WIDE_INT) 0;
+               mask = HOST_WIDE_INT_M1;
              else
                mask = (HOST_WIDE_INT_1 << INTVAL (width)) - 1;
              val = (val >> shift) & mask;
@@ -5233,7 +5233,7 @@ cse_insn (rtx_insn *insn)
              else
                shift = INTVAL (pos);
              if (INTVAL (width) == HOST_BITS_PER_WIDE_INT)
-               mask = ~(HOST_WIDE_INT) 0;
+               mask = HOST_WIDE_INT_M1;
              else
                mask = (HOST_WIDE_INT_1 << INTVAL (width)) - 1;
              val &= ~(mask << shift);

diff --git a/gcc/double-int.h b/gcc/double-int.h

@@ -365,7 +365,7 @@ double_int::operator ^ (double_int b) const
 
 void dump_double_int (FILE *, double_int, bool);
 
-#define ALL_ONES (~((unsigned HOST_WIDE_INT) 0))
+#define ALL_ONES HOST_WIDE_INT_M1U
 
 /* The operands of the following comparison functions must be processed
    with double_int_ext, if their precision is less than

diff --git a/gcc/dse.c b/gcc/dse.c

@@ -288,7 +288,7 @@ struct store_info
 
 static unsigned HOST_WIDE_INT
 lowpart_bitmask (int n)
 {
-  unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT) 0;
+  unsigned HOST_WIDE_INT mask = HOST_WIDE_INT_M1U;
   return mask >> (HOST_BITS_PER_WIDE_INT - n);
 }
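The dse.c helper above is the cleanest instance of the mask-building
pattern this commit touches throughout: start from the all-ones constant
and shift right to keep the low N bits. A standalone sketch under the same
assumed stand-in definitions (valid for 1 <= n <= 64; the right-shift form
avoids the undefined 1 << 64 that a (1 << n) - 1 formulation would hit at
n == 64):

    #include <assert.h>

    #define HOST_WIDE_INT long long
    #define HOST_BITS_PER_WIDE_INT 64   /* assumed 64-bit host */
    #define HOST_WIDE_INT_M1U ((unsigned HOST_WIDE_INT) -1)

    /* Mask of the low N bits, 1 <= n <= HOST_BITS_PER_WIDE_INT.  */
    static unsigned HOST_WIDE_INT
    lowpart_bitmask (int n)
    {
      unsigned HOST_WIDE_INT mask = HOST_WIDE_INT_M1U;
      return mask >> (HOST_BITS_PER_WIDE_INT - n);
    }

    int
    main (void)
    {
      assert (lowpart_bitmask (8) == 0xffu);
      assert (lowpart_bitmask (64) == HOST_WIDE_INT_M1U); /* shift by 0: defined */
      return 0;
    }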

diff --git a/gcc/dwarf2asm.c b/gcc/dwarf2asm.c

@@ -97,7 +97,7 @@ dw2_asm_output_data (int size, unsigned HOST_WIDE_INT value,
   va_start (ap, comment);
 
   if (size * 8 < HOST_BITS_PER_WIDE_INT)
-    value &= ~(~(unsigned HOST_WIDE_INT) 0 << (size * 8));
+    value &= ~(HOST_WIDE_INT_M1U << (size * 8));
 
   if (op)
     {

diff --git a/gcc/expmed.c b/gcc/expmed.c

@@ -3513,7 +3513,7 @@ invert_mod2n (unsigned HOST_WIDE_INT x, int n)
   int nbit = 3;
 
   mask = (n == HOST_BITS_PER_WIDE_INT
-         ? ~(unsigned HOST_WIDE_INT) 0
+         ? HOST_WIDE_INT_M1U
         : (HOST_WIDE_INT_1U << n) - 1);
 
   while (nbit < n)
@@ -4423,7 +4423,7 @@ expand_divmod (int rem_flag, enum tree_code code, machine_mode mode,
                    || size - 1 >= BITS_PER_WORD)
                  goto fail1;
 
-               ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
+               ml |= HOST_WIDE_INT_M1U << (size - 1);
                mlr = gen_int_mode (ml, compute_mode);
                extra_cost = (shift_cost (speed, compute_mode, post_shift)
                              + shift_cost (speed, compute_mode, size - 1)

diff --git a/gcc/genmodes.c b/gcc/genmodes.c

@@ -1409,7 +1409,7 @@ emit_mode_mask (void)
   puts ("\
 #define MODE_MASK(m) \\\n\
   ((m) >= HOST_BITS_PER_WIDE_INT) \\\n\
-   ? ~(unsigned HOST_WIDE_INT) 0 \\\n\
+   ? HOST_WIDE_INT_M1U \\\n\
   : (HOST_WIDE_INT_1U << (m)) - 1\n");
 
   for_all_modes (c, m)

diff --git a/gcc/match.pd b/gcc/match.pd

@@ -1487,7 +1487,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
                   is all ones.  */
              }
          }
-       zerobits = ~(unsigned HOST_WIDE_INT) 0;
+       zerobits = HOST_WIDE_INT_M1U;
        if (shiftc < prec)
          {
            zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
@@ -1522,7 +1522,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
             break;
           }
   (if (prec < HOST_BITS_PER_WIDE_INT
-       || newmask == ~(unsigned HOST_WIDE_INT) 0)
+       || newmask == HOST_WIDE_INT_M1U)
    (with
     { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
     (if (!tree_int_cst_equal (newmaskt, @2))

diff --git a/gcc/read-rtl.c b/gcc/read-rtl.c

@@ -711,7 +711,7 @@ atoll (const char *p)
       if (new_wide < tmp_wide)
        {
          /* Return INT_MAX equiv on overflow.  */
-         tmp_wide = (~(unsigned HOST_WIDE_INT) 0) >> 1;
+         tmp_wide = HOST_WIDE_INT_M1U >> 1;
          break;
        }
       tmp_wide = new_wide;

diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c

@@ -4217,7 +4217,7 @@ get_address_cost (bool symbol_present, bool var_present,
    }
 
  bits = GET_MODE_BITSIZE (address_mode);
-  mask = ~(~(unsigned HOST_WIDE_INT) 0 << (bits - 1) << 1);
+  mask = ~(HOST_WIDE_INT_M1U << (bits - 1) << 1);
  offset &= mask;
  if ((offset >> (bits - 1) & 1))
    offset |= ~mask;

diff --git a/gcc/tree-ssa-loop-prefetch.c b/gcc/tree-ssa-loop-prefetch.c

@@ -233,7 +233,7 @@ struct mem_ref_group
 
 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched.  */
-#define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
+#define PREFETCH_ALL HOST_WIDE_INT_M1U
 
 /* Do not generate a prefetch if the unroll factor is significantly less
    than what is required by the prefetch.  This is to avoid redundant

diff --git a/gcc/tree-vect-generic.c b/gcc/tree-vect-generic.c

@@ -575,7 +575,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0,
              if (ml >= HOST_WIDE_INT_1U << (prec - 1))
                {
                  this_mode = 4 + (d < 0);
-                 ml |= (~(unsigned HOST_WIDE_INT) 0) << (prec - 1);
+                 ml |= HOST_WIDE_INT_M1U << (prec - 1);
                }
              else
                this_mode = 2 + (d < 0);

diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c

@@ -2861,7 +2861,7 @@ vect_recog_divmod_pattern (vec<gimple *> *stmts,
          if (ml >= HOST_WIDE_INT_1U << (prec - 1))
            {
              add = true;
-             ml |= (~(unsigned HOST_WIDE_INT) 0) << (prec - 1);
+             ml |= HOST_WIDE_INT_M1U << (prec - 1);
            }
          if (post_shift >= prec)
            return NULL;

diff --git a/gcc/tree.c b/gcc/tree.c

@@ -11338,9 +11338,9 @@ int_cst_value (const_tree x)
    {
      bool negative = ((val >> (bits - 1)) & 1) != 0;
 
      if (negative)
-       val |= (~(unsigned HOST_WIDE_INT) 0) << (bits - 1) << 1;
+       val |= HOST_WIDE_INT_M1U << (bits - 1) << 1;
      else
-       val &= ~((~(unsigned HOST_WIDE_INT) 0) << (bits - 1) << 1);
+       val &= ~(HOST_WIDE_INT_M1U << (bits - 1) << 1);
    }
 
  return val;
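
The tree.c hunk shows the companion idiom for the high bits: splitting the
shift into << (bits - 1) << 1 builds an all-ones-above-bits mask without
ever shifting by the full word width, which would be undefined at
bits == 64 (where the mask correctly degenerates to 0). A standalone
sketch of that sign-extension logic, again with assumed stand-in
definitions rather than GCC's real ones:

    #include <assert.h>
    #include <stdbool.h>

    #define HOST_WIDE_INT long long
    #define HOST_WIDE_INT_M1U ((unsigned HOST_WIDE_INT) -1)

    /* Sign-extend a BITS-wide value, 1 <= bits <= 64; the split shift
       stays defined even for bits == 64, where both branches become
       no-ops.  */
    static HOST_WIDE_INT
    sign_extend_hwi (unsigned HOST_WIDE_INT val, int bits)
    {
      bool negative = ((val >> (bits - 1)) & 1) != 0;

      if (negative)
        val |= HOST_WIDE_INT_M1U << (bits - 1) << 1;    /* set bits above */
      else
        val &= ~(HOST_WIDE_INT_M1U << (bits - 1) << 1); /* clear bits above */
      return (HOST_WIDE_INT) val;
    }

    int
    main (void)
    {
      assert (sign_extend_hwi (0xff, 8) == -1);
      assert (sign_extend_hwi (0x80, 8) == -128);
      assert (sign_extend_hwi (0x7f, 8) == 127);
      return 0;
    }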