mirror of git://gcc.gnu.org/git/gcc.git
mode-switching.c (create_pre_exit): Rename maybe_builtin_apply to multi_reg_return.
* mode-switching.c (create_pre_exit): Rename maybe_builtin_apply
to multi_reg_return.  Clarify that we are skipping USEs of multiple
return registers.  Use bool type where appropriate.

From-SVN: r203856
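For context (an illustrative sketch, not part of this commit): a "multiple
return registers" situation arises when a function's return value does not
fit in one hard register. The RTL epilogue then keeps each return register
live with a USE insn, and create_pre_exit must skip those USEs when placing
the final mode switch. A minimal C example, assuming the x86-64 SysV ABI
(the struct and function below are hypothetical):

/* A 16-byte struct is returned in two hard registers (%rax and %rdx)
   on the x86-64 SysV ABI, so the epilogue ends with a USE of each
   return register before the return.  */
struct pair { long lo, hi; };

struct pair
make_pair (long lo, long hi)
{
  struct pair p;
  p.lo = lo;   /* comes back in %rax */
  p.hi = hi;   /* comes back in %rdx */
  return p;
}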
This commit is contained in:
parent 41ee845b75
commit c07757e536
gcc/ChangeLog
@@ -1,3 +1,9 @@
+2013-10-19  Uros Bizjak  <ubizjak@gmail.com>
+
+	* mode-switching.c (create_pre_exit): Rename maybe_builtin_apply
+	to multi_reg_return.  Clarify that we are skipping USEs of multiple
+	return registers.  Use bool type where appropriate.
+
 2013-10-18  Jan Hubicka  <jh@suse.cz>
 
 	* config/i386/i386.h (ACCUMULATE_OUTGOING_ARGS): Disable accumulation
@@ -6,7 +12,8 @@
 	(X86_TUNE_PUSH_MEMORY): Likewise.
 	(X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL,
 	X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL): New.
-	(X86_TUNE_ACCUMULATE_OUTGOING_ARGS, X86_TUNE_ALWAYS_FANCY_MATH_387): New.
+	(X86_TUNE_ACCUMULATE_OUTGOING_ARGS, X86_TUNE_ALWAYS_FANCY_MATH_387):
+	New.
 	* i386.c (x86_accumulate_outgoing_args, x86_arch_always_fancy_math_387,
 	x86_avx256_split_unaligned_load, x86_avx256_split_unaligned_store):
 	Remove.
@@ -21,8 +28,8 @@
 2013-10-18  Andrew MacLeod  <amacleod@redhat.com>
 
-	* tree-ssa.h: Don't include gimple-low.h, tree-ssa-address.h, sbitmap.h,
-	tree-ssa-threadedge.h, tree-ssa-dom.h, and tree-cfgcleanup.h.
+	* tree-ssa.h: Don't include gimple-low.h, tree-ssa-address.h,
+	sbitmap.h, tree-ssa-threadedge.h, tree-ssa-dom.h and tree-cfgcleanup.h.
 	* gimple-low.c (gimple_check_call_arg,
 	gimple_check_call_matching_types): Move to cgraph.c.
 	* gimple-low.h: Remove prototype.
@@ -90,7 +97,8 @@
 	* tree-ssa-threadupdate.c: Do not include "tm.h" or "tm_p.h".
 
 	* tree-ssa-threadupdate.c: Include "dbgcnt.h".
-	(register_jump_thread): Add "registered_jump_thread" debug counter support.
+	(register_jump_thread): Add "registered_jump_thread" debug
+	counter support.
 	* dbgcnt.def (registered_jump_thread): New debug counter.
 
 2013-10-18  Andrew MacLeod  <amacleod@redhat.com>
gcc/mode-switching.c
@@ -229,9 +229,9 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
 		int ret_start = REGNO (ret_reg);
 		int nregs = hard_regno_nregs[ret_start][GET_MODE (ret_reg)];
 		int ret_end = ret_start + nregs;
-		int short_block = 0;
-		int maybe_builtin_apply = 0;
-		int forced_late_switch = 0;
+		bool short_block = false;
+		bool multi_reg_return = false;
+		bool forced_late_switch = false;
 		rtx before_return_copy;
 
 		do
@@ -251,19 +251,20 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
 		       copy yet, the copy must have been deleted.  */
 		    if (CALL_P (return_copy))
 		      {
-			short_block = 1;
+			short_block = true;
 			break;
 		      }
 		    return_copy_pat = PATTERN (return_copy);
 		    switch (GET_CODE (return_copy_pat))
 		      {
 		      case USE:
-			/* Skip __builtin_apply pattern.  */
+			/* Skip USEs of multiple return registers.
+			   __builtin_apply pattern is also handled here.  */
 			if (GET_CODE (XEXP (return_copy_pat, 0)) == REG
 			    && (targetm.calls.function_value_regno_p
 				(REGNO (XEXP (return_copy_pat, 0)))))
 			  {
-			    maybe_builtin_apply = 1;
+			    multi_reg_return = true;
 			    last_insn = return_copy;
 			    continue;
 			  }
@@ -326,7 +327,7 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
 			       there are no return copy insns at all.  This
 			       avoids an ice on that invalid function.  */
 			    if (ret_start + nregs == ret_end)
-			      short_block = 1;
+			      short_block = true;
 			    break;
 			  }
 			if (!targetm.calls.function_value_regno_p (copy_start))
@@ -354,10 +355,10 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
 			       another mode than MODE_EXIT, even if it is
 			       unrelated to the return value, so we want to put
 			       the final mode switch after it.  */
-			    if (maybe_builtin_apply
+			    if (multi_reg_return
 				&& targetm.calls.function_value_regno_p
 				    (copy_start))
-			      forced_late_switch = 1;
+			      forced_late_switch = true;
 
 			    /* For the SH4, floating point loads depend on fpscr,
 			       thus we might need to put the final mode switch
@@ -367,7 +368,7 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
 			    if (copy_start >= ret_start
 				&& copy_start + copy_num <= ret_end
 				&& OBJECT_P (SET_SRC (return_copy_pat)))
-			      forced_late_switch = 1;
+			      forced_late_switch = true;
 			    break;
 			  }
 			if (copy_num == 0)
@@ -379,7 +380,7 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
 			if (copy_start >= ret_start
 			    && copy_start + copy_num <= ret_end)
 			  nregs -= copy_num;
-			else if (!maybe_builtin_apply
+			else if (!multi_reg_return
 				 || !targetm.calls.function_value_regno_p
 				     (copy_start))
 			  break;
@@ -393,7 +394,7 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
 		       isolated use.  */
 		    if (return_copy == BB_HEAD (src_bb))
 		      {
-			short_block = 1;
+			short_block = true;
 			break;
 		      }
 		    last_insn = return_copy;