mirror of git://gcc.gnu.org/git/gcc.git
reg-notes.def (DEP_CONTROL): New.
    * reg-notes.def (DEP_CONTROL): New.
    * sched-ebb.c (add_deps_for_risky_insns): Add a REG_DEP_CONTROL when
    not doing speculation.
    * rtlanal.c (record_hard_reg_sets, find_all_hard_reg_sets,
    record_hard_reg_uses_1, record_hard_reg_uses): New functions.
    * function.c (record_hard_reg_sets, record_hard_reg_uses,
    record_hard_reg_uses_1): Remove; move to rtlanal.c.
    * lists.c (copy_INSN_LIST, concat_INSN_LIST): New functions.
    * haifa-sched.c: Swap includes of "rtl.h" and "hard-reg-set.h".
    (MUST_RECOMPUTE_SPEC_P): New macro.
    (real_insn_for_shadow): New function.
    (cond_clobbered_p, recompute_todo_spec, check_clobbered_conditions,
    toggle_cancelled_flags): New static functions.
    (schedule_insn): Relax an assert to only check for empty hard back
    dependencies.  Skip cancelled dependencies.  Call
    check_clobbered_conditions.
    (copy_insn_list): Remove function, renamed moved to lists.c.
    (save_backtrack_point): Use new spelling copy_INSN_LIST.
    (unschedule_insns_until): Ensure TODO_SPEC is reset properly.
    (restore_last_backtrack_point): Likewise.  Call toggle_cancelled_flags.
    (estimate_insn_tick): Ignore cancelled dependencies.
    (haifa_speculate_insn): Move declaration.
    (try_ready): Move code into recompute_todo_spec and call it.  Tweak
    some asserts.  Ensure predicated patterns are restored if necessary.
    Dump DEP_CONTROL flag.
    (haifa_change_pattern): Merge with sched_change_pattern.
    (sched_change_pattern): Remove function.
    * sched-deps.c (NON_FLUSH_JUMP_KIND, NON_FLUSH_JUMP): Remove.  All
    uses changed to simply not test NON_FLUSH_JUMP_P.
    (ds_to_dk, dk_to_ds, dump_dep, ds_to_dt, dump_ds, check_dep): Handle
    REG_DEP_CONTROL.
    (dep_spec_p): If DO_PREDICATION, REG_DEP_CONTROL is speculative.
    (reg_pending_control_uses, control_dependency_cache): New static
    variables.
    (sched_get_reverse_condition_uncached): New function.
    (sd_find_dep_between): Remove pointless assert.  Look in
    control_dependency_cache.
    (ask_dependency_caches, set_dependency_caches, sd_delete_dep,
    extend_dependency_caches, sched_deps_finish): Handle REG_DEP_CONTROL
    and control_dependency_cache.
    (sd_unresolve_dep): Use dep_spec_p.
    (add_dependence): Now a wrapper around add_dependence_1, handling
    REG_DEP_CONTROL specially.
    (flush_pending_lists): Clear pending_jump_insns.
    (sched_analyze_1): Handle pending_jump_insns like a memory flush.
    (sched_analyze_2): Unconditionally add to pending memory flushes,
    keep previous behaviour but apply it to pending_jump_insns instead.
    (sched_analyze_insn): Defer adding jump reg dependencies using
    reg_pending_control_uses; add them to the control_uses list.  Handle
    pending_jump_insns and control_uses when adding dependence lists.
    (deps_analyze_insn): Update INSN_COND_DEPS.
    (deps_analyze_insn): Add jumps to pending_jump_insns rather than
    last_pending_memory_flush.
    (init_deps): Initialize pending_jump_insns.
    (free_deps): Free control_uses.
    (remove_from_deps): Remove from pending_jump_insns.
    (init_deps_global): Allocate reg_pending_control_uses).
    (finish_deps_global): Free it.
    (add_dependence_1): Renamed from add_dependence.  Handle
    REG_DEP_CONTROL.
    * rtl.h (record_hard_reg_uses, find_all_hard_reg_sets): Declare.
    (copy_INSN_LIST, concat_INSN_LIST): Declare.
    * sched-int.h (struct deps_reg): Add control_uses.
    (struct deps_desc): Add pending_jump_insns.
    (struct _haifa_deps_insn_data): Add cond_deps.
    (struct _haifa_insn_data): Add must_recompute_spec and predicated_pat.
    (INSN_COND_DEPS, PREDICATED_PAT): New macros.
    (BITS_PER_DEP_WEAK): Adjust for two extra bits in the word.
    (DEP_CONTROL): New macro.
    (DEP_TYPES): Include it.
    (HARD_DEP): Adjust definition.
    (DEP_CANCELLED): New macro.
    (enum SCHED_FLAGS): Add DO_PREDICATION.
    (sched_get_reverse_condition_uncached, real_insn_for_shadow): Declare.
    * sched-rgn.c (concat_INSN_LIST): Remove function.
    (deps_join): Handle pending_jump_insns.
    (free_pending_lists): Likewise.
    * config/c6x/c6x.c (c6x_set_sched_flags): Set DO_PREDICATION for final
    schedule.

From-SVN: r180302
This commit is contained in:
parent b9af306b97
commit e2724e63c6
@@ -1,3 +1,85 @@
+2011-10-21  Bernd Schmidt  <bernds@codesourcery.com>
+
+	* reg-notes.def (DEP_CONTROL): New.
+	* sched-ebb.c (add_deps_for_risky_insns): Add a REG_DEP_CONTROL when
+	not doing speculation.
+	* rtlanal.c (record_hard_reg_sets, find_all_hard_reg_sets,
+	record_hard_reg_uses_1, record_hard_reg_uses): New functions.
+	* function.c (record_hard_reg_sets, record_hard_reg_uses,
+	record_hard_reg_uses_1): Remove; move to rtlanal.c.
+	* lists.c (copy_INSN_LIST, concat_INSN_LIST): New functions.
+	* haifa-sched.c: Swap includes of "rtl.h" and "hard-reg-set.h".
+	(MUST_RECOMPUTE_SPEC_P): New macro.
+	(real_insn_for_shadow): New function.
+	(cond_clobbered_p, recompute_todo_spec, check_clobbered_conditions,
+	toggle_cancelled_flags): New static functions.
+	(schedule_insn): Relax an assert to only check for empty hard back
+	dependencies.  Skip cancelled dependencies.  Call
+	check_clobbered_conditions.
+	(copy_insn_list): Remove function, renamed moved to lists.c.
+	(save_backtrack_point): Use new spelling copy_INSN_LIST.
+	(unschedule_insns_until): Ensure TODO_SPEC is reset properly.
+	(restore_last_backtrack_point): Likewise.  Call toggle_cancelled_flags.
+	(estimate_insn_tick): Ignore cancelled dependencies.
+	(haifa_speculate_insn): Move declaration.
+	(try_ready): Move code into recompute_todo_spec and call it.  Tweak
+	some asserts.  Ensure predicated patterns are restored if necessary.
+	Dump DEP_CONTROL flag.
+	(haifa_change_pattern): Merge with sched_change_pattern.
+	(sched_change_pattern): Remove function.
+	* sched-deps.c (NON_FLUSH_JUMP_KIND, NON_FLUSH_JUMP): Remove.  All
+	uses changed to simply not test NON_FLUSH_JUMP_P.
+	(ds_to_dk, dk_to_ds, dump_dep, ds_to_dt, dump_ds, check_dep): Handle
+	REG_DEP_CONTROL.
+	(dep_spec_p): If DO_PREDICATION, REG_DEP_CONTROL is speculative.
+	(reg_pending_control_uses, control_dependency_cache): New static
+	variables.
+	(sched_get_reverse_condition_uncached): New function.
+	(sd_find_dep_between): Remove pointless assert.  Look in
+	control_dependency_cache.
+	(ask_dependency_caches, set_dependency_caches, sd_delete_dep,
+	extend_dependency_caches, sched_deps_finish): Handle REG_DEP_CONTROL
+	and control_dependency_cache.
+	(sd_unresolve_dep): Use dep_spec_p.
+	(add_dependence): Now a wrapper around add_dependence_1, handling
+	REG_DEP_CONTROL specially.
+	(flush_pending_lists): Clear pending_jump_insns.
+	(sched_analyze_1): Handle pending_jump_insns like a memory flush.
+	(sched_analyze_2): Unconditionally add to pending memory flushes,
+	keep previous behaviour but apply it to pending_jump_insns instead.
+	(sched_analyze_insn): Defer adding jump reg dependencies using
+	reg_pending_control_uses; add them to the control_uses list.  Handle
+	pending_jump_insns and control_uses when adding dependence lists.
+	(deps_analyze_insn): Update INSN_COND_DEPS.
+	(deps_analyze_insn): Add jumps to pending_jump_insns rather than
+	last_pending_memory_flush.
+	(init_deps): Initialize pending_jump_insns.
+	(free_deps): Free control_uses.
+	(remove_from_deps): Remove from pending_jump_insns.
+	(init_deps_global): Allocate reg_pending_control_uses).
+	(finish_deps_global): Free it.
+	(add_dependence_1): Renamed from add_dependence.  Handle
+	REG_DEP_CONTROL.
+	* rtl.h (record_hard_reg_uses, find_all_hard_reg_sets): Declare.
+	(copy_INSN_LIST, concat_INSN_LIST): Declare.
+	* sched-int.h (struct deps_reg): Add control_uses.
+	(struct deps_desc): Add pending_jump_insns.
+	(struct _haifa_deps_insn_data): Add cond_deps.
+	(struct _haifa_insn_data): Add must_recompute_spec and predicated_pat.
+	(INSN_COND_DEPS, PREDICATED_PAT): New macros.
+	(BITS_PER_DEP_WEAK): Adjust for two extra bits in the word.
+	(DEP_CONTROL): New macro.
+	(DEP_TYPES): Include it.
+	(HARD_DEP): Adjust definition.
+	(DEP_CANCELLED): New macro.
+	(enum SCHED_FLAGS): Add DO_PREDICATION.
+	(sched_get_reverse_condition_uncached, real_insn_for_shadow): Declare.
+	* sched-rgn.c (concat_INSN_LIST): Remove function.
+	(deps_join): Handle pending_jump_insns.
+	(free_pending_lists): Likewise.
+	* config/c6x/c6x.c (c6x_set_sched_flags): Set DO_PREDICATION for final
+	schedule.
+
 2011-10-21  Georg-Johann Lay  <avr@gjlay.de>
 
 	PR target/50820
@@ -3927,7 +3927,7 @@ c6x_set_sched_flags (spec_info_t spec_info)
   if (*flags & SCHED_EBB)
     {
-      *flags |= DO_BACKTRACKING;
+      *flags |= DO_BACKTRACKING | DO_PREDICATION;
     }
 
   spec_info->mask = 0;
@@ -2892,17 +2892,6 @@ assign_parm_setup_block (struct assign_parm_data_all *all,
       SET_DECL_RTL (parm, stack_parm);
     }
 
-/* A subroutine of assign_parm_setup_reg, called through note_stores.
-   This collects sets and clobbers of hard registers in a HARD_REG_SET,
-   which is pointed to by DATA.  */
-static void
-record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
-{
-  HARD_REG_SET *pset = (HARD_REG_SET *)data;
-  if (REG_P (x) && HARD_REGISTER_P (x))
-    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
-}
-
 /* A subroutine of assign_parms.  Allocate a pseudo to hold the current
    parameter.  Get it there.  Perform all ABI specified conversions.  */
@@ -5289,25 +5278,6 @@ prologue_epilogue_contains (const_rtx insn)
 
 #ifdef HAVE_simple_return
 
-/* A for_each_rtx subroutine of record_hard_reg_sets.  */
-static int
-record_hard_reg_uses_1 (rtx *px, void *data)
-{
-  rtx x = *px;
-  HARD_REG_SET *pused = (HARD_REG_SET *)data;
-
-  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
-    add_to_hard_reg_set (pused, GET_MODE (x), REGNO (x));
-  return 0;
-}
-
-/* Like record_hard_reg_sets, but called through note_uses.  */
-static void
-record_hard_reg_uses (rtx *px, void *data)
-{
-  for_each_rtx (px, record_hard_reg_uses_1, data);
-}
-
 /* Return true if INSN requires the stack frame to be set up.
    PROLOGUE_USED contains the hard registers used in the function
    prologue.  SET_UP_BY_PROLOGUE is the set of registers we expect the
@@ -129,9 +129,9 @@ along with GCC; see the file COPYING3.  If not see
 #include "coretypes.h"
 #include "tm.h"
 #include "diagnostic-core.h"
+#include "hard-reg-set.h"
 #include "rtl.h"
 #include "tm_p.h"
-#include "hard-reg-set.h"
 #include "regs.h"
 #include "function.h"
 #include "flags.h"
@@ -213,6 +213,7 @@ struct common_sched_info_def *common_sched_info;
 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
 #define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
 #define SHADOW_P(INSN) (HID (INSN)->shadow_p)
+#define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
 
 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
    then it should be recalculated from scratch.  */
@@ -706,6 +707,24 @@ record_delay_slot_pair (rtx i1, rtx i2, int cycles, int stages)
   *slot = p;
 }
 
+/* Examine the delay pair hashtable to see if INSN is a shadow for another,
+   and return the other insn if so.  Return NULL otherwise.  */
+rtx
+real_insn_for_shadow (rtx insn)
+{
+  struct delay_pair *pair;
+
+  if (delay_htab == NULL)
+    return NULL_RTX;
+
+  pair
+    = (struct delay_pair *)htab_find_with_hash (delay_htab_i2, insn,
+                                                htab_hash_pointer (insn));
+  if (!pair || pair->stages > 0)
+    return NULL_RTX;
+  return pair->i1;
+}
+
 /* For a pair P of insns, return the fixed distance in cycles from the first
    insn after which the second must be scheduled.  */
 static int
@@ -820,6 +839,7 @@ static void change_queue_index (rtx, int);
 
 static void extend_h_i_d (void);
 static void init_h_i_d (rtx);
+static int haifa_speculate_insn (rtx, ds_t, rtx *);
 static void generate_recovery_code (rtx);
 static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
 static void begin_speculative_block (rtx);
@@ -827,7 +847,7 @@ static void add_to_speculative_block (rtx);
 static void init_before_recovery (basic_block *);
 static void create_check_block_twin (rtx, bool);
 static void fix_recovery_deps (basic_block);
-static void haifa_change_pattern (rtx, rtx);
+static bool haifa_change_pattern (rtx, rtx);
 static void dump_new_block_header (int, basic_block, rtx, rtx);
 static void restore_bb_notes (basic_block);
 static void fix_jump_move (rtx);
@@ -1056,7 +1076,178 @@ print_curr_reg_pressure (void)
     }
   fprintf (sched_dump, "\n");
 }
 
+/* Determine if INSN has a condition that is clobbered if a register
+   in SET_REGS is modified.  */
+static bool
+cond_clobbered_p (rtx insn, HARD_REG_SET set_regs)
+{
+  rtx pat = PATTERN (insn);
+  gcc_assert (GET_CODE (pat) == COND_EXEC);
+  if (TEST_HARD_REG_BIT (set_regs, REGNO (XEXP (COND_EXEC_TEST (pat), 0))))
+    {
+      sd_iterator_def sd_it;
+      dep_t dep;
+      haifa_change_pattern (insn, ORIG_PAT (insn));
+      FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
+        DEP_STATUS (dep) &= ~DEP_CANCELLED;
+      TODO_SPEC (insn) = HARD_DEP;
+      if (sched_verbose >= 2)
+        fprintf (sched_dump,
+                 ";;\t\tdequeue insn %s because of clobbered condition\n",
+                 (*current_sched_info->print_insn) (insn, 0));
+      return true;
+    }
+
+  return false;
+}
+
+/* Look at the remaining dependencies for insn NEXT, and compute and return
+   the TODO_SPEC value we should use for it.  This is called after one of
+   NEXT's dependencies has been resolved.  */
+
+static ds_t
+recompute_todo_spec (rtx next)
+{
+  ds_t new_ds;
+  sd_iterator_def sd_it;
+  dep_t dep, control_dep = NULL;
+  int n_spec = 0;
+  int n_control = 0;
+  bool first_p = true;
+
+  if (sd_lists_empty_p (next, SD_LIST_BACK))
+    /* NEXT has all its dependencies resolved.  */
+    return 0;
+
+  if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
+    return HARD_DEP;
+
+  /* Now we've got NEXT with speculative deps only.
+     1. Look at the deps to see what we have to do.
+     2. Check if we can do 'todo'.  */
+  new_ds = 0;
+
+  FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
+    {
+      ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
+
+      if (DEBUG_INSN_P (DEP_PRO (dep)) && !DEBUG_INSN_P (next))
+        continue;
+
+      if (ds)
+        {
+          n_spec++;
+          if (first_p)
+            {
+              first_p = false;
+
+              new_ds = ds;
+            }
+          else
+            new_ds = ds_merge (new_ds, ds);
+        }
+      if (DEP_TYPE (dep) == REG_DEP_CONTROL)
+        {
+          n_control++;
+          control_dep = dep;
+          DEP_STATUS (dep) &= ~DEP_CANCELLED;
+        }
+    }
+
+  if (n_control == 1 && n_spec == 0)
+    {
+      rtx pro, other, new_pat;
+      rtx cond = NULL_RTX;
+      bool success;
+      rtx prev = NULL_RTX;
+      int i;
+      unsigned regno;
+
+      if ((current_sched_info->flags & DO_PREDICATION) == 0
+          || (ORIG_PAT (next) != NULL_RTX
+              && PREDICATED_PAT (next) == NULL_RTX))
+        return HARD_DEP;
+
+      pro = DEP_PRO (control_dep);
+      other = real_insn_for_shadow (pro);
+      if (other != NULL_RTX)
+        pro = other;
+
+      cond = sched_get_reverse_condition_uncached (pro);
+      regno = REGNO (XEXP (cond, 0));
+
+      /* Find the last scheduled insn that modifies the condition register.
+         If we have a true dependency on it, it sets it to the correct value,
+         otherwise it must be a later insn scheduled in-between that clobbers
+         the condition.  */
+      FOR_EACH_VEC_ELT_REVERSE (rtx, scheduled_insns, i, prev)
+        {
+          sd_iterator_def sd_it;
+          dep_t dep;
+          HARD_REG_SET t;
+          bool found;
+
+          find_all_hard_reg_sets (prev, &t);
+          if (!TEST_HARD_REG_BIT (t, regno))
+            continue;
+
+          found = false;
+          FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
+            {
+              if (DEP_PRO (dep) == prev && DEP_TYPE (dep) == REG_DEP_TRUE)
+                {
+                  found = true;
+                  break;
+                }
+            }
+          if (!found)
+            return HARD_DEP;
+          break;
+        }
+      if (ORIG_PAT (next) == NULL_RTX)
+        {
+          ORIG_PAT (next) = PATTERN (next);
+
+          new_pat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (next));
+          success = haifa_change_pattern (next, new_pat);
+          if (!success)
+            return HARD_DEP;
+          PREDICATED_PAT (next) = new_pat;
+        }
+      else if (PATTERN (next) != PREDICATED_PAT (next))
+        {
+          bool success = haifa_change_pattern (next,
+                                               PREDICATED_PAT (next));
+          gcc_assert (success);
+        }
+      DEP_STATUS (control_dep) |= DEP_CANCELLED;
+      return DEP_CONTROL;
+    }
+
+  if (PREDICATED_PAT (next) != NULL_RTX)
+    {
+      int tick = INSN_TICK (next);
+      bool success = haifa_change_pattern (next,
+                                           ORIG_PAT (next));
+      INSN_TICK (next) = tick;
+      gcc_assert (success);
+    }
+
+  /* We can't handle the case where there are both speculative and control
+     dependencies, so we return HARD_DEP in such a case.  Also fail if
+     we have speculative dependencies with not enough points, or more than
+     one control dependency.  */
+  if ((n_spec > 0 && n_control > 0)
+      || (n_spec > 0
+          /* Too few points?  */
+          && ds_weak (new_ds) < spec_info->data_weakness_cutoff)
+      || (n_control > 1))
+    return HARD_DEP;
+
+  return new_ds;
+}
+
 /* Pointer to the last instruction scheduled.  */
 static rtx last_scheduled_insn;
@@ -1963,6 +2154,51 @@ sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
   setup_insn_max_reg_pressure (after, false);
 }
 
+/* If doing predication while scheduling, verify whether INSN, which
+   has just been scheduled, clobbers the conditions of any
+   instructions that must be predicated in order to break their
+   dependencies.  If so, remove them from the queues so that they will
+   only be scheduled once their control dependency is resolved.  */
+
+static void
+check_clobbered_conditions (rtx insn)
+{
+  HARD_REG_SET t;
+  int i;
+
+  if ((current_sched_info->flags & DO_PREDICATION) == 0)
+    return;
+
+  find_all_hard_reg_sets (insn, &t);
+
+ restart:
+  for (i = 0; i < ready.n_ready; i++)
+    {
+      rtx x = ready_element (&ready, i);
+      if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
+        {
+          ready_remove_insn (x);
+          goto restart;
+        }
+    }
+  for (i = 0; i <= max_insn_queue_index; i++)
+    {
+      rtx link;
+      int q = NEXT_Q_AFTER (q_ptr, i);
+
+    restart_queue:
+      for (link = insn_queue[q]; link; link = XEXP (link, 1))
+        {
+          rtx x = XEXP (link, 0);
+          if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
+            {
+              queue_remove (x);
+              goto restart_queue;
+            }
+        }
+    }
+}
+
 /* A structure that holds local state for the loop in schedule_block.  */
 struct sched_block_state
 {
@@ -2023,7 +2259,7 @@ schedule_insn (rtx insn)
 
   /* Scheduling instruction should have all its dependencies resolved and
      should have been removed from the ready list.  */
-  gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK));
+  gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
 
   /* Reset debug insns invalidated by moving this insn.  */
   if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
@@ -2033,6 +2269,12 @@ schedule_insn (rtx insn)
       rtx dbg = DEP_PRO (dep);
       struct reg_use_data *use, *next;
 
+      if (DEP_STATUS (dep) & DEP_CANCELLED)
+        {
+          sd_iterator_next (&sd_it);
+          continue;
+        }
+
       gcc_assert (DEBUG_INSN_P (dbg));
 
       if (sched_verbose >= 6)
@@ -2086,17 +2328,36 @@ schedule_insn (rtx insn)
      INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
   INSN_TICK (insn) = clock_var;
 
+  check_clobbered_conditions (insn);
+
   /* Update dependent instructions.  */
   for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
        sd_iterator_cond (&sd_it, &dep);)
     {
       rtx next = DEP_CON (dep);
+      bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
 
       /* Resolve the dependence between INSN and NEXT.
         sd_resolve_dep () moves current dep to another list thus
         advancing the iterator.  */
       sd_resolve_dep (sd_it);
 
+      if (cancelled)
+        {
+          if (QUEUE_INDEX (next) != QUEUE_SCHEDULED)
+            {
+              int tick = INSN_TICK (next);
+              gcc_assert (ORIG_PAT (next) != NULL_RTX);
+              haifa_change_pattern (next, ORIG_PAT (next));
+              INSN_TICK (next) = tick;
+              if (sd_lists_empty_p (next, SD_LIST_BACK))
+                TODO_SPEC (next) = 0;
+              else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
+                TODO_SPEC (next) = HARD_DEP;
+            }
+          continue;
+        }
+
       /* Don't bother trying to mark next as ready if insn is a debug
         insn.  If insn is the last hard dependency, it will have
         already been discounted.  */
@@ -2270,24 +2531,6 @@ mark_backtrack_feeds (rtx insn, int set_p)
     }
 }
 
-/* Make a copy of the INSN_LIST list LINK and return it.  */
-static rtx
-copy_insn_list (rtx link)
-{
-  rtx new_queue;
-  rtx *pqueue = &new_queue;
-
-  for (; link; link = XEXP (link, 1))
-    {
-      rtx x = XEXP (link, 0);
-      rtx newlink = alloc_INSN_LIST (x, NULL);
-      *pqueue = newlink;
-      pqueue = &XEXP (newlink, 1);
-    }
-  *pqueue = NULL_RTX;
-  return new_queue;
-}
-
 /* Save the current scheduler state so that we can backtrack to it
    later if necessary.  PAIR gives the insns that make it necessary to
    save this point.  SCHED_BLOCK is the local state of schedule_block
@@ -2314,7 +2557,7 @@ save_backtrack_point (struct delay_pair *pair,
   for (i = 0; i <= max_insn_queue_index; i++)
     {
       int q = NEXT_Q_AFTER (q_ptr, i);
-      save->insn_queue[i] = copy_insn_list (insn_queue[q]);
+      save->insn_queue[i] = copy_INSN_LIST (insn_queue[q]);
     }
 
   save->clock_var = clock_var;
@@ -2351,6 +2594,49 @@ save_backtrack_point (struct delay_pair *pair,
     }
 }
 
+/* Walk the ready list and all queues.  If any insns have unresolved backwards
+   dependencies, these must be cancelled deps, broken by predication.  Set or
+   clear (depending on SET) the DEP_CANCELLED bit in DEP_STATUS.  */
+
+static void
+toggle_cancelled_flags (bool set)
+{
+  int i;
+  sd_iterator_def sd_it;
+  dep_t dep;
+
+  if (ready.n_ready > 0)
+    {
+      rtx *first = ready_lastpos (&ready);
+      for (i = 0; i < ready.n_ready; i++)
+        FOR_EACH_DEP (first[i], SD_LIST_BACK, sd_it, dep)
+          if (!DEBUG_INSN_P (DEP_PRO (dep)))
+            {
+              if (set)
+                DEP_STATUS (dep) |= DEP_CANCELLED;
+              else
+                DEP_STATUS (dep) &= ~DEP_CANCELLED;
+            }
+    }
+  for (i = 0; i <= max_insn_queue_index; i++)
+    {
+      int q = NEXT_Q_AFTER (q_ptr, i);
+      rtx link;
+      for (link = insn_queue[q]; link; link = XEXP (link, 1))
+        {
+          rtx insn = XEXP (link, 0);
+          FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
+            if (!DEBUG_INSN_P (DEP_PRO (dep)))
+              {
+                if (set)
+                  DEP_STATUS (dep) |= DEP_CANCELLED;
+                else
+                  DEP_STATUS (dep) &= ~DEP_CANCELLED;
+              }
+        }
+    }
+}
+
 /* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
    Restore their dependencies to an unresolved state, and mark them as
    queued nowhere.  */
@@ -2358,6 +2644,12 @@ save_backtrack_point (struct delay_pair *pair,
 static void
 unschedule_insns_until (rtx insn)
 {
+  VEC (rtx, heap) *recompute_vec;
+
+  recompute_vec = VEC_alloc (rtx, heap, 0);
+
+  /* Make two passes over the insns to be unscheduled.  First, we clear out
+     dependencies and other trivial bookkeeping.  */
   for (;;)
     {
       rtx last;
@@ -2379,14 +2671,40 @@ unschedule_insns_until (rtx insn)
           sd_iterator_cond (&sd_it, &dep);)
        {
          rtx con = DEP_CON (dep);
-         TODO_SPEC (con) = HARD_DEP;
-         INSN_TICK (con) = INVALID_TICK;
          sd_unresolve_dep (sd_it);
+         if (!MUST_RECOMPUTE_SPEC_P (con))
+           {
+             MUST_RECOMPUTE_SPEC_P (con) = 1;
+             VEC_safe_push (rtx, heap, recompute_vec, con);
+           }
        }
 
       if (last == insn)
        break;
     }
 
+  /* A second pass, to update ready and speculation status for insns
+     depending on the unscheduled ones.  The first pass must have
+     popped the scheduled_insns vector up to the point where we
+     restart scheduling, as recompute_todo_spec requires it to be
+     up-to-date.  */
+  while (!VEC_empty (rtx, recompute_vec))
+    {
+      rtx con;
+
+      con = VEC_pop (rtx, recompute_vec);
+      MUST_RECOMPUTE_SPEC_P (con) = 0;
+      if (!sd_lists_empty_p (con, SD_LIST_HARD_BACK))
+        {
+          TODO_SPEC (con) = HARD_DEP;
+          INSN_TICK (con) = INVALID_TICK;
+          if (PREDICATED_PAT (con) != NULL_RTX)
+            haifa_change_pattern (con, ORIG_PAT (con));
+        }
+      else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
+        TODO_SPEC (con) = recompute_todo_spec (con);
+    }
+  VEC_free (rtx, heap, recompute_vec);
 }
 
 /* Restore scheduler state from the topmost entry on the backtracking queue.
@@ -2396,7 +2714,6 @@ unschedule_insns_until (rtx insn)
 
 static void
 restore_last_backtrack_point (struct sched_block_state *psched_block)
-
 {
   rtx link;
   int i;
@@ -2420,8 +2737,9 @@ restore_last_backtrack_point (struct sched_block_state *psched_block)
       rtx *first = ready_lastpos (&ready);
       for (i = 0; i < ready.n_ready; i++)
        {
-         QUEUE_INDEX (first[i]) = QUEUE_NOWHERE;
-         INSN_TICK (first[i]) = INVALID_TICK;
+         rtx insn = first[i];
+         QUEUE_INDEX (insn) = QUEUE_NOWHERE;
+         INSN_TICK (insn) = INVALID_TICK;
        }
     }
   for (i = 0; i <= max_insn_queue_index; i++)
@@ -2445,8 +2763,10 @@ restore_last_backtrack_point (struct sched_block_state *psched_block)
       rtx *first = ready_lastpos (&ready);
       for (i = 0; i < ready.n_ready; i++)
        {
-         QUEUE_INDEX (first[i]) = QUEUE_READY;
-         INSN_TICK (first[i]) = save->clock_var;
+         rtx insn = first[i];
+         QUEUE_INDEX (insn) = QUEUE_READY;
+         TODO_SPEC (insn) = recompute_todo_spec (insn);
+         INSN_TICK (insn) = save->clock_var;
        }
     }
@@ -2462,11 +2782,14 @@ restore_last_backtrack_point (struct sched_block_state *psched_block)
        {
          rtx x = XEXP (link, 0);
          QUEUE_INDEX (x) = i;
+         TODO_SPEC (x) = recompute_todo_spec (x);
          INSN_TICK (x) = save->clock_var + i;
        }
     }
   free (save->insn_queue);
 
+  toggle_cancelled_flags (true);
+
   clock_var = save->clock_var;
   last_clock_var = save->last_clock_var;
   cycle_issued_insns = save->cycle_issued_insns;
@@ -2547,6 +2870,9 @@ estimate_insn_tick (bitmap processed, rtx insn, int budget)
       rtx pro = DEP_PRO (dep);
       int t;
 
+      if (DEP_STATUS (dep) & DEP_CANCELLED)
+        continue;
+
       if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
        gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
       else
@@ -4217,6 +4543,7 @@ schedule_block (basic_block *target_bb)
          gcc_assert (failed);
 
          failed_insn = failed->delay_pair->i1;
+         toggle_cancelled_flags (false);
          unschedule_insns_until (failed_insn);
          while (failed != backtrack_queue)
            free_topmost_backtrack_point (true);
@@ -4732,8 +5059,6 @@ fix_inter_tick (rtx head, rtx tail)
   bitmap_clear (&processed);
 }
 
-static int haifa_speculate_insn (rtx, ds_t, rtx *);
-
 /* Check if NEXT is ready to be added to the ready or queue list.
    If "yes", add it to the proper list.
    Returns:
@@ -4747,57 +5072,15 @@ try_ready (rtx next)
 
   old_ts = TODO_SPEC (next);
 
-  gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
+  gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP | DEP_CONTROL))
              && ((old_ts & HARD_DEP)
-                 || (old_ts & SPECULATIVE)));
+                 || (old_ts & SPECULATIVE)
+                 || (old_ts & DEP_CONTROL)));
 
-  if (sd_lists_empty_p (next, SD_LIST_BACK))
-    /* NEXT has all its dependencies resolved.  */
-    new_ts = 0;
-  else
-    {
-      /* One of the NEXT's dependencies has been resolved.
-        Recalculate NEXT's status.  */
-
-      if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
-       new_ts = HARD_DEP;
-      else
-       /* Now we've got NEXT with speculative deps only.
-          1. Look at the deps to see what we have to do.
-          2. Check if we can do 'todo'.  */
-       {
-         sd_iterator_def sd_it;
-         dep_t dep;
-         bool first_p = true;
-
-         new_ts = 0;
-
-         FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
-           {
-             ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
-
-             if (DEBUG_INSN_P (DEP_PRO (dep))
-                 && !DEBUG_INSN_P (next))
-               continue;
-
-             if (first_p)
-               {
-                 first_p = false;
-
-                 new_ts = ds;
-               }
-             else
-               new_ts = ds_merge (new_ts, ds);
-           }
-
-         if (ds_weak (new_ts) < spec_info->data_weakness_cutoff)
-           /* Too few points.  */
-           new_ts = HARD_DEP;
-       }
-    }
+  new_ts = recompute_todo_spec (next);
 
   if (new_ts & HARD_DEP)
-    gcc_assert (new_ts == HARD_DEP && new_ts == old_ts
+    gcc_assert (new_ts == old_ts
               && QUEUE_INDEX (next) == QUEUE_NOWHERE);
   else if (current_sched_info->new_ready)
     new_ts = current_sched_info->new_ready (next, new_ts);
@@ -4820,7 +5103,7 @@ try_ready (rtx next)
       int res;
       rtx new_pat;
 
-      gcc_assert (!(new_ts & ~SPECULATIVE));
+      gcc_assert ((new_ts & SPECULATIVE) && !(new_ts & ~SPECULATIVE));
 
       res = haifa_speculate_insn (next, new_ts, &new_pat);
@@ -4846,7 +5129,8 @@ try_ready (rtx next)
             save it.  */
          ORIG_PAT (next) = PATTERN (next);
 
-         haifa_change_pattern (next, new_pat);
+         res = haifa_change_pattern (next, new_pat);
+         gcc_assert (res);
          break;
 
        default:
@@ -4871,16 +5155,19 @@ try_ready (rtx next)
      /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
 
      change_queue_index (next, QUEUE_NOWHERE);
 
      return -1;
    }
  else if (!(new_ts & BEGIN_SPEC)
-          && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
+          && ORIG_PAT (next) && PREDICATED_PAT (next) == NULL_RTX
+          && !IS_SPECULATION_CHECK_P (next))
    /* We should change pattern of every previously speculative
       instruction - and we determine if NEXT was speculative by using
       ORIG_PAT field.  Except one case - speculation checks have ORIG_PAT
       pat too, so skip them.  */
    {
-      haifa_change_pattern (next, ORIG_PAT (next));
+      bool success = haifa_change_pattern (next, ORIG_PAT (next));
+      gcc_assert (success);
      ORIG_PAT (next) = 0;
    }
@@ -4898,7 +5185,8 @@ try_ready (rtx next)
      if (new_ts & BE_IN_CONTROL)
        fprintf (spec_info->dump, "; in-control-spec;");
    }
-
+  if (TODO_SPEC (next) & DEP_CONTROL)
+    fprintf (sched_dump, " predicated");
  fprintf (sched_dump, "\n");
 }
 
@@ -5874,38 +6162,33 @@ fix_recovery_deps (basic_block rec)
   add_jump_dependencies (insn, jump);
 }
 
-/* Change pattern of INSN to NEW_PAT.  */
-void
-sched_change_pattern (rtx insn, rtx new_pat)
+/* Change pattern of INSN to NEW_PAT.  Invalidate cached haifa
+   instruction data.  */
+static bool
+haifa_change_pattern (rtx insn, rtx new_pat)
 {
   sd_iterator_def sd_it;
   dep_t dep;
   int t;
 
   t = validate_change (insn, &PATTERN (insn), new_pat, 0);
-  gcc_assert (t);
+  if (!t)
+    return false;
   dfa_clear_single_insn_cache (insn);
 
-  for (sd_it = sd_iterator_start (insn, (SD_LIST_FORW | SD_LIST_BACK
-                                        | SD_LIST_RES_BACK));
-       sd_iterator_cond (&sd_it, &dep);)
+  sd_it = sd_iterator_start (insn,
+                            SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK);
+  while (sd_iterator_cond (&sd_it, &dep))
     {
       DEP_COST (dep) = UNKNOWN_DEP_COST;
       sd_iterator_next (&sd_it);
     }
-}
-
-/* Change pattern of INSN to NEW_PAT.  Invalidate cached haifa
-   instruction data.  */
-static void
-haifa_change_pattern (rtx insn, rtx new_pat)
-{
-  sched_change_pattern (insn, new_pat);
-
   /* Invalidate INSN_COST, so it'll be recalculated.  */
   INSN_COST (insn) = -1;
   /* Invalidate INSN_TICK, so it'll be recalculated.  */
   INSN_TICK (insn) = INVALID_TICK;
+  return true;
 }
 
 /* -1 - can't speculate,
gcc/lists.c
@@ -164,6 +164,37 @@ free_INSN_LIST_list (rtx *listp)
   free_list (listp, &unused_insn_list);
 }
 
+/* Make a copy of the INSN_LIST list LINK and return it.  */
+rtx
+copy_INSN_LIST (rtx link)
+{
+  rtx new_queue;
+  rtx *pqueue = &new_queue;
+
+  for (; link; link = XEXP (link, 1))
+    {
+      rtx x = XEXP (link, 0);
+      rtx newlink = alloc_INSN_LIST (x, NULL);
+      *pqueue = newlink;
+      pqueue = &XEXP (newlink, 1);
+    }
+  *pqueue = NULL_RTX;
+  return new_queue;
+}
+
+/* Duplicate the INSN_LIST elements of COPY and prepend them to OLD.  */
+rtx
+concat_INSN_LIST (rtx copy, rtx old)
+{
+  rtx new_rtx = old;
+  for (; copy ; copy = XEXP (copy, 1))
+    {
+      new_rtx = alloc_INSN_LIST (XEXP (copy, 0), new_rtx);
+      PUT_REG_NOTE_KIND (new_rtx, REG_NOTE_KIND (copy));
+    }
+  return new_rtx;
+}
+
 /* This function will free up an individual EXPR_LIST node.  */
 void
 free_EXPR_LIST_node (rtx ptr)
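A minimal usage sketch (not part of the commit), showing how the two new
helpers from gcc/lists.c above compose.  It assumes a GCC build context;
the function name and its arguments are hypothetical.

  /* copy_INSN_LIST duplicates an INSN_LIST chain, so a snapshot survives
     later frees of the original (save_backtrack_point uses it this way);
     concat_INSN_LIST prepends copies of one chain onto another, keeping
     each copied note's REG_NOTE_KIND.  */
  static rtx
  snapshot_and_merge (rtx queue, rtx extra)
  {
    rtx saved = copy_INSN_LIST (queue);     /* deep-copied chain */
    return concat_INSN_LIST (extra, saved); /* copies of EXTRA in front */
  }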
@@ -92,6 +92,7 @@ REG_NOTE (LABEL_OPERAND)
    respectively.  */
 REG_NOTE (DEP_OUTPUT)
 REG_NOTE (DEP_ANTI)
+REG_NOTE (DEP_CONTROL)
 
 /* REG_BR_PROB is attached to JUMP_INSNs and CALL_INSNs.  It has an
    integer value.  For jumps, it is the probability that this is a

gcc/rtl.h
@@ -1941,6 +1941,11 @@ extern rtx find_last_value (rtx, rtx *, rtx, int);
 extern int refers_to_regno_p (unsigned int, unsigned int, const_rtx, rtx *);
 extern int reg_overlap_mentioned_p (const_rtx, const_rtx);
 extern const_rtx set_of (const_rtx, const_rtx);
+extern void record_hard_reg_sets (rtx, const_rtx, void *);
+extern void record_hard_reg_uses (rtx *, void *);
+#ifdef HARD_CONST
+extern void find_all_hard_reg_sets (const_rtx, HARD_REG_SET *);
+#endif
 extern void note_stores (const_rtx, void (*) (rtx, const_rtx, void *), void *);
 extern void note_uses (rtx *, void (*) (rtx *, void *), void *);
 extern int dead_or_set_p (const_rtx, const_rtx);
@@ -2036,12 +2041,14 @@ extern void subreg_get_info (unsigned int, enum machine_mode,
 
 /* lists.c */
 
-extern void free_EXPR_LIST_list (rtx *);
-extern void free_INSN_LIST_list (rtx *);
-extern void free_EXPR_LIST_node (rtx);
-extern void free_INSN_LIST_node (rtx);
-extern rtx alloc_INSN_LIST (rtx, rtx);
-extern rtx alloc_EXPR_LIST (int, rtx, rtx);
+extern void free_EXPR_LIST_list (rtx *);
+extern void free_INSN_LIST_list (rtx *);
+extern void free_EXPR_LIST_node (rtx);
+extern void free_INSN_LIST_node (rtx);
+extern rtx alloc_INSN_LIST (rtx, rtx);
+extern rtx copy_INSN_LIST (rtx);
+extern rtx concat_INSN_LIST (rtx, rtx);
+extern rtx alloc_EXPR_LIST (int, rtx, rtx);
 extern void remove_free_INSN_LIST_elem (rtx, rtx *);
 extern rtx remove_list_elem (rtx, rtx *);
 extern rtx remove_free_INSN_LIST_node (rtx *);
@@ -999,6 +999,56 @@ set_of (const_rtx pat, const_rtx insn)
   note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
   return data.found;
 }
 
+/* This function, called through note_stores, collects sets and
+   clobbers of hard registers in a HARD_REG_SET, which is pointed to
+   by DATA.  */
+void
+record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
+{
+  HARD_REG_SET *pset = (HARD_REG_SET *)data;
+  if (REG_P (x) && HARD_REGISTER_P (x))
+    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
+}
+
+/* Examine INSN, and compute the set of hard registers written by it.
+   Store it in *PSET.  Should only be called after reload.  */
+void
+find_all_hard_reg_sets (const_rtx insn, HARD_REG_SET *pset)
+{
+  rtx link;
+
+  CLEAR_HARD_REG_SET (*pset);
+  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
+  if (CALL_P (insn))
+    IOR_HARD_REG_SET (*pset, call_used_reg_set);
+  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+    if (REG_NOTE_KIND (link) == REG_INC)
+      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
+}
+
+/* A for_each_rtx subroutine of record_hard_reg_uses.  */
+static int
+record_hard_reg_uses_1 (rtx *px, void *data)
+{
+  rtx x = *px;
+  HARD_REG_SET *pused = (HARD_REG_SET *)data;
+
+  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
+    {
+      int nregs = hard_regno_nregs[REGNO (x)][GET_MODE (x)];
+      while (nregs-- > 0)
+        SET_HARD_REG_BIT (*pused, REGNO (x) + nregs);
+    }
+  return 0;
+}
+
+/* Like record_hard_reg_sets, but called through note_uses.  */
+void
+record_hard_reg_uses (rtx *px, void *data)
+{
+  for_each_rtx (px, record_hard_reg_uses_1, data);
+}
+
 /* Given an INSN, return a SET expression if this insn has only a single SET.
    It may also have CLOBBERs, USEs, or SET whose output
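A minimal usage sketch (not part of the commit): combining the rtlanal.c
helpers above to ask whether an insn reads any hard register it also sets,
in the spirit of check_clobbered_conditions and add_dependence.  Assumes a
post-reload GCC build context; the function name is hypothetical, and
hard_reg_set_intersect_p is the predicate from hard-reg-set.h.

  static bool
  insn_reads_what_it_writes_p (rtx insn)
  {
    HARD_REG_SET sets, uses;
    /* All hard registers written, including call clobbers and REG_INC.  */
    find_all_hard_reg_sets (insn, &sets);
    /* All hard registers read, collected through note_uses.  */
    CLEAR_HARD_REG_SET (uses);
    note_uses (&PATTERN (insn), record_hard_reg_uses, &uses);
    return hard_reg_set_intersect_p (sets, uses);
  }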
gcc/sched-deps.c
@@ -52,12 +52,6 @@ along with GCC; see the file COPYING3.  If not see
 #define CHECK (false)
 #endif
 
-/* In deps->last_pending_memory_flush marks JUMP_INSNs that weren't
-   added to the list because of flush_pending_lists, stands just
-   for itself and not for any other pending memory reads/writes.  */
-#define NON_FLUSH_JUMP_KIND REG_DEP_ANTI
-#define NON_FLUSH_JUMP_P(x) (REG_NOTE_KIND (x) == NON_FLUSH_JUMP_KIND)
-
 /* Holds current parameters for the dependency analyzer.  */
 struct sched_deps_info_def *sched_deps_info;
@@ -74,6 +68,9 @@ ds_to_dk (ds_t ds)
   if (ds & DEP_OUTPUT)
     return REG_DEP_OUTPUT;
 
+  if (ds & DEP_CONTROL)
+    return REG_DEP_CONTROL;
+
   gcc_assert (ds & DEP_ANTI);
 
   return REG_DEP_ANTI;
@@ -91,6 +88,9 @@ dk_to_ds (enum reg_note dk)
     case REG_DEP_OUTPUT:
       return DEP_OUTPUT;
 
+    case REG_DEP_CONTROL:
+      return DEP_CONTROL;
+
     default:
       gcc_assert (dk == REG_DEP_ANTI);
       return DEP_ANTI;
@@ -187,6 +187,10 @@ dump_dep (FILE *dump, dep_t dep, int flags)
            t = 'o';
            break;
 
+         case REG_DEP_CONTROL:
+           t = 'c';
+           break;
+
          case REG_DEP_ANTI:
            t = 'a';
            break;
@@ -420,13 +424,22 @@ static bool
 dep_spec_p (dep_t dep)
 {
   if (current_sched_info->flags & DO_SPECULATION)
-    return (DEP_STATUS (dep) & SPECULATIVE) != 0;
+    {
+      if (DEP_STATUS (dep) & SPECULATIVE)
+        return true;
+    }
+  if (current_sched_info->flags & DO_PREDICATION)
+    {
+      if (DEP_TYPE (dep) == REG_DEP_CONTROL)
+        return true;
+    }
   return false;
 }
 
 static regset reg_pending_sets;
 static regset reg_pending_clobbers;
 static regset reg_pending_uses;
+static regset reg_pending_control_uses;
 static enum reg_pending_barrier_mode reg_pending_barrier;
 
 /* Hard registers implicitly clobbered or used (or may be implicitly
@@ -454,10 +467,12 @@ static HARD_REG_SET implicit_reg_pending_uses;
 static bitmap_head *true_dependency_cache = NULL;
 static bitmap_head *output_dependency_cache = NULL;
 static bitmap_head *anti_dependency_cache = NULL;
+static bitmap_head *control_dependency_cache = NULL;
 static bitmap_head *spec_dependency_cache = NULL;
 static int cache_size;
 
 static int deps_may_trap_p (const_rtx);
+static void add_dependence_1 (rtx, rtx, enum reg_note);
 static void add_dependence_list (rtx, rtx, int, enum reg_note);
 static void add_dependence_list_and_free (struct deps_desc *, rtx,
                                          rtx *, int, enum reg_note);
@@ -538,6 +553,27 @@ sched_get_condition_with_rev_uncached (const_rtx insn, bool *rev)
   return 0;
 }
 
+/* Return the condition under which INSN does not execute (i.e.  the
+   not-taken condition for a conditional branch), or NULL if we cannot
+   find such a condition.  The caller should make a copy of the condition
+   before using it.  */
+rtx
+sched_get_reverse_condition_uncached (const_rtx insn)
+{
+  bool rev;
+  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
+  if (cond == NULL_RTX)
+    return cond;
+  if (!rev)
+    {
+      enum rtx_code revcode = reversed_comparison_code (cond, insn);
+      cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
+                            XEXP (cond, 0),
+                            XEXP (cond, 1));
+    }
+  return cond;
+}
+
 /* Caching variant of sched_get_condition_with_rev_uncached.
    We only do actual work the first time we come here for an insn; the
    results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
@@ -861,12 +897,10 @@ sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
       int elem_luid = INSN_LUID (pro);
       int insn_luid = INSN_LUID (con);
 
-      gcc_assert (output_dependency_cache != NULL
-                 && anti_dependency_cache != NULL);
-
       if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
          && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
-         && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
+         && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
+         && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
        return NULL;
     }
@@ -919,7 +953,8 @@ ask_dependency_caches (dep_t dep)
 
   gcc_assert (true_dependency_cache != NULL
              && output_dependency_cache != NULL
-             && anti_dependency_cache != NULL);
+             && anti_dependency_cache != NULL
+             && control_dependency_cache != NULL);
 
   if (!(current_sched_info->flags & USE_DEPS_LIST))
     {
@@ -931,6 +966,8 @@ ask_dependency_caches (dep_t dep)
        present_dep_type = REG_DEP_OUTPUT;
       else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
        present_dep_type = REG_DEP_ANTI;
+      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
+       present_dep_type = REG_DEP_CONTROL;
       else
        /* There is no existing dep so it should be created.  */
        return DEP_CREATED;
@@ -949,6 +986,8 @@ ask_dependency_caches (dep_t dep)
        present_dep_types |= DEP_OUTPUT;
       if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
        present_dep_types |= DEP_ANTI;
+      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
+       present_dep_types |= DEP_CONTROL;
 
       if (present_dep_types == 0)
        /* There is no existing dep so it should be created.  */
@@ -1002,6 +1041,10 @@ set_dependency_caches (dep_t dep)
          bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
          break;
 
+       case REG_DEP_CONTROL:
+         bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
+         break;
+
        default:
          gcc_unreachable ();
        }
@@ -1016,6 +1059,8 @@ set_dependency_caches (dep_t dep)
        bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
       if (ds & DEP_ANTI)
        bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
+      if (ds & DEP_CONTROL)
+       bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
 
       if (ds & SPECULATIVE)
        {
@@ -1047,6 +1092,10 @@ update_dependency_caches (dep_t dep, enum reg_note old_type)
          bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
          break;
 
+       case REG_DEP_CONTROL:
+         bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
+         break;
+
        default:
          gcc_unreachable ();
        }
@@ -1330,8 +1379,7 @@ sd_unresolve_dep (sd_iterator_def sd_it)
   rtx pro = DEP_PRO (dep);
   rtx con = DEP_CON (dep);
 
-  if ((current_sched_info->flags & DO_SPECULATION)
-      && (DEP_STATUS (dep) & SPECULATIVE))
+  if (dep_spec_p (dep))
     move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
                   INSN_SPEC_BACK_DEPS (con));
   else
@@ -1382,6 +1430,7 @@ sd_delete_dep (sd_iterator_def sd_it)
 
       bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
       bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
+      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
       bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
 
       if (current_sched_info->flags & DO_SPECULATION)
@@ -1447,6 +1496,53 @@ sd_debug_lists (rtx insn, sd_list_types_def types)
   fprintf (stderr, "\n");
 }
 
+/* A wrapper around add_dependence_1, to add a dependence of CON on
+   PRO, with type DEP_TYPE.  This function implements special handling
+   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
+   the type to REG_DEP_ANTI if we can determine that predication is
+   impossible; otherwise we add additional true dependencies on the
+   INSN_COND_DEPS list of the jump (which PRO must be).  */
+void
+add_dependence (rtx con, rtx pro, enum reg_note dep_type)
+{
+  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
+     so we must also make the insn dependent on the setter of the
+     condition.  */
+  if (dep_type == REG_DEP_CONTROL)
+    {
+      rtx real_pro = pro;
+      rtx other = real_insn_for_shadow (real_pro);
+      rtx cond;
+
+      if (other != NULL_RTX)
+        real_pro = other;
+      cond = sched_get_reverse_condition_uncached (real_pro);
+      /* Verify that the insn does not use a different value in
+         the condition register than the one that was present at
+         the jump.  */
+      if (cond == NULL_RTX)
+        dep_type = REG_DEP_ANTI;
+      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
+        {
+          HARD_REG_SET uses;
+          CLEAR_HARD_REG_SET (uses);
+          note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
+          if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
+            dep_type = REG_DEP_ANTI;
+        }
+      if (dep_type == REG_DEP_CONTROL)
+        {
+          if (sched_verbose >= 5)
+            fprintf (sched_dump, "making DEP_CONTROL for %d\n",
+                     INSN_UID (real_pro));
+          add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
+                               REG_DEP_TRUE);
+        }
+    }
+
+  add_dependence_1 (con, pro, dep_type);
+}
+
 /* A convenience wrapper to operate on an entire list.  */
 
 static void
@@ -1662,6 +1758,10 @@ flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
   add_dependence_list_and_free (deps, insn,
                                &deps->last_pending_memory_flush, 1,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
 
+  add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
+                               REG_DEP_ANTI);
+
   if (!deps->readonly)
     {
       free_EXPR_LIST_list (&deps->pending_write_mems);
@@ -1783,10 +1883,12 @@ ds_to_dt (ds_t ds)
     return REG_DEP_TRUE;
   else if (ds & DEP_OUTPUT)
     return REG_DEP_OUTPUT;
+  else if (ds & DEP_ANTI)
+    return REG_DEP_ANTI;
   else
     {
-      gcc_assert (ds & DEP_ANTI);
-      return REG_DEP_ANTI;
+      gcc_assert (ds & DEP_CONTROL);
+      return REG_DEP_CONTROL;
     }
 }
@@ -2394,6 +2496,8 @@ sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
 
          add_dependence_list (insn, deps->last_pending_memory_flush, 1,
                               REG_DEP_ANTI);
+         add_dependence_list (insn, deps->pending_jump_insns, 1,
+                              REG_DEP_CONTROL);
 
          if (!deps->readonly)
            add_insn_mem_dependence (deps, false, insn, dest);
@@ -2541,23 +2645,22 @@ sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
        }
 
       for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
-       {
-         if (! NON_FLUSH_JUMP_P (u))
-           add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
-         else if (deps_may_trap_p (x))
-           {
-             if ((sched_deps_info->generate_spec_deps)
-                 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
-               {
-                 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
-                                         MAX_DEP_WEAK);
-
-                 note_dep (XEXP (u, 0), ds);
-               }
-             else
-               add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
-           }
-       }
+       add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+
+      for (u = deps->pending_jump_insns; u; u = XEXP (u, 1))
+       if (deps_may_trap_p (x))
+         {
+           if ((sched_deps_info->generate_spec_deps)
+               && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
+             {
+               ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
+                                       MAX_DEP_WEAK);
+
+               note_dep (XEXP (u, 0), ds);
+             }
+           else
+             add_dependence (insn, XEXP (u, 0), REG_DEP_CONTROL);
+         }
     }
 
   /* Always add these dependencies to pending_reads, since
@@ -2776,13 +2879,11 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
 
       if (sched_deps_info->compute_jump_reg_dependencies)
        {
-         regset_head tmp;
-         INIT_REG_SET (&tmp);
-
-         (*sched_deps_info->compute_jump_reg_dependencies) (insn, &tmp);
+         (*sched_deps_info->compute_jump_reg_dependencies)
+           (insn, reg_pending_control_uses);
 
          /* Make latency of jump equal to 0 by using anti-dependence.  */
-         EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i, rsi)
+         EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
@@ -2790,15 +2891,7 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
                                   0, REG_DEP_ANTI);
              add_dependence_list (insn, reg_last->clobbers, 0,
                                   REG_DEP_ANTI);
-
-             if (!deps->readonly)
-               {
-                 reg_last->uses_length++;
-                 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
-               }
            }
-
-         CLEAR_REG_SET (&tmp);
        }
 
       /* All memory writes and volatile reads must happen before the
@@ -2828,6 +2921,8 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
 
          add_dependence_list (insn, deps->last_pending_memory_flush, 1,
                               REG_DEP_ANTI);
+         add_dependence_list (insn, deps->pending_jump_insns, 1,
+                              REG_DEP_ANTI);
        }
     }
 
@@ -2863,13 +2958,15 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
                               REG_DEP_ANTI);
 
          for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
-           if (! NON_FLUSH_JUMP_P (u) || !sel_sched_p ())
+           if (!sel_sched_p ())
              add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
 
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI);
+             /* There's no point in making REG_DEP_CONTROL dependencies for
+                debug insns.  */
              add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI);
 
              if (!deps->readonly)
@@ -2953,6 +3050,8 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
              add_dependence_list (insn, reg_last->implicit_sets, 0,
                                   REG_DEP_ANTI);
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
+             add_dependence_list (insn, reg_last->control_uses, 0,
+                                  REG_DEP_CONTROL);
 
              if (!deps->readonly)
                {
@@ -2969,6 +3068,8 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
                                   REG_DEP_ANTI);
              add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
+             add_dependence_list (insn, reg_last->control_uses, 0,
+                                  REG_DEP_CONTROL);
 
              if (!deps->readonly)
                reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
@@ -2989,6 +3090,9 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
                                    REG_DEP_ANTI);
              add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
                                            REG_DEP_ANTI);
+             add_dependence_list_and_free (deps, insn,
+                                           &reg_last->control_uses, 0,
+                                           REG_DEP_ANTI);
              add_dependence_list_and_free
                (deps, insn, &reg_last->clobbers, 0, REG_DEP_OUTPUT);
 
@@ -3005,6 +3109,8 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
                  add_dependence_list (insn, reg_last->implicit_sets, 0,
                                       REG_DEP_ANTI);
                  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
+                 add_dependence_list (insn, reg_last->control_uses, 0,
+                                      REG_DEP_CONTROL);
                }
 
              if (!deps->readonly)
@@ -3027,6 +3133,8 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
                                            REG_DEP_OUTPUT);
              add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
                                            REG_DEP_ANTI);
+             add_dependence_list (insn, reg_last->control_uses, 0,
+                                  REG_DEP_CONTROL);
 
              if (!deps->readonly)
                {
@@ -3036,6 +3144,15 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
                }
            }
        }
+      if (!deps->readonly)
+       {
+         EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
+           {
+             struct deps_reg *reg_last = &deps->reg_last[i];
+             reg_last->control_uses
+               = alloc_INSN_LIST (insn, reg_last->control_uses);
+           }
+       }
     }
 
   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
@@ -3045,6 +3162,7 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
       add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
       add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
       add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
+      add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI);
 
       if (!deps->readonly)
        reg_last->implicit_sets
@@ -3068,6 +3186,7 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
   CLEAR_REG_SET (reg_pending_uses);
   CLEAR_REG_SET (reg_pending_clobbers);
   CLEAR_REG_SET (reg_pending_sets);
+  CLEAR_REG_SET (reg_pending_control_uses);
   CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
   CLEAR_HARD_REG_SET (implicit_reg_pending_uses);

@@ -3099,6 +3218,9 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
 	  struct deps_reg *reg_last = &deps->reg_last[i];
 	  add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
 					REG_DEP_ANTI);
+	  add_dependence_list_and_free (deps, insn,
+					&reg_last->control_uses, 0,
+					REG_DEP_CONTROL);
 	  add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
 					reg_pending_barrier == TRUE_BARRIER
 					? REG_DEP_TRUE : REG_DEP_ANTI);
@@ -3312,7 +3434,33 @@ deps_analyze_insn (struct deps_desc *deps, rtx insn)

       /* Record the condition for this insn.  */
       if (NONDEBUG_INSN_P (insn))
-	sched_get_condition_with_rev (insn, NULL);
+	{
+	  rtx t;
+	  sched_get_condition_with_rev (insn, NULL);
+	  t = INSN_CACHED_COND (insn);
+	  INSN_COND_DEPS (insn) = NULL_RTX;
+	  if (reload_completed
+	      && (current_sched_info->flags & DO_PREDICATION)
+	      && COMPARISON_P (t)
+	      && REG_P (XEXP (t, 0))
+	      && CONSTANT_P (XEXP (t, 1)))
+	    {
+	      unsigned int regno;
+	      int nregs;
+	      t = XEXP (t, 0);
+	      regno = REGNO (t);
+	      nregs = hard_regno_nregs[regno][GET_MODE (t)];
+	      t = NULL_RTX;
+	      while (nregs-- > 0)
+		{
+		  struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
+		  t = concat_INSN_LIST (reg_last->sets, t);
+		  t = concat_INSN_LIST (reg_last->clobbers, t);
+		  t = concat_INSN_LIST (reg_last->implicit_sets, t);
+		}
+	      INSN_COND_DEPS (insn) = t;
+	    }
+	}

       if (JUMP_P (insn))
 	{
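The new block above caches, for each predicable non-debug insn, an INSN_LIST of every instruction that may set its condition register (INSN_COND_DEPS). Purely as an illustration, not part of the patch (the helper name is hypothetical), such a list is traversed with the usual XEXP (link, 0) / XEXP (link, 1) idiom:

/* Illustration only, not part of the commit: print each insn recorded
   as a potential producer of INSN's condition register.  */
static void
dump_cond_producers (FILE *f, rtx insn)
{
  rtx link;

  for (link = INSN_COND_DEPS (insn); link != NULL_RTX; link = XEXP (link, 1))
    fprintf (f, ";; cond producer: insn %d\n", INSN_UID (XEXP (link, 0)));
}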
@@ -3326,15 +3474,8 @@ deps_analyze_insn (struct deps_desc *deps, rtx insn)
 	  if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
 	    flush_pending_lists (deps, insn, true, true);
 	  else
-	    {
-	      deps->last_pending_memory_flush
-		= alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
-	      /* Signal to sched_analyze_insn that this jump stands
-		 just for its own, not any other pending memory
-		 reads/writes flush_pending_lists had to flush.  */
-	      PUT_REG_NOTE_KIND (deps->last_pending_memory_flush,
-				 NON_FLUSH_JUMP_KIND);
-	    }
+	    deps->pending_jump_insns
+	      = alloc_INSN_LIST (insn, deps->pending_jump_insns);
 	}

       /* For each insn which shouldn't cross a jump, add a dependence.  */
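With this hunk a jump no longer piggybacks on last_pending_memory_flush under a special REG_NOTE kind; it goes on the dedicated pending_jump_insns list, which is why the per-element NON_FLUSH_JUMP_P test could be dropped earlier in sched_analyze_insn. A consumer that must keep an insn from moving across any pending jump can now walk that list directly. A minimal sketch under the surrounding sched-deps.c context, not the committed code:

/* Sketch: make INSN depend on every recorded pending jump, mirroring
   how the memory-flush list is drained elsewhere in this patch.  */
rtx u;

for (u = deps->pending_jump_insns; u; u = XEXP (u, 1))
  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);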
@@ -3584,6 +3725,7 @@ init_deps (struct deps_desc *deps, bool lazy_reg_last)
   deps->pending_read_mems = 0;
   deps->pending_write_insns = 0;
   deps->pending_write_mems = 0;
+  deps->pending_jump_insns = 0;
   deps->pending_read_list_length = 0;
   deps->pending_write_list_length = 0;
   deps->pending_flush_length = 0;
@@ -3644,6 +3786,8 @@ free_deps (struct deps_desc *deps)
 	free_INSN_LIST_list (&reg_last->sets);
       if (reg_last->implicit_sets)
 	free_INSN_LIST_list (&reg_last->implicit_sets);
+      if (reg_last->control_uses)
+	free_INSN_LIST_list (&reg_last->control_uses);
       if (reg_last->clobbers)
 	free_INSN_LIST_list (&reg_last->clobbers);
     }
@@ -3672,6 +3816,9 @@ remove_from_deps (struct deps_desc *deps, rtx insn)
   removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
 					       &deps->pending_write_mems);
   deps->pending_write_list_length -= removed;

+  removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
+  deps->pending_flush_length -= removed;
   removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
   deps->pending_flush_length -= removed;

@@ -3766,6 +3913,8 @@ extend_dependency_caches (int n, bool create_p)
 					  output_dependency_cache, luid);
       anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
 					  luid);
+      control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
+					  luid);

       if (current_sched_info->flags & DO_SPECULATION)
 	spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
@@ -3776,6 +3925,7 @@ extend_dependency_caches (int n, bool create_p)
 	  bitmap_initialize (&true_dependency_cache[i], 0);
 	  bitmap_initialize (&output_dependency_cache[i], 0);
 	  bitmap_initialize (&anti_dependency_cache[i], 0);
+	  bitmap_initialize (&control_dependency_cache[i], 0);

 	  if (current_sched_info->flags & DO_SPECULATION)
 	    bitmap_initialize (&spec_dependency_cache[i], 0);
@@ -3805,6 +3955,7 @@ sched_deps_finish (void)
 	  bitmap_clear (&true_dependency_cache[i]);
 	  bitmap_clear (&output_dependency_cache[i]);
 	  bitmap_clear (&anti_dependency_cache[i]);
+	  bitmap_clear (&control_dependency_cache[i]);

 	  if (sched_deps_info->generate_spec_deps)
 	    bitmap_clear (&spec_dependency_cache[i]);
@@ -3815,6 +3966,8 @@ sched_deps_finish (void)
       output_dependency_cache = NULL;
       free (anti_dependency_cache);
       anti_dependency_cache = NULL;
+      free (control_dependency_cache);
+      control_dependency_cache = NULL;

       if (sched_deps_info->generate_spec_deps)
 	{
@@ -3836,6 +3989,7 @@ init_deps_global (void)
   reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
   reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
   reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
+  reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
   reg_pending_barrier = NOT_A_BARRIER;

   if (!sel_sched_p () || sched_emulate_haifa_p)
@@ -3860,6 +4014,7 @@ finish_deps_global (void)
   FREE_REG_SET (reg_pending_sets);
   FREE_REG_SET (reg_pending_clobbers);
   FREE_REG_SET (reg_pending_uses);
+  FREE_REG_SET (reg_pending_control_uses);
 }

 /* Estimate the weakness of dependence between MEM1 and MEM2.  */
@@ -3893,8 +4048,8 @@ estimate_dep_weak (rtx mem1, rtx mem2)
 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
    This function can handle same INSN and ELEM (INSN == ELEM).
    It is a convenience wrapper.  */
-void
-add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
+static void
+add_dependence_1 (rtx insn, rtx elem, enum reg_note dep_type)
 {
   ds_t ds;
   bool internal;
@@ -3903,6 +4058,8 @@ add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
     ds = DEP_TRUE;
   else if (dep_type == REG_DEP_OUTPUT)
     ds = DEP_OUTPUT;
+  else if (dep_type == REG_DEP_CONTROL)
+    ds = DEP_CONTROL;
   else
     {
       gcc_assert (dep_type == REG_DEP_ANTI);
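Per the ChangeLog, add_dependence itself survives as a thin wrapper around the renamed add_dependence_1 and handles REG_DEP_CONTROL specially. The wrapper's body is outside the visible hunks, so the following is only a hedged sketch; the exact downgrade condition is an assumption, motivated by the ChangeLog note that dep_spec_p treats REG_DEP_CONTROL as speculative only under DO_PREDICATION:

/* Hedged sketch -- the committed wrapper may differ.  When predication
   is unavailable, a control dependence can only be honored as an
   ordinary anti dependence.  */
void
add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
{
  if (dep_type == REG_DEP_CONTROL
      && !(current_sched_info->flags & DO_PREDICATION))
    dep_type = REG_DEP_ANTI;
  add_dependence_1 (insn, elem, dep_type);
}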
@@ -4169,10 +4326,12 @@ dump_ds (FILE *f, ds_t s)

   if (s & DEP_TRUE)
     fprintf (f, "DEP_TRUE; ");
-  if (s & DEP_ANTI)
-    fprintf (f, "DEP_ANTI; ");
   if (s & DEP_OUTPUT)
     fprintf (f, "DEP_OUTPUT; ");
+  if (s & DEP_ANTI)
+    fprintf (f, "DEP_ANTI; ");
+  if (s & DEP_CONTROL)
+    fprintf (f, "DEP_CONTROL; ");

   fprintf (f, "}");
 }
@@ -4207,10 +4366,13 @@ check_dep (dep_t dep, bool relaxed_p)
   else if (dt == REG_DEP_OUTPUT)
     gcc_assert ((ds & DEP_OUTPUT)
 		&& !(ds & DEP_TRUE));
-  else
-    gcc_assert ((dt == REG_DEP_ANTI)
-		&& (ds & DEP_ANTI)
+  else if (dt == REG_DEP_ANTI)
+    gcc_assert ((ds & DEP_ANTI)
 		&& !(ds & (DEP_OUTPUT | DEP_TRUE)));
+  else
+    gcc_assert (dt == REG_DEP_CONTROL
+		&& (ds & DEP_CONTROL)
+		&& !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));

   /* HARD_DEP can not appear in dep_status of a link.  */
   gcc_assert (!(ds & HARD_DEP));
--- a/gcc/sched-ebb.c
+++ b/gcc/sched-ebb.c
@@ -431,32 +431,23 @@ add_deps_for_risky_insns (rtx head, rtx tail)
 	     rank.  */
 	  if (! sched_insns_conditions_mutex_p (insn, prev))
 	    {
-	      dep_def _dep, *dep = &_dep;
-
-	      init_dep (dep, prev, insn, REG_DEP_ANTI);
-
-	      if (!(current_sched_info->flags & USE_DEPS_LIST))
-		{
-		  enum DEPS_ADJUST_RESULT res;
-
-		  res = sd_add_or_update_dep (dep, false);
-
-		  /* We can't change an existing dependency with
-		     DEP_ANTI.  */
-		  gcc_assert (res != DEP_CHANGED);
-		}
-	      else
-		{
-		  if ((current_sched_info->flags & DO_SPECULATION)
-		      && (spec_info->mask & BEGIN_CONTROL))
-		    DEP_STATUS (dep) = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
-						     MAX_DEP_WEAK);
-
-		  sd_add_or_update_dep (dep, false);
-
-		  /* Dep_status could have been changed.
-		     No assertion here.  */
-		}
+	      if ((current_sched_info->flags & DO_SPECULATION)
+		  && (spec_info->mask & BEGIN_CONTROL))
+		{
+		  dep_def _dep, *dep = &_dep;
+
+		  init_dep (dep, prev, insn, REG_DEP_ANTI);
+
+		  if (current_sched_info->flags & USE_DEPS_LIST)
+		    {
+		      DEP_STATUS (dep) = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
+						       MAX_DEP_WEAK);
+
+		    }
+		  sd_add_or_update_dep (dep, false);
+		}
+	      else
+		add_dependence (insn, prev, REG_DEP_CONTROL);
 	    }

 	  break;
--- a/gcc/sched-int.h
+++ b/gcc/sched-int.h
@@ -424,6 +424,7 @@ struct deps_reg
   rtx uses;
   rtx sets;
   rtx implicit_sets;
+  rtx control_uses;
   rtx clobbers;
   int uses_length;
   int clobbers_length;
@@ -453,6 +454,9 @@ struct deps_desc
   /* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
   rtx pending_write_mems;

+  /* An INSN_LIST containing all jump insns.  */
+  rtx pending_jump_insns;
+
   /* We must prevent the above lists from ever growing too large since
      the number of dependencies produced is at least O(N*N),
      and execution time is at least O(4*N*N), as a function of the
@@ -464,8 +468,9 @@ struct deps_desc
   /* Indicates the length of the pending_write list.  */
   int pending_write_list_length;

-  /* Length of the pending memory flush list.  Large functions with no
-     calls may build up extremely large lists.  */
+  /* Length of the pending memory flush list plus the length of the pending
+     jump insn list.  Large functions with no calls may build up extremely
+     large lists.  */
   int pending_flush_length;

   /* The last insn upon which all memory references must depend.
@@ -699,6 +704,10 @@ struct _haifa_deps_insn_data
      condition that has been clobbered by a subsequent insn.  */
   rtx cond;

+  /* For a conditional insn, a list of insns that could set the condition
+     register.  Used when generating control dependencies.  */
+  rtx cond_deps;
+
   /* True if the condition in 'cond' should be reversed to get the actual
      condition.  */
   unsigned int reverse_cond : 1;
@@ -799,6 +808,10 @@ struct _haifa_insn_data
      real insns following them.  */
   unsigned int shadow_p : 1;

+  /* Used internally in unschedule_insns_until to mark insns that must have
+     their TODO_SPEC recomputed.  */
+  unsigned int must_recompute_spec : 1;
+
   /* '> 0' if priority is valid,
      '== 0' if priority was not yet computed,
      '< 0' if priority in invalid and should be recomputed.  */
@@ -819,6 +832,10 @@ struct _haifa_insn_data
   /* Original pattern of the instruction.  */
   rtx orig_pat;

+  /* For insns with DEP_CONTROL dependencies, the predicated pattern if it
+     was ever successfully constructed.  */
+  rtx predicated_pat;
+
   /* The following array contains info how the insn increases register
      pressure.  There is an element for each cover class of pseudos
      referenced in insns.  */
@@ -880,6 +897,7 @@ extern VEC(haifa_deps_insn_data_def, heap) *h_d_i_d;
 #define INSN_SPEC_BACK_DEPS(INSN) (HDID (INSN)->spec_back_deps)
 #define INSN_CACHED_COND(INSN)	(HDID (INSN)->cond)
 #define INSN_REVERSE_COND(INSN) (HDID (INSN)->reverse_cond)
+#define INSN_COND_DEPS(INSN)	(HDID (INSN)->cond_deps)
 #define CANT_MOVE(INSN)	(HDID (INSN)->cant_move)
 #define CANT_MOVE_BY_LUID(LUID)	(VEC_index (haifa_deps_insn_data_def, h_d_i_d, \
 					    LUID)->cant_move)
@@ -893,6 +911,7 @@ extern VEC(haifa_deps_insn_data_def, heap) *h_d_i_d;
 #define CHECK_SPEC(INSN) (HID (INSN)->check_spec)
 #define RECOVERY_BLOCK(INSN) (HID (INSN)->recovery_block)
 #define ORIG_PAT(INSN) (HID (INSN)->orig_pat)
+#define PREDICATED_PAT(INSN) (HID (INSN)->predicated_pat)

 /* INSN is either a simple or a branchy speculation check.  */
 #define IS_SPECULATION_CHECK_P(INSN) \
@@ -932,10 +951,11 @@ extern VEC(haifa_deps_insn_data_def, heap) *h_d_i_d;
 /* We exclude sign bit.  */
 #define BITS_PER_DEP_STATUS (HOST_BITS_PER_INT - 1)

-/* First '4' stands for 3 dep type bits and HARD_DEP bit.
+/* First '6' stands for 4 dep type bits and the HARD_DEP and DEP_CANCELLED
+   bits.
    Second '4' stands for BEGIN_{DATA, CONTROL}, BE_IN_{DATA, CONTROL}
    dep weakness.  */
-#define BITS_PER_DEP_WEAK ((BITS_PER_DEP_STATUS - 4) / 4)
+#define BITS_PER_DEP_WEAK ((BITS_PER_DEP_STATUS - 6) / 4)

 /* Mask of speculative weakness in dep_status.  */
 #define DEP_WEAK_MASK ((1 << BITS_PER_DEP_WEAK) - 1)
@@ -1009,13 +1029,16 @@ enum SPEC_TYPES_OFFSETS {
 #define DEP_TRUE (((ds_t) 1) << (BE_IN_CONTROL_BITS_OFFSET + BITS_PER_DEP_WEAK))
 #define DEP_OUTPUT (DEP_TRUE << 1)
 #define DEP_ANTI (DEP_OUTPUT << 1)
+#define DEP_CONTROL (DEP_ANTI << 1)

-#define DEP_TYPES (DEP_TRUE | DEP_OUTPUT | DEP_ANTI)
+#define DEP_TYPES (DEP_TRUE | DEP_OUTPUT | DEP_ANTI | DEP_CONTROL)

 /* Instruction has non-speculative dependence.  This bit represents the
    property of an instruction - not the one of a dependence.
    Therefore, it can appear only in TODO_SPEC field of an instruction.  */
-#define HARD_DEP (DEP_ANTI << 1)
+#define HARD_DEP (DEP_CONTROL << 1)
+
+#define DEP_CANCELLED (HARD_DEP << 1)

 /* This represents the results of calling sched-deps.c functions,
    which modify dependencies.  */
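The arithmetic behind the two hunks above is easy to verify by hand. Assuming a 32-bit host int (so BITS_PER_DEP_STATUS is 31) and the usual stacking of the four weakness fields below DEP_TRUE, this standalone program reproduces the new layout: six weakness bits per field, DEP_TRUE at bit 24, DEP_CANCELLED at bit 29, all inside the 31 usable bits:

#include <stdio.h>

int
main (void)
{
  const int bits_per_dep_status = 32 - 1;	/* sign bit excluded */
  /* Four dep type bits plus HARD_DEP and DEP_CANCELLED leave
     (31 - 6) / 4 == 6 bits per weakness field.  */
  const int bits_per_dep_weak = (bits_per_dep_status - 6) / 4;
  unsigned int dep_true = 1u << (4 * bits_per_dep_weak);	/* bit 24 */
  unsigned int dep_output = dep_true << 1;			/* bit 25 */
  unsigned int dep_anti = dep_output << 1;			/* bit 26 */
  unsigned int dep_control = dep_anti << 1;			/* bit 27 */
  unsigned int hard_dep = dep_control << 1;			/* bit 28 */
  unsigned int dep_cancelled = hard_dep << 1;			/* bit 29 */

  printf ("weak bits %d, DEP_TRUE %#x, DEP_CONTROL %#x, DEP_CANCELLED %#x\n",
	  bits_per_dep_weak, dep_true, dep_control, dep_cancelled);
  return 0;
}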
@@ -1041,7 +1064,8 @@ enum SCHED_FLAGS {
      Requires USE_DEPS_LIST set.  */
   DO_SPECULATION = USE_DEPS_LIST << 1,
   DO_BACKTRACKING = DO_SPECULATION << 1,
-  SCHED_RGN = DO_BACKTRACKING << 1,
+  DO_PREDICATION = DO_BACKTRACKING << 1,
+  SCHED_RGN = DO_PREDICATION << 1,
   SCHED_EBB = SCHED_RGN << 1,
   /* Scheduler can possibly create new basic blocks.  Used for assertions.  */
   NEW_BBS = SCHED_EBB << 1,
@@ -1202,6 +1226,7 @@ extern struct sched_deps_info_def *sched_deps_info;


 /* Functions in sched-deps.c.  */
+extern rtx sched_get_reverse_condition_uncached (const_rtx);
 extern bool sched_insns_conditions_mutex_p (const_rtx, const_rtx);
 extern bool sched_insn_is_legitimate_for_speculation_p (const_rtx, ds_t);
 extern void add_dependence (rtx, rtx, enum reg_note);
@@ -1337,6 +1362,7 @@ extern bool sched_no_dce;

 extern void set_modulo_params (int, int, int, int);
 extern void record_delay_slot_pair (rtx, rtx, int, int);
+extern rtx real_insn_for_shadow (rtx);
 extern void discard_delay_pairs_above (int);
 extern void free_delay_pairs (void);
 extern void add_delay_dependencies (rtx);
@@ -1527,3 +1553,4 @@ extern void print_pattern (char *, const_rtx, int);
 extern void print_value (char *, const_rtx, int);

 #endif /* GCC_SCHED_INT_H */
+
--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -234,7 +234,6 @@ static void add_branch_dependences (rtx, rtx);
 static void compute_block_dependences (int);

 static void schedule_region (int);
-static rtx concat_INSN_LIST (rtx, rtx);
 static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *);
 static void propagate_deps (int, struct deps_desc *);
 static void free_pending_lists (void);
@@ -2552,20 +2551,6 @@ add_branch_dependences (rtx head, rtx tail)

 static struct deps_desc *bb_deps;

-/* Duplicate the INSN_LIST elements of COPY and prepend them to OLD.  */
-
-static rtx
-concat_INSN_LIST (rtx copy, rtx old)
-{
-  rtx new_rtx = old;
-  for (; copy ; copy = XEXP (copy, 1))
-    {
-      new_rtx = alloc_INSN_LIST (XEXP (copy, 0), new_rtx);
-      PUT_REG_NOTE_KIND (new_rtx, REG_NOTE_KIND (copy));
-    }
-  return new_rtx;
-}
-
 static void
 concat_insn_mem_list (rtx copy_insns, rtx copy_mems, rtx *old_insns_p,
 		      rtx *old_mems_p)
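This hunk is pure code motion: per the ChangeLog, concat_INSN_LIST (together with the new copy_INSN_LIST) moved to lists.c and is declared in rtl.h, which is what lets sched-deps.c build INSN_COND_DEPS with it above and lets deps_join below keep calling it. The contract is unchanged (duplicate COPY's elements, preserving each element's note kind, and prepend them to OLD), so a caller merges two lists like this (list_a and list_b are illustrative names):

/* 'merged' starts with fresh copies of list_a's elements, followed by
   list_b's own cells; list_a itself is left untouched.  */
rtx merged = concat_INSN_LIST (list_a, list_b);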
@@ -2619,6 +2604,9 @@ deps_join (struct deps_desc *succ_deps, struct deps_desc *pred_deps)
 			&succ_deps->pending_write_insns,
 			&succ_deps->pending_write_mems);

+  succ_deps->pending_jump_insns
+    = concat_INSN_LIST (pred_deps->pending_jump_insns,
+			succ_deps->pending_jump_insns);
   succ_deps->last_pending_memory_flush
     = concat_INSN_LIST (pred_deps->last_pending_memory_flush,
 			succ_deps->last_pending_memory_flush);
@@ -2670,12 +2658,14 @@ propagate_deps (int bb, struct deps_desc *pred_deps)
   bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems;
   bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns;
   bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems;
+  bb_deps[bb].pending_jump_insns = pred_deps->pending_jump_insns;

   /* Can't allow these to be freed twice.  */
   pred_deps->pending_read_insns = 0;
   pred_deps->pending_read_mems = 0;
   pred_deps->pending_write_insns = 0;
   pred_deps->pending_write_mems = 0;
+  pred_deps->pending_jump_insns = 0;
 }
/* Compute dependences inside bb. In a multiple blocks region:
|
||||
|
|
@ -2754,6 +2744,7 @@ free_pending_lists (void)
|
|||
free_INSN_LIST_list (&bb_deps[bb].pending_write_insns);
|
||||
free_EXPR_LIST_list (&bb_deps[bb].pending_read_mems);
|
||||
free_EXPR_LIST_list (&bb_deps[bb].pending_write_mems);
|
||||
free_INSN_LIST_list (&bb_deps[bb].pending_jump_insns);
|
||||
}
|
||||
}
|
||||
|
||||
|
|