[18/46] Make SLP_TREE_SCALAR_STMTS a vec<stmt_vec_info>

This patch changes SLP_TREE_SCALAR_STMTS from a vec<gimple *> to
a vec<stmt_vec_info>.  It's longer than the previous conversions
but mostly mechanical.
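
The conversion follows the pattern of the earlier patches in the series: code that previously looked up a stmt_vec_info with vinfo_for_stmt now gets one directly from the SLP node, and code that needs the underlying statement dereferences the info's ->stmt field.  A minimal sketch of the before/after idiom (simplified, not the actual GCC declarations):

    /* Before: SLP nodes stored bare gimple statements, so reaching the
       vectorizer's per-statement data required a vinfo_for_stmt lookup.  */
    gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0];
    data_reference_p dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));

    /* After: nodes store stmt_vec_infos directly, so the per-statement
       data is immediate and the raw statement is a field access away.  */
    stmt_vec_info stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
    data_reference_p dr = STMT_VINFO_DATA_REF (stmt_info);
    gimple *raw_stmt = stmt_info->stmt;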

2018-07-31  Richard Sandiford  <richard.sandiford@arm.com>

gcc/
	* tree-vectorizer.h (_slp_tree::stmts): Change from a vec<gimple *>
	to a vec<stmt_vec_info>.
	* tree-vect-slp.c (vect_free_slp_tree): Update accordingly.
	(vect_create_new_slp_node): Take a vec<stmt_vec_info> instead of a
	vec<gimple *>.
	(_slp_oprnd_info::def_stmts): Change from a vec<gimple *>
	to a vec<stmt_vec_info>.
	(bst_traits::value_type, bst_traits::compare_type): Likewise.
	(bst_traits::hash): Update accordingly.
	(vect_get_and_check_slp_defs): Change the stmts parameter from
	a vec<gimple *> to a vec<stmt_vec_info>.
	(vect_two_operations_perm_ok_p, vect_build_slp_tree_1): Likewise.
	(vect_build_slp_tree): Likewise.
	(vect_build_slp_tree_2): Likewise.  Update uses of
	SLP_TREE_SCALAR_STMTS.
	(vect_print_slp_tree): Update uses of SLP_TREE_SCALAR_STMTS.
	(vect_mark_slp_stmts, vect_mark_slp_stmts_relevant)
	(vect_slp_rearrange_stmts, vect_attempt_slp_rearrange_stmts)
	(vect_supported_load_permutation_p, vect_find_last_scalar_stmt_in_slp)
	(vect_detect_hybrid_slp_stmts, vect_slp_analyze_node_operations_1)
	(vect_slp_analyze_node_operations, vect_slp_analyze_operations)
	(vect_bb_slp_scalar_cost, vect_slp_analyze_bb_1)
	(vect_get_constant_vectors, vect_get_slp_defs)
	(vect_transform_slp_perm_load, vect_schedule_slp_instance)
	(vect_remove_slp_scalar_calls, vect_schedule_slp): Likewise.
	(vect_analyze_slp_instance): Build up a vec of stmt_vec_infos
	instead of gimple stmts.
	* tree-vect-data-refs.c (vect_slp_analyze_node_dependences): Change
	the stores parameter from a vec<gimple *> to a vec<stmt_vec_info>.
	(vect_slp_analyze_instance_dependence): Update uses of
	SLP_TREE_SCALAR_STMTS.
	(vect_slp_analyze_and_verify_node_alignment): Likewise.
	(vect_slp_analyze_and_verify_instance_alignment): Likewise.
	* tree-vect-loop.c (neutral_op_for_slp_reduction): Likewise.
	(get_initial_defs_for_reduction): Likewise.
	(vect_create_epilog_for_reduction): Likewise.
	(vectorize_fold_left_reduction): Likewise.
	* tree-vect-stmts.c (vect_prologue_cost_for_slp_op): Likewise.
	(vect_model_simple_cost, vectorizable_shift, vectorizable_load)
	(can_vectorize_live_stmts): Likewise.

From-SVN: r263133
Author:    Richard Sandiford <richard.sandiford@arm.com>  2018-07-31 14:23:20 +00:00
Committer: Richard Sandiford
Commit:    b978758186 (parent 32c91dfcfd)
6 changed files with 360 additions and 306 deletions

gcc/tree-vect-data-refs.c

@@ -665,7 +665,8 @@ vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
 static bool
 vect_slp_analyze_node_dependences (slp_instance instance, slp_tree node,
-                                   vec<gimple *> stores, gimple *last_store)
+                                   vec<stmt_vec_info> stores,
+                                   gimple *last_store)
 {
   /* This walks over all stmts involved in the SLP load/store done
      in NODE verifying we can sink them up to the last stmt in the
@@ -673,13 +674,13 @@ vect_slp_analyze_node_dependences (slp_instance instance, slp_tree node,
   gimple *last_access = vect_find_last_scalar_stmt_in_slp (node);
   for (unsigned k = 0; k < SLP_INSTANCE_GROUP_SIZE (instance); ++k)
     {
-      gimple *access = SLP_TREE_SCALAR_STMTS (node)[k];
-      if (access == last_access)
+      stmt_vec_info access_info = SLP_TREE_SCALAR_STMTS (node)[k];
+      if (access_info == last_access)
        continue;
-      data_reference *dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (access));
+      data_reference *dr_a = STMT_VINFO_DATA_REF (access_info);
       ao_ref ref;
       bool ref_initialized_p = false;
-      for (gimple_stmt_iterator gsi = gsi_for_stmt (access);
+      for (gimple_stmt_iterator gsi = gsi_for_stmt (access_info->stmt);
           gsi_stmt (gsi) != last_access; gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);
@@ -712,11 +713,10 @@ vect_slp_analyze_node_dependences (slp_instance instance, slp_tree node,
          if (stmt != last_store)
            continue;
          unsigned i;
-         gimple *store;
-         FOR_EACH_VEC_ELT (stores, i, store)
+         stmt_vec_info store_info;
+         FOR_EACH_VEC_ELT (stores, i, store_info)
            {
-             data_reference *store_dr
-               = STMT_VINFO_DATA_REF (vinfo_for_stmt (store));
+             data_reference *store_dr = STMT_VINFO_DATA_REF (store_info);
              ddr_p ddr = initialize_data_dependence_relation
                            (dr_a, store_dr, vNULL);
              dependent = vect_slp_analyze_data_ref_dependence (ddr);
@@ -753,7 +753,7 @@ vect_slp_analyze_instance_dependence (slp_instance instance)
   /* The stores of this instance are at the root of the SLP tree.  */
   slp_tree store = SLP_INSTANCE_TREE (instance);
-  if (! STMT_VINFO_DATA_REF (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (store)[0])))
+  if (! STMT_VINFO_DATA_REF (SLP_TREE_SCALAR_STMTS (store)[0]))
     store = NULL;
   /* Verify we can sink stores to the vectorized stmt insert location.  */
@@ -766,7 +766,7 @@ vect_slp_analyze_instance_dependence (slp_instance instance)
       /* Mark stores in this instance and remember the last one.  */
       last_store = vect_find_last_scalar_stmt_in_slp (store);
       for (unsigned k = 0; k < SLP_INSTANCE_GROUP_SIZE (instance); ++k)
-       gimple_set_visited (SLP_TREE_SCALAR_STMTS (store)[k], true);
+       gimple_set_visited (SLP_TREE_SCALAR_STMTS (store)[k]->stmt, true);
     }
   bool res = true;
@@ -788,7 +788,7 @@ vect_slp_analyze_instance_dependence (slp_instance instance)
   /* Unset the visited flag.  */
   if (store)
     for (unsigned k = 0; k < SLP_INSTANCE_GROUP_SIZE (instance); ++k)
-      gimple_set_visited (SLP_TREE_SCALAR_STMTS (store)[k], false);
+      gimple_set_visited (SLP_TREE_SCALAR_STMTS (store)[k]->stmt, false);
   return res;
 }
@@ -2389,10 +2389,11 @@ vect_slp_analyze_and_verify_node_alignment (slp_tree node)
   /* We vectorize from the first scalar stmt in the node unless
      the node is permuted in which case we start from the first
      element in the group.  */
-  gimple *first_stmt = SLP_TREE_SCALAR_STMTS (node)[0];
-  data_reference_p first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
+  stmt_vec_info first_stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
+  gimple *first_stmt = first_stmt_info->stmt;
+  data_reference_p first_dr = STMT_VINFO_DATA_REF (first_stmt_info);
   if (SLP_TREE_LOAD_PERMUTATION (node).exists ())
-    first_stmt = DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt));
+    first_stmt = DR_GROUP_FIRST_ELEMENT (first_stmt_info);
   data_reference_p dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
   vect_compute_data_ref_alignment (dr);
@@ -2429,7 +2430,7 @@ vect_slp_analyze_and_verify_instance_alignment (slp_instance instance)
     return false;
   node = SLP_INSTANCE_TREE (instance);
-  if (STMT_VINFO_DATA_REF (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]))
+  if (STMT_VINFO_DATA_REF (SLP_TREE_SCALAR_STMTS (node)[0])
      && ! vect_slp_analyze_and_verify_node_alignment
           (SLP_INSTANCE_TREE (instance)))
    return false;
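
One wrinkle worth noting in the alignment hunks above: at this point in the series SLP_TREE_SCALAR_STMTS yields stmt_vec_infos, but DR_GROUP_FIRST_ELEMENT still returns a gimple *, so a single vinfo_for_stmt lookup survives.  Condensed from the hunk (simplified, not complete code):

    stmt_vec_info first_stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
    gimple *first_stmt = first_stmt_info->stmt;
    if (SLP_TREE_LOAD_PERMUTATION (node).exists ())
      /* Still a gimple * here; this accessor is converted later
         in the series.  */
      first_stmt = DR_GROUP_FIRST_ELEMENT (first_stmt_info);
    data_reference_p dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));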

gcc/tree-vect-loop.c

@@ -2186,8 +2186,7 @@ again:
   FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
     {
       stmt_vec_info vinfo;
-      vinfo = vinfo_for_stmt
-       (SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0]);
+      vinfo = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
       if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
        continue;
       vinfo = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (vinfo));
@@ -2199,7 +2198,7 @@ again:
        return false;
       FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
        {
-         vinfo = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
+         vinfo = SLP_TREE_SCALAR_STMTS (node)[0];
          vinfo = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (vinfo));
          bool single_element_p = !DR_GROUP_NEXT_ELEMENT (vinfo);
          size = DR_GROUP_SIZE (vinfo);
@@ -2442,12 +2441,11 @@ static tree
 neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code,
                              bool reduc_chain)
 {
-  vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
-  gimple *stmt = stmts[0];
-  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
+  vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
+  stmt_vec_info stmt_vinfo = stmts[0];
   tree vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
   tree scalar_type = TREE_TYPE (vector_type);
-  struct loop *loop = gimple_bb (stmt)->loop_father;
+  struct loop *loop = gimple_bb (stmt_vinfo->stmt)->loop_father;
   gcc_assert (loop);
   switch (code)
@@ -2473,7 +2471,8 @@ neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code,
         has only a single initial value, so that value is neutral for
         all statements.  */
      if (reduc_chain)
-       return PHI_ARG_DEF_FROM_EDGE (stmt, loop_preheader_edge (loop));
+       return PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
+                                     loop_preheader_edge (loop));
      return NULL_TREE;
    default:
@@ -4182,9 +4181,8 @@ get_initial_defs_for_reduction (slp_tree slp_node,
                                unsigned int number_of_vectors,
                                bool reduc_chain, tree neutral_op)
 {
-  vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
-  gimple *stmt = stmts[0];
-  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
+  vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
+  stmt_vec_info stmt_vinfo = stmts[0];
   unsigned HOST_WIDE_INT nunits;
   unsigned j, number_of_places_left_in_vector;
   tree vector_type;
@@ -4201,7 +4199,7 @@ get_initial_defs_for_reduction (slp_tree slp_node,
   gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);
-  loop = (gimple_bb (stmt))->loop_father;
+  loop = (gimple_bb (stmt_vinfo->stmt))->loop_father;
   gcc_assert (loop);
   edge pe = loop_preheader_edge (loop);
@@ -4234,7 +4232,7 @@ get_initial_defs_for_reduction (slp_tree slp_node,
   elts.quick_grow (nunits);
   for (j = 0; j < number_of_copies; j++)
     {
-      for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
+      for (i = group_size - 1; stmts.iterate (i, &stmt_vinfo); i--)
        {
          tree op;
          /* Get the def before the loop.  In reduction chain we have only
@@ -4244,7 +4242,7 @@ get_initial_defs_for_reduction (slp_tree slp_node,
              && neutral_op)
            op = neutral_op;
          else
-           op = PHI_ARG_DEF_FROM_EDGE (stmt, pe);
+           op = PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt, pe);
          /* Create 'vect_ = {op0,op1,...,opn}'.  */
          number_of_places_left_in_vector--;
@@ -5128,7 +5126,8 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
      gcc_assert (pow2p_hwi (group_size));
      slp_tree orig_phis_slp_node = slp_node_instance->reduc_phis;
-     vec<gimple *> orig_phis = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node);
+     vec<stmt_vec_info> orig_phis
+       = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node);
      gimple_seq seq = NULL;
      /* Build a vector {0, 1, 2, ...}, with the same number of elements
@@ -5159,7 +5158,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
          if (!neutral_op)
            {
              tree scalar_value
-               = PHI_ARG_DEF_FROM_EDGE (orig_phis[i],
+               = PHI_ARG_DEF_FROM_EDGE (orig_phis[i]->stmt,
                                         loop_preheader_edge (loop));
              vector_identity = gimple_build_vector_from_val (&seq, vectype,
                                                              scalar_value);
@@ -5572,12 +5571,13 @@ vect_finalize_reduction:
         the loop exit phi node.  */
      if (REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
        {
-         gimple *dest_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
+         stmt_vec_info dest_stmt_info
+           = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
          /* Handle reduction patterns.  */
-         if (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt)))
-           dest_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt));
+         if (STMT_VINFO_RELATED_STMT (dest_stmt_info))
+           dest_stmt_info = STMT_VINFO_RELATED_STMT (dest_stmt_info);
-         scalar_dest = gimple_assign_lhs (dest_stmt);
+         scalar_dest = gimple_assign_lhs (dest_stmt_info->stmt);
          group_size = 1;
        }
@@ -5607,13 +5607,12 @@ vect_finalize_reduction:
      if (slp_reduc)
        {
-         gimple *current_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[k];
+         stmt_vec_info scalar_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[k];
-         orig_stmt_info
-           = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
+         orig_stmt_info = STMT_VINFO_RELATED_STMT (scalar_stmt_info);
          /* SLP statements can't participate in patterns.  */
          gcc_assert (!orig_stmt_info);
-         scalar_dest = gimple_assign_lhs (current_stmt);
+         scalar_dest = gimple_assign_lhs (scalar_stmt_info->stmt);
        }
      phis.create (3);
@@ -5881,23 +5880,23 @@ vectorize_fold_left_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
   tree op0 = ops[1 - reduc_index];
   int group_size = 1;
-  gimple *scalar_dest_def;
+  stmt_vec_info scalar_dest_def_info;
   auto_vec<tree> vec_oprnds0;
   if (slp_node)
     {
      vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, slp_node);
      group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
-     scalar_dest_def = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
+     scalar_dest_def_info = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
     }
   else
     {
      tree loop_vec_def0 = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0.create (1);
      vec_oprnds0.quick_push (loop_vec_def0);
-     scalar_dest_def = stmt;
+     scalar_dest_def_info = stmt_info;
     }
-  tree scalar_dest = gimple_assign_lhs (scalar_dest_def);
+  tree scalar_dest = gimple_assign_lhs (scalar_dest_def_info->stmt);
   tree scalar_type = TREE_TYPE (scalar_dest);
   tree reduc_var = gimple_phi_result (reduc_def_stmt);
@@ -5964,10 +5963,11 @@ vectorize_fold_left_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
      if (i == vec_num - 1)
        {
          gimple_set_lhs (new_stmt, scalar_dest);
-         new_stmt_info = vect_finish_replace_stmt (scalar_dest_def, new_stmt);
+         new_stmt_info = vect_finish_replace_stmt (scalar_dest_def_info,
+                                                   new_stmt);
        }
      else
-       new_stmt_info = vect_finish_stmt_generation (scalar_dest_def,
+       new_stmt_info = vect_finish_stmt_generation (scalar_dest_def_info,
                                                     new_stmt, gsi);
      if (slp_node)
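
The vectorize_fold_left_reduction hunks show the payoff of the new representation: a single variable, scalar_dest_def_info, can name either an SLP scalar statement or the current statement's own info, and then feeds vect_finish_replace_stmt / vect_finish_stmt_generation, which take stmt_vec_infos.  A reduced sketch of the control flow (surrounding setup elided; stmt_info is the info for STMT, as in the function itself):

    stmt_vec_info scalar_dest_def_info;
    if (slp_node)
      /* Last scalar stmt of the SLP group defines the reduction result.  */
      scalar_dest_def_info = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
    else
      scalar_dest_def_info = stmt_info;
    tree scalar_dest = gimple_assign_lhs (scalar_dest_def_info->stmt);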

gcc/tree-vect-slp.c (file diff suppressed because it is too large)
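
The suppressed tree-vect-slp.c diff carries the bulk of the change.  Per the ChangeLog above, its structural core is vect_analyze_slp_instance collecting a vec of stmt_vec_infos, rather than gimple stmts, before building the tree.  A hedged sketch of that shape (illustrative only, not the actual implementation; the real function also handles reduction chains and inductions):

    vec<stmt_vec_info> scalar_stmts;
    scalar_stmts.create (group_size);
    gimple *next = stmt;
    while (next)
      {
        /* Collect the per-statement info instead of the bare stmt.  */
        scalar_stmts.safe_push (vinfo_for_stmt (next));
        next = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
      }
    /* ...scalar_stmts is then handed to vect_build_slp_tree.  */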

gcc/tree-vect-stmts.c

@@ -806,7 +806,7 @@ vect_prologue_cost_for_slp_op (slp_tree node, stmt_vec_info stmt_info,
                               unsigned opno, enum vect_def_type dt,
                               stmt_vector_for_cost *cost_vec)
 {
-  gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0];
+  gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0]->stmt;
   tree op = gimple_op (stmt, opno);
   unsigned prologue_cost = 0;
@@ -838,11 +838,11 @@ vect_prologue_cost_for_slp_op (slp_tree node, stmt_vec_info stmt_info,
        {
          unsigned si = j % group_size;
          if (nelt == 0)
-           elt = gimple_op (SLP_TREE_SCALAR_STMTS (node)[si], opno);
+           elt = gimple_op (SLP_TREE_SCALAR_STMTS (node)[si]->stmt, opno);
          /* ??? We're just tracking whether all operands of a single
             vector initializer are the same, ideally we'd check if
             we emitted the same one already.  */
-         else if (elt != gimple_op (SLP_TREE_SCALAR_STMTS (node)[si],
+         else if (elt != gimple_op (SLP_TREE_SCALAR_STMTS (node)[si]->stmt,
                                     opno))
            elt = NULL_TREE;
          nelt++;
@@ -889,7 +889,7 @@ vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
      /* Scan operands and account for prologue cost of constants/externals.
         ??? This over-estimates cost for multiple uses and should be
         re-engineered.  */
-     gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0];
+     gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0]->stmt;
      tree lhs = gimple_get_lhs (stmt);
      for (unsigned i = 0; i < gimple_num_ops (stmt); ++i)
        {
@@ -5532,12 +5532,15 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
         a scalar shift.  */
      if (slp_node)
        {
-         vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
-         gimple *slpstmt;
+         vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
+         stmt_vec_info slpstmt_info;
-         FOR_EACH_VEC_ELT (stmts, k, slpstmt)
-           if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
-             scalar_shift_arg = false;
+         FOR_EACH_VEC_ELT (stmts, k, slpstmt_info)
+           {
+             gassign *slpstmt = as_a <gassign *> (slpstmt_info->stmt);
+             if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
+               scalar_shift_arg = false;
+           }
        }
      /* If the shift amount is computed by a pattern stmt we cannot
@@ -7421,7 +7424,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
   vec<tree> dr_chain = vNULL;
   bool grouped_load = false;
   gimple *first_stmt;
-  gimple *first_stmt_for_drptr = NULL;
+  stmt_vec_info first_stmt_info_for_drptr = NULL;
   bool inv_p;
   bool compute_in_loop = false;
   struct loop *at_loop;
@@ -7930,7 +7933,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
      /* For BB vectorization always use the first stmt to base
         the data ref pointer on.  */
      if (bb_vinfo)
-       first_stmt_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];
+       first_stmt_info_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];
      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
@@ -8180,17 +8183,17 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
            dataref_offset = build_int_cst (ref_type, 0);
            inv_p = false;
          }
-       else if (first_stmt_for_drptr
-                && first_stmt != first_stmt_for_drptr)
+       else if (first_stmt_info_for_drptr
+                && first_stmt != first_stmt_info_for_drptr)
          {
            dataref_ptr
-             = vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type,
-                                         at_loop, offset, &dummy, gsi,
-                                         &ptr_incr, simd_lane_access_p,
+             = vect_create_data_ref_ptr (first_stmt_info_for_drptr,
+                                         aggr_type, at_loop, offset, &dummy,
+                                         gsi, &ptr_incr, simd_lane_access_p,
                                          &inv_p, byte_offset, bump);
            /* Adjust the pointer by the difference to first_stmt.  */
            data_reference_p ptrdr
-             = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr));
+             = STMT_VINFO_DATA_REF (first_stmt_info_for_drptr);
            tree diff = fold_convert (sizetype,
                                      size_binop (MINUS_EXPR,
                                                  DR_INIT (first_dr),
@@ -9391,13 +9394,12 @@ can_vectorize_live_stmts (gimple *stmt, gimple_stmt_iterator *gsi,
 {
   if (slp_node)
     {
-      gimple *slp_stmt;
+      stmt_vec_info slp_stmt_info;
      unsigned int i;
-      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt)
+      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt_info)
        {
-         stmt_vec_info slp_stmt_info = vinfo_for_stmt (slp_stmt);
          if (STMT_VINFO_LIVE_P (slp_stmt_info)
-             && !vectorizable_live_operation (slp_stmt, gsi, slp_node, i,
+             && !vectorizable_live_operation (slp_stmt_info, gsi, slp_node, i,
                                               vec_stmt, cost_vec))
            return false;
        }
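
One hunk in this file is more than a mechanical substitution: in vectorizable_shift, the rewritten loop adds an explicit as_a <gassign *> downcast before calling gimple_assign_rhs2, making the assumption that each SLP scalar statement is an assignment explicit.  as_a is the standard GCC idiom for a checked cast to a gimple subclass; the hunk's new loop restated in isolation:

    stmt_vec_info slpstmt_info;
    FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), k, slpstmt_info)
      {
        /* stmt_vec_info::stmt is a plain gimple *, so cast to the
           gassign subclass before querying assignment operands.  */
        gassign *slpstmt = as_a <gassign *> (slpstmt_info->stmt);
        if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
          scalar_shift_arg = false;
      }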

gcc/tree-vectorizer.h

@@ -138,7 +138,7 @@ struct _slp_tree {
   /* Nodes that contain def-stmts of this node statements operands.  */
   vec<slp_tree> children;
   /* A group of scalar stmts to be vectorized together.  */
-  vec<gimple *> stmts;
+  vec<stmt_vec_info> stmts;
   /* Load permutation relative to the stores, NULL if there is no
      permutation.  */
   vec<unsigned> load_permutation;
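
For reference, SLP_TREE_SCALAR_STMTS is a thin accessor over this field (defined alongside the struct as (S)->stmts), which is why the type change fans out to every use site in the files above.  A typical traversal after this patch, modelled on the dependence-analysis hunks:

    unsigned int i;
    stmt_vec_info stmt_info;
    FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt_info)
      /* Per-statement data is direct; gimple-level utilities still
         take the raw statement via ->stmt.  */
      gimple_set_visited (stmt_info->stmt, true);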