@@ -1622,8 +1622,7 @@ check_load_store_for_partial_vectors (loop_vec_info loop_vinfo, tree vectype,
       return;
     }
 
-  if (memory_access_type != VMAT_CONTIGUOUS
-      && memory_access_type != VMAT_CONTIGUOUS_PERMUTE)
+  if (memory_access_type != VMAT_CONTIGUOUS)
     {
       /* Element X of the data must come from iteration i * VF + X of the
          scalar loop.  We need more work to support other mappings.  */
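
The retained comment states the constraint this check enforces for partial vectors (loop masking): lane X of the vector must come from scalar iteration i * VF + X, so that a per-lane mask can be computed from the iteration count. The following is a minimal standalone sketch of that identity mapping and why it makes masking computable, assuming illustrative values VF = 4 and a trip count of 10 (this is not GCC code):

#include <stdio.h>

int main (void)
{
  enum { VF = 4 };
  const int niters = 10;   /* illustrative scalar trip count */

  /* With the lane X <-> scalar iteration i * VF + X mapping, the mask
     for a partial final vector iteration is computable per lane.  */
  for (int i = 0; i * VF < niters; i++)
    for (int lane = 0; lane < VF; lane++)
      printf ("vector iter %d, lane %d -> scalar iter %d (%s)\n",
              i, lane, i * VF + lane,
              i * VF + lane < niters ? "active" : "masked");
  return 0;
}

Any access kind that reorders lanes relative to scalar iterations would need more bookkeeping than this, which is what the comment's "more work" refers to.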
@@ -9050,7 +9049,6 @@ vectorizable_store (vec_info *vinfo,
 
   gcc_assert (memory_access_type == VMAT_CONTIGUOUS
               || memory_access_type == VMAT_CONTIGUOUS_DOWN
-              || memory_access_type == VMAT_CONTIGUOUS_PERMUTE
               || memory_access_type == VMAT_CONTIGUOUS_REVERSE);
 
   unsigned inside_cost = 0, prologue_cost = 0;
@@ -9095,25 +9093,7 @@ vectorizable_store (vec_info *vinfo,
                                       simd_lane_access_p, bump);
 
       new_stmt = NULL;
-      if (grouped_store)
-        {
-          /* Permute.  */
-          gcc_assert (memory_access_type == VMAT_CONTIGUOUS_PERMUTE);
-          if (costing_p)
-            {
-              int group_size = DR_GROUP_SIZE (first_stmt_info);
-              int nstmts = ceil_log2 (group_size) * group_size;
-              inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
-                                               slp_node, 0, vect_body);
-              if (dump_enabled_p ())
-                dump_printf_loc (MSG_NOTE, vect_location, "vect_model_store_cost: "
-                                 "strided group_size = %d .\n", group_size);
-            }
-          else
-            vect_permute_store_chain (vinfo, dr_chain, group_size, stmt_info,
-                                      gsi, &result_chain);
-        }
-
+      gcc_assert (!grouped_store);
       for (i = 0; i < vec_num; i++)
         {
           if (!costing_p)
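
For reference, the costing path removed above charged ceil_log2 (group_size) * group_size vec_perm statements for the interleaving network that vect_permute_store_chain would build. A standalone sketch of that arithmetic, using a minimal stand-in for GCC's ceil_log2 and showing only power-of-two group sizes (an assumption for illustration, not GCC code):

#include <stdio.h>

/* Minimal stand-in for GCC's ceil_log2: smallest l with 2^l >= x.  */
static int
ceil_log2 (int x)
{
  int l = 0;
  while ((1 << l) < x)
    l++;
  return l;
}

int main (void)
{
  /* An interleaving network over n vectors takes ceil_log2 (n) rounds
     of n permutes each, hence n * ceil_log2 (n) vec_perm stmts.  */
  for (int group_size = 2; group_size <= 8; group_size *= 2)
    printf ("group_size %d -> %d vec_perm stmts\n",
            group_size, ceil_log2 (group_size) * group_size);
  return 0;
}

So a group of 4 vectors cost 8 permutes and a group of 8 cost 24; with grouped stores asserted unreachable here, none of that is charged any more.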
@@ -11457,18 +11437,12 @@ vectorizable_load (vec_info *vinfo,
              alignment support schemes.  */
           if (costing_p)
             {
-              /* For VMAT_CONTIGUOUS_PERMUTE if it's grouped load, we
-                 only need to take care of the first stmt, whose
-                 stmt_info is first_stmt_info, vec_num iterating on it
-                 will cover the cost for the remaining, it's consistent
-                 with transforming.  For the prologue cost for realign,
+              /* For the prologue cost for realign,
                  we only need to count it once for the whole group.  */
               bool first_stmt_info_p = first_stmt_info == stmt_info;
               bool add_realign_cost = first_stmt_info_p && i == 0;
               if (memory_access_type == VMAT_CONTIGUOUS
-                  || memory_access_type == VMAT_CONTIGUOUS_REVERSE
-                  || (memory_access_type == VMAT_CONTIGUOUS_PERMUTE
-                      && (!grouped_load || first_stmt_info_p)))
+                  || memory_access_type == VMAT_CONTIGUOUS_REVERSE)
                 {
                   /* Leave realign cases alone to keep them simple.  */
                   if (alignment_support_scheme == dr_explicit_realign_optimized
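
The surviving logic counts the realign prologue only once per group: add_realign_cost holds only for the group's first statement on its first vector. A small sketch of that count-once pattern, with illustrative group_size and vec_num values (not GCC code):

#include <stdbool.h>
#include <stdio.h>

int main (void)
{
  const int group_size = 3, vec_num = 2;   /* illustrative */
  int prologue_cost = 0;

  for (int stmt = 0; stmt < group_size; stmt++)
    for (int i = 0; i < vec_num; i++)
      {
        bool first_stmt_info_p = (stmt == 0);
        bool add_realign_cost = first_stmt_info_p && i == 0;
        if (add_realign_cost)
          prologue_cost += 1;   /* one-off realign setup for the group */
      }
  printf ("prologue counted %d time(s)\n", prologue_cost);  /* 1 */
  return 0;
}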
@@ -11625,8 +11599,7 @@ vectorizable_load (vec_info *vinfo,
           if (costing_p)
             {
               gcc_assert (memory_access_type == VMAT_CONTIGUOUS
-                          || memory_access_type == VMAT_CONTIGUOUS_REVERSE
-                          || memory_access_type == VMAT_CONTIGUOUS_PERMUTE);
+                          || memory_access_type == VMAT_CONTIGUOUS_REVERSE);
               if (n_adjacent_loads > 0)
                 vect_get_load_cost (vinfo, stmt_info, slp_node, n_adjacent_loads,
                                     alignment_support_scheme, misalignment, false,