diff options
author | Alexander Ivchenko <alexander.ivchenko@intel.com> | 2014-07-11 15:24:10 +0400 |
---|---|---|
committer | Alexander Ivchenko <alexander.ivchenko@intel.com> | 2014-08-06 16:24:16 +0400 |
commit | 55f9fbb03d0413cb8fe74e5ec5d6c2dd4280933e (patch) | |
tree | a276531909449c8ed589df86ad3cfdd3048b7400 /gcc-4.9/gcc/tree-vect-data-refs.c | |
parent | 38a8aecfb882072900434499696b5c32a2274515 (diff) | |
download | toolchain_gcc-55f9fbb03d0413cb8fe74e5ec5d6c2dd4280933e.tar.gz toolchain_gcc-55f9fbb03d0413cb8fe74e5ec5d6c2dd4280933e.tar.bz2 toolchain_gcc-55f9fbb03d0413cb8fe74e5ec5d6c2dd4280933e.zip |
[4.8, 4.9] Backport of additional SLM tuning.
Six patches from trunk, reg-tested via 'make check':
2014-05-07 Evgeny Stupachenko <evstupac@gmail.com>
* tree-vect-data-refs.c (vect_grouped_load_supported): New
check for loads group of length 3.
(vect_permute_load_chain): New permutations for loads group of
length 3.
* tree-vect-stmts.c (vect_model_load_cost): Change cost
of vec_perm_shuffle for the new permutations.
2014-04-17 Evgeny Stupachenko <evstupac@gmail.com>
* config/i386/i386.c (x86_add_stmt_cost): Fix vector cost model for
Silvermont.
2014-04-17 Evgeny Stupachenko <evstupac@gmail.com>
* config/i386/x86-tune.def (TARGET_SLOW_PSHUFB): New tune definition.
* config/i386/i386.h (TARGET_SLOW_PSHUFB): New tune flag.
* config/i386/i386.c (expand_vec_perm_even_odd_1): Avoid byte shuffles
for TARGET_SLOW_PSHUFB
2014-04-17 Evgeny Stupachenko <evstupac@gmail.com>
* config/i386/i386.c (slm_cost): Adjust vec_to_scalar_cost.
* config/i386/i386.c (intel_cost): Ditto.
2014-06-18 Evgeny Stupachenko <evstupac@gmail.com>
* config/i386/i386.c (ix86_reassociation_width): Add alternative for
vector case.
* config/i386/i386.h (TARGET_VECTOR_PARALLEL_EXECUTION): New.
* config/i386/x86-tune.def (X86_TUNE_VECTOR_PARALLEL_EXECUTION): New.
* tree-vect-data-refs.c (vect_shift_permute_load_chain): New.
Introduces an alternative way of performing load-group permutations.
(vect_transform_grouped_load): Try alternative way of permutations.
2014-06-05 Evgeny Stupachenko <evstupac@gmail.com>
* config/i386/sse.md (*ssse3_palignr<mode>_perm): New.
* config/i386/predicates.md (palignr_operand): New.
Indicates if permutation is suitable for palignr instruction.
Change-Id: I5e505735ce3dc0ec3c2a1151713a119b24d712fe
Signed-off-by: Alexander Ivchenko <alexander.ivchenko@intel.com>
Diffstat (limited to 'gcc-4.9/gcc/tree-vect-data-refs.c')
-rw-r--r-- | gcc-4.9/gcc/tree-vect-data-refs.c | 524 |
1 files changed, 484 insertions, 40 deletions
diff --git a/gcc-4.9/gcc/tree-vect-data-refs.c b/gcc-4.9/gcc/tree-vect-data-refs.c index 6622bd84d..ab1197ec6 100644 --- a/gcc-4.9/gcc/tree-vect-data-refs.c +++ b/gcc-4.9/gcc/tree-vect-data-refs.c @@ -4815,36 +4815,76 @@ vect_grouped_load_supported (tree vectype, unsigned HOST_WIDE_INT count) { enum machine_mode mode = TYPE_MODE (vectype); - /* vect_permute_load_chain requires the group size to be a power of two. */ - if (exact_log2 (count) == -1) + /* vect_permute_load_chain requires the group size to be equal to 3 or + be a power of two. */ + if (count != 3 && exact_log2 (count) == -1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "the size of the group of accesses" - " is not a power of 2\n"); + "the size of the group of accesses" + " is not a power of 2 or not equal to 3\n"); return false; } /* Check that the permutation is supported. */ if (VECTOR_MODE_P (mode)) { - unsigned int i, nelt = GET_MODE_NUNITS (mode); + unsigned int i, j, nelt = GET_MODE_NUNITS (mode); unsigned char *sel = XALLOCAVEC (unsigned char, nelt); - for (i = 0; i < nelt; i++) - sel[i] = i * 2; - if (can_vec_perm_p (mode, false, sel)) + if (count == 3) { + unsigned int k; + for (k = 0; k < 3; k++) + { + for (i = 0; i < nelt; i++) + if (3 * i + k < 2 * nelt) + sel[i] = 3 * i + k; + else + sel[i] = 0; + if (!can_vec_perm_p (mode, false, sel)) + { + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "shuffle of 3 loads is not supported by" + " target\n"); + return false; + } + for (i = 0, j = 0; i < nelt; i++) + if (3 * i + k < 2 * nelt) + sel[i] = i; + else + sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++); + if (!can_vec_perm_p (mode, false, sel)) + { + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "shuffle of 3 loads is not supported by" + " target\n"); + return false; + } + } + return true; + } + else + { + /* If length is not equal to 3 then only power of 2 is supported. 
*/ + gcc_assert (exact_log2 (count) != -1); for (i = 0; i < nelt; i++) - sel[i] = i * 2 + 1; + sel[i] = i * 2; if (can_vec_perm_p (mode, false, sel)) - return true; - } + { + for (i = 0; i < nelt; i++) + sel[i] = i * 2 + 1; + if (can_vec_perm_p (mode, false, sel)) + return true; + } + } } if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "extract even/odd not supported by target\n"); + "extract even/odd not supported by target\n"); return false; } @@ -4862,8 +4902,9 @@ vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count) /* Function vect_permute_load_chain. Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be - a power of 2, generate extract_even/odd stmts to reorder the input data - correctly. Return the final references for loads in RESULT_CHAIN. + a power of 2 or equal to 3, generate extract_even/odd stmts to reorder + the input data correctly. Return the final references for loads in + RESULT_CHAIN. E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8. The input is 4 vectors each containing 8 elements. 
We assign a number to each @@ -4944,6 +4985,7 @@ vect_permute_load_chain (vec<tree> dr_chain, { tree data_ref, first_vect, second_vect; tree perm_mask_even, perm_mask_odd; + tree perm3_mask_low, perm3_mask_high; gimple perm_stmt; tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)); unsigned int i, j, log_length = exact_log2 (length); @@ -4954,44 +4996,437 @@ vect_permute_load_chain (vec<tree> dr_chain, memcpy (result_chain->address (), dr_chain.address (), length * sizeof (tree)); - for (i = 0; i < nelt; ++i) - sel[i] = i * 2; - perm_mask_even = vect_gen_perm_mask (vectype, sel); - gcc_assert (perm_mask_even != NULL); - - for (i = 0; i < nelt; ++i) - sel[i] = i * 2 + 1; - perm_mask_odd = vect_gen_perm_mask (vectype, sel); - gcc_assert (perm_mask_odd != NULL); - - for (i = 0; i < log_length; i++) + if (length == 3) { - for (j = 0; j < length; j += 2) - { - first_vect = dr_chain[j]; - second_vect = dr_chain[j+1]; + unsigned int k; - /* data_ref = permute_even (first_data_ref, second_data_ref); */ - data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_even"); + for (k = 0; k < 3; k++) + { + for (i = 0; i < nelt; i++) + if (3 * i + k < 2 * nelt) + sel[i] = 3 * i + k; + else + sel[i] = 0; + perm3_mask_low = vect_gen_perm_mask (vectype, sel); + gcc_assert (perm3_mask_low != NULL); + + for (i = 0, j = 0; i < nelt; i++) + if (3 * i + k < 2 * nelt) + sel[i] = i; + else + sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++); + + perm3_mask_high = vect_gen_perm_mask (vectype, sel); + gcc_assert (perm3_mask_high != NULL); + + first_vect = dr_chain[0]; + second_vect = dr_chain[1]; + + /* Create interleaving stmt (low part of): + low = VEC_PERM_EXPR <first_vect, second_vect2, {k, 3 + k, 6 + k, + ...}> */ + data_ref = make_temp_ssa_name (vectype, NULL, "vect_suffle3_low"); perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, first_vect, second_vect, - perm_mask_even); + perm3_mask_low); vect_finish_stmt_generation (stmt, perm_stmt, gsi); - (*result_chain)[j/2] = 
data_ref; - /* data_ref = permute_odd (first_data_ref, second_data_ref); */ - data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_odd"); + /* Create interleaving stmt (high part of): + high = VEC_PERM_EXPR <first_vect, second_vect2, {k, 3 + k, 6 + k, + ...}> */ + first_vect = data_ref; + second_vect = dr_chain[2]; + data_ref = make_temp_ssa_name (vectype, NULL, "vect_suffle3_high"); perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, first_vect, second_vect, - perm_mask_odd); + perm3_mask_high); vect_finish_stmt_generation (stmt, perm_stmt, gsi); - (*result_chain)[j/2+length/2] = data_ref; + (*result_chain)[k] = data_ref; + } + } + else + { + /* If length is not equal to 3 then only power of 2 is supported. */ + gcc_assert (exact_log2 (length) != -1); + + for (i = 0; i < nelt; ++i) + sel[i] = i * 2; + perm_mask_even = vect_gen_perm_mask (vectype, sel); + gcc_assert (perm_mask_even != NULL); + + for (i = 0; i < nelt; ++i) + sel[i] = i * 2 + 1; + perm_mask_odd = vect_gen_perm_mask (vectype, sel); + gcc_assert (perm_mask_odd != NULL); + + for (i = 0; i < log_length; i++) + { + for (j = 0; j < length; j += 2) + { + first_vect = dr_chain[j]; + second_vect = dr_chain[j+1]; + + /* data_ref = permute_even (first_data_ref, second_data_ref); */ + data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_even"); + perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, + first_vect, second_vect, + perm_mask_even); + vect_finish_stmt_generation (stmt, perm_stmt, gsi); + (*result_chain)[j/2] = data_ref; + + /* data_ref = permute_odd (first_data_ref, second_data_ref); */ + data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_odd"); + perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, + first_vect, second_vect, + perm_mask_odd); + vect_finish_stmt_generation (stmt, perm_stmt, gsi); + (*result_chain)[j/2+length/2] = data_ref; + } + memcpy (dr_chain.address (), result_chain->address (), + length * sizeof (tree)); } - memcpy 
(dr_chain.address (), result_chain->address (), - length * sizeof (tree)); } } +/* Function vect_shift_permute_load_chain. + + Given a chain of loads in DR_CHAIN of LENGTH 2 or 3, generate + sequence of stmts to reorder the input data accordingly. + Return the final references for loads in RESULT_CHAIN. + Return true if successed, false otherwise. + + E.g., LENGTH is 3 and the scalar type is short, i.e., VF is 8. + The input is 3 vectors each containing 8 elements. We assign a + number to each element, the input sequence is: + + 1st vec: 0 1 2 3 4 5 6 7 + 2nd vec: 8 9 10 11 12 13 14 15 + 3rd vec: 16 17 18 19 20 21 22 23 + + The output sequence should be: + + 1st vec: 0 3 6 9 12 15 18 21 + 2nd vec: 1 4 7 10 13 16 19 22 + 3rd vec: 2 5 8 11 14 17 20 23 + + We use 3 shuffle instructions and 3 * 3 - 1 shifts to create such output. + + First we shuffle all 3 vectors to get correct elements order: + + 1st vec: ( 0 3 6) ( 1 4 7) ( 2 5) + 2nd vec: ( 8 11 14) ( 9 12 15) (10 13) + 3rd vec: (16 19 22) (17 20 23) (18 21) + + Next we unite and shift vector 3 times: + + 1st step: + shift right by 6 the concatenation of: + "1st vec" and "2nd vec" + ( 0 3 6) ( 1 4 7) |( 2 5) _ ( 8 11 14) ( 9 12 15)| (10 13) + "2nd vec" and "3rd vec" + ( 8 11 14) ( 9 12 15) |(10 13) _ (16 19 22) (17 20 23)| (18 21) + "3rd vec" and "1st vec" + (16 19 22) (17 20 23) |(18 21) _ ( 0 3 6) ( 1 4 7)| ( 2 5) + | New vectors | + + So that now new vectors are: + + 1st vec: ( 2 5) ( 8 11 14) ( 9 12 15) + 2nd vec: (10 13) (16 19 22) (17 20 23) + 3rd vec: (18 21) ( 0 3 6) ( 1 4 7) + + 2nd step: + shift right by 5 the concatenation of: + "1st vec" and "3rd vec" + ( 2 5) ( 8 11 14) |( 9 12 15) _ (18 21) ( 0 3 6)| ( 1 4 7) + "2nd vec" and "1st vec" + (10 13) (16 19 22) |(17 20 23) _ ( 2 5) ( 8 11 14)| ( 9 12 15) + "3rd vec" and "2nd vec" + (18 21) ( 0 3 6) |( 1 4 7) _ (10 13) (16 19 22)| (17 20 23) + | New vectors | + + So that now new vectors are: + + 1st vec: ( 9 12 15) (18 21) ( 0 3 6) + 2nd vec: (17 20 23) ( 2 
5) ( 8 11 14) + 3rd vec: ( 1 4 7) (10 13) (16 19 22) READY + + 3rd step: + shift right by 5 the concatenation of: + "1st vec" and "1st vec" + ( 9 12 15) (18 21) |( 0 3 6) _ ( 9 12 15) (18 21)| ( 0 3 6) + shift right by 3 the concatenation of: + "2nd vec" and "2nd vec" + (17 20 23) |( 2 5) ( 8 11 14) _ (17 20 23)| ( 2 5) ( 8 11 14) + | New vectors | + + So that now all vectors are READY: + 1st vec: ( 0 3 6) ( 9 12 15) (18 21) + 2nd vec: ( 2 5) ( 8 11 14) (17 20 23) + 3rd vec: ( 1 4 7) (10 13) (16 19 22) + + This algorithm is faster than one in vect_permute_load_chain if: + 1. "shift of a concatination" is faster than general permutation. + This is usually so. + 2. The TARGET machine can't execute vector instructions in parallel. + This is because each step of the algorithm depends on previous. + The algorithm in vect_permute_load_chain is much more parallel. + + The algorithm is applicable only for LOAD CHAIN LENGTH less than VF. +*/ + +static bool +vect_shift_permute_load_chain (vec<tree> dr_chain, + unsigned int length, + gimple stmt, + gimple_stmt_iterator *gsi, + vec<tree> *result_chain) +{ + tree vect[3], vect_shift[3], data_ref, first_vect, second_vect; + tree perm2_mask1, perm2_mask2, perm3_mask; + tree select_mask, shift1_mask, shift2_mask, shift3_mask, shift4_mask; + gimple perm_stmt; + + tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)); + unsigned int i; + unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype); + unsigned char *sel = XALLOCAVEC (unsigned char, nelt); + stmt_vec_info stmt_info = vinfo_for_stmt (stmt); + loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + + result_chain->quick_grow (length); + memcpy (result_chain->address (), dr_chain.address (), + length * sizeof (tree)); + + if (length == 2 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 4) + { + for (i = 0; i < nelt / 2; ++i) + sel[i] = i * 2; + for (i = 0; i < nelt / 2; ++i) + sel[nelt / 2 + i] = i * 2 + 1; + if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel)) + { + if 
(dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "shuffle of 2 fields structure is not \ + supported by target\n"); + return false; + } + perm2_mask1 = vect_gen_perm_mask (vectype, sel); + gcc_assert (perm2_mask1 != NULL); + + for (i = 0; i < nelt / 2; ++i) + sel[i] = i * 2 + 1; + for (i = 0; i < nelt / 2; ++i) + sel[nelt / 2 + i] = i * 2; + if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel)) + { + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "shuffle of 2 fields structure is not \ + supported by target\n"); + return false; + } + perm2_mask2 = vect_gen_perm_mask (vectype, sel); + gcc_assert (perm2_mask2 != NULL); + + /* Generating permutation constant to shift all elements. + For vector length 8 it is {4 5 6 7 8 9 10 11}. */ + for (i = 0; i < nelt; i++) + sel[i] = nelt / 2 + i; + if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel)) + { + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "shift permutation is not supported by target\n"); + return false; + } + shift1_mask = vect_gen_perm_mask (vectype, sel); + gcc_assert (shift1_mask != NULL); + + /* Generating permutation constant to select vector from 2. + For vector length 8 it is {0 1 2 3 12 13 14 15}. 
*/ + for (i = 0; i < nelt / 2; i++) + sel[i] = i; + for (i = nelt / 2; i < nelt; i++) + sel[i] = nelt + i; + if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel)) + { + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "select is not supported by target\n"); + return false; + } + select_mask = vect_gen_perm_mask (vectype, sel); + gcc_assert (select_mask != NULL); + + first_vect = dr_chain[0]; + second_vect = dr_chain[1]; + + data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2"); + perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, + first_vect, first_vect, + perm2_mask1); + vect_finish_stmt_generation (stmt, perm_stmt, gsi); + vect[0] = data_ref; + + data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2"); + perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, + second_vect, second_vect, + perm2_mask2); + vect_finish_stmt_generation (stmt, perm_stmt, gsi); + vect[1] = data_ref; + + data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift"); + perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, + vect[0], vect[1], + shift1_mask); + vect_finish_stmt_generation (stmt, perm_stmt, gsi); + (*result_chain)[1] = data_ref; + + data_ref = make_temp_ssa_name (vectype, NULL, "vect_select"); + perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, + vect[0], vect[1], + select_mask); + vect_finish_stmt_generation (stmt, perm_stmt, gsi); + (*result_chain)[0] = data_ref; + + return true; + } + if (length == 3 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 2) + { + unsigned int k = 0, l = 0; + + /* Generating permutation constant to get all elements in rigth order. + For vector length 8 it is {0 3 6 1 4 7 2 5}. 
*/ + for (i = 0; i < nelt; i++) + { + if (3 * k + (l % 3) >= nelt) + { + k = 0; + l += (3 - (nelt % 3)); + } + sel[i] = 3 * k + (l % 3); + k++; + } + if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel)) + { + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "shuffle of 3 fields structure is not \ + supported by target\n"); + return false; + } + perm3_mask = vect_gen_perm_mask (vectype, sel); + gcc_assert (perm3_mask != NULL); + + /* Generating permutation constant to shift all elements. + For vector length 8 it is {6 7 8 9 10 11 12 13}. */ + for (i = 0; i < nelt; i++) + sel[i] = 2 * (nelt / 3) + (nelt % 3) + i; + if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel)) + { + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "shift permutation is not supported by target\n"); + return false; + } + shift1_mask = vect_gen_perm_mask (vectype, sel); + gcc_assert (shift1_mask != NULL); + + /* Generating permutation constant to shift all elements. + For vector length 8 it is {5 6 7 8 9 10 11 12}. */ + for (i = 0; i < nelt; i++) + sel[i] = 2 * (nelt / 3) + 1 + i; + if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel)) + { + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "shift permutation is not supported by target\n"); + return false; + } + shift2_mask = vect_gen_perm_mask (vectype, sel); + gcc_assert (shift2_mask != NULL); + + /* Generating permutation constant to shift all elements. + For vector length 8 it is {3 4 5 6 7 8 9 10}. */ + for (i = 0; i < nelt; i++) + sel[i] = (nelt / 3) + (nelt % 3) / 2 + i; + if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel)) + { + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "shift permutation is not supported by target\n"); + return false; + } + shift3_mask = vect_gen_perm_mask (vectype, sel); + gcc_assert (shift3_mask != NULL); + + /* Generating permutation constant to shift all elements. 
+ For vector length 8 it is {5 6 7 8 9 10 11 12}. */ + for (i = 0; i < nelt; i++) + sel[i] = 2 * (nelt / 3) + (nelt % 3) / 2 + i; + if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel)) + { + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "shift permutation is not supported by target\n"); + return false; + } + shift4_mask = vect_gen_perm_mask (vectype, sel); + gcc_assert (shift4_mask != NULL); + + for (k = 0; k < 3; k++) + { + data_ref = make_temp_ssa_name (vectype, NULL, "vect_suffle3"); + perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, + dr_chain[k], dr_chain[k], + perm3_mask); + vect_finish_stmt_generation (stmt, perm_stmt, gsi); + vect[k] = data_ref; + } + + for (k = 0; k < 3; k++) + { + data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift1"); + perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, + vect[k % 3], + vect[(k + 1) % 3], + shift1_mask); + vect_finish_stmt_generation (stmt, perm_stmt, gsi); + vect_shift[k] = data_ref; + } + + for (k = 0; k < 3; k++) + { + data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift2"); + perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, + vect_shift[(4 - k) % 3], + vect_shift[(3 - k) % 3], + shift2_mask); + vect_finish_stmt_generation (stmt, perm_stmt, gsi); + vect[k] = data_ref; + } + + (*result_chain)[3 - (nelt % 3)] = vect[2]; + + data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift3"); + perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, + vect[0], vect[0], + shift3_mask); + vect_finish_stmt_generation (stmt, perm_stmt, gsi); + (*result_chain)[nelt % 3] = data_ref; + + data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift4"); + perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, + vect[1], vect[1], + shift4_mask); + vect_finish_stmt_generation (stmt, perm_stmt, gsi); + (*result_chain)[0] = data_ref; + return true; + } + return false; +} /* Function vect_transform_grouped_load. 
@@ -5004,13 +5439,22 @@ void vect_transform_grouped_load (gimple stmt, vec<tree> dr_chain, int size, gimple_stmt_iterator *gsi) { + enum machine_mode mode; vec<tree> result_chain = vNULL; /* DR_CHAIN contains input data-refs that are a part of the interleaving. RESULT_CHAIN is the output of vect_permute_load_chain, it contains permuted vectors, that are ready for vector computation. */ result_chain.create (size); - vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain); + + /* If reassociation width for vector type is 2 or greater target machine can + execute 2 or more vector instructions in parallel. Otherwise try to + get chain for loads group using vect_shift_permute_load_chain. */ + mode = TYPE_MODE (STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt))); + if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1 + || !vect_shift_permute_load_chain (dr_chain, size, stmt, + gsi, &result_chain)) + vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain); vect_record_grouped_load_vectors (stmt, result_chain); result_chain.release (); } |