summaryrefslogtreecommitdiffstats
path: root/src/compiler/nir/nir_lower_phis_to_scalar.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/compiler/nir/nir_lower_phis_to_scalar.c')
-rw-r--r-- src/compiler/nir/nir_lower_phis_to_scalar.c | 20
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/src/compiler/nir/nir_lower_phis_to_scalar.c b/src/compiler/nir/nir_lower_phis_to_scalar.c
index 9fd00cc784..b12718f273 100644
--- a/src/compiler/nir/nir_lower_phis_to_scalar.c
+++ b/src/compiler/nir/nir_lower_phis_to_scalar.c
@@ -166,6 +166,8 @@ static bool
lower_phis_to_scalar_block(nir_block *block,
struct lower_phis_to_scalar_state *state)
{
+ bool progress = false;
+
/* Find the last phi node in the block */
nir_phi_instr *last_phi = NULL;
nir_foreach_instr(instr, block) {
@@ -248,6 +250,8 @@ lower_phis_to_scalar_block(nir_block *block,
ralloc_steal(state->dead_ctx, phi);
nir_instr_remove(&phi->instr);
+ progress = true;
+
/* We're using the safe iterator and inserting all the newly
* scalarized phi nodes before their non-scalarized version so that's
* ok. However, we are also inserting vec operations after all of
@@ -258,13 +262,14 @@ lower_phis_to_scalar_block(nir_block *block,
break;
}
- return true;
+ return progress;
}
-static void
+static bool
lower_phis_to_scalar_impl(nir_function_impl *impl)
{
struct lower_phis_to_scalar_state state;
+ bool progress = false;
state.mem_ctx = ralloc_parent(impl);
state.dead_ctx = ralloc_context(NULL);
@@ -272,13 +277,14 @@ lower_phis_to_scalar_impl(nir_function_impl *impl)
_mesa_key_pointer_equal);
nir_foreach_block(block, impl) {
- lower_phis_to_scalar_block(block, &state);
+ progress = lower_phis_to_scalar_block(block, &state) || progress;
}
nir_metadata_preserve(impl, nir_metadata_block_index |
nir_metadata_dominance);
ralloc_free(state.dead_ctx);
+ return progress;
}
/** A pass that lowers vector phi nodes to scalar
@@ -288,11 +294,15 @@ lower_phis_to_scalar_impl(nir_function_impl *impl)
* instance, if one of the sources is a non-scalarizable vector, then we
* don't bother lowering because that would generate hard-to-coalesce movs.
*/
-void
+bool
nir_lower_phis_to_scalar(nir_shader *shader)
{
+ bool progress = false;
+
nir_foreach_function(function, shader) {
if (function->impl)
- lower_phis_to_scalar_impl(function->impl);
+ progress = lower_phis_to_scalar_impl(function->impl) || progress;
}
+
+ return progress;
}