Unfortunately some int64 lowerings generate more int64 operations, so
nir_lower_int64() has to be called repeatedly until it stops making
progress. Also call nir_lower_alu_to_scalar() beforehand to make more
int64 operations available for lowering.
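
For reference, a minimal sketch of the fixpoint pattern introduced below.
The standalone helper and its name lower_int64_to_fixpoint are
hypothetical (not part of this patch); it only illustrates that
nir_lower_int64() returns true while it still makes progress, so looping
until it returns false also lowers any int64 operations produced by
earlier lowerings:

    /* Hypothetical helper (assumes "nir.h"): keep lowering until
     * nir_lower_int64() reports no further progress, since lowerings
     * such as imul64 can emit new int64 ALU operations that themselves
     * need lowering.
     */
    static void
    lower_int64_to_fixpoint(nir_shader *nir)
    {
       bool progress;
       do {
          progress = nir_lower_int64(nir, nir_lower_imul64 |
                                          nir_lower_isign64 |
                                          nir_lower_divmod64);
       } while (progress);
    }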
---
 src/intel/compiler/brw_nir.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c
index f61baee230a..066724c58a6 100644
--- a/src/intel/compiler/brw_nir.c
+++ b/src/intel/compiler/brw_nir.c
@@ -670,12 +670,16 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
     */
    OPT(nir_opt_algebraic);
 
+   if (is_scalar) {
+      OPT(nir_lower_alu_to_scalar);
+   }
+
    /* Lower int64 instructions before nir_optimize so that loop unrolling
     * sees their actual cost.
     */
-   nir_lower_int64(nir, nir_lower_imul64 |
-                        nir_lower_isign64 |
-                        nir_lower_divmod64);
+   while (nir_lower_int64(nir, nir_lower_imul64 |
+                               nir_lower_isign64 |
+                               nir_lower_divmod64));
 
    nir = brw_nir_optimize(nir, compiler, is_scalar, true);
 
-- 
2.16.4
