Oops, the test case that I meant to show is this one:
================================ foo.c =================================
#include <stdio.h>
#define assume(R) ((R) ? (void) 0 : __builtin_unreachable ())
//#define assume(R) (!__builtin_constant_p (!(R) == !(R)) || (R) ? (void) 0 : __builtin_unreachable ())
extern int complicated (int i);
extern int nonnegative (int i);
/* Baseline: nothing is known about the sign of i, so the compiler
   cannot fold i & 0x80000000 to a constant.
   NOTE(review): 0x80000000 has type unsigned int, so the argument is
   unsigned while %d expects int -- harmless in this throwaway test,
   but technically a format mismatch.  */
int f_generic (int i)
{
printf("%d\n", i & 0x80000000);
return 0;
}
/* The printf is guarded by a real runtime test.  Within the taken
   branch the optimizer knows nonnegative (i) returned nonzero, but
   since complicated/nonnegative are extern (defined in bar.c), it
   cannot see their bodies here without LTO, so i & 0x80000000 is not
   expected to fold in this translation unit.  */
int f_condition (int i)
{
if (complicated (i) && nonnegative (i))
printf("%d\n", i & 0x80000000);
return 0;
}
/* With the old 'assume' definition (line 4) the condition is actually
   evaluated at runtime: if it is false, control reaches
   __builtin_unreachable (), so on the path that continues the
   optimizer may treat complicated (i) && nonnegative (i) as true.
   nonnegative (i) being true means i >= 0 (see bar.c), which lets
   i & 0x80000000 fold to 0 -- the optimization under discussion.
   NOTE(review): the old macro evaluates R, so f_assume still emits the
   two calls as a side effect; only the masking is optimized away.  */
int f_assume (int i)
{
assume (complicated (i) && nonnegative (i));
printf("%d\n", i & 0x80000000);
return 0;
}
================================ bar.c =================================
/* Return 1 when the low three bits of i are 0b011 (i.e. i & 7 == 3),
   else 0.  Bit-masking (not i % 8) keeps the result correct for
   negative i as in the original.  */
int complicated (int i)
{
  int low_bits = i & 7;
  return low_bits == 3;
}
/* Return 1 when i is zero or positive, 0 when it is negative.  */
int nonnegative (int i)
{
  return !(i < 0);
}
========================================================================
The results are as shown: the optimization in f_assume is performed
with the old 'assume' definition, but not with the new one.