Similarly to g:22d9c8802add09a93308319fc37dd3a0f1125393, I would like to use
{UN,}LIKELY macros in libgomp. If the community is fine with it, I'm planning
to do the same in other GCC libraries.

Patch can bootstrap on x86_64-linux-gnu and survives regression tests.

Ready to be installed?
Thanks,
Martin

libgomp/ChangeLog:

        * libgomp.h (LIKELY): Define macro.
        (UNLIKELY): Likewise.
        * affinity-fmt.c (gomp_display_string): Use the macros.
        (gomp_display_repeat): Likewise.
        * alloc.c (gomp_aligned_alloc): Likewise.
        * allocator.c (omp_aligned_alloc): Likewise.
        (GOMP_alloc): Likewise.
        (omp_aligned_calloc): Likewise.
        (omp_realloc): Likewise.
        * config/gcn/bar.c (gomp_barrier_wait_end): Likewise.
        (gomp_team_barrier_wait_end): Likewise.
        (gomp_team_barrier_wait_final): Likewise.
        (gomp_team_barrier_wait_cancel_end): Likewise.
        * config/gcn/bar.h (gomp_team_barrier_cancelled): Likewise.
        * config/linux/alpha/futex.h (futex_wait): Likewise.
        (futex_wake): Likewise.
        * config/linux/bar.c (gomp_barrier_wait_end): Likewise.
        (gomp_team_barrier_wait_end): Likewise.
        (gomp_team_barrier_wait_final): Likewise.
        (gomp_team_barrier_wait_cancel_end): Likewise.
        * config/linux/bar.h (gomp_team_barrier_cancelled): Likewise.
        * config/linux/futex.h (futex_wait): Likewise.
        (futex_wake): Likewise.
        * config/linux/ia64/futex.h (futex_wait): Likewise.
        (futex_wake): Likewise.
        * config/linux/lock.c (gomp_tid): Likewise.
        * config/linux/mutex.h (gomp_mutex_unlock): Likewise.
        * config/linux/powerpc/futex.h (sys_futex0): Likewise.
        (futex_wait): Likewise.
        (futex_wake): Likewise.
        * config/linux/s390/futex.h (futex_wait): Likewise.
        (futex_wake): Likewise.
        * config/linux/sem.c (gomp_sem_wait_slow): Likewise.
        * config/linux/sem.h (gomp_sem_post): Likewise.
        * config/linux/sparc/futex.h (futex_wait): Likewise.
        (futex_wake): Likewise.
        * config/linux/wait.h (do_spin): Likewise.
        * config/linux/x86/futex.h (futex_wait): Likewise.
        (futex_wake): Likewise.
        * config/nvptx/bar.c (futex_wait): Likewise.
        * config/nvptx/bar.h (gomp_team_barrier_cancelled): Likewise.
        * config/posix/bar.h (gomp_team_barrier_cancelled): Likewise.
        * config/posix/pool.h (gomp_get_thread_pool): Likewise.
        * config/rtems/bar.c (do_spin): Likewise.
        * config/rtems/bar.h (gomp_team_barrier_cancelled): Likewise.
        * config/rtems/pool.h (gomp_get_own_thread_pool): Likewise.
        (gomp_get_thread_pool): Likewise.
        * iter.c (gomp_iter_dynamic_next): Likewise.
        (gomp_iter_guided_next): Likewise.
        * iter_ull.c (gomp_iter_ull_static_next): Likewise.
        (gomp_iter_ull_dynamic_next_locked): Likewise.
        (gomp_iter_ull_dynamic_next): Likewise.
        (gomp_iter_ull_guided_next_locked): Likewise.
        (gomp_iter_ull_guided_next): Likewise.
        (gomp_vdebug): Likewise.
        (gomp_debug): Likewise.
        (gomp_finish_task): Likewise.
        (gomp_work_share_init_done): Likewise.
        * loop.c (gomp_loop_init): Likewise.
        * loop_ull.c (gomp_loop_ull_init): Likewise.
        * oacc-int.h (GOACC_PROF_ENABLED): Likewise.
        * oacc-profiling.c (_goacc_profiling_dispatch_p): Likewise.
        (_goacc_profiling_setup_p): Likewise.
        * ordered.c (GOMP_doacross_post): Likewise.
        (GOMP_doacross_wait): Likewise.
        (GOMP_doacross_ull_post): Likewise.
        (GOMP_doacross_ull_wait): Likewise.
        * parallel.c (gomp_resolve_num_threads): Likewise.
        (GOMP_parallel_end): Likewise.
        * plugin/plugin-gcn.c (alloc_by_agent): Likewise.
        (gcn_exec): Likewise.
        (GOMP_OFFLOAD_free): Likewise.
        * plugin/plugin-nvptx.c (nvptx_exec): Likewise.
        (nvptx_alloc): Likewise.
        (nvptx_free): Likewise.
        (GOMP_OFFLOAD_openacc_exec): Likewise.
        (GOMP_OFFLOAD_openacc_async_exec): Likewise.
        * priority_queue.h (priority_queue_multi_p): Likewise.
        (priority_tree_insert): Likewise.
        * single.c (GOMP_single_start): Likewise.
        * target.c (gomp_copy_host2dev): Likewise.
        (gomp_copy_dev2host): Likewise.
        (GOMP_target_ext): Likewise.
        (GOMP_target_update_ext): Likewise.
        (GOMP_target_enter_exit_data): Likewise.
        * task.c (gomp_task_handle_depend): Likewise.
        (GOMP_task): Likewise.
        (gomp_create_target_task): Likewise.
        (priority_list_downgrade_task): Likewise.
        (gomp_task_run_pre): Likewise.
        (gomp_task_run_post_handle_dependers): Likewise.
        (gomp_task_run_post_remove_parent): Likewise.
        (gomp_barrier_handle_tasks): Likewise.
        (GOMP_taskwait): Likewise.
        (GOMP_taskwait_depend): Likewise.
        (gomp_task_maybe_wait_for_dependencies): Likewise.
        (GOMP_taskgroup_end): Likewise.
        (gomp_reduction_register): Likewise.
        (GOMP_taskgroup_reduction_register): Likewise.
        (GOMP_task_reduction_remap): Likewise.
        * taskloop.c (GOMP_taskloop): Likewise.
        * team.c (gomp_team_start): Likewise.
        (gomp_team_end): Likewise.
        * work.c (INLINE_ORDERED_TEAM_IDS_SIZE): Likewise.
        (free_work_share): Likewise.
        (gomp_work_share_end): Likewise.
        (gomp_work_share_end_cancel): Likewise.
        (gomp_work_share_end_nowait): Likewise.
---
 libgomp/affinity-fmt.c               |  4 +-
 libgomp/alloc.c                      |  2 +-
 libgomp/allocator.c                  | 26 ++++++------
 libgomp/config/gcn/bar.c             | 20 ++++-----
 libgomp/config/gcn/bar.h             |  2 +-
 libgomp/config/linux/alpha/futex.h   |  4 +-
 libgomp/config/linux/bar.c           | 20 ++++-----
 libgomp/config/linux/bar.h           |  2 +-
 libgomp/config/linux/futex.h         |  4 +-
 libgomp/config/linux/ia64/futex.h    |  4 +-
 libgomp/config/linux/lock.c          |  2 +-
 libgomp/config/linux/mutex.h         |  2 +-
 libgomp/config/linux/powerpc/futex.h |  6 +--
 libgomp/config/linux/s390/futex.h    |  4 +-
 libgomp/config/linux/sem.c           |  3 +-
 libgomp/config/linux/sem.h           |  2 +-
 libgomp/config/linux/sparc/futex.h   |  4 +-
 libgomp/config/linux/wait.h          |  8 ++--
 libgomp/config/linux/x86/futex.h     |  4 +-
 libgomp/config/nvptx/bar.c           |  2 +-
 libgomp/config/nvptx/bar.h           |  2 +-
 libgomp/config/posix/bar.h           |  2 +-
 libgomp/config/posix/pool.h          |  2 +-
 libgomp/config/rtems/bar.c           |  4 +-
 libgomp/config/rtems/bar.h           |  2 +-
 libgomp/config/rtems/pool.h          |  4 +-
 libgomp/iter.c                       |  8 ++--
 libgomp/iter_ull.c                   | 22 +++++-----
 libgomp/libgomp.h                    | 11 +++--
 libgomp/loop.c                       | 14 +++----
 libgomp/loop_ull.c                   | 14 +++----
 libgomp/oacc-int.h                   |  3 +-
 libgomp/oacc-profiling.c             |  8 ++--
 libgomp/ordered.c                    | 32 +++++++-------
 libgomp/parallel.c                   |  4 +-
 libgomp/plugin/plugin-gcn.c          |  6 +--
 libgomp/plugin/plugin-nvptx.c        | 10 ++---
 libgomp/priority_queue.h             |  6 +--
 libgomp/single.c                     |  2 +-
 libgomp/target.c                     | 14 +++----
 libgomp/task.c                       | 62 ++++++++++++++--------------
 libgomp/taskloop.c                   |  6 +--
 libgomp/team.c                       | 42 +++++++++----------
 libgomp/work.c                       | 12 +++---
 44 files changed, 209 insertions(+), 208 deletions(-)

diff --git a/libgomp/affinity-fmt.c b/libgomp/affinity-fmt.c
index 30b7ef97322..c9154ca6f1a 100644
--- a/libgomp/affinity-fmt.c
+++ b/libgomp/affinity-fmt.c
@@ -101,7 +101,7 @@ gomp_display_string (char *buffer, size_t size, size_t *ret,
       memcpy (buffer + r, str, l);
     }
   *ret += len;
-  if (__builtin_expect (r > *ret, 0))
+  if (UNLIKELY (r > *ret))
     gomp_fatal ("overflow in omp_capture_affinity");
 }
 
@@ -118,7 +118,7 @@ gomp_display_repeat (char *buffer, size_t size, size_t *ret,
       memset (buffer + r, c, l);
     }
   *ret += len;
-  if (__builtin_expect (r > *ret, 0))
+  if (UNLIKELY (r > *ret))
     gomp_fatal ("overflow in omp_capture_affinity");
 }
 
diff --git a/libgomp/alloc.c b/libgomp/alloc.c
index a2a25befdf3..ee5f943967c 100644
--- a/libgomp/alloc.c
+++ b/libgomp/alloc.c
@@ -78,7 +78,7 @@ gomp_aligned_alloc (size_t al, size_t size)
 #elif defined(HAVE_ALIGNED_ALLOC)
   {
     size_t sz = (size + al - 1) & ~(al - 1);
-    if (__builtin_expect (sz >= size, 1))
+    if (LIKELY (sz >= size))
       ret = aligned_alloc (al, sz);
     else
       ret = NULL;
diff --git a/libgomp/allocator.c b/libgomp/allocator.c
index 07a5645f4cc..a34e929863f 100644
--- a/libgomp/allocator.c
+++ b/libgomp/allocator.c
@@ -214,7 +214,7 @@ omp_aligned_alloc (size_t alignment, size_t size,
   size_t new_size, new_alignment;
   void *ptr, *ret;
 
-  if (__builtin_expect (size == 0, 0))
+  if (UNLIKELY (size == 0))
     return NULL;
 
 retry:
@@ -246,8 +246,8 @@ retry:
   if (__builtin_add_overflow (size, new_size, &new_size))
     goto fail;
 
-  if (__builtin_expect (allocator_data
-                       && allocator_data->pool_size < ~(uintptr_t) 0, 0))
+  if (UNLIKELY (allocator_data
+               && allocator_data->pool_size < ~(uintptr_t) 0))
     {
       uintptr_t used_pool_size;
       if (new_size > allocator_data->pool_size)
@@ -363,7 +363,7 @@ GOMP_alloc (size_t alignment, size_t size, uintptr_t 
allocator)
   void *ret
     = ialias_call (omp_aligned_alloc) (alignment, size,
                                       (omp_allocator_handle_t) allocator);
-  if (__builtin_expect (ret == NULL, 0) && size)
+  if (UNLIKELY (ret == NULL) && size)
     gomp_fatal ("Out of memory allocating %lu bytes",
                (unsigned long) size);
   return ret;
@@ -413,7 +413,7 @@ omp_aligned_calloc (size_t alignment, size_t nmemb, size_t 
size,
   size_t new_size, size_temp, new_alignment;
   void *ptr, *ret;
 
-  if (__builtin_expect (size == 0 || nmemb == 0, 0))
+  if (UNLIKELY (size == 0 || nmemb == 0))
     return NULL;
 
 retry:
@@ -447,8 +447,8 @@ retry:
   if (__builtin_add_overflow (size_temp, new_size, &new_size))
     goto fail;
 
-  if (__builtin_expect (allocator_data
-                       && allocator_data->pool_size < ~(uintptr_t) 0, 0))
+  if (UNLIKELY (allocator_data
+               && allocator_data->pool_size < ~(uintptr_t) 0))
     {
       uintptr_t used_pool_size;
       if (new_size > allocator_data->pool_size)
@@ -563,10 +563,10 @@ omp_realloc (void *ptr, size_t size, 
omp_allocator_handle_t allocator,
   void *new_ptr, *ret;
   struct omp_mem_header *data;
 
-  if (__builtin_expect (ptr == NULL, 0))
+  if (UNLIKELY (ptr == NULL))
     return ialias_call (omp_aligned_alloc) (1, size, allocator);
 
-  if (__builtin_expect (size == 0, 0))
+  if (UNLIKELY (size == 0))
     {
       ialias_call (omp_free) (ptr, free_allocator);
       return NULL;
@@ -601,8 +601,8 @@ retry:
     goto fail;
   old_size = data->size;
 
-  if (__builtin_expect (allocator_data
-                       && allocator_data->pool_size < ~(uintptr_t) 0, 0))
+  if (UNLIKELY (allocator_data
+               && allocator_data->pool_size < ~(uintptr_t) 0))
     {
       uintptr_t used_pool_size;
       size_t prev_size = 0;
@@ -719,8 +719,8 @@ retry:
   if (old_size - old_alignment < size)
     size = old_size - old_alignment;
   memcpy (ret, ptr, size);
-  if (__builtin_expect (free_allocator_data
-                       && free_allocator_data->pool_size < ~(uintptr_t) 0, 0))
+  if (UNLIKELY (free_allocator_data
+               && free_allocator_data->pool_size < ~(uintptr_t) 0))
     {
 #ifdef HAVE_SYNC_BUILTINS
       __atomic_add_fetch (&free_allocator_data->used_pool_size, -data->size,
diff --git a/libgomp/config/gcn/bar.c b/libgomp/config/gcn/bar.c
index b2aac724641..1e8e5136ea9 100644
--- a/libgomp/config/gcn/bar.c
+++ b/libgomp/config/gcn/bar.c
@@ -36,7 +36,7 @@
 void
 gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
 {
-  if (__builtin_expect (state & BAR_WAS_LAST, 0))
+  if (UNLIKELY (state & BAR_WAS_LAST))
     {
       /* Next time we'll be awaiting TOTAL threads again.  */
       bar->awaited = bar->total;
@@ -81,7 +81,7 @@ gomp_team_barrier_wait_end (gomp_barrier_t *bar, 
gomp_barrier_state_t state)
 {
   unsigned int generation, gen;
 
-  if (__builtin_expect (state & BAR_WAS_LAST, 0))
+  if (UNLIKELY (state & BAR_WAS_LAST))
     {
       /* Next time we'll be awaiting TOTAL threads again.  */
       struct gomp_thread *thr = gomp_thread ();
@@ -89,7 +89,7 @@ gomp_team_barrier_wait_end (gomp_barrier_t *bar, 
gomp_barrier_state_t state)
 
       bar->awaited = bar->total;
       team->work_share_cancelled = 0;
-      if (__builtin_expect (team->task_count, 0))
+      if (UNLIKELY (team->task_count))
        {
          gomp_barrier_handle_tasks (state);
          state &= ~BAR_WAS_LAST;
@@ -123,7 +123,7 @@ gomp_team_barrier_wait_end (gomp_barrier_t *bar, 
gomp_barrier_state_t state)
 
       asm ("s_barrier" ::: "memory");
       gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
-      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
+      if (UNLIKELY (gen & BAR_TASK_PENDING))
        {
          gomp_barrier_handle_tasks (state);
          gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
@@ -143,7 +143,7 @@ void
 gomp_team_barrier_wait_final (gomp_barrier_t *bar)
 {
   gomp_barrier_state_t state = gomp_barrier_wait_final_start (bar);
-  if (__builtin_expect (state & BAR_WAS_LAST, 0))
+  if (UNLIKELY (state & BAR_WAS_LAST))
     bar->awaited_final = bar->total;
   gomp_team_barrier_wait_end (bar, state);
 }
@@ -154,7 +154,7 @@ gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
 {
   unsigned int generation, gen;
 
-  if (__builtin_expect (state & BAR_WAS_LAST, 0))
+  if (UNLIKELY (state & BAR_WAS_LAST))
     {
       /* Next time we'll be awaiting TOTAL threads again.  */
       /* BAR_CANCELLED should never be set in state here, because
@@ -166,7 +166,7 @@ gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
 
       bar->awaited = bar->total;
       team->work_share_cancelled = 0;
-      if (__builtin_expect (team->task_count, 0))
+      if (UNLIKELY (team->task_count))
        {
          gomp_barrier_handle_tasks (state);
          state &= ~BAR_WAS_LAST;
@@ -181,7 +181,7 @@ gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
        }
     }
 
-  if (__builtin_expect (state & BAR_CANCELLED, 0))
+  if (UNLIKELY (state & BAR_CANCELLED))
     return true;
 
   generation = state;
@@ -202,9 +202,9 @@ gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
       if (bar->total > 1)
        asm ("s_barrier" ::: "memory");
       gen = __atomic_load_n (&bar->generation, MEMMODEL_RELAXED);
-      if (__builtin_expect (gen & BAR_CANCELLED, 0))
+      if (UNLIKELY (gen & BAR_CANCELLED))
        return true;
-      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
+      if (UNLIKELY (gen & BAR_TASK_PENDING))
        {
          gomp_barrier_handle_tasks (state);
          gen = __atomic_load_n (&bar->generation, MEMMODEL_RELAXED);
diff --git a/libgomp/config/gcn/bar.h b/libgomp/config/gcn/bar.h
index 19d3a6204bc..a1b18d97cc9 100644
--- a/libgomp/config/gcn/bar.h
+++ b/libgomp/config/gcn/bar.h
@@ -156,7 +156,7 @@ gomp_team_barrier_waiting_for_tasks (gomp_barrier_t *bar)
 static inline bool
 gomp_team_barrier_cancelled (gomp_barrier_t *bar)
 {
-  return __builtin_expect ((bar->generation & BAR_CANCELLED) != 0, 0);
+  return UNLIKELY ((bar->generation & BAR_CANCELLED) != 0);
 }
 
 static inline void
diff --git a/libgomp/config/linux/alpha/futex.h 
b/libgomp/config/linux/alpha/futex.h
index 0921c55a16a..ffba4d6ee86 100644
--- a/libgomp/config/linux/alpha/futex.h
+++ b/libgomp/config/linux/alpha/futex.h
@@ -49,7 +49,7 @@ futex_wait (int *addr, int val)
                  : "0"(sc_0), "r" (sc_16), "r"(sc_17), "r"(sc_18), "1"(sc_19)
                  : "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8",
                    "$22", "$23", "$24", "$25", "$27", "$28", "memory");
-  if (__builtin_expect (sc_19, 0) && sc_0 == ENOSYS)
+  if (UNLIKELY (sc_19) && sc_0 == ENOSYS)
     {
       gomp_futex_wait &= ~FUTEX_PRIVATE_FLAG;
       gomp_futex_wake &= ~FUTEX_PRIVATE_FLAG;
@@ -83,7 +83,7 @@ futex_wake (int *addr, int count)
                  : "0"(sc_0), "r" (sc_16), "r"(sc_17), "r"(sc_18)
                  : "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8",
                    "$22", "$23", "$24", "$25", "$27", "$28", "memory");
-  if (__builtin_expect (sc_19, 0) && sc_0 == ENOSYS)
+  if (UNLIKELY (sc_19) && sc_0 == ENOSYS)
     {
       gomp_futex_wait &= ~FUTEX_PRIVATE_FLAG;
       gomp_futex_wake &= ~FUTEX_PRIVATE_FLAG;
diff --git a/libgomp/config/linux/bar.c b/libgomp/config/linux/bar.c
index 159f4182cb8..54705bb5821 100644
--- a/libgomp/config/linux/bar.c
+++ b/libgomp/config/linux/bar.c
@@ -34,7 +34,7 @@
 void
 gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
 {
-  if (__builtin_expect (state & BAR_WAS_LAST, 0))
+  if (UNLIKELY (state & BAR_WAS_LAST))
     {
       /* Next time we'll be awaiting TOTAL threads again.  */
       bar->awaited = bar->total;
@@ -82,7 +82,7 @@ gomp_team_barrier_wait_end (gomp_barrier_t *bar, 
gomp_barrier_state_t state)
 {
   unsigned int generation, gen;
 
-  if (__builtin_expect (state & BAR_WAS_LAST, 0))
+  if (UNLIKELY (state & BAR_WAS_LAST))
     {
       /* Next time we'll be awaiting TOTAL threads again.  */
       struct gomp_thread *thr = gomp_thread ();
@@ -90,7 +90,7 @@ gomp_team_barrier_wait_end (gomp_barrier_t *bar, 
gomp_barrier_state_t state)
 
       bar->awaited = bar->total;
       team->work_share_cancelled = 0;
-      if (__builtin_expect (team->task_count, 0))
+      if (UNLIKELY (team->task_count))
        {
          gomp_barrier_handle_tasks (state);
          state &= ~BAR_WAS_LAST;
@@ -111,7 +111,7 @@ gomp_team_barrier_wait_end (gomp_barrier_t *bar, 
gomp_barrier_state_t state)
     {
       do_wait ((int *) &bar->generation, generation);
       gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
-      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
+      if (UNLIKELY (gen & BAR_TASK_PENDING))
        {
          gomp_barrier_handle_tasks (state);
          gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
@@ -131,7 +131,7 @@ void
 gomp_team_barrier_wait_final (gomp_barrier_t *bar)
 {
   gomp_barrier_state_t state = gomp_barrier_wait_final_start (bar);
-  if (__builtin_expect (state & BAR_WAS_LAST, 0))
+  if (UNLIKELY (state & BAR_WAS_LAST))
     bar->awaited_final = bar->total;
   gomp_team_barrier_wait_end (bar, state);
 }
@@ -142,7 +142,7 @@ gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
 {
   unsigned int generation, gen;
 
-  if (__builtin_expect (state & BAR_WAS_LAST, 0))
+  if (UNLIKELY (state & BAR_WAS_LAST))
     {
       /* Next time we'll be awaiting TOTAL threads again.  */
       /* BAR_CANCELLED should never be set in state here, because
@@ -154,7 +154,7 @@ gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
 
       bar->awaited = bar->total;
       team->work_share_cancelled = 0;
-      if (__builtin_expect (team->task_count, 0))
+      if (UNLIKELY (team->task_count))
        {
          gomp_barrier_handle_tasks (state);
          state &= ~BAR_WAS_LAST;
@@ -168,7 +168,7 @@ gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
        }
     }
 
-  if (__builtin_expect (state & BAR_CANCELLED, 0))
+  if (UNLIKELY (state & BAR_CANCELLED))
     return true;
 
   generation = state;
@@ -176,9 +176,9 @@ gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
     {
       do_wait ((int *) &bar->generation, generation);
       gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
-      if (__builtin_expect (gen & BAR_CANCELLED, 0))
+      if (UNLIKELY (gen & BAR_CANCELLED))
        return true;
-      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
+      if (UNLIKELY (gen & BAR_TASK_PENDING))
        {
          gomp_barrier_handle_tasks (state);
          gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
diff --git a/libgomp/config/linux/bar.h b/libgomp/config/linux/bar.h
index 85908376301..80b31a7e1f1 100644
--- a/libgomp/config/linux/bar.h
+++ b/libgomp/config/linux/bar.h
@@ -156,7 +156,7 @@ gomp_team_barrier_waiting_for_tasks (gomp_barrier_t *bar)
 static inline bool
 gomp_team_barrier_cancelled (gomp_barrier_t *bar)
 {
-  return __builtin_expect ((bar->generation & BAR_CANCELLED) != 0, 0);
+  return UNLIKELY ((bar->generation & BAR_CANCELLED) != 0);
 }
 
 static inline void
diff --git a/libgomp/config/linux/futex.h b/libgomp/config/linux/futex.h
index e01bd96d48b..9ed9cec5902 100644
--- a/libgomp/config/linux/futex.h
+++ b/libgomp/config/linux/futex.h
@@ -43,7 +43,7 @@ static inline void
 futex_wait (int *addr, int val)
 {
   int err = syscall (SYS_futex, addr, gomp_futex_wait, val, NULL);
-  if (__builtin_expect (err < 0 && errno == ENOSYS, 0))
+  if (UNLIKELY (err < 0 && errno == ENOSYS))
     {
       gomp_futex_wait &= ~FUTEX_PRIVATE_FLAG;
       gomp_futex_wake &= ~FUTEX_PRIVATE_FLAG;
@@ -55,7 +55,7 @@ static inline void
 futex_wake (int *addr, int count)
 {
   int err = syscall (SYS_futex, addr, gomp_futex_wake, count);
-  if (__builtin_expect (err < 0 && errno == ENOSYS, 0))
+  if (UNLIKELY (err < 0 && errno == ENOSYS))
     {
       gomp_futex_wait &= ~FUTEX_PRIVATE_FLAG;
       gomp_futex_wake &= ~FUTEX_PRIVATE_FLAG;
diff --git a/libgomp/config/linux/ia64/futex.h 
b/libgomp/config/linux/ia64/futex.h
index 79e6fc493df..71d014ed55f 100644
--- a/libgomp/config/linux/ia64/futex.h
+++ b/libgomp/config/linux/ia64/futex.h
@@ -62,7 +62,7 @@ static inline void
 futex_wait (int *addr, int val)
 {
   long err = sys_futex0 (addr, gomp_futex_wait, val);
-  if (__builtin_expect (err == ENOSYS, 0))
+  if (UNLIKELY (err == ENOSYS))
     {
       gomp_futex_wait &= ~FUTEX_PRIVATE_FLAG;
       gomp_futex_wake &= ~FUTEX_PRIVATE_FLAG;
@@ -74,7 +74,7 @@ static inline void
 futex_wake (int *addr, int count)
 {
   long err = sys_futex0 (addr, gomp_futex_wake, count);
-  if (__builtin_expect (err == ENOSYS, 0))
+  if (UNLIKELY (err == ENOSYS))
     {
       gomp_futex_wait &= ~FUTEX_PRIVATE_FLAG;
       gomp_futex_wake &= ~FUTEX_PRIVATE_FLAG;
diff --git a/libgomp/config/linux/lock.c b/libgomp/config/linux/lock.c
index c238e1e78df..9e88277c7e5 100644
--- a/libgomp/config/linux/lock.c
+++ b/libgomp/config/linux/lock.c
@@ -76,7 +76,7 @@ static __thread int tid_cache;
 static inline int gomp_tid (void)
 {
   int tid = tid_cache;
-  if (__builtin_expect (tid == 0, 0))
+  if (UNLIKELY (tid == 0))
     tid_cache = tid = syscall (SYS_gettid);
   return tid;
 }
diff --git a/libgomp/config/linux/mutex.h b/libgomp/config/linux/mutex.h
index be38d144e21..e8786d2689e 100644
--- a/libgomp/config/linux/mutex.h
+++ b/libgomp/config/linux/mutex.h
@@ -61,7 +61,7 @@ static inline void
 gomp_mutex_unlock (gomp_mutex_t *mutex)
 {
   int wait = __atomic_exchange_n (mutex, 0, MEMMODEL_RELEASE);
-  if (__builtin_expect (wait < 0, 0))
+  if (UNLIKELY (wait < 0))
     gomp_mutex_unlock_slow (mutex);
 }
 #endif /* GOMP_MUTEX_H */
diff --git a/libgomp/config/linux/powerpc/futex.h 
b/libgomp/config/linux/powerpc/futex.h
index d4c87436561..49afe9b2e19 100644
--- a/libgomp/config/linux/powerpc/futex.h
+++ b/libgomp/config/linux/powerpc/futex.h
@@ -51,7 +51,7 @@ sys_futex0 (int *addr, int op, int val)
                  : "r"(r0), "r"(r3), "r"(r4), "r"(r5), "r"(r6)
                  : "r7", "r8", "r9", "r10", "r11", "r12",
                    "cr0", "ctr", "memory");
-  if (__builtin_expect (r0 & (1 << 28), 0))
+  if (UNLIKELY (r0 & (1 << 28)))
     return r3;
   return 0;
 }
@@ -60,7 +60,7 @@ static inline void
 futex_wait (int *addr, int val)
 {
   long err = sys_futex0 (addr, gomp_futex_wait, val);
-  if (__builtin_expect (err == ENOSYS, 0))
+  if (UNLIKELY (err == ENOSYS))
     {
       gomp_futex_wait &= ~FUTEX_PRIVATE_FLAG;
       gomp_futex_wake &= ~FUTEX_PRIVATE_FLAG;
@@ -72,7 +72,7 @@ static inline void
 futex_wake (int *addr, int count)
 {
   long err = sys_futex0 (addr, gomp_futex_wake, count);
-  if (__builtin_expect (err == ENOSYS, 0))
+  if (UNLIKELY (err == ENOSYS))
     {
       gomp_futex_wait &= ~FUTEX_PRIVATE_FLAG;
       gomp_futex_wake &= ~FUTEX_PRIVATE_FLAG;
diff --git a/libgomp/config/linux/s390/futex.h 
b/libgomp/config/linux/s390/futex.h
index fb8c6ee832c..935525d2318 100644
--- a/libgomp/config/linux/s390/futex.h
+++ b/libgomp/config/linux/s390/futex.h
@@ -52,7 +52,7 @@ static inline void
 futex_wait (int *addr, int val)
 {
   long err = sys_futex0 (addr, gomp_futex_wait, val);
-  if (__builtin_expect (err == -ENOSYS, 0))
+  if (UNLIKELY (err == -ENOSYS))
     {
       gomp_futex_wait &= ~FUTEX_PRIVATE_FLAG;
       gomp_futex_wake &= ~FUTEX_PRIVATE_FLAG;
@@ -64,7 +64,7 @@ static inline void
 futex_wake (int *addr, int count)
 {
   long err = sys_futex0 (addr, gomp_futex_wake, count);
-  if (__builtin_expect (err == -ENOSYS, 0))
+  if (UNLIKELY (err == -ENOSYS))
     {
       gomp_futex_wait &= ~FUTEX_PRIVATE_FLAG;
       gomp_futex_wake &= ~FUTEX_PRIVATE_FLAG;
diff --git a/libgomp/config/linux/sem.c b/libgomp/config/linux/sem.c
index b4dc1db9f46..a9c32fef693 100644
--- a/libgomp/config/linux/sem.c
+++ b/libgomp/config/linux/sem.c
@@ -44,8 +44,7 @@ gomp_sem_wait_slow (gomp_sem_t *sem, int count)
        break;
       }
   /* Something changed.  If it wasn't the wait flag, we're good to go.  */
-    else if (__builtin_expect (((count = *sem) & SEM_WAIT) == 0 && count != 0,
-                              1))
+    else if (LIKELY (((count = *sem) & SEM_WAIT) == 0 && count != 0))
       {
        if (__atomic_compare_exchange_n (sem, &count, count - SEM_INC, false,
                                         MEMMODEL_ACQUIRE, MEMMODEL_RELAXED))
diff --git a/libgomp/config/linux/sem.h b/libgomp/config/linux/sem.h
index 5828d16a541..a3c5deb1c09 100644
--- a/libgomp/config/linux/sem.h
+++ b/libgomp/config/linux/sem.h
@@ -80,7 +80,7 @@ gomp_sem_post (gomp_sem_t *sem)
                                       MEMMODEL_RELEASE, MEMMODEL_RELAXED))
     continue;
 
-  if (__builtin_expect (count & SEM_WAIT, 0))
+  if (UNLIKELY (count & SEM_WAIT))
     gomp_sem_post_slow (sem);
 }
 
diff --git a/libgomp/config/linux/sparc/futex.h 
b/libgomp/config/linux/sparc/futex.h
index 0d9305fbb89..c5a146062db 100644
--- a/libgomp/config/linux/sparc/futex.h
+++ b/libgomp/config/linux/sparc/futex.h
@@ -68,7 +68,7 @@ static inline void
 futex_wait (int *addr, int val)
 {
   long err = sys_futex0 (addr, gomp_futex_wait, val);
-  if (__builtin_expect (err == ENOSYS, 0))
+  if (UNLIKELY (err == ENOSYS))
     {
       gomp_futex_wait &= ~FUTEX_PRIVATE_FLAG;
       gomp_futex_wake &= ~FUTEX_PRIVATE_FLAG;
@@ -80,7 +80,7 @@ static inline void
 futex_wake (int *addr, int count)
 {
   long err = sys_futex0 (addr, gomp_futex_wake, count);
-  if (__builtin_expect (err == ENOSYS, 0))
+  if (UNLIKELY (err == ENOSYS))
     {
       gomp_futex_wait &= ~FUTEX_PRIVATE_FLAG;
       gomp_futex_wake &= ~FUTEX_PRIVATE_FLAG;
diff --git a/libgomp/config/linux/wait.h b/libgomp/config/linux/wait.h
index 0ba9db5a4ad..2be7055a93a 100644
--- a/libgomp/config/linux/wait.h
+++ b/libgomp/config/linux/wait.h
@@ -49,12 +49,12 @@ static inline int do_spin (int *addr, int val)
 {
   unsigned long long i, count = gomp_spin_count_var;
 
-  if (__builtin_expect (__atomic_load_n (&gomp_managed_threads,
-                                         MEMMODEL_RELAXED)
-                        > gomp_available_cpus, 0))
+  if (UNLIKELY (__atomic_load_n (&gomp_managed_threads,
+                                MEMMODEL_RELAXED)
+               > gomp_available_cpus))
     count = gomp_throttled_spin_count_var;
   for (i = 0; i < count; i++)
-    if (__builtin_expect (__atomic_load_n (addr, MEMMODEL_RELAXED) != val, 0))
+    if (UNLIKELY (__atomic_load_n (addr, MEMMODEL_RELAXED) != val))
       return 0;
     else
       cpu_relax ();
diff --git a/libgomp/config/linux/x86/futex.h b/libgomp/config/linux/x86/futex.h
index bdb360dff58..ef2ec713af2 100644
--- a/libgomp/config/linux/x86/futex.h
+++ b/libgomp/config/linux/x86/futex.h
@@ -94,7 +94,7 @@ futex_wait (int *addr, int val)
 {
   long err = __futex_wait (addr, gomp_futex_wait, val);
 
-  if (__builtin_expect (err == -ENOSYS, 0))
+  if (UNLIKELY (err == -ENOSYS))
     {
       gomp_futex_wait &= ~FUTEX_PRIVATE_FLAG;
       gomp_futex_wake &= ~FUTEX_PRIVATE_FLAG;
@@ -108,7 +108,7 @@ futex_wake (int *addr, int count)
 {
   long err = __futex_wake (addr, gomp_futex_wake, count);
 
-  if (__builtin_expect (err == -ENOSYS, 0))
+  if (UNLIKELY (err == -ENOSYS))
     {
       gomp_futex_wait &= ~FUTEX_PRIVATE_FLAG;
       gomp_futex_wake &= ~FUTEX_PRIVATE_FLAG;
diff --git a/libgomp/config/nvptx/bar.c b/libgomp/config/nvptx/bar.c
index eee21071f47..5b7be6d60d6 100644
--- a/libgomp/config/nvptx/bar.c
+++ b/libgomp/config/nvptx/bar.c
@@ -59,7 +59,7 @@ futex_wait (int *addr, int val)
   gomp_mutex_lock (&bar->lock);
 
   /* Futex semantics: only go to sleep if *addr == val.  */
-  if (__builtin_expect (__atomic_load_n (addr, MEMMODEL_ACQUIRE) != val, 0))
+  if (UNLIKELY (__atomic_load_n (addr, MEMMODEL_ACQUIRE) != val))
     {
       gomp_mutex_unlock (&bar->lock);
       return;
diff --git a/libgomp/config/nvptx/bar.h b/libgomp/config/nvptx/bar.h
index 28bf7f4d313..ef58c1eef88 100644
--- a/libgomp/config/nvptx/bar.h
+++ b/libgomp/config/nvptx/bar.h
@@ -158,7 +158,7 @@ gomp_team_barrier_waiting_for_tasks (gomp_barrier_t *bar)
 static inline bool
 gomp_team_barrier_cancelled (gomp_barrier_t *bar)
 {
-  return __builtin_expect ((bar->generation & BAR_CANCELLED) != 0, 0);
+  return UNLIKELY ((bar->generation & BAR_CANCELLED) != 0);
 }
 
 static inline void
diff --git a/libgomp/config/posix/bar.h b/libgomp/config/posix/bar.h
index a1a18eb6147..1e9f1e6e26d 100644
--- a/libgomp/config/posix/bar.h
+++ b/libgomp/config/posix/bar.h
@@ -146,7 +146,7 @@ gomp_team_barrier_waiting_for_tasks (gomp_barrier_t *bar)
 static inline bool
 gomp_team_barrier_cancelled (gomp_barrier_t *bar)
 {
-  return __builtin_expect ((bar->generation & BAR_CANCELLED) != 0, 0);
+  return UNLIKELY ((bar->generation & BAR_CANCELLED) != 0);
 }
 
 static inline void
diff --git a/libgomp/config/posix/pool.h b/libgomp/config/posix/pool.h
index ddd253d52b5..3202674bb5a 100644
--- a/libgomp/config/posix/pool.h
+++ b/libgomp/config/posix/pool.h
@@ -37,7 +37,7 @@ static inline struct gomp_thread_pool *
 gomp_get_thread_pool (struct gomp_thread *thr, unsigned nthreads)
 {
   struct gomp_thread_pool *pool = thr->thread_pool;
-  if (__builtin_expect (pool == NULL, 0))
+  if (UNLIKELY (pool == NULL))
     {
       pool = gomp_malloc (sizeof (*pool));
       pool->threads = NULL;
diff --git a/libgomp/config/rtems/bar.c b/libgomp/config/rtems/bar.c
index ab0c585567d..fa14ccf071c 100644
--- a/libgomp/config/rtems/bar.c
+++ b/libgomp/config/rtems/bar.c
@@ -57,10 +57,10 @@ do_spin (int *addr, int val)
 {
   unsigned long long i, count = gomp_spin_count_var;
 
-  if (__builtin_expect (gomp_managed_threads > gomp_available_cpus, 0))
+  if (UNLIKELY (gomp_managed_threads > gomp_available_cpus))
     count = gomp_throttled_spin_count_var;
   for (i = 0; i < count; i++)
-    if (__builtin_expect (__atomic_load_n (addr, MEMMODEL_RELAXED) != val, 0))
+    if (UNLIKELY (__atomic_load_n (addr, MEMMODEL_RELAXED) != val))
       return 0;
   return 1;
 }
diff --git a/libgomp/config/rtems/bar.h b/libgomp/config/rtems/bar.h
index 71a3c5c6eff..d8fc632db42 100644
--- a/libgomp/config/rtems/bar.h
+++ b/libgomp/config/rtems/bar.h
@@ -158,7 +158,7 @@ gomp_team_barrier_waiting_for_tasks (gomp_barrier_t *bar)
 static inline bool
 gomp_team_barrier_cancelled (gomp_barrier_t *bar)
 {
-  return __builtin_expect ((bar->generation & BAR_CANCELLED) != 0, 0);
+  return UNLIKELY ((bar->generation & BAR_CANCELLED) != 0);
 }
 
 static inline void
diff --git a/libgomp/config/rtems/pool.h b/libgomp/config/rtems/pool.h
index 774060a1360..0a3d238948b 100644
--- a/libgomp/config/rtems/pool.h
+++ b/libgomp/config/rtems/pool.h
@@ -74,7 +74,7 @@ static inline struct gomp_thread_pool *
 gomp_get_own_thread_pool (struct gomp_thread *thr, unsigned nthreads)
 {
   struct gomp_thread_pool *pool = thr->thread_pool;
-  if (__builtin_expect (pool == NULL, 0))
+  if (UNLIKELY (pool == NULL))
     {
       pool = gomp_malloc_cleared (sizeof (*pool));
       pool->threads_busy = nthreads;
@@ -89,7 +89,7 @@ gomp_get_thread_pool (struct gomp_thread *thr, unsigned nthreads)
   struct gomp_thread_pool *pool;
   struct gomp_thread_pool_reservoir *res;
 
-  if (__builtin_expect (thr->thread_pool == NULL, 0))
+  if (UNLIKELY (thr->thread_pool == NULL))
     pthread_setspecific (gomp_thread_destructor, thr);
 
   res = gomp_get_thread_pool_reservoir ();
diff --git a/libgomp/iter.c b/libgomp/iter.c
index 40e9124b324..ec43c312d3e 100644
--- a/libgomp/iter.c
+++ b/libgomp/iter.c
@@ -191,7 +191,7 @@ gomp_iter_dynamic_next (long *pstart, long *pend)
   incr = ws->incr;
   chunk = ws->chunk_size;
 
-  if (__builtin_expect (ws->mode, 1))
+  if (LIKELY (ws->mode))
     {
       long tmp = __sync_fetch_and_add (&ws->next, chunk);
       if (incr > 0)
@@ -240,7 +240,7 @@ gomp_iter_dynamic_next (long *pstart, long *pend)
       nend = start + chunk;
 
       tmp = __sync_val_compare_and_swap (&ws->next, start, nend);
-      if (__builtin_expect (tmp == start, 1))
+      if (LIKELY (tmp == start))
        break;
 
       start = tmp;
@@ -319,13 +319,13 @@ gomp_iter_guided_next (long *pstart, long *pend)
 
       if (q < chunk_size)
        q = chunk_size;
-      if (__builtin_expect (q <= n, 1))
+      if (LIKELY (q <= n))
        nend = start + q * incr;
       else
        nend = end;
 
       tmp = __sync_val_compare_and_swap (&ws->next, start, nend);
-      if (__builtin_expect (tmp == start, 1))
+      if (LIKELY (tmp == start))
        break;
 
       start = tmp;
diff --git a/libgomp/iter_ull.c b/libgomp/iter_ull.c
index 491af74da40..2d11b4d34e2 100644
--- a/libgomp/iter_ull.c
+++ b/libgomp/iter_ull.c
@@ -67,7 +67,7 @@ gomp_iter_ull_static_next (gomp_ull *pstart, gomp_ull *pend)
        return 1;
 
       /* Compute the total number of iterations.  */
-      if (__builtin_expect (ws->mode, 0) == 0)
+      if (LIKELY (ws->mode == 0))
        n = (ws->end_ull - ws->next_ull + ws->incr_ull - 1) / ws->incr_ull;
       else
        n = (ws->next_ull - ws->end_ull - ws->incr_ull - 1) / -ws->incr_ull;
@@ -108,7 +108,7 @@ gomp_iter_ull_static_next (gomp_ull *pstart, gomp_ull *pend)
       /* Otherwise, each thread gets exactly chunk_size iterations
         (if available) each time through the loop.  */
 
-      if (__builtin_expect (ws->mode, 0) == 0)
+      if (LIKELY (ws->mode == 0))
        n = (ws->end_ull - ws->next_ull + ws->incr_ull - 1) / ws->incr_ull;
       else
        n = (ws->next_ull - ws->end_ull - ws->incr_ull - 1) / -ws->incr_ull;
@@ -159,7 +159,7 @@ gomp_iter_ull_dynamic_next_locked (gomp_ull *pstart, gomp_ull *pend)
 
   chunk = ws->chunk_size_ull;
   left = ws->end_ull - start;
-  if (__builtin_expect (ws->mode & 2, 0))
+  if (UNLIKELY (ws->mode & 2))
     {
       if (chunk < left)
        chunk = left;
@@ -192,10 +192,10 @@ gomp_iter_ull_dynamic_next (gomp_ull *pstart, gomp_ull *pend)
   end = ws->end_ull;
   chunk = ws->chunk_size_ull;
 
-  if (__builtin_expect (ws->mode & 1, 1))
+  if (LIKELY (ws->mode & 1))
     {
       gomp_ull tmp = __sync_fetch_and_add (&ws->next_ull, chunk);
-      if (__builtin_expect (ws->mode & 2, 0) == 0)
+      if (LIKELY ((ws->mode & 2) == 0))
        {
          if (tmp >= end)
            return false;
@@ -228,7 +228,7 @@ gomp_iter_ull_dynamic_next (gomp_ull *pstart, gomp_ull *pend)
       if (start == end)
        return false;
 
-      if (__builtin_expect (ws->mode & 2, 0))
+      if (UNLIKELY (ws->mode & 2))
        {
          if (chunk < left)
            chunk = left;
@@ -241,7 +241,7 @@ gomp_iter_ull_dynamic_next (gomp_ull *pstart, gomp_ull *pend)
       nend = start + chunk;
 
       tmp = __sync_val_compare_and_swap (&ws->next_ull, start, nend);
-      if (__builtin_expect (tmp == start, 1))
+      if (LIKELY (tmp == start))
        break;
 
       start = tmp;
@@ -272,7 +272,7 @@ gomp_iter_ull_guided_next_locked (gomp_ull *pstart, gomp_ull *pend)
     return false;
 
   start = ws->next_ull;
-  if (__builtin_expect (ws->mode, 0) == 0)
+  if (LIKELY (ws->mode == 0))
     n = (ws->end_ull - start) / ws->incr_ull;
   else
     n = (start - ws->end_ull) / -ws->incr_ull;
@@ -318,7 +318,7 @@ gomp_iter_ull_guided_next (gomp_ull *pstart, gomp_ull *pend)
       if (start == end)
        return false;
 
-      if (__builtin_expect (ws->mode, 0) == 0)
+      if (LIKELY (ws->mode == 0))
        n = (end - start) / incr;
       else
        n = (start - end) / -incr;
@@ -326,13 +326,13 @@ gomp_iter_ull_guided_next (gomp_ull *pstart, gomp_ull *pend)
 
       if (q < chunk_size)
        q = chunk_size;
-      if (__builtin_expect (q <= n, 1))
+      if (LIKELY (q <= n))
        nend = start + q * incr;
       else
        nend = end;
 
       tmp = __sync_val_compare_and_swap (&ws->next_ull, start, nend);
-      if (__builtin_expect (tmp == start, 1))
+      if (LIKELY (tmp == start))
        break;
 
       start = tmp;
diff --git a/libgomp/libgomp.h b/libgomp/libgomp.h
index 74487e540d3..23e1fb72283 100644
--- a/libgomp/libgomp.h
+++ b/libgomp/libgomp.h
@@ -73,6 +73,9 @@
 # pragma GCC visibility push(hidden)
 #endif
 
+#define LIKELY(x) (__builtin_expect ((x), 1))
+#define UNLIKELY(x) (__builtin_expect ((x), 0))
+
 /* If we were a C++ library, we'd get this from <std/atomic>.  */
 enum memmodel
 {
@@ -180,12 +183,12 @@ extern void gomp_debug (int, const char *, ...)
        __attribute__ ((format (printf, 2, 3)));
 #define gomp_vdebug(KIND, FMT, VALIST) \
   do { \
-    if (__builtin_expect (gomp_debug_var, 0)) \
+    if (UNLIKELY (gomp_debug_var)) \
       (gomp_vdebug) ((KIND), (FMT), (VALIST)); \
   } while (0)
 #define gomp_debug(KIND, ...) \
   do { \
-    if (__builtin_expect (gomp_debug_var, 0)) \
+    if (UNLIKELY (gomp_debug_var)) \
       (gomp_debug) ((KIND), __VA_ARGS__); \
   } while (0)
 extern void gomp_verror (const char *, va_list);
@@ -996,7 +999,7 @@ extern void gomp_workshare_task_reduction_register (uintptr_t *, uintptr_t *);
 static void inline
 gomp_finish_task (struct gomp_task *task)
 {
-  if (__builtin_expect (task->depend_hash != NULL, 0))
+  if (UNLIKELY (task->depend_hash != NULL))
     free (task->depend_hash);
 }
 
@@ -1316,7 +1319,7 @@ static inline void
 gomp_work_share_init_done (void)
 {
   struct gomp_thread *thr = gomp_thread ();
-  if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
+  if (LIKELY (thr->ts.last_work_share != NULL))
     gomp_ptrlock_set (&thr->ts.last_work_share->next_ws, thr->ts.work_share);
 }
 
diff --git a/libgomp/loop.c b/libgomp/loop.c
index be85162bb1e..2b548b5f04f 100644
--- a/libgomp/loop.c
+++ b/libgomp/loop.c
@@ -59,21 +59,21 @@ gomp_loop_init (struct gomp_work_share *ws, long start, long end, long incr,
        struct gomp_team *team = thr->ts.team;
        long nthreads = team ? team->nthreads : 1;
 
-       if (__builtin_expect (incr > 0, 1))
+       if (LIKELY (incr > 0))
          {
            /* Cheap overflow protection.  */
-           if (__builtin_expect ((nthreads | ws->chunk_size)
-                                 >= 1UL << (sizeof (long)
-                                            * __CHAR_BIT__ / 2 - 1), 0))
+           if (UNLIKELY ((nthreads | ws->chunk_size)
+                         >= 1UL << (sizeof (long)
+                                    * __CHAR_BIT__ / 2 - 1)))
              ws->mode = 0;
            else
              ws->mode = ws->end < (LONG_MAX
                                    - (nthreads + 1) * ws->chunk_size);
          }
        /* Cheap overflow protection.  */
-       else if (__builtin_expect ((nthreads | -ws->chunk_size)
-                                  >= 1UL << (sizeof (long)
-                                             * __CHAR_BIT__ / 2 - 1), 0))
+       else if (UNLIKELY ((nthreads | -ws->chunk_size)
+                          >= 1UL << (sizeof (long)
+                                     * __CHAR_BIT__ / 2 - 1)))
          ws->mode = 0;
        else
          ws->mode = ws->end > (nthreads + 1) * -ws->chunk_size - LONG_MAX;
diff --git a/libgomp/loop_ull.c b/libgomp/loop_ull.c
index 602737296d4..167a53e16f7 100644
--- a/libgomp/loop_ull.c
+++ b/libgomp/loop_ull.c
@@ -62,19 +62,19 @@ gomp_loop_ull_init (struct gomp_work_share *ws, bool up, gomp_ull start,
        struct gomp_team *team = thr->ts.team;
        long nthreads = team ? team->nthreads : 1;
 
-       if (__builtin_expect (up, 1))
+       if (LIKELY (up))
          {
            /* Cheap overflow protection.  */
-           if (__builtin_expect ((nthreads | ws->chunk_size_ull)
-                                 < 1ULL << (sizeof (gomp_ull)
-                                            * __CHAR_BIT__ / 2 - 1), 1))
+           if (LIKELY ((nthreads | ws->chunk_size_ull)
+                       < 1ULL << (sizeof (gomp_ull)
+                                  * __CHAR_BIT__ / 2 - 1)))
              ws->mode = ws->end_ull < (__LONG_LONG_MAX__ * 2ULL + 1
                                        - (nthreads + 1) * ws->chunk_size_ull);
          }
        /* Cheap overflow protection.  */
-       else if (__builtin_expect ((nthreads | -ws->chunk_size_ull)
-                                  < 1ULL << (sizeof (gomp_ull)
-                                             * __CHAR_BIT__ / 2 - 1), 1))
+       else if (LIKELY ((nthreads | -ws->chunk_size_ull)
+                        < 1ULL << (sizeof (gomp_ull)
+                                   * __CHAR_BIT__ / 2 - 1)))
          ws->mode = ws->end_ull > ((nthreads + 1) * -ws->chunk_size_ull
                                    - (__LONG_LONG_MAX__ * 2ULL + 1));
       }
diff --git a/libgomp/oacc-int.h b/libgomp/oacc-int.h
index 2642be957ce..e5ef35f7862 100644
--- a/libgomp/oacc-int.h
+++ b/libgomp/oacc-int.h
@@ -147,8 +147,7 @@ async_synchronous_p (int async)
 extern bool goacc_prof_enabled;
 /* Tune for the (very common) case that profiling is not enabled.  */
 #define GOACC_PROF_ENABLED \
-  (__builtin_expect (__atomic_load_n (&goacc_prof_enabled, \
-                                     MEMMODEL_ACQUIRE) == true, false))
+  (UNLIKELY (__atomic_load_n (&goacc_prof_enabled, MEMMODEL_ACQUIRE) == true))
 
 void goacc_profiling_initialize (void);
 bool _goacc_profiling_dispatch_p (bool);
diff --git a/libgomp/oacc-profiling.c b/libgomp/oacc-profiling.c
index 3df6eeba1c0..220cdad6233 100644
--- a/libgomp/oacc-profiling.c
+++ b/libgomp/oacc-profiling.c
@@ -504,7 +504,7 @@ _goacc_profiling_dispatch_p (bool check_not_nested_p)
   bool ret;
 
   struct goacc_thread *thr = goacc_thread ();
-  if (__builtin_expect (thr == NULL, false))
+  if (UNLIKELY (thr == NULL))
     {
       /* If we don't have any per-thread state yet, that means that per-thread
         callback dispatch has not been explicitly disabled (which only a call
@@ -523,7 +523,7 @@ _goacc_profiling_dispatch_p (bool check_not_nested_p)
          assert (thr->api_info == NULL);
        }
 
-      if (__builtin_expect (!thr->prof_callbacks_enabled, true))
+      if (LIKELY (!thr->prof_callbacks_enabled))
        {
          gomp_debug (0, "  %s: disabled for this thread\n", __FUNCTION__);
          ret = false;
@@ -534,7 +534,7 @@ _goacc_profiling_dispatch_p (bool check_not_nested_p)
   gomp_mutex_lock (&goacc_prof_lock);
 
   /* 'goacc_prof_callbacks_enabled[acc_ev_none]' acts as a global toggle.  */
-  if (__builtin_expect (!goacc_prof_callbacks_enabled[acc_ev_none], true))
+  if (LIKELY (!goacc_prof_callbacks_enabled[acc_ev_none]))
     {
       gomp_debug (0, "  %s: disabled globally\n", __FUNCTION__);
       ret = false;
@@ -560,7 +560,7 @@ _goacc_profiling_setup_p (struct goacc_thread *thr,
 
   /* If we don't have any per-thread state yet, we can't register 'prof_info'
      and 'api_info'.  */
-  if (__builtin_expect (thr == NULL, false))
+  if (UNLIKELY (thr == NULL))
     {
       gomp_debug (0, "Can't dispatch OpenACC Profiling Interface events for"
                  " the current call, construct, or directive\n");
diff --git a/libgomp/ordered.c b/libgomp/ordered.c
index ca5cf09efec..3df3443c487 100644
--- a/libgomp/ordered.c
+++ b/libgomp/ordered.c
@@ -379,14 +379,14 @@ GOMP_doacross_post (long *counts)
   unsigned long ent;
   unsigned int i;
 
-  if (__builtin_expect (doacross == NULL, 0)
-      || __builtin_expect (doacross->array == NULL, 0))
+  if (UNLIKELY (doacross == NULL)
+      || UNLIKELY (doacross->array == NULL))
     {
       __sync_synchronize ();
       return;
     }
 
-  if (__builtin_expect (ws->sched == GFS_STATIC, 1))
+  if (LIKELY (ws->sched == GFS_STATIC))
     ent = thr->ts.team_id;
   else if (ws->sched == GFS_GUIDED)
     ent = counts[0];
@@ -395,7 +395,7 @@ GOMP_doacross_post (long *counts)
   unsigned long *array = (unsigned long *) (doacross->array
                                            + ent * doacross->elt_sz);
 
-  if (__builtin_expect (doacross->flattened, 1))
+  if (LIKELY (doacross->flattened))
     {
       unsigned long flattened
        = (unsigned long) counts[0] << doacross->shift_counts[0];
@@ -431,14 +431,14 @@ GOMP_doacross_wait (long first, ...)
   unsigned long ent;
   unsigned int i;
 
-  if (__builtin_expect (doacross == NULL, 0)
-      || __builtin_expect (doacross->array == NULL, 0))
+  if (UNLIKELY (doacross == NULL)
+      || UNLIKELY (doacross->array == NULL))
     {
       __sync_synchronize ();
       return;
     }
 
-  if (__builtin_expect (ws->sched == GFS_STATIC, 1))
+  if (LIKELY (ws->sched == GFS_STATIC))
     {
       if (ws->chunk_size == 0)
        {
@@ -458,7 +458,7 @@ GOMP_doacross_wait (long first, ...)
   unsigned long *array = (unsigned long *) (doacross->array
                                            + ent * doacross->elt_sz);
 
-  if (__builtin_expect (doacross->flattened, 1))
+  if (LIKELY (doacross->flattened))
     {
       unsigned long flattened
        = (unsigned long) first << doacross->shift_counts[0];
@@ -635,21 +635,21 @@ GOMP_doacross_ull_post (gomp_ull *counts)
   unsigned long ent;
   unsigned int i;
 
-  if (__builtin_expect (doacross == NULL, 0)
-      || __builtin_expect (doacross->array == NULL, 0))
+  if (UNLIKELY (doacross == NULL)
+      || UNLIKELY (doacross->array == NULL))
     {
       __sync_synchronize ();
       return;
     }
 
-  if (__builtin_expect (ws->sched == GFS_STATIC, 1))
+  if (LIKELY (ws->sched == GFS_STATIC))
     ent = thr->ts.team_id;
   else if (ws->sched == GFS_GUIDED)
     ent = counts[0];
   else
     ent = counts[0] / doacross->chunk_size_ull;
 
-  if (__builtin_expect (doacross->flattened, 1))
+  if (LIKELY (doacross->flattened))
     {
       unsigned long *array = (unsigned long *) (doacross->array
                              + ent * doacross->elt_sz);
@@ -708,14 +708,14 @@ GOMP_doacross_ull_wait (gomp_ull first, ...)
   unsigned long ent;
   unsigned int i;
 
-  if (__builtin_expect (doacross == NULL, 0)
-      || __builtin_expect (doacross->array == NULL, 0))
+  if (UNLIKELY (doacross == NULL)
+      || UNLIKELY (doacross->array == NULL))
     {
       __sync_synchronize ();
       return;
     }
 
-  if (__builtin_expect (ws->sched == GFS_STATIC, 1))
+  if (LIKELY (ws->sched == GFS_STATIC))
     {
       if (ws->chunk_size_ull == 0)
        {
@@ -733,7 +733,7 @@ GOMP_doacross_ull_wait (gomp_ull first, ...)
   else
     ent = first / doacross->chunk_size_ull;
 
-  if (__builtin_expect (doacross->flattened, 1))
+  if (LIKELY (doacross->flattened))
     {
       unsigned long *array = (unsigned long *) (doacross->array
                                                + ent * doacross->elt_sz);
diff --git a/libgomp/parallel.c b/libgomp/parallel.c
index f7722990566..26fc351fd58 100644
--- a/libgomp/parallel.c
+++ b/libgomp/parallel.c
@@ -82,7 +82,7 @@ gomp_resolve_num_threads (unsigned specified, unsigned count)
     }
 
   /* UINT_MAX stands for infinity.  */
-  if (__builtin_expect (icv->thread_limit_var == UINT_MAX, 1)
+  if (LIKELY (icv->thread_limit_var == UINT_MAX)
       || max_num_threads == 1)
     return max_num_threads;
 
@@ -138,7 +138,7 @@ void
 GOMP_parallel_end (void)
 {
   struct gomp_task_icv *icv = gomp_icv (false);
-  if (__builtin_expect (icv->thread_limit_var != UINT_MAX, 0))
+  if (UNLIKELY (icv->thread_limit_var != UINT_MAX))
     {
       struct gomp_thread *thr = gomp_thread ();
       struct gomp_team *team = thr->ts.team;
diff --git a/libgomp/plugin/plugin-gcn.c b/libgomp/plugin/plugin-gcn.c
index 1c0436842da..8b49d4e0f40 100644
--- a/libgomp/plugin/plugin-gcn.c
+++ b/libgomp/plugin/plugin-gcn.c
@@ -1838,7 +1838,7 @@ alloc_by_agent (struct agent_info *agent, size_t size)
 
   struct goacc_thread *thr = GOMP_PLUGIN_goacc_thread ();
   bool profiling_dispatch_p
-    = __builtin_expect (thr != NULL && thr->prof_info != NULL, false);
+    = UNLIKELY (thr != NULL && thr->prof_info != NULL);
   if (profiling_dispatch_p)
     {
       acc_prof_info *prof_info = thr->prof_info;
@@ -3131,7 +3131,7 @@ gcn_exec (struct kernel_info *kernel, size_t mapnum, void **hostaddrs,
   acc_prof_info *prof_info = thr->prof_info;
   acc_event_info enqueue_launch_event_info;
   acc_api_info *api_info = thr->api_info;
-  bool profiling_dispatch_p = __builtin_expect (prof_info != NULL, false);
+  bool profiling_dispatch_p = UNLIKELY (prof_info != NULL);
   if (profiling_dispatch_p)
     {
       prof_info->event_type = acc_ev_enqueue_launch_start;
@@ -3671,7 +3671,7 @@ GOMP_OFFLOAD_free (int device, void *ptr)
 
   struct goacc_thread *thr = GOMP_PLUGIN_goacc_thread ();
   bool profiling_dispatch_p
-    = __builtin_expect (thr != NULL && thr->prof_info != NULL, false);
+    = UNLIKELY (thr != NULL && thr->prof_info != NULL);
   if (profiling_dispatch_p)
     {
       acc_prof_info *prof_info = thr->prof_info;
diff --git a/libgomp/plugin/plugin-nvptx.c b/libgomp/plugin/plugin-nvptx.c
index 387bcbbc52a..e7ee186b434 100644
--- a/libgomp/plugin/plugin-nvptx.c
+++ b/libgomp/plugin/plugin-nvptx.c
@@ -949,7 +949,7 @@ nvptx_exec (void (*fn), size_t mapnum, void **hostaddrs, void **devaddrs,
   acc_prof_info *prof_info = thr->prof_info;
   acc_event_info enqueue_launch_event_info;
   acc_api_info *api_info = thr->api_info;
-  bool profiling_p = __builtin_expect (prof_info != NULL, false);
+  bool profiling_p = UNLIKELY (prof_info != NULL);
   if (profiling_p)
     {
       prof_info->event_type = acc_ev_enqueue_launch_start;
@@ -1057,7 +1057,7 @@ nvptx_alloc (size_t s, bool suppress_errors)
   /* NOTE: We only do profiling stuff if the memory allocation succeeds.  */
   struct goacc_thread *thr = GOMP_PLUGIN_goacc_thread ();
   bool profiling_p
-    = __builtin_expect (thr != NULL && thr->prof_info != NULL, false);
+    = UNLIKELY (thr != NULL && thr->prof_info != NULL);
   if (profiling_p)
     goacc_profiling_acc_ev_alloc (thr, (void *) d, s);
 
@@ -1125,7 +1125,7 @@ nvptx_free (void *p, struct ptx_device *ptx_dev)
   CUDA_CALL (cuMemFree, (CUdeviceptr) p);
   struct goacc_thread *thr = GOMP_PLUGIN_goacc_thread ();
   bool profiling_p
-    = __builtin_expect (thr != NULL && thr->prof_info != NULL, false);
+    = UNLIKELY (thr != NULL && thr->prof_info != NULL);
   if (profiling_p)
     goacc_profiling_acc_ev_free (thr, p);
 
@@ -1460,7 +1460,7 @@ GOMP_OFFLOAD_openacc_exec (void (*fn) (void *), size_t mapnum,
   acc_prof_info *prof_info = thr->prof_info;
   acc_event_info data_event_info;
   acc_api_info *api_info = thr->api_info;
-  bool profiling_p = __builtin_expect (prof_info != NULL, false);
+  bool profiling_p = UNLIKELY (prof_info != NULL);
 
   void **hp = NULL;
   CUdeviceptr dp = 0;
@@ -1548,7 +1548,7 @@ GOMP_OFFLOAD_openacc_async_exec (void (*fn) (void *), size_t mapnum,
   acc_prof_info *prof_info = thr->prof_info;
   acc_event_info data_event_info;
   acc_api_info *api_info = thr->api_info;
-  bool profiling_p = __builtin_expect (prof_info != NULL, false);
+  bool profiling_p = UNLIKELY (prof_info != NULL);
 
   void **hp = NULL;
   CUdeviceptr dp = 0;
diff --git a/libgomp/priority_queue.h b/libgomp/priority_queue.h
index e032d0799f3..93bfc4faf0c 100644
--- a/libgomp/priority_queue.h
+++ b/libgomp/priority_queue.h
@@ -143,7 +143,7 @@ extern struct gomp_task *priority_tree_next_task (enum priority_queue_type,
 static inline bool
 priority_queue_multi_p (struct priority_queue *head)
 {
-  return __builtin_expect (head->t.root != NULL, 0);
+  return UNLIKELY (head->t.root != NULL);
 }
 
 /* Initialize a priority queue.  */
@@ -300,7 +300,7 @@ priority_tree_insert (enum priority_queue_type type,
                      bool adjust_parent_depends_on,
                      bool task_is_parent_depends_on)
 {
-  if (__builtin_expect (head->t.root == NULL, 0))
+  if (UNLIKELY (head->t.root == NULL))
     {
       /* The first time around, transfer any priority 0 items to the
         tree.  */
@@ -349,7 +349,7 @@ priority_queue_insert (enum priority_queue_type type,
   if (priority_queue_task_in_queue_p (type, head, task))
     gomp_fatal ("Attempt to insert existing task %p", task);
 #endif
-  if (priority_queue_multi_p (head) || __builtin_expect (priority > 0, 0))
+  if (priority_queue_multi_p (head) || UNLIKELY (priority > 0))
     priority_tree_insert (type, head, task, priority, pos,
                          adjust_parent_depends_on,
                          task_is_parent_depends_on);
diff --git a/libgomp/single.c b/libgomp/single.c
index 79a3f8eb4b6..ca30893db12 100644
--- a/libgomp/single.c
+++ b/libgomp/single.c
@@ -40,7 +40,7 @@ GOMP_single_start (void)
   struct gomp_team *team = thr->ts.team;
   unsigned long single_count;
 
-  if (__builtin_expect (team == NULL, 0))
+  if (UNLIKELY (team == NULL))
     return true;
 
   single_count = thr->ts.single_count++;
diff --git a/libgomp/target.c b/libgomp/target.c
index 4740f8a45d3..96993ab260a 100644
--- a/libgomp/target.c
+++ b/libgomp/target.c
@@ -348,7 +348,7 @@ gomp_copy_host2dev (struct gomp_device_descr *devicep,
                    void *d, const void *h, size_t sz,
                    bool ephemeral, struct gomp_coalesce_buf *cbuf)
 {
-  if (__builtin_expect (aq != NULL, 0))
+  if (UNLIKELY (aq != NULL))
     {
       /* See 'gomp_coalesce_buf_add'.  */
       assert (!cbuf);
@@ -409,7 +409,7 @@ gomp_copy_dev2host (struct gomp_device_descr *devicep,
                    struct goacc_asyncqueue *aq,
                    void *h, const void *d, size_t sz)
 {
-  if (__builtin_expect (aq != NULL, 0))
+  if (UNLIKELY (aq != NULL))
     goacc_device_copy_async (devicep, devicep->openacc.async.dev2host_func,
                             "host", h, "dev", d, NULL, sz, aq);
   else
@@ -2659,7 +2659,7 @@ GOMP_target_ext (int device, void (*fn) (void *), size_t mapnum,
       /* Create a team if we don't have any around, as nowait
         target tasks make sense to run asynchronously even when
         outside of any parallel.  */
-      if (__builtin_expect (thr->ts.team == NULL, 0))
+      if (UNLIKELY (thr->ts.team == NULL))
        {
          struct gomp_team *team = gomp_new_team (1);
          struct gomp_task *task = thr->task;
@@ -2898,7 +2898,7 @@ GOMP_target_update_ext (int device, size_t mapnum, void **hostaddrs,
              struct gomp_team *team = thr->ts.team;
              /* If parallel or taskgroup has been cancelled, don't start new
                 tasks.  */
-             if (__builtin_expect (gomp_cancel_var, 0) && team)
+             if (UNLIKELY (gomp_cancel_var) && team)
                {
                  if (gomp_team_barrier_cancelled (&team->barrier))
                    return;
@@ -2926,7 +2926,7 @@ GOMP_target_update_ext (int device, size_t mapnum, void **hostaddrs,
   struct gomp_thread *thr = gomp_thread ();
   struct gomp_team *team = thr->ts.team;
   /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
-  if (__builtin_expect (gomp_cancel_var, 0) && team)
+  if (UNLIKELY (gomp_cancel_var) && team)
     {
       if (gomp_team_barrier_cancelled (&team->barrier))
        return;
@@ -3091,7 +3091,7 @@ GOMP_target_enter_exit_data (int device, size_t mapnum, void **hostaddrs,
              struct gomp_team *team = thr->ts.team;
              /* If parallel or taskgroup has been cancelled, don't start new
                 tasks.  */
-             if (__builtin_expect (gomp_cancel_var, 0) && team)
+             if (UNLIKELY (gomp_cancel_var) && team)
                {
                  if (gomp_team_barrier_cancelled (&team->barrier))
                    return;
@@ -3119,7 +3119,7 @@ GOMP_target_enter_exit_data (int device, size_t mapnum, void **hostaddrs,
   struct gomp_thread *thr = gomp_thread ();
   struct gomp_team *team = thr->ts.team;
   /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
-  if (__builtin_expect (gomp_cancel_var, 0) && team)
+  if (UNLIKELY (gomp_cancel_var) && team)
     {
       if (gomp_team_barrier_cancelled (&team->barrier))
        return;
diff --git a/libgomp/task.c b/libgomp/task.c
index 30cd046df2a..282dc451eff 100644
--- a/libgomp/task.c
+++ b/libgomp/task.c
@@ -238,7 +238,7 @@ gomp_task_handle_depend (struct gomp_task *task, struct gomp_task *parent,
          }
     }
   task->num_dependees = 0;
-  if (__builtin_expect (parent->depend_all_memory && ndepend, false))
+  if (UNLIKELY (parent->depend_all_memory && ndepend))
     {
       struct gomp_task *tsk = parent->depend_all_memory;
       if (tsk->dependers == NULL)
@@ -266,7 +266,7 @@ gomp_task_handle_depend (struct gomp_task *task, struct gomp_task *parent,
        }
       task->num_dependees++;
     }
-  if (__builtin_expect (all_memory, false))
+  if (UNLIKELY (all_memory))
     {
       /* A task with depend(inout: omp_all_memory) depends on all previous
         sibling tasks which have any dependencies and all later sibling
@@ -509,7 +509,7 @@ GOMP_task (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *),
 #endif
 
   /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
-  if (__builtin_expect (gomp_cancel_var, 0) && team)
+  if (UNLIKELY (gomp_cancel_var) && team)
     {
       if (gomp_team_barrier_cancelled (&team->barrier))
        return;
@@ -524,7 +524,7 @@ GOMP_task (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *),
        }
     }
 
-  if (__builtin_expect ((flags & GOMP_TASK_FLAG_PRIORITY) != 0, 0))
+  if (UNLIKELY ((flags & GOMP_TASK_FLAG_PRIORITY) != 0))
     {
       priority = priority_arg;
       if (priority > gomp_max_task_priority_var)
@@ -572,7 +572,7 @@ GOMP_task (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *),
          task.taskgroup = thr->task->taskgroup;
        }
       thr->task = &task;
-      if (__builtin_expect (cpyfn != NULL, 0))
+      if (UNLIKELY (cpyfn != NULL))
        {
          char buf[arg_size + arg_align - 1];
          char *arg = (char *) (((uintptr_t) buf + arg_align - 1)
@@ -654,7 +654,7 @@ GOMP_task (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *),
       gomp_mutex_lock (&team->task_lock);
       /* If parallel or taskgroup has been cancelled, don't start new
         tasks.  */
-      if (__builtin_expect (gomp_cancel_var, 0)
+      if (UNLIKELY (gomp_cancel_var)
          && !task->copy_ctors_done)
        {
          if (gomp_team_barrier_cancelled (&team->barrier))
@@ -694,7 +694,7 @@ GOMP_task (void (*fn) (void *), void *data, void (*cpyfn) 
(void *, void *),
            }
          /* Check for taskwait nowait depend which doesn't need to wait for
             anything.  */
-         if (__builtin_expect (fn == empty_task, 0))
+         if (UNLIKELY (fn == empty_task))
            {
              if (taskgroup)
                taskgroup->num_children--;
@@ -875,7 +875,7 @@ gomp_create_target_task (struct gomp_device_descr *devicep,
   struct gomp_team *team = thr->ts.team;
 
   /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
-  if (__builtin_expect (gomp_cancel_var, 0) && team)
+  if (UNLIKELY (gomp_cancel_var) && team)
     {
       if (gomp_team_barrier_cancelled (&team->barrier))
        return true;
@@ -993,7 +993,7 @@ gomp_create_target_task (struct gomp_device_descr *devicep,
   task->final_task = 0;
   gomp_mutex_lock (&team->task_lock);
   /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
-  if (__builtin_expect (gomp_cancel_var, 0))
+  if (UNLIKELY (gomp_cancel_var))
     {
       if (gomp_team_barrier_cancelled (&team->barrier))
        {
@@ -1216,7 +1216,7 @@ priority_list_downgrade_task (enum priority_queue_type type,
 
   /* If the current task is the last_parent_depends_on for its
      priority, adjust last_parent_depends_on appropriately.  */
-  if (__builtin_expect (child_task->parent_depends_on, 0)
+  if (UNLIKELY (child_task->parent_depends_on)
       && list->last_parent_depends_on == node)
     {
       struct gomp_task *prev_child = priority_node_to_task (type, node->prev);
@@ -1304,7 +1304,7 @@ gomp_task_run_pre (struct gomp_task *child_task, struct gomp_task *parent,
 
   if (--team->task_queued_count == 0)
     gomp_team_barrier_clear_task_pending (&team->barrier);
-  if (__builtin_expect (gomp_cancel_var, 0)
+  if (UNLIKELY (gomp_cancel_var)
       && !child_task->copy_ctors_done)
     {
       if (gomp_team_barrier_cancelled (&team->barrier))
@@ -1378,11 +1378,11 @@ gomp_task_run_post_handle_dependers (struct gomp_task *child_task,
        continue;
 
       struct gomp_taskgroup *taskgroup = task->taskgroup;
-      if (__builtin_expect (task->fn == empty_task, 0))
+      if (UNLIKELY (task->fn == empty_task))
        {
          if (!parent)
            task->parent = NULL;
-         else if (__builtin_expect (task->parent_depends_on, 0)
+         else if (UNLIKELY (task->parent_depends_on)
                   && --parent->taskwait->n_depend == 0
                   && parent->taskwait->in_depend_wait)
            {
@@ -1498,7 +1498,7 @@ gomp_task_run_post_remove_parent (struct gomp_task *child_task)
   /* If this was the last task the parent was depending on,
      synchronize with gomp_task_maybe_wait_for_dependencies so it can
      clean up and return.  */
-  if (__builtin_expect (child_task->parent_depends_on, 0)
+  if (UNLIKELY (child_task->parent_depends_on)
       && --parent->taskwait->n_depend == 0
       && parent->taskwait->in_depend_wait)
     {
@@ -1584,7 +1584,7 @@ gomp_barrier_handle_tasks (gomp_barrier_state_t state)
                                        &ignored);
          cancelled = gomp_task_run_pre (child_task, child_task->parent,
                                         team);
-         if (__builtin_expect (cancelled, 0))
+         if (UNLIKELY (cancelled))
            {
              if (to_free)
                {
@@ -1625,7 +1625,7 @@ gomp_barrier_handle_tasks (gomp_barrier_state_t state)
       if (child_task)
        {
          thr->task = child_task;
-         if (__builtin_expect (child_task->fn == NULL, 0))
+         if (UNLIKELY (child_task->fn == NULL))
            {
              if (gomp_target_task_fn (child_task->fn_data))
                {
@@ -1743,7 +1743,7 @@ GOMP_taskwait (void)
          child_task = next_task;
          cancelled
            = gomp_task_run_pre (child_task, task, team);
-         if (__builtin_expect (cancelled, 0))
+         if (UNLIKELY (cancelled))
            {
              if (to_free)
                {
@@ -1784,7 +1784,7 @@ GOMP_taskwait (void)
       if (child_task)
        {
          thr->task = child_task;
-         if (__builtin_expect (child_task->fn == NULL, 0))
+         if (UNLIKELY (child_task->fn == NULL))
            {
              if (gomp_target_task_fn (child_task->fn_data))
                {
@@ -1866,7 +1866,7 @@ GOMP_taskwait_depend (void **depend)
   struct gomp_team *team = thr->ts.team;
 
   /* If parallel or taskgroup has been cancelled, return early.  */
-  if (__builtin_expect (gomp_cancel_var, 0) && team)
+  if (UNLIKELY (gomp_cancel_var) && team)
     {
       if (gomp_team_barrier_cancelled (&team->barrier))
        return;
@@ -1935,7 +1935,7 @@ gomp_task_maybe_wait_for_dependencies (void **depend)
       n = 5;
     }
   gomp_mutex_lock (&team->task_lock);
-  if (__builtin_expect (task->depend_all_memory && ndepend, false))
+  if (UNLIKELY (task->depend_all_memory && ndepend))
     {
       struct gomp_task *tsk = task->depend_all_memory;
       if (!tsk->parent_depends_on)
@@ -1950,7 +1950,7 @@ gomp_task_maybe_wait_for_dependencies (void **depend)
     {
       elem.addr = depend[i + n];
       elem.is_in = i >= nout;
-      if (__builtin_expect (i >= normal, 0))
+      if (UNLIKELY (i >= normal))
        {
          void **d = (void **) elem.addr;
          switch ((uintptr_t) d[1])
@@ -1971,7 +1971,7 @@ gomp_task_maybe_wait_for_dependencies (void **depend)
            }
          elem.addr = d[0];
        }
-      if (__builtin_expect (elem.addr == NULL && !elem.is_in, false))
+      if (UNLIKELY (elem.addr == NULL && !elem.is_in))
        {
          size_t size = htab_size (task->depend_hash);
          if (htab_elements (task->depend_hash) * 8 < size && size > 32)
@@ -2070,7 +2070,7 @@ gomp_task_maybe_wait_for_dependencies (void **depend)
          child_task = next_task;
          cancelled
            = gomp_task_run_pre (child_task, task, team);
-         if (__builtin_expect (cancelled, 0))
+         if (UNLIKELY (cancelled))
            {
              if (to_free)
                {
@@ -2102,7 +2102,7 @@ gomp_task_maybe_wait_for_dependencies (void **depend)
       if (child_task)
        {
          thr->task = child_task;
-         if (__builtin_expect (child_task->fn == NULL, 0))
+         if (UNLIKELY (child_task->fn == NULL))
            {
              if (gomp_target_task_fn (child_task->fn_data))
                {
@@ -2212,7 +2212,7 @@ GOMP_taskgroup_end (void)
   if (team == NULL)
     return;
   taskgroup = task->taskgroup;
-  if (__builtin_expect (taskgroup == NULL, 0)
+  if (UNLIKELY (taskgroup == NULL)
       && thr->ts.level == 0)
     {
       /* This can happen if GOMP_taskgroup_start is called when
@@ -2270,7 +2270,7 @@ GOMP_taskgroup_end (void)
        {
          cancelled
            = gomp_task_run_pre (child_task, child_task->parent, team);
-         if (__builtin_expect (cancelled, 0))
+         if (UNLIKELY (cancelled))
            {
              if (to_free)
                {
@@ -2306,7 +2306,7 @@ GOMP_taskgroup_end (void)
       if (child_task)
        {
          thr->task = child_task;
-         if (__builtin_expect (child_task->fn == NULL, 0))
+         if (UNLIKELY (child_task->fn == NULL))
            {
              if (gomp_target_task_fn (child_task->fn_data))
                {
@@ -2382,7 +2382,7 @@ gomp_reduction_register (uintptr_t *data, uintptr_t *old, uintptr_t *orig,
   struct htab *old_htab = NULL, *new_htab;
   do
     {
-      if (__builtin_expect (orig != NULL, 0))
+      if (UNLIKELY (orig != NULL))
        {
          /* For worksharing task reductions, memory has been allocated
             already by some other thread that encountered the construct
@@ -2515,7 +2515,7 @@ GOMP_taskgroup_reduction_register (uintptr_t *data)
   struct gomp_team *team = thr->ts.team;
   struct gomp_task *task;
   unsigned nthreads;
-  if (__builtin_expect (team == NULL, 0))
+  if (UNLIKELY (team == NULL))
     {
       /* The task reduction code needs a team and task, so for
         orphaned taskgroups just create the implicit team.  */
@@ -2576,7 +2576,7 @@ GOMP_task_reduction_remap (size_t cnt, size_t cntorig, void **ptrs)
             for one thread.  */
          d = (uintptr_t *) p[2];
          ptrs[i] = (void *) (d[2] + id * d[1] + p[1]);
-         if (__builtin_expect (i < cntorig, 0))
+         if (UNLIKELY (i < cntorig))
            ptrs[cnt + i] = (void *) p[0];
          continue;
        }
@@ -2592,7 +2592,7 @@ GOMP_task_reduction_remap (size_t cnt, size_t cntorig, void **ptrs)
                    "task modifier for %p", ptrs[i]);
       uintptr_t off = ((uintptr_t) ptrs[i] - d[2]) % d[1];
       ptrs[i] = (void *) (d[2] + id * d[1] + off);
-      if (__builtin_expect (i < cntorig, 0))
+      if (UNLIKELY (i < cntorig))
        {
          size_t lo = 0, hi = d[0] - 1;
          while (lo <= hi)
diff --git a/libgomp/taskloop.c b/libgomp/taskloop.c
index af175f4ac14..39145423133 100644
--- a/libgomp/taskloop.c
+++ b/libgomp/taskloop.c
@@ -180,7 +180,7 @@ GOMP_taskloop (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *),
 
   if (flags & GOMP_TASK_FLAG_NOGROUP)
     {
-      if (__builtin_expect (gomp_cancel_var, 0)
+      if (UNLIKELY (gomp_cancel_var)
          && thr->task
          && thr->task->taskgroup)
        {
@@ -211,7 +211,7 @@ GOMP_taskloop (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *),
       || team->task_count + num_tasks > 64 * team->nthreads)
     {
       unsigned long i;
-      if (__builtin_expect (cpyfn != NULL, 0))
+      if (UNLIKELY (cpyfn != NULL))
        {
          struct gomp_task task[num_tasks];
          struct gomp_task *parent = thr->task;
@@ -332,7 +332,7 @@ GOMP_taskloop (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *),
       gomp_mutex_lock (&team->task_lock);
       /* If parallel or taskgroup has been cancelled, don't start new
         tasks.  */
-      if (__builtin_expect (gomp_cancel_var, 0)
+      if (UNLIKELY (gomp_cancel_var)
          && cpyfn == NULL)
        {
          if (gomp_team_barrier_cancelled (&team->barrier))
diff --git a/libgomp/team.c b/libgomp/team.c
index cb6875d70fa..575c6549a0b 100644
--- a/libgomp/team.c
+++ b/libgomp/team.c
@@ -303,7 +303,7 @@ gomp_free_thread (void *arg __attribute__((unused)))
 #endif
       thr->thread_pool = NULL;
     }
-  if (thr->ts.level == 0 && __builtin_expect (thr->ts.team != NULL, 0))
+  if (thr->ts.level == 0 && UNLIKELY (thr->ts.team != NULL))
     gomp_team_end ();
   if (thr->task != NULL)
     {
@@ -341,10 +341,10 @@ gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
   pool = thr->thread_pool;
   task = thr->task;
   icv = task ? &task->icv : &gomp_global_icv;
-  if (__builtin_expect (gomp_places_list != NULL, 0) && thr->place == 0)
+  if (UNLIKELY (gomp_places_list != NULL) && thr->place == 0)
     {
       gomp_init_affinity ();
-      if (__builtin_expect (gomp_display_affinity_var, 0) && nthreads == 1)
+      if (UNLIKELY (gomp_display_affinity_var) && nthreads == 1)
        gomp_display_affinity_thread (gomp_thread_self (), &thr->ts,
                                      thr->place);
     }
@@ -370,14 +370,14 @@ gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
   thr->handle = pthread_self ();
 #endif
   nthreads_var = icv->nthreads_var;
-  if (__builtin_expect (gomp_nthreads_var_list != NULL, 0)
+  if (UNLIKELY (gomp_nthreads_var_list != NULL)
       && thr->ts.level < gomp_nthreads_var_list_len)
     nthreads_var = gomp_nthreads_var_list[thr->ts.level];
   bind_var = icv->bind_var;
   if (bind_var != omp_proc_bind_false && (flags & 7) != omp_proc_bind_false)
     bind_var = flags & 7;
   bind = bind_var;
-  if (__builtin_expect (gomp_bind_var_list != NULL, 0)
+  if (UNLIKELY (gomp_bind_var_list != NULL)
       && thr->ts.level < gomp_bind_var_list_len)
     bind_var = gomp_bind_var_list[thr->ts.level];
   gomp_init_task (thr->task, task, icv);
@@ -390,7 +390,7 @@ gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
 
   i = 1;
 
-  if (__builtin_expect (gomp_places_list != NULL, 0))
+  if (UNLIKELY (gomp_places_list != NULL))
     {
       /* Depending on chosen proc_bind model, set subpartition
         for the master thread and initialize helper variables
@@ -508,7 +508,7 @@ gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
          unsigned int place_partition_off = thr->ts.place_partition_off;
          unsigned int place_partition_len = thr->ts.place_partition_len;
          unsigned int place = 0;
-         if (__builtin_expect (gomp_places_list != NULL, 0))
+         if (UNLIKELY (gomp_places_list != NULL))
            {
              switch (bind)
                {
@@ -667,7 +667,7 @@ gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
          team->ordered_release[i] = &nthr->release;
        }
 
-      if (__builtin_expect (affinity_thr != NULL, 0))
+      if (UNLIKELY (affinity_thr != NULL))
        {
          /* If AFFINITY_THR is non-NULL just because we had to
             permute some threads in the pool, but we've managed
@@ -730,7 +730,7 @@ gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
 
     }
 
-  if (__builtin_expect (nthreads + affinity_count > old_threads_used, 0))
+  if (UNLIKELY (nthreads + affinity_count > old_threads_used))
     {
       long diff = (long) (nthreads + affinity_count) - (long) old_threads_used;
 
@@ -747,7 +747,7 @@ gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
     }
 
   attr = &gomp_thread_attr;
-  if (__builtin_expect (gomp_places_list != NULL, 0))
+  if (UNLIKELY (gomp_places_list != NULL))
     {
       size_t stacksize;
       pthread_attr_init (&thread_attr);
@@ -767,7 +767,7 @@ gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
       start_data->ts.place_partition_off = thr->ts.place_partition_off;
       start_data->ts.place_partition_len = thr->ts.place_partition_len;
       start_data->place = 0;
-      if (__builtin_expect (gomp_places_list != NULL, 0))
+      if (UNLIKELY (gomp_places_list != NULL))
        {
          switch (bind)
            {
@@ -862,7 +862,7 @@ gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
        gomp_fatal ("Thread creation failed: %s", strerror (err));
     }
 
-  if (__builtin_expect (attr == &thread_attr, 0))
+  if (UNLIKELY (attr == &thread_attr))
     pthread_attr_destroy (&thread_attr);
 
  do_release:
@@ -879,8 +879,8 @@ gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
      set to NTHREADS + AFFINITY_COUNT.  For NTHREADS < OLD_THREADS_COUNT,
      AFFINITY_COUNT if non-zero will be always at least
      OLD_THREADS_COUNT - NTHREADS.  */
-  if (__builtin_expect (nthreads < old_threads_used, 0)
-      || __builtin_expect (affinity_count, 0))
+  if (UNLIKELY (nthreads < old_threads_used)
+      || UNLIKELY (affinity_count))
     {
       long diff = (long) nthreads - (long) old_threads_used;
 
@@ -897,7 +897,7 @@ gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
       gomp_mutex_unlock (&gomp_managed_threads_lock);
 #endif
     }
-  if (__builtin_expect (gomp_display_affinity_var, 0))
+  if (UNLIKELY (gomp_display_affinity_var))
     {
       if (nested
          || nthreads != old_threads_used
@@ -933,7 +933,7 @@ gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
            }
        }
     }
-  if (__builtin_expect (affinity_thr != NULL, 0)
+  if (UNLIKELY (affinity_thr != NULL)
       && team->prev_ts.place_partition_len > 64)
     free (affinity_thr);
 }
@@ -954,7 +954,7 @@ gomp_team_end (void)
      team->barrier in a inconsistent state, we need to use a different
      counter here.  */
   gomp_team_barrier_wait_final (&team->barrier);
-  if (__builtin_expect (team->team_cancelled, 0))
+  if (UNLIKELY (team->team_cancelled))
     {
       struct gomp_work_share *ws = team->work_shares_to_free;
       do
@@ -973,7 +973,7 @@ gomp_team_end (void)
   gomp_end_task ();
   thr->ts = team->prev_ts;
 
-  if (__builtin_expect (thr->ts.level != 0, 0))
+  if (UNLIKELY (thr->ts.level != 0))
     {
 #ifdef HAVE_SYNC_BUILTINS
       __sync_fetch_and_add (&gomp_managed_threads, 1L - team->nthreads);
@@ -987,7 +987,7 @@ gomp_team_end (void)
       gomp_barrier_wait (&team->barrier);
     }
 
-  if (__builtin_expect (team->work_shares[0].next_alloc != NULL, 0))
+  if (UNLIKELY (team->work_shares[0].next_alloc != NULL))
     {
       struct gomp_work_share *ws = team->work_shares[0].next_alloc;
       do
@@ -1000,8 +1000,8 @@ gomp_team_end (void)
     }
   gomp_sem_destroy (&team->master_release);
 
-  if (__builtin_expect (thr->ts.team != NULL, 0)
-      || __builtin_expect (team->nthreads == 1, 0))
+  if (UNLIKELY (thr->ts.team != NULL)
+      || UNLIKELY (team->nthreads == 1))
     free_team (team);
   else
     {
diff --git a/libgomp/work.c b/libgomp/work.c
index c53625afe2c..7582a861901 100644
--- a/libgomp/work.c
+++ b/libgomp/work.c
@@ -102,13 +102,13 @@ gomp_init_work_share (struct gomp_work_share *ws, size_t ordered,
                      unsigned nthreads)
 {
   gomp_mutex_init (&ws->lock);
-  if (__builtin_expect (ordered, 0))
+  if (UNLIKELY (ordered))
     {
 #define INLINE_ORDERED_TEAM_IDS_SIZE \
   (sizeof (struct gomp_work_share) \
    - offsetof (struct gomp_work_share, inline_ordered_team_ids))
 
-      if (__builtin_expect (ordered != 1, 0))
+      if (UNLIKELY (ordered != 1))
        {
          size_t o = nthreads * sizeof (*ws->ordered_team_ids);
          o += __alignof__ (long long) - 1;
@@ -155,7 +155,7 @@ static inline void
 free_work_share (struct gomp_team *team, struct gomp_work_share *ws)
 {
   gomp_fini_work_share (ws);
-  if (__builtin_expect (team == NULL, 0))
+  if (UNLIKELY (team == NULL))
     free (ws);
   else
     {
@@ -244,7 +244,7 @@ gomp_work_share_end (void)
 
   if (gomp_barrier_last_thread (bstate))
     {
-      if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
+      if (LIKELY (thr->ts.last_work_share != NULL))
        {
          team->work_shares_to_free = thr->ts.work_share;
          free_work_share (team, thr->ts.last_work_share);
@@ -270,7 +270,7 @@ gomp_work_share_end_cancel (void)
 
   if (gomp_barrier_last_thread (bstate))
     {
-      if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
+      if (LIKELY (thr->ts.last_work_share != NULL))
        {
          team->work_shares_to_free = thr->ts.work_share;
          free_work_share (team, thr->ts.last_work_share);
@@ -300,7 +300,7 @@ gomp_work_share_end_nowait (void)
       return;
     }
 
-  if (__builtin_expect (thr->ts.last_work_share == NULL, 0))
+  if (UNLIKELY (thr->ts.last_work_share == NULL))
     return;
 
 #ifdef HAVE_SYNC_BUILTINS
-- 
2.36.1

Reply via email to