diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index af4af7c..76a0837 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -61,6 +61,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "fibheap.h"
 #include "opts.h"
 #include "diagnostic.h"
+#include "cfgloop.h"
 
 enum upper_128bits_state
 {
@@ -2635,6 +2636,54 @@ static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
   "btver1"
 };
 
+/* for_each_rtx callback used by ix86_loop_unroll_adjust (cast to
+   rtx_function there): count MULT rtxes (imuls) seen in a loop body.  */
+
+static int
+check_imul (rtx *x, unsigned *op_count)
+{
+  if (*x && GET_CODE (*x) == MULT)
+    (*op_count)++;
+  return 0;
+}
+
+/* Implement TARGET_LOOP_UNROLL_ADJUST: when tuning for Atom at -O2
+   (and not optimizing for size), recompute how many times LOOP should
+   be unrolled.  Loops containing many imuls are not unrolled at all.  */
+
+static unsigned
+ix86_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
+{
+  basic_block *bbs;
+  rtx insn;
+  unsigned i;
+  unsigned imul_count = 0;
+
+  if (optimize != 2
+      || optimize_size
+      || ix86_tune != PROCESSOR_ATOM)
+    return nunroll;
+
+  /* Count the imuls in the loop body; unrolling loops that contain them
+     can hurt on Atom.  Iterate up to NEXT_INSN (BB_END) so that the last
+     insn of each block (BB_END itself) is scanned too.  */
+  bbs = get_loop_body (loop);
+  for (i = 0; i < loop->num_nodes; i++)
+    {
+      for (insn = BB_HEAD (bbs[i]);
+           insn != NEXT_INSN (BB_END (bbs[i]));
+           insn = NEXT_INSN (insn))
+	if (INSN_P (insn) && INSN_CODE (insn) != -1)
+          for_each_rtx (&insn, (rtx_function) check_imul, &imul_count);
+    }
+  free (bbs);
+
+  /* Imul-heavy loops gain nothing from unrolling on Atom; disable it.  */
+  if (imul_count >= 5)
+    return 0;
+
+  return nunroll;
+}
+
 /* Return true if a red-zone is in use.  */
 
 static inline bool
@@ -3815,6 +3864,33 @@ ix86_option_override_internal (bool main_args_p)
       && TARGET_SOFTWARE_PREFETCHING_BENEFICIAL)
     flag_prefetch_loop_arrays = 1;
 
+  /* Enable unrolling at -O2 on Atom.  */
+  if (optimize == 2
+      && !optimize_size
+      && ix86_tune == PROCESSOR_ATOM
+      && !global_options_set.x_flag_unroll_loops
+      && !flag_unroll_loops)
+    {
+      int default_max_unrolled_insns = TARGET_64BIT == 0 ? 72 : 150;
+      int default_max_completely_peeled_insns = TARGET_64BIT == 0 ? 150 : 400;
+      flag_unroll_loops = 1;
+      flag_rename_registers = 0;
+      maybe_set_param_value (PARAM_MAX_UNROLL_TIMES,
+			     2,
+			     global_options.x_param_values,
+			     global_options_set.x_param_values);
+      if (!global_options_set.x_param_values[PARAM_MAX_UNROLLED_INSNS])
+	maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS,
+			       default_max_unrolled_insns,
+			       global_options.x_param_values,
+			       global_options_set.x_param_values);
+      if (!global_options_set.x_param_values[PARAM_MAX_COMPLETELY_PEELED_INSNS])
+	maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS,
+			       default_max_completely_peeled_insns,
+			       global_options.x_param_values,
+			       global_options_set.x_param_values);
+    }
+
   /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
      can be optimized to ap = __builtin_next_arg (0).  */
   if (!TARGET_64BIT && !flag_split_stack)
@@ -39258,6 +39334,9 @@ ix86_autovectorize_vector_sizes (void)
 #define TARGET_INIT_LIBFUNCS darwin_rename_builtins
 #endif
 
+#undef TARGET_LOOP_UNROLL_ADJUST
+#define TARGET_LOOP_UNROLL_ADJUST ix86_loop_unroll_adjust
+
 struct gcc_target targetm = TARGET_INITIALIZER;
 
 #include "gt-i386.h"
