In analogy with commits 5af84b82701a and 97df8c12995,
using asynchronous threads can significantly improve
the overall resume time.

This patch applies the same approach to the dpm_complete() phase.
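
For reference (not part of the patch itself), the underlying pattern is
the kernel async API: per-device work is scheduled with async_schedule()
and all outstanding work is flushed with async_synchronize_full(). Below
is a minimal illustrative sketch; complete_one(), complete_all_devices()
and the devices list are hypothetical names, not code from this patch:

  #include <linux/async.h>
  #include <linux/device.h>
  #include <linux/list.h>

  /* Hypothetical per-device work, run in its own async thread. */
  static void complete_one(void *data, async_cookie_t cookie)
  {
          struct device *dev = data;

          /* ... per-device work ... */
          put_device(dev);
  }

  /* Schedule the work for every device, then wait for all of it. */
  static void complete_all_devices(struct list_head *devices)
  {
          struct device *dev;

          list_for_each_entry(dev, devices, power.entry) {
                  get_device(dev);
                  async_schedule(complete_one, dev);
          }
          async_synchronize_full();
  }

The patch follows this shape: async-capable devices are scheduled up
front, synchronous devices are handled in the existing loop, and
async_synchronize_full() at the end waits for everything to finish.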

Signed-off-by: Chuansheng Liu <chuansheng....@intel.com>
Signed-off-by: xiaoming wang <xiaoming.w...@intel.com>
---
 drivers/base/power/main.c |   38 ++++++++++++++++++++++++++++++++++----
 1 files changed, 34 insertions(+), 4 deletions(-)

diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index f9fe1b3..00c4bf1 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -889,14 +889,15 @@ void dpm_resume(pm_message_t state)
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  */
-static void device_complete(struct device *dev, pm_message_t state)
+static void device_complete(struct device *dev, pm_message_t state, bool async)
 {
        void (*callback)(struct device *) = NULL;
        char *info = NULL;
 
        if (dev->power.syscore)
-               return;
+               goto Complete;
 
+       dpm_wait(dev->parent, async);
        device_lock(dev);
 
        if (dev->pm_domain) {
@@ -928,6 +929,17 @@ static void device_complete(struct device *dev, pm_message_t state)
        device_unlock(dev);
 
        pm_runtime_put(dev);
+
+Complete:
+       complete_all(&dev->power.completion);
+}
+
+static void async_complete(void *data, async_cookie_t cookie)
+{
+       struct device *dev = (struct device *)data;
+
+       device_complete(dev, pm_transition, true);
+       put_device(dev);
 }
 
 /**
@@ -940,27 +952,45 @@ static void device_complete(struct device *dev, pm_message_t state)
 void dpm_complete(pm_message_t state)
 {
        struct list_head list;
+       struct device *dev;
 
        trace_suspend_resume(TPS("dpm_complete"), state.event, true);
        might_sleep();
 
        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
+       pm_transition = state;
+
+       /*
+        * Start the async threads upfront, in case their
+        * start would otherwise be delayed by devices
+        * that are completed synchronously.
+        */
+       list_for_each_entry(dev, &dpm_prepared_list, power.entry) {
+               reinit_completion(&dev->power.completion);
+               if (is_async(dev)) {
+                       get_device(dev);
+                       async_schedule(async_complete, dev);
+               }
+       }
+
        while (!list_empty(&dpm_prepared_list)) {
-               struct device *dev = to_device(dpm_prepared_list.prev);
+               dev = to_device(dpm_prepared_list.prev);
 
                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);
 
-               device_complete(dev, state);
+               if (!is_async(dev))
+                       device_complete(dev, state, false);
 
                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
+       async_synchronize_full();
        trace_suspend_resume(TPS("dpm_complete"), state.event, false);
 }
 
-- 
1.7.1
