author     Srikant Patnaik  2015-01-11 12:28:04 +0530
committer  Srikant Patnaik  2015-01-11 12:28:04 +0530
commit     871480933a1c28f8a9fed4c4d34d06c439a7a422 (patch)
tree       8718f573808810c2a1e8cb8fb6ac469093ca2784 /drivers/base/power
parent     9d40ac5867b9aefe0722bc1f110b965ff294d30d (diff)
Moved, renamed, and deleted files
The original directory structure was scattered and unorganized.
These changes reorganize the files to match the standard kernel source layout.
Diffstat (limited to 'drivers/base/power')
-rw-r--r--  drivers/base/power/Makefile               9
-rw-r--r--  drivers/base/power/clock_ops.c          487
-rw-r--r--  drivers/base/power/common.c              87
-rw-r--r--  drivers/base/power/domain.c            1815
-rw-r--r--  drivers/base/power/domain_governor.c    254
-rw-r--r--  drivers/base/power/generic_ops.c        329
-rw-r--r--  drivers/base/power/main.c              1409
-rw-r--r--  drivers/base/power/opp.c                676
-rw-r--r--  drivers/base/power/power.h               87
-rw-r--r--  drivers/base/power/qos.c                513
-rw-r--r--  drivers/base/power/runtime.c           1317
-rw-r--r--  drivers/base/power/sysfs.c              634
-rw-r--r--  drivers/base/power/trace.c              266
-rw-r--r--  drivers/base/power/wakeup.c             997
14 files changed, 8880 insertions, 0 deletions
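For orientation (this note is not part of the commit): clock_ops.c below introduces the pm_clk_* helpers, which let a subsystem or platform driver hand a device's clocks to the PM core so that pm_clk_suspend() and pm_clk_resume() can gate them across runtime PM transitions. A minimal sketch of the intended call sequence, assuming a hypothetical platform driver "foo" whose functional clock has the made-up connection ID "fck":

```c
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>

/* Hypothetical probe: register the device's functional clock with the
 * PM core so runtime PM transitions gate it automatically. */
static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* Allocate power.subsys_data and init the PM clock list. */
	ret = pm_clk_create(&pdev->dev);
	if (ret)
		return ret;

	/* "fck" is an assumed con_id looked up via clk_get(). */
	ret = pm_clk_add(&pdev->dev, "fck");
	if (ret) {
		pm_clk_destroy(&pdev->dev);
		return ret;
	}

	pm_runtime_enable(&pdev->dev);
	return 0;
}
```

Wiring pm_clk_suspend()/pm_clk_resume() into the runtime PM path is done through a PM domain's dev_pm_ops; alternatively, pm_clk_add_notifier() attaches a pm_clk_notifier_block to a bus type so that clocks are picked up automatically as devices are added to the bus.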
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile new file mode 100644 index 00000000..2e58ebb1 --- /dev/null +++ b/drivers/base/power/Makefile @@ -0,0 +1,9 @@ +obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o +obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o +obj-$(CONFIG_PM_RUNTIME) += runtime.o +obj-$(CONFIG_PM_TRACE_RTC) += trace.o +obj-$(CONFIG_PM_OPP) += opp.o +obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o +obj-$(CONFIG_HAVE_CLK) += clock_ops.o + +ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c new file mode 100644 index 00000000..869d7ff2 --- /dev/null +++ b/drivers/base/power/clock_ops.c @@ -0,0 +1,487 @@ +/* + * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks + * + * Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. + * + * This file is released under the GPLv2. + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/pm.h> +#include <linux/pm_clock.h> +#include <linux/clk.h> +#include <linux/slab.h> +#include <linux/err.h> + +#ifdef CONFIG_PM + +enum pce_status { + PCE_STATUS_NONE = 0, + PCE_STATUS_ACQUIRED, + PCE_STATUS_ENABLED, + PCE_STATUS_ERROR, +}; + +struct pm_clock_entry { + struct list_head node; + char *con_id; + struct clk *clk; + enum pce_status status; +}; + +/** + * pm_clk_acquire - Acquire a device clock. + * @dev: Device whose clock is to be acquired. + * @ce: PM clock entry corresponding to the clock. + */ +static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) +{ + ce->clk = clk_get(dev, ce->con_id); + if (IS_ERR(ce->clk)) { + ce->status = PCE_STATUS_ERROR; + } else { + ce->status = PCE_STATUS_ACQUIRED; + dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); + } +} + +/** + * pm_clk_add - Start using a device clock for power management. + * @dev: Device whose clock is going to be used for power management. + * @con_id: Connection ID of the clock. + * + * Add the clock represented by @con_id to the list of clocks used for + * the power management of @dev. + */ +int pm_clk_add(struct device *dev, const char *con_id) +{ + struct pm_subsys_data *psd = dev_to_psd(dev); + struct pm_clock_entry *ce; + + if (!psd) + return -EINVAL; + + ce = kzalloc(sizeof(*ce), GFP_KERNEL); + if (!ce) { + dev_err(dev, "Not enough memory for clock entry.\n"); + return -ENOMEM; + } + + if (con_id) { + ce->con_id = kstrdup(con_id, GFP_KERNEL); + if (!ce->con_id) { + dev_err(dev, + "Not enough memory for clock connection ID.\n"); + kfree(ce); + return -ENOMEM; + } + } + + pm_clk_acquire(dev, ce); + + spin_lock_irq(&psd->lock); + list_add_tail(&ce->node, &psd->clock_list); + spin_unlock_irq(&psd->lock); + return 0; +} + +/** + * __pm_clk_remove - Destroy PM clock entry. + * @ce: PM clock entry to destroy. + */ +static void __pm_clk_remove(struct pm_clock_entry *ce) +{ + if (!ce) + return; + + if (ce->status < PCE_STATUS_ERROR) { + if (ce->status == PCE_STATUS_ENABLED) + clk_disable(ce->clk); + + if (ce->status >= PCE_STATUS_ACQUIRED) + clk_put(ce->clk); + } + + kfree(ce->con_id); + kfree(ce); +} + +/** + * pm_clk_remove - Stop using a device clock for power management. + * @dev: Device whose clock should not be used for PM any more. + * @con_id: Connection ID of the clock. + * + * Remove the clock represented by @con_id from the list of clocks used for + * the power management of @dev. 
+ */ +void pm_clk_remove(struct device *dev, const char *con_id) +{ + struct pm_subsys_data *psd = dev_to_psd(dev); + struct pm_clock_entry *ce; + + if (!psd) + return; + + spin_lock_irq(&psd->lock); + + list_for_each_entry(ce, &psd->clock_list, node) { + if (!con_id && !ce->con_id) + goto remove; + else if (!con_id || !ce->con_id) + continue; + else if (!strcmp(con_id, ce->con_id)) + goto remove; + } + + spin_unlock_irq(&psd->lock); + return; + + remove: + list_del(&ce->node); + spin_unlock_irq(&psd->lock); + + __pm_clk_remove(ce); +} + +/** + * pm_clk_init - Initialize a device's list of power management clocks. + * @dev: Device to initialize the list of PM clocks for. + * + * Initialize the lock and clock_list members of the device's pm_subsys_data + * object. + */ +void pm_clk_init(struct device *dev) +{ + struct pm_subsys_data *psd = dev_to_psd(dev); + if (psd) + INIT_LIST_HEAD(&psd->clock_list); +} + +/** + * pm_clk_create - Create and initialize a device's list of PM clocks. + * @dev: Device to create and initialize the list of PM clocks for. + * + * Allocate a struct pm_subsys_data object, initialize its lock and clock_list + * members and make the @dev's power.subsys_data field point to it. + */ +int pm_clk_create(struct device *dev) +{ + int ret = dev_pm_get_subsys_data(dev); + return ret < 0 ? ret : 0; +} + +/** + * pm_clk_destroy - Destroy a device's list of power management clocks. + * @dev: Device to destroy the list of PM clocks for. + * + * Clear the @dev's power.subsys_data field, remove the list of clock entries + * from the struct pm_subsys_data object pointed to by it before and free + * that object. + */ +void pm_clk_destroy(struct device *dev) +{ + struct pm_subsys_data *psd = dev_to_psd(dev); + struct pm_clock_entry *ce, *c; + struct list_head list; + + if (!psd) + return; + + INIT_LIST_HEAD(&list); + + spin_lock_irq(&psd->lock); + + list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node) + list_move(&ce->node, &list); + + spin_unlock_irq(&psd->lock); + + dev_pm_put_subsys_data(dev); + + list_for_each_entry_safe_reverse(ce, c, &list, node) { + list_del(&ce->node); + __pm_clk_remove(ce); + } +} + +#endif /* CONFIG_PM */ + +#ifdef CONFIG_PM_RUNTIME + +/** + * pm_clk_suspend - Disable clocks in a device's PM clock list. + * @dev: Device to disable the clocks for. + */ +int pm_clk_suspend(struct device *dev) +{ + struct pm_subsys_data *psd = dev_to_psd(dev); + struct pm_clock_entry *ce; + unsigned long flags; + + dev_dbg(dev, "%s()\n", __func__); + + if (!psd) + return 0; + + spin_lock_irqsave(&psd->lock, flags); + + list_for_each_entry_reverse(ce, &psd->clock_list, node) { + if (ce->status < PCE_STATUS_ERROR) { + if (ce->status == PCE_STATUS_ENABLED) + clk_disable(ce->clk); + ce->status = PCE_STATUS_ACQUIRED; + } + } + + spin_unlock_irqrestore(&psd->lock, flags); + + return 0; +} + +/** + * pm_clk_resume - Enable clocks in a device's PM clock list. + * @dev: Device to enable the clocks for. + */ +int pm_clk_resume(struct device *dev) +{ + struct pm_subsys_data *psd = dev_to_psd(dev); + struct pm_clock_entry *ce; + unsigned long flags; + + dev_dbg(dev, "%s()\n", __func__); + + if (!psd) + return 0; + + spin_lock_irqsave(&psd->lock, flags); + + list_for_each_entry(ce, &psd->clock_list, node) { + if (ce->status < PCE_STATUS_ERROR) { + clk_enable(ce->clk); + ce->status = PCE_STATUS_ENABLED; + } + } + + spin_unlock_irqrestore(&psd->lock, flags); + + return 0; +} + +/** + * pm_clk_notify - Notify routine for device addition and removal. 
+ * @nb: Notifier block object this function is a member of. + * @action: Operation being carried out by the caller. + * @data: Device the routine is being run for. + * + * For this function to work, @nb must be a member of an object of type + * struct pm_clk_notifier_block containing all of the requisite data. + * Specifically, the pm_domain member of that object is copied to the device's + * pm_domain field and its con_ids member is used to populate the device's list + * of PM clocks, depending on @action. + * + * If the device's pm_domain field is already populated with a value different + * from the one stored in the struct pm_clk_notifier_block object, the function + * does nothing. + */ +static int pm_clk_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct pm_clk_notifier_block *clknb; + struct device *dev = data; + char **con_id; + int error; + + dev_dbg(dev, "%s() %ld\n", __func__, action); + + clknb = container_of(nb, struct pm_clk_notifier_block, nb); + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + if (dev->pm_domain) + break; + + error = pm_clk_create(dev); + if (error) + break; + + dev->pm_domain = clknb->pm_domain; + if (clknb->con_ids[0]) { + for (con_id = clknb->con_ids; *con_id; con_id++) + pm_clk_add(dev, *con_id); + } else { + pm_clk_add(dev, NULL); + } + + break; + case BUS_NOTIFY_DEL_DEVICE: + if (dev->pm_domain != clknb->pm_domain) + break; + + dev->pm_domain = NULL; + pm_clk_destroy(dev); + break; + } + + return 0; +} + +#else /* !CONFIG_PM_RUNTIME */ + +#ifdef CONFIG_PM + +/** + * pm_clk_suspend - Disable clocks in a device's PM clock list. + * @dev: Device to disable the clocks for. + */ +int pm_clk_suspend(struct device *dev) +{ + struct pm_subsys_data *psd = dev_to_psd(dev); + struct pm_clock_entry *ce; + unsigned long flags; + + dev_dbg(dev, "%s()\n", __func__); + + /* If there is no driver, the clocks are already disabled. */ + if (!psd || !dev->driver) + return 0; + + spin_lock_irqsave(&psd->lock, flags); + + list_for_each_entry_reverse(ce, &psd->clock_list, node) + clk_disable(ce->clk); + + spin_unlock_irqrestore(&psd->lock, flags); + + return 0; +} + +/** + * pm_clk_resume - Enable clocks in a device's PM clock list. + * @dev: Device to enable the clocks for. + */ +int pm_clk_resume(struct device *dev) +{ + struct pm_subsys_data *psd = dev_to_psd(dev); + struct pm_clock_entry *ce; + unsigned long flags; + + dev_dbg(dev, "%s()\n", __func__); + + /* If there is no driver, the clocks should remain disabled. */ + if (!psd || !dev->driver) + return 0; + + spin_lock_irqsave(&psd->lock, flags); + + list_for_each_entry(ce, &psd->clock_list, node) + clk_enable(ce->clk); + + spin_unlock_irqrestore(&psd->lock, flags); + + return 0; +} + +#endif /* CONFIG_PM */ + +/** + * enable_clock - Enable a device clock. + * @dev: Device whose clock is to be enabled. + * @con_id: Connection ID of the clock. + */ +static void enable_clock(struct device *dev, const char *con_id) +{ + struct clk *clk; + + clk = clk_get(dev, con_id); + if (!IS_ERR(clk)) { + clk_enable(clk); + clk_put(clk); + dev_info(dev, "Runtime PM disabled, clock forced on.\n"); + } +} + +/** + * disable_clock - Disable a device clock. + * @dev: Device whose clock is to be disabled. + * @con_id: Connection ID of the clock. 
+ */ +static void disable_clock(struct device *dev, const char *con_id) +{ + struct clk *clk; + + clk = clk_get(dev, con_id); + if (!IS_ERR(clk)) { + clk_disable(clk); + clk_put(clk); + dev_info(dev, "Runtime PM disabled, clock forced off.\n"); + } +} + +/** + * pm_clk_notify - Notify routine for device addition and removal. + * @nb: Notifier block object this function is a member of. + * @action: Operation being carried out by the caller. + * @data: Device the routine is being run for. + * + * For this function to work, @nb must be a member of an object of type + * struct pm_clk_notifier_block containing all of the requisite data. + * Specifically, the con_ids member of that object is used to enable or disable + * the device's clocks, depending on @action. + */ +static int pm_clk_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct pm_clk_notifier_block *clknb; + struct device *dev = data; + char **con_id; + + dev_dbg(dev, "%s() %ld\n", __func__, action); + + clknb = container_of(nb, struct pm_clk_notifier_block, nb); + + switch (action) { + case BUS_NOTIFY_BIND_DRIVER: + if (clknb->con_ids[0]) { + for (con_id = clknb->con_ids; *con_id; con_id++) + enable_clock(dev, *con_id); + } else { + enable_clock(dev, NULL); + } + break; + case BUS_NOTIFY_UNBOUND_DRIVER: + if (clknb->con_ids[0]) { + for (con_id = clknb->con_ids; *con_id; con_id++) + disable_clock(dev, *con_id); + } else { + disable_clock(dev, NULL); + } + break; + } + + return 0; +} + +#endif /* !CONFIG_PM_RUNTIME */ + +/** + * pm_clk_add_notifier - Add bus type notifier for power management clocks. + * @bus: Bus type to add the notifier to. + * @clknb: Notifier to be added to the given bus type. + * + * The nb member of @clknb is not expected to be initialized and its + * notifier_call member will be replaced with pm_clk_notify(). However, + * the remaining members of @clknb should be populated prior to calling this + * routine. + */ +void pm_clk_add_notifier(struct bus_type *bus, + struct pm_clk_notifier_block *clknb) +{ + if (!bus || !clknb) + return; + + clknb->nb.notifier_call = pm_clk_notify; + bus_register_notifier(bus, &clknb->nb); +} diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c new file mode 100644 index 00000000..a14085cc --- /dev/null +++ b/drivers/base/power/common.c @@ -0,0 +1,87 @@ +/* + * drivers/base/power/common.c - Common device power management code. + * + * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. + * + * This file is released under the GPLv2. + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/export.h> +#include <linux/slab.h> +#include <linux/pm_clock.h> + +/** + * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device. + * @dev: Device to handle. + * + * If power.subsys_data is NULL, point it to a new object, otherwise increment + * its reference counter. Return 1 if a new object has been created, otherwise + * return 0 or error code. + */ +int dev_pm_get_subsys_data(struct device *dev) +{ + struct pm_subsys_data *psd; + int ret = 0; + + psd = kzalloc(sizeof(*psd), GFP_KERNEL); + if (!psd) + return -ENOMEM; + + spin_lock_irq(&dev->power.lock); + + if (dev->power.subsys_data) { + dev->power.subsys_data->refcount++; + } else { + spin_lock_init(&psd->lock); + psd->refcount = 1; + dev->power.subsys_data = psd; + pm_clk_init(dev); + psd = NULL; + ret = 1; + } + + spin_unlock_irq(&dev->power.lock); + + /* kfree() verifies that its argument is nonzero. 
*/ + kfree(psd); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data); + +/** + * dev_pm_put_subsys_data - Drop reference to power.subsys_data. + * @dev: Device to handle. + * + * If the reference counter of power.subsys_data is zero after dropping the + * reference, power.subsys_data is removed. Return 1 if that happens or 0 + * otherwise. + */ +int dev_pm_put_subsys_data(struct device *dev) +{ + struct pm_subsys_data *psd; + int ret = 0; + + spin_lock_irq(&dev->power.lock); + + psd = dev_to_psd(dev); + if (!psd) { + ret = -EINVAL; + goto out; + } + + if (--psd->refcount == 0) { + dev->power.subsys_data = NULL; + kfree(psd); + ret = 1; + } + + out: + spin_unlock_irq(&dev->power.lock); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c new file mode 100644 index 00000000..83aa694a --- /dev/null +++ b/drivers/base/power/domain.c @@ -0,0 +1,1815 @@ +/* + * drivers/base/power/domain.c - Common code related to device power domains. + * + * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. + * + * This file is released under the GPLv2. + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/pm_runtime.h> +#include <linux/pm_domain.h> +#include <linux/pm_qos.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/sched.h> +#include <linux/suspend.h> +#include <linux/export.h> + +#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \ +({ \ + type (*__routine)(struct device *__d); \ + type __ret = (type)0; \ + \ + __routine = genpd->dev_ops.callback; \ + if (__routine) { \ + __ret = __routine(dev); \ + } else { \ + __routine = dev_gpd_data(dev)->ops.callback; \ + if (__routine) \ + __ret = __routine(dev); \ + } \ + __ret; \ +}) + +#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \ +({ \ + ktime_t __start = ktime_get(); \ + type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \ + s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \ + struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \ + if (!__retval && __elapsed > __td->field) { \ + __td->field = __elapsed; \ + dev_warn(dev, name " latency exceeded, new value %lld ns\n", \ + __elapsed); \ + genpd->max_off_time_changed = true; \ + __td->constraint_changed = true; \ + } \ + __retval; \ +}) + +static LIST_HEAD(gpd_list); +static DEFINE_MUTEX(gpd_list_lock); + +#ifdef CONFIG_PM + +struct generic_pm_domain *dev_to_genpd(struct device *dev) +{ + if (IS_ERR_OR_NULL(dev->pm_domain)) + return ERR_PTR(-EINVAL); + + return pd_to_genpd(dev->pm_domain); +} + +static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev, + stop_latency_ns, "stop"); +} + +static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev, + start_latency_ns, "start"); +} + +static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, + save_state_latency_ns, "state save"); +} + +static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev, + restore_state_latency_ns, + "state restore"); +} + +static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) +{ + bool ret = false; + + if (!WARN_ON(atomic_read(&genpd->sd_count) == 0)) + 
ret = !!atomic_dec_and_test(&genpd->sd_count); + + return ret; +} + +static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) +{ + atomic_inc(&genpd->sd_count); + smp_mb__after_atomic_inc(); +} + +static void genpd_acquire_lock(struct generic_pm_domain *genpd) +{ + DEFINE_WAIT(wait); + + mutex_lock(&genpd->lock); + /* + * Wait for the domain to transition into either the active, + * or the power off state. + */ + for (;;) { + prepare_to_wait(&genpd->status_wait_queue, &wait, + TASK_UNINTERRUPTIBLE); + if (genpd->status == GPD_STATE_ACTIVE + || genpd->status == GPD_STATE_POWER_OFF) + break; + mutex_unlock(&genpd->lock); + + schedule(); + + mutex_lock(&genpd->lock); + } + finish_wait(&genpd->status_wait_queue, &wait); +} + +static void genpd_release_lock(struct generic_pm_domain *genpd) +{ + mutex_unlock(&genpd->lock); +} + +static void genpd_set_active(struct generic_pm_domain *genpd) +{ + if (genpd->resume_count == 0) + genpd->status = GPD_STATE_ACTIVE; +} + +/** + * __pm_genpd_poweron - Restore power to a given PM domain and its masters. + * @genpd: PM domain to power up. + * + * Restore power to @genpd and all of its masters so that it is possible to + * resume a device belonging to it. + */ +int __pm_genpd_poweron(struct generic_pm_domain *genpd) + __releases(&genpd->lock) __acquires(&genpd->lock) +{ + struct gpd_link *link; + DEFINE_WAIT(wait); + int ret = 0; + + /* If the domain's master is being waited for, we have to wait too. */ + for (;;) { + prepare_to_wait(&genpd->status_wait_queue, &wait, + TASK_UNINTERRUPTIBLE); + if (genpd->status != GPD_STATE_WAIT_MASTER) + break; + mutex_unlock(&genpd->lock); + + schedule(); + + mutex_lock(&genpd->lock); + } + finish_wait(&genpd->status_wait_queue, &wait); + + if (genpd->status == GPD_STATE_ACTIVE + || (genpd->prepared_count > 0 && genpd->suspend_power_off)) + return 0; + + if (genpd->status != GPD_STATE_POWER_OFF) { + genpd_set_active(genpd); + return 0; + } + + /* + * The list is guaranteed not to change while the loop below is being + * executed, unless one of the masters' .power_on() callbacks fiddles + * with it. + */ + list_for_each_entry(link, &genpd->slave_links, slave_node) { + genpd_sd_counter_inc(link->master); + genpd->status = GPD_STATE_WAIT_MASTER; + + mutex_unlock(&genpd->lock); + + ret = pm_genpd_poweron(link->master); + + mutex_lock(&genpd->lock); + + /* + * The "wait for parent" status is guaranteed not to change + * while the master is powering on. + */ + genpd->status = GPD_STATE_POWER_OFF; + wake_up_all(&genpd->status_wait_queue); + if (ret) { + genpd_sd_counter_dec(link->master); + goto err; + } + } + + if (genpd->power_on) { + ktime_t time_start = ktime_get(); + s64 elapsed_ns; + + ret = genpd->power_on(genpd); + if (ret) + goto err; + + elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); + if (elapsed_ns > genpd->power_on_latency_ns) { + genpd->power_on_latency_ns = elapsed_ns; + genpd->max_off_time_changed = true; + if (genpd->name) + pr_warning("%s: Power-on latency exceeded, " + "new value %lld ns\n", genpd->name, + elapsed_ns); + } + } + + genpd_set_active(genpd); + + return 0; + + err: + list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node) + genpd_sd_counter_dec(link->master); + + return ret; +} + +/** + * pm_genpd_poweron - Restore power to a given PM domain and its masters. + * @genpd: PM domain to power up. 
+ */ +int pm_genpd_poweron(struct generic_pm_domain *genpd) +{ + int ret; + + mutex_lock(&genpd->lock); + ret = __pm_genpd_poweron(genpd); + mutex_unlock(&genpd->lock); + return ret; +} + +#endif /* CONFIG_PM */ + +#ifdef CONFIG_PM_RUNTIME + +static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, + unsigned long val, void *ptr) +{ + struct generic_pm_domain_data *gpd_data; + struct device *dev; + + gpd_data = container_of(nb, struct generic_pm_domain_data, nb); + + mutex_lock(&gpd_data->lock); + dev = gpd_data->base.dev; + if (!dev) { + mutex_unlock(&gpd_data->lock); + return NOTIFY_DONE; + } + mutex_unlock(&gpd_data->lock); + + for (;;) { + struct generic_pm_domain *genpd; + struct pm_domain_data *pdd; + + spin_lock_irq(&dev->power.lock); + + pdd = dev->power.subsys_data ? + dev->power.subsys_data->domain_data : NULL; + if (pdd) { + to_gpd_data(pdd)->td.constraint_changed = true; + genpd = dev_to_genpd(dev); + } else { + genpd = ERR_PTR(-ENODATA); + } + + spin_unlock_irq(&dev->power.lock); + + if (!IS_ERR(genpd)) { + mutex_lock(&genpd->lock); + genpd->max_off_time_changed = true; + mutex_unlock(&genpd->lock); + } + + dev = dev->parent; + if (!dev || dev->power.ignore_children) + break; + } + + return NOTIFY_DONE; +} + +/** + * __pm_genpd_save_device - Save the pre-suspend state of a device. + * @pdd: Domain data of the device to save the state of. + * @genpd: PM domain the device belongs to. + */ +static int __pm_genpd_save_device(struct pm_domain_data *pdd, + struct generic_pm_domain *genpd) + __releases(&genpd->lock) __acquires(&genpd->lock) +{ + struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); + struct device *dev = pdd->dev; + int ret = 0; + + if (gpd_data->need_restore) + return 0; + + mutex_unlock(&genpd->lock); + + genpd_start_dev(genpd, dev); + ret = genpd_save_dev(genpd, dev); + genpd_stop_dev(genpd, dev); + + mutex_lock(&genpd->lock); + + if (!ret) + gpd_data->need_restore = true; + + return ret; +} + +/** + * __pm_genpd_restore_device - Restore the pre-suspend state of a device. + * @pdd: Domain data of the device to restore the state of. + * @genpd: PM domain the device belongs to. + */ +static void __pm_genpd_restore_device(struct pm_domain_data *pdd, + struct generic_pm_domain *genpd) + __releases(&genpd->lock) __acquires(&genpd->lock) +{ + struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); + struct device *dev = pdd->dev; + + if (!gpd_data->need_restore) + return; + + mutex_unlock(&genpd->lock); + + genpd_start_dev(genpd, dev); + genpd_restore_dev(genpd, dev); + genpd_stop_dev(genpd, dev); + + mutex_lock(&genpd->lock); + + gpd_data->need_restore = false; +} + +/** + * genpd_abort_poweroff - Check if a PM domain power off should be aborted. + * @genpd: PM domain to check. + * + * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during + * a "power off" operation, which means that a "power on" has occured in the + * meantime, or if its resume_count field is different from zero, which means + * that one of its devices has been resumed in the meantime. + */ +static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) +{ + return genpd->status == GPD_STATE_WAIT_MASTER + || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0; +} + +/** + * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff(). + * @genpd: PM domait to power off. + * + * Queue up the execution of pm_genpd_poweroff() unless it's already been done + * before. 
+ */ +void genpd_queue_power_off_work(struct generic_pm_domain *genpd) +{ + if (!work_pending(&genpd->power_off_work)) + queue_work(pm_wq, &genpd->power_off_work); +} + +/** + * pm_genpd_poweroff - Remove power from a given PM domain. + * @genpd: PM domain to power down. + * + * If all of the @genpd's devices have been suspended and all of its subdomains + * have been powered down, run the runtime suspend callbacks provided by all of + * the @genpd's devices' drivers and remove power from @genpd. + */ +static int pm_genpd_poweroff(struct generic_pm_domain *genpd) + __releases(&genpd->lock) __acquires(&genpd->lock) +{ + struct pm_domain_data *pdd; + struct gpd_link *link; + unsigned int not_suspended; + int ret = 0; + + start: + /* + * Do not try to power off the domain in the following situations: + * (1) The domain is already in the "power off" state. + * (2) The domain is waiting for its master to power up. + * (3) One of the domain's devices is being resumed right now. + * (4) System suspend is in progress. + */ + if (genpd->status == GPD_STATE_POWER_OFF + || genpd->status == GPD_STATE_WAIT_MASTER + || genpd->resume_count > 0 || genpd->prepared_count > 0) + return 0; + + if (atomic_read(&genpd->sd_count) > 0) + return -EBUSY; + + not_suspended = 0; + list_for_each_entry(pdd, &genpd->dev_list, list_node) + if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) + || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on)) + not_suspended++; + + if (not_suspended > genpd->in_progress) + return -EBUSY; + + if (genpd->poweroff_task) { + /* + * Another instance of pm_genpd_poweroff() is executing + * callbacks, so tell it to start over and return. + */ + genpd->status = GPD_STATE_REPEAT; + return 0; + } + + if (genpd->gov && genpd->gov->power_down_ok) { + if (!genpd->gov->power_down_ok(&genpd->domain)) + return -EAGAIN; + } + + genpd->status = GPD_STATE_BUSY; + genpd->poweroff_task = current; + + list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { + ret = atomic_read(&genpd->sd_count) == 0 ? + __pm_genpd_save_device(pdd, genpd) : -EBUSY; + + if (genpd_abort_poweroff(genpd)) + goto out; + + if (ret) { + genpd_set_active(genpd); + goto out; + } + + if (genpd->status == GPD_STATE_REPEAT) { + genpd->poweroff_task = NULL; + goto start; + } + } + + if (genpd->power_off) { + ktime_t time_start; + s64 elapsed_ns; + + if (atomic_read(&genpd->sd_count) > 0) { + ret = -EBUSY; + goto out; + } + + time_start = ktime_get(); + + /* + * If sd_count > 0 at this point, one of the subdomains hasn't + * managed to call pm_genpd_poweron() for the master yet after + * incrementing it. In that case pm_genpd_poweron() will wait + * for us to drop the lock, so we can call .power_off() and let + * the pm_genpd_poweron() restore power for us (this shouldn't + * happen very often). 
+ */ + ret = genpd->power_off(genpd); + if (ret == -EBUSY) { + genpd_set_active(genpd); + goto out; + } + + elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); + if (elapsed_ns > genpd->power_off_latency_ns) { + genpd->power_off_latency_ns = elapsed_ns; + genpd->max_off_time_changed = true; + if (genpd->name) + pr_warning("%s: Power-off latency exceeded, " + "new value %lld ns\n", genpd->name, + elapsed_ns); + } + } + + genpd->status = GPD_STATE_POWER_OFF; + + list_for_each_entry(link, &genpd->slave_links, slave_node) { + genpd_sd_counter_dec(link->master); + genpd_queue_power_off_work(link->master); + } + + out: + genpd->poweroff_task = NULL; + wake_up_all(&genpd->status_wait_queue); + return ret; +} + +/** + * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0. + * @work: Work structure used for scheduling the execution of this function. + */ +static void genpd_power_off_work_fn(struct work_struct *work) +{ + struct generic_pm_domain *genpd; + + genpd = container_of(work, struct generic_pm_domain, power_off_work); + + genpd_acquire_lock(genpd); + pm_genpd_poweroff(genpd); + genpd_release_lock(genpd); +} + +/** + * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain. + * @dev: Device to suspend. + * + * Carry out a runtime suspend of a device under the assumption that its + * pm_domain field points to the domain member of an object of type + * struct generic_pm_domain representing a PM domain consisting of I/O devices. + */ +static int pm_genpd_runtime_suspend(struct device *dev) +{ + struct generic_pm_domain *genpd; + bool (*stop_ok)(struct device *__dev); + int ret; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + might_sleep_if(!genpd->dev_irq_safe); + + if (dev_gpd_data(dev)->always_on) + return -EBUSY; + + stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; + if (stop_ok && !stop_ok(dev)) + return -EBUSY; + + ret = genpd_stop_dev(genpd, dev); + if (ret) + return ret; + + /* + * If power.irq_safe is set, this routine will be run with interrupts + * off, so it can't use mutexes. + */ + if (dev->power.irq_safe) + return 0; + + mutex_lock(&genpd->lock); + genpd->in_progress++; + pm_genpd_poweroff(genpd); + genpd->in_progress--; + mutex_unlock(&genpd->lock); + + return 0; +} + +/** + * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. + * @dev: Device to resume. + * + * Carry out a runtime resume of a device under the assumption that its + * pm_domain field points to the domain member of an object of type + * struct generic_pm_domain representing a PM domain consisting of I/O devices. + */ +static int pm_genpd_runtime_resume(struct device *dev) +{ + struct generic_pm_domain *genpd; + DEFINE_WAIT(wait); + int ret; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + might_sleep_if(!genpd->dev_irq_safe); + + /* If power.irq_safe, the PM domain is never powered off. */ + if (dev->power.irq_safe) + goto out; + + mutex_lock(&genpd->lock); + ret = __pm_genpd_poweron(genpd); + if (ret) { + mutex_unlock(&genpd->lock); + return ret; + } + genpd->status = GPD_STATE_BUSY; + genpd->resume_count++; + for (;;) { + prepare_to_wait(&genpd->status_wait_queue, &wait, + TASK_UNINTERRUPTIBLE); + /* + * If current is the powering off task, we have been called + * reentrantly from one of the device callbacks, so we should + * not wait. 
+ */ + if (!genpd->poweroff_task || genpd->poweroff_task == current) + break; + mutex_unlock(&genpd->lock); + + schedule(); + + mutex_lock(&genpd->lock); + } + finish_wait(&genpd->status_wait_queue, &wait); + __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd); + genpd->resume_count--; + genpd_set_active(genpd); + wake_up_all(&genpd->status_wait_queue); + mutex_unlock(&genpd->lock); + + out: + genpd_start_dev(genpd, dev); + + return 0; +} + +/** + * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use. + */ +void pm_genpd_poweroff_unused(void) +{ + struct generic_pm_domain *genpd; + + mutex_lock(&gpd_list_lock); + + list_for_each_entry(genpd, &gpd_list, gpd_list_node) + genpd_queue_power_off_work(genpd); + + mutex_unlock(&gpd_list_lock); +} + +#else + +static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb, + unsigned long val, void *ptr) +{ + return NOTIFY_DONE; +} + +static inline void genpd_power_off_work_fn(struct work_struct *work) {} + +#define pm_genpd_runtime_suspend NULL +#define pm_genpd_runtime_resume NULL + +#endif /* CONFIG_PM_RUNTIME */ + +#ifdef CONFIG_PM_SLEEP + +static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, + struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev); +} + +static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, suspend, dev); +} + +static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev); +} + +static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev); +} + +static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, resume, dev); +} + +static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, freeze, dev); +} + +static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev); +} + +static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev); +} + +static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, thaw, dev); +} + +/** + * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. + * @genpd: PM domain to power off, if possible. + * + * Check if the given PM domain can be powered off (during system suspend or + * hibernation) and do that if so. Also, in that case propagate to its masters. + * + * This function is only called in "noirq" stages of system power transitions, + * so it need not acquire locks (all of the "noirq" callbacks are executed + * sequentially, so it is guaranteed that it will never run twice in parallel). 
+ */ +static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) +{ + struct gpd_link *link; + + if (genpd->status == GPD_STATE_POWER_OFF) + return; + + if (genpd->suspended_count != genpd->device_count + || atomic_read(&genpd->sd_count) > 0) + return; + + if (genpd->power_off) + genpd->power_off(genpd); + + genpd->status = GPD_STATE_POWER_OFF; + + list_for_each_entry(link, &genpd->slave_links, slave_node) { + genpd_sd_counter_dec(link->master); + pm_genpd_sync_poweroff(link->master); + } +} + +/** + * resume_needed - Check whether to resume a device before system suspend. + * @dev: Device to check. + * @genpd: PM domain the device belongs to. + * + * There are two cases in which a device that can wake up the system from sleep + * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled + * to wake up the system and it has to remain active for this purpose while the + * system is in the sleep state and (2) if the device is not enabled to wake up + * the system from sleep states and it generally doesn't generate wakeup signals + * by itself (those signals are generated on its behalf by other parts of the + * system). In the latter case it may be necessary to reconfigure the device's + * wakeup settings during system suspend, because it may have been set up to + * signal remote wakeup from the system's working state as needed by runtime PM. + * Return 'true' in either of the above cases. + */ +static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd) +{ + bool active_wakeup; + + if (!device_can_wakeup(dev)) + return false; + + active_wakeup = genpd_dev_active_wakeup(genpd, dev); + return device_may_wakeup(dev) ? active_wakeup : !active_wakeup; +} + +/** + * pm_genpd_prepare - Start power transition of a device in a PM domain. + * @dev: Device to start the transition of. + * + * Start a power transition of a device (during a system-wide power transition) + * under the assumption that its pm_domain field points to the domain member of + * an object of type struct generic_pm_domain representing a PM domain + * consisting of I/O devices. + */ +static int pm_genpd_prepare(struct device *dev) +{ + struct generic_pm_domain *genpd; + int ret; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + /* + * If a wakeup request is pending for the device, it should be woken up + * at this point and a system wakeup event should be reported if it's + * set up to wake up the system from sleep states. + */ + pm_runtime_get_noresume(dev); + if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) + pm_wakeup_event(dev, 0); + + if (pm_wakeup_pending()) { + pm_runtime_put_sync(dev); + return -EBUSY; + } + + if (resume_needed(dev, genpd)) + pm_runtime_resume(dev); + + genpd_acquire_lock(genpd); + + if (genpd->prepared_count++ == 0) { + genpd->suspended_count = 0; + genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF; + } + + genpd_release_lock(genpd); + + if (genpd->suspend_power_off) { + pm_runtime_put_noidle(dev); + return 0; + } + + /* + * The PM domain must be in the GPD_STATE_ACTIVE state at this point, + * so pm_genpd_poweron() will return immediately, but if the device + * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need + * to make it operational. 
+ */ + pm_runtime_resume(dev); + __pm_runtime_disable(dev, false); + + ret = pm_generic_prepare(dev); + if (ret) { + mutex_lock(&genpd->lock); + + if (--genpd->prepared_count == 0) + genpd->suspend_power_off = false; + + mutex_unlock(&genpd->lock); + pm_runtime_enable(dev); + } + + pm_runtime_put_sync(dev); + return ret; +} + +/** + * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain. + * @dev: Device to suspend. + * + * Suspend a device under the assumption that its pm_domain field points to the + * domain member of an object of type struct generic_pm_domain representing + * a PM domain consisting of I/O devices. + */ +static int pm_genpd_suspend(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev); +} + +/** + * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain. + * @dev: Device to suspend. + * + * Carry out a late suspend of a device under the assumption that its + * pm_domain field points to the domain member of an object of type + * struct generic_pm_domain representing a PM domain consisting of I/O devices. + */ +static int pm_genpd_suspend_late(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev); +} + +/** + * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain. + * @dev: Device to suspend. + * + * Stop the device and remove power from the domain if all devices in it have + * been stopped. + */ +static int pm_genpd_suspend_noirq(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on + || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) + return 0; + + genpd_stop_dev(genpd, dev); + + /* + * Since all of the "noirq" callbacks are executed sequentially, it is + * guaranteed that this function will never run twice in parallel for + * the same PM domain, so it is not necessary to use locking here. + */ + genpd->suspended_count++; + pm_genpd_sync_poweroff(genpd); + + return 0; +} + +/** + * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain. + * @dev: Device to resume. + * + * Restore power to the device's PM domain, if necessary, and start the device. + */ +static int pm_genpd_resume_noirq(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on + || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) + return 0; + + /* + * Since all of the "noirq" callbacks are executed sequentially, it is + * guaranteed that this function will never run twice in parallel for + * the same PM domain, so it is not necessary to use locking here. + */ + pm_genpd_poweron(genpd); + genpd->suspended_count--; + + return genpd_start_dev(genpd, dev); +} + +/** + * pm_genpd_resume_early - Early resume of a device in an I/O PM domain. + * @dev: Device to resume. 
+ * + * Carry out an early resume of a device under the assumption that its + * pm_domain field points to the domain member of an object of type + * struct generic_pm_domain representing a power domain consisting of I/O + * devices. + */ +static int pm_genpd_resume_early(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev); +} + +/** + * pm_genpd_resume - Resume of device in an I/O PM domain. + * @dev: Device to resume. + * + * Resume a device under the assumption that its pm_domain field points to the + * domain member of an object of type struct generic_pm_domain representing + * a power domain consisting of I/O devices. + */ +static int pm_genpd_resume(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev); +} + +/** + * pm_genpd_freeze - Freezing a device in an I/O PM domain. + * @dev: Device to freeze. + * + * Freeze a device under the assumption that its pm_domain field points to the + * domain member of an object of type struct generic_pm_domain representing + * a power domain consisting of I/O devices. + */ +static int pm_genpd_freeze(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev); +} + +/** + * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain. + * @dev: Device to freeze. + * + * Carry out a late freeze of a device under the assumption that its + * pm_domain field points to the domain member of an object of type + * struct generic_pm_domain representing a power domain consisting of I/O + * devices. + */ +static int pm_genpd_freeze_late(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev); +} + +/** + * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain. + * @dev: Device to freeze. + * + * Carry out a late freeze of a device under the assumption that its + * pm_domain field points to the domain member of an object of type + * struct generic_pm_domain representing a power domain consisting of I/O + * devices. + */ +static int pm_genpd_freeze_noirq(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ? + 0 : genpd_stop_dev(genpd, dev); +} + +/** + * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain. + * @dev: Device to thaw. + * + * Start the device, unless power has been removed from the domain already + * before the system transition. + */ +static int pm_genpd_thaw_noirq(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ? + 0 : genpd_start_dev(genpd, dev); +} + +/** + * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain. 
+ * @dev: Device to thaw. + * + * Carry out an early thaw of a device under the assumption that its + * pm_domain field points to the domain member of an object of type + * struct generic_pm_domain representing a power domain consisting of I/O + * devices. + */ +static int pm_genpd_thaw_early(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev); +} + +/** + * pm_genpd_thaw - Thaw a device belonging to an I/O power domain. + * @dev: Device to thaw. + * + * Thaw a device under the assumption that its pm_domain field points to the + * domain member of an object of type struct generic_pm_domain representing + * a power domain consisting of I/O devices. + */ +static int pm_genpd_thaw(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev); +} + +/** + * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain. + * @dev: Device to resume. + * + * Make sure the domain will be in the same power state as before the + * hibernation the system is resuming from and start the device if necessary. + */ +static int pm_genpd_restore_noirq(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + /* + * Since all of the "noirq" callbacks are executed sequentially, it is + * guaranteed that this function will never run twice in parallel for + * the same PM domain, so it is not necessary to use locking here. + * + * At this point suspended_count == 0 means we are being run for the + * first time for the given domain in the present cycle. + */ + if (genpd->suspended_count++ == 0) { + /* + * The boot kernel might put the domain into arbitrary state, + * so make it appear as powered off to pm_genpd_poweron(), so + * that it tries to power it on in case it was really off. + */ + genpd->status = GPD_STATE_POWER_OFF; + if (genpd->suspend_power_off) { + /* + * If the domain was off before the hibernation, make + * sure it will be off going forward. + */ + if (genpd->power_off) + genpd->power_off(genpd); + + return 0; + } + } + + if (genpd->suspend_power_off) + return 0; + + pm_genpd_poweron(genpd); + + return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev); +} + +/** + * pm_genpd_complete - Complete power transition of a device in a power domain. + * @dev: Device to complete the transition of. + * + * Complete a power transition of a device (during a system-wide power + * transition) under the assumption that its pm_domain field points to the + * domain member of an object of type struct generic_pm_domain representing + * a power domain consisting of I/O devices. 
+ */ +static void pm_genpd_complete(struct device *dev) +{ + struct generic_pm_domain *genpd; + bool run_complete; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return; + + mutex_lock(&genpd->lock); + + run_complete = !genpd->suspend_power_off; + if (--genpd->prepared_count == 0) + genpd->suspend_power_off = false; + + mutex_unlock(&genpd->lock); + + if (run_complete) { + pm_generic_complete(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + pm_runtime_idle(dev); + } +} + +#else + +#define pm_genpd_prepare NULL +#define pm_genpd_suspend NULL +#define pm_genpd_suspend_late NULL +#define pm_genpd_suspend_noirq NULL +#define pm_genpd_resume_early NULL +#define pm_genpd_resume_noirq NULL +#define pm_genpd_resume NULL +#define pm_genpd_freeze NULL +#define pm_genpd_freeze_late NULL +#define pm_genpd_freeze_noirq NULL +#define pm_genpd_thaw_early NULL +#define pm_genpd_thaw_noirq NULL +#define pm_genpd_thaw NULL +#define pm_genpd_restore_noirq NULL +#define pm_genpd_complete NULL + +#endif /* CONFIG_PM_SLEEP */ + +/** + * __pm_genpd_add_device - Add a device to an I/O PM domain. + * @genpd: PM domain to add the device to. + * @dev: Device to be added. + * @td: Set of PM QoS timing parameters to attach to the device. + */ +int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, + struct gpd_timing_data *td) +{ + struct generic_pm_domain_data *gpd_data; + struct pm_domain_data *pdd; + int ret = 0; + + dev_dbg(dev, "%s()\n", __func__); + + if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) + return -EINVAL; + + gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); + if (!gpd_data) + return -ENOMEM; + + mutex_init(&gpd_data->lock); + gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; + dev_pm_qos_add_notifier(dev, &gpd_data->nb); + + genpd_acquire_lock(genpd); + + if (genpd->prepared_count > 0) { + ret = -EAGAIN; + goto out; + } + + list_for_each_entry(pdd, &genpd->dev_list, list_node) + if (pdd->dev == dev) { + ret = -EINVAL; + goto out; + } + + genpd->device_count++; + genpd->max_off_time_changed = true; + + dev_pm_get_subsys_data(dev); + + mutex_lock(&gpd_data->lock); + spin_lock_irq(&dev->power.lock); + dev->pm_domain = &genpd->domain; + dev->power.subsys_data->domain_data = &gpd_data->base; + gpd_data->base.dev = dev; + list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); + gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF; + if (td) + gpd_data->td = *td; + + gpd_data->td.constraint_changed = true; + gpd_data->td.effective_constraint_ns = -1; + spin_unlock_irq(&dev->power.lock); + mutex_unlock(&gpd_data->lock); + + genpd_release_lock(genpd); + + return 0; + + out: + genpd_release_lock(genpd); + + dev_pm_qos_remove_notifier(dev, &gpd_data->nb); + kfree(gpd_data); + return ret; +} + +/** + * __pm_genpd_of_add_device - Add a device to an I/O PM domain. + * @genpd_node: Device tree node pointer representing a PM domain to which the + * the device is added to. + * @dev: Device to be added. + * @td: Set of PM QoS timing parameters to attach to the device. 
+ */ +int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev, + struct gpd_timing_data *td) +{ + struct generic_pm_domain *genpd = NULL, *gpd; + + dev_dbg(dev, "%s()\n", __func__); + + if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev)) + return -EINVAL; + + mutex_lock(&gpd_list_lock); + list_for_each_entry(gpd, &gpd_list, gpd_list_node) { + if (gpd->of_node == genpd_node) { + genpd = gpd; + break; + } + } + mutex_unlock(&gpd_list_lock); + + if (!genpd) + return -EINVAL; + + return __pm_genpd_add_device(genpd, dev, td); +} + +/** + * pm_genpd_remove_device - Remove a device from an I/O PM domain. + * @genpd: PM domain to remove the device from. + * @dev: Device to be removed. + */ +int pm_genpd_remove_device(struct generic_pm_domain *genpd, + struct device *dev) +{ + struct generic_pm_domain_data *gpd_data; + struct pm_domain_data *pdd; + int ret = 0; + + dev_dbg(dev, "%s()\n", __func__); + + if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev) + || IS_ERR_OR_NULL(dev->pm_domain) + || pd_to_genpd(dev->pm_domain) != genpd) + return -EINVAL; + + genpd_acquire_lock(genpd); + + if (genpd->prepared_count > 0) { + ret = -EAGAIN; + goto out; + } + + genpd->device_count--; + genpd->max_off_time_changed = true; + + spin_lock_irq(&dev->power.lock); + dev->pm_domain = NULL; + pdd = dev->power.subsys_data->domain_data; + list_del_init(&pdd->list_node); + dev->power.subsys_data->domain_data = NULL; + spin_unlock_irq(&dev->power.lock); + + gpd_data = to_gpd_data(pdd); + mutex_lock(&gpd_data->lock); + pdd->dev = NULL; + mutex_unlock(&gpd_data->lock); + + genpd_release_lock(genpd); + + dev_pm_qos_remove_notifier(dev, &gpd_data->nb); + kfree(gpd_data); + dev_pm_put_subsys_data(dev); + return 0; + + out: + genpd_release_lock(genpd); + + return ret; +} + +/** + * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device. + * @dev: Device to set/unset the flag for. + * @val: The new value of the device's "always on" flag. + */ +void pm_genpd_dev_always_on(struct device *dev, bool val) +{ + struct pm_subsys_data *psd; + unsigned long flags; + + spin_lock_irqsave(&dev->power.lock, flags); + + psd = dev_to_psd(dev); + if (psd && psd->domain_data) + to_gpd_data(psd->domain_data)->always_on = val; + + spin_unlock_irqrestore(&dev->power.lock, flags); +} +EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on); + +/** + * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag. + * @dev: Device to set/unset the flag for. + * @val: The new value of the device's "need restore" flag. + */ +void pm_genpd_dev_need_restore(struct device *dev, bool val) +{ + struct pm_subsys_data *psd; + unsigned long flags; + + spin_lock_irqsave(&dev->power.lock, flags); + + psd = dev_to_psd(dev); + if (psd && psd->domain_data) + to_gpd_data(psd->domain_data)->need_restore = val; + + spin_unlock_irqrestore(&dev->power.lock, flags); +} +EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore); + +/** + * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. + * @genpd: Master PM domain to add the subdomain to. + * @subdomain: Subdomain to be added. 
+ */ +int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *subdomain) +{ + struct gpd_link *link; + int ret = 0; + + if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) + return -EINVAL; + + start: + genpd_acquire_lock(genpd); + mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); + + if (subdomain->status != GPD_STATE_POWER_OFF + && subdomain->status != GPD_STATE_ACTIVE) { + mutex_unlock(&subdomain->lock); + genpd_release_lock(genpd); + goto start; + } + + if (genpd->status == GPD_STATE_POWER_OFF + && subdomain->status != GPD_STATE_POWER_OFF) { + ret = -EINVAL; + goto out; + } + + list_for_each_entry(link, &genpd->master_links, master_node) { + if (link->slave == subdomain && link->master == genpd) { + ret = -EINVAL; + goto out; + } + } + + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + ret = -ENOMEM; + goto out; + } + link->master = genpd; + list_add_tail(&link->master_node, &genpd->master_links); + link->slave = subdomain; + list_add_tail(&link->slave_node, &subdomain->slave_links); + if (subdomain->status != GPD_STATE_POWER_OFF) + genpd_sd_counter_inc(genpd); + + out: + mutex_unlock(&subdomain->lock); + genpd_release_lock(genpd); + + return ret; +} + +/** + * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. + * @genpd: Master PM domain to remove the subdomain from. + * @subdomain: Subdomain to be removed. + */ +int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *subdomain) +{ + struct gpd_link *link; + int ret = -EINVAL; + + if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) + return -EINVAL; + + start: + genpd_acquire_lock(genpd); + + list_for_each_entry(link, &genpd->master_links, master_node) { + if (link->slave != subdomain) + continue; + + mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); + + if (subdomain->status != GPD_STATE_POWER_OFF + && subdomain->status != GPD_STATE_ACTIVE) { + mutex_unlock(&subdomain->lock); + genpd_release_lock(genpd); + goto start; + } + + list_del(&link->master_node); + list_del(&link->slave_node); + kfree(link); + if (subdomain->status != GPD_STATE_POWER_OFF) + genpd_sd_counter_dec(genpd); + + mutex_unlock(&subdomain->lock); + + ret = 0; + break; + } + + genpd_release_lock(genpd); + + return ret; +} + +/** + * pm_genpd_add_callbacks - Add PM domain callbacks to a given device. + * @dev: Device to add the callbacks to. + * @ops: Set of callbacks to add. + * @td: Timing data to add to the device along with the callbacks (optional). + */ +int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops, + struct gpd_timing_data *td) +{ + struct pm_domain_data *pdd; + int ret = 0; + + if (!(dev && dev->power.subsys_data && ops)) + return -EINVAL; + + pm_runtime_disable(dev); + device_pm_lock(); + + pdd = dev->power.subsys_data->domain_data; + if (pdd) { + struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); + + gpd_data->ops = *ops; + if (td) + gpd_data->td = *td; + } else { + ret = -EINVAL; + } + + device_pm_unlock(); + pm_runtime_enable(dev); + + return ret; +} +EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks); + +/** + * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device. + * @dev: Device to remove the callbacks from. + * @clear_td: If set, clear the device's timing data too. 
+ */
+int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
+{
+ struct pm_domain_data *pdd;
+ int ret = 0;
+
+ if (!(dev && dev->power.subsys_data))
+ return -EINVAL;
+
+ pm_runtime_disable(dev);
+ device_pm_lock();
+
+ pdd = dev->power.subsys_data->domain_data;
+ if (pdd) {
+ struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+
+ gpd_data->ops = (struct gpd_dev_ops){ 0 };
+ if (clear_td)
+ gpd_data->td = (struct gpd_timing_data){ 0 };
+ } else {
+ ret = -EINVAL;
+ }
+
+ device_pm_unlock();
+ pm_runtime_enable(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
+
+/* Default device callbacks for generic PM domains. */
+
+/**
+ * pm_genpd_default_save_state - Default "save device state" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_save_state(struct device *dev)
+{
+ int (*cb)(struct device *__dev);
+ struct device_driver *drv = dev->driver;
+
+ cb = dev_gpd_data(dev)->ops.save_state;
+ if (cb)
+ return cb(dev);
+
+ if (drv && drv->pm && drv->pm->runtime_suspend)
+ return drv->pm->runtime_suspend(dev);
+
+ return 0;
+}
+
+/**
+ * pm_genpd_default_restore_state - Default PM domains "restore device state".
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_restore_state(struct device *dev)
+{
+ int (*cb)(struct device *__dev);
+ struct device_driver *drv = dev->driver;
+
+ cb = dev_gpd_data(dev)->ops.restore_state;
+ if (cb)
+ return cb(dev);
+
+ if (drv && drv->pm && drv->pm->runtime_resume)
+ return drv->pm->runtime_resume(dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+/**
+ * pm_genpd_default_suspend - Default "device suspend" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_suspend(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
+
+ return cb ? cb(dev) : pm_generic_suspend(dev);
+}
+
+/**
+ * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_suspend_late(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
+
+ return cb ? cb(dev) : pm_generic_suspend_late(dev);
+}
+
+/**
+ * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_resume_early(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
+
+ return cb ? cb(dev) : pm_generic_resume_early(dev);
+}
+
+/**
+ * pm_genpd_default_resume - Default "device resume" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_resume(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
+
+ return cb ? cb(dev) : pm_generic_resume(dev);
+}
+
+/**
+ * pm_genpd_default_freeze - Default "device freeze" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_freeze(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
+
+ return cb ? cb(dev) : pm_generic_freeze(dev);
+}
+
+/**
+ * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_freeze_late(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
+
+ return cb ? cb(dev) : pm_generic_freeze_late(dev);
+}
+
+/**
+ * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
+ * @dev: Device to handle.
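+ *
+ * Like the other defaults above: use the ->thaw_early() callback from the
+ * device's gpd_dev_ops if one was registered, otherwise fall back to
+ * pm_generic_thaw_early().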
+ */
+static int pm_genpd_default_thaw_early(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
+
+ return cb ? cb(dev) : pm_generic_thaw_early(dev);
+}
+
+/**
+ * pm_genpd_default_thaw - Default "device thaw" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_thaw(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
+
+ return cb ? cb(dev) : pm_generic_thaw(dev);
+}
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define pm_genpd_default_suspend NULL
+#define pm_genpd_default_suspend_late NULL
+#define pm_genpd_default_resume_early NULL
+#define pm_genpd_default_resume NULL
+#define pm_genpd_default_freeze NULL
+#define pm_genpd_default_freeze_late NULL
+#define pm_genpd_default_thaw_early NULL
+#define pm_genpd_default_thaw NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+/**
+ * pm_genpd_init - Initialize a generic I/O PM domain object.
+ * @genpd: PM domain object to initialize.
+ * @gov: PM domain governor to associate with the domain (may be NULL).
+ * @is_off: Initial value of the domain's power_is_off field.
+ */
+void pm_genpd_init(struct generic_pm_domain *genpd,
+ struct dev_power_governor *gov, bool is_off)
+{
+ if (IS_ERR_OR_NULL(genpd))
+ return;
+
+ INIT_LIST_HEAD(&genpd->master_links);
+ INIT_LIST_HEAD(&genpd->slave_links);
+ INIT_LIST_HEAD(&genpd->dev_list);
+ mutex_init(&genpd->lock);
+ genpd->gov = gov;
+ INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
+ genpd->in_progress = 0;
+ atomic_set(&genpd->sd_count, 0);
+ genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
+ init_waitqueue_head(&genpd->status_wait_queue);
+ genpd->poweroff_task = NULL;
+ genpd->resume_count = 0;
+ genpd->device_count = 0;
+ genpd->max_off_time_ns = -1;
+ genpd->max_off_time_changed = true;
+ genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
+ genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
+ genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
+ genpd->domain.ops.prepare = pm_genpd_prepare;
+ genpd->domain.ops.suspend = pm_genpd_suspend;
+ genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
+ genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
+ genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
+ genpd->domain.ops.resume_early = pm_genpd_resume_early;
+ genpd->domain.ops.resume = pm_genpd_resume;
+ genpd->domain.ops.freeze = pm_genpd_freeze;
+ genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
+ genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
+ genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
+ genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
+ genpd->domain.ops.thaw = pm_genpd_thaw;
+ genpd->domain.ops.poweroff = pm_genpd_suspend;
+ genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
+ genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
+ genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
+ genpd->domain.ops.restore_early = pm_genpd_resume_early;
+ genpd->domain.ops.restore = pm_genpd_resume;
+ genpd->domain.ops.complete = pm_genpd_complete;
+ genpd->dev_ops.save_state = pm_genpd_default_save_state;
+ genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
+ genpd->dev_ops.suspend = pm_genpd_default_suspend;
+ genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
+ genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
+ genpd->dev_ops.resume = pm_genpd_default_resume;
+ genpd->dev_ops.freeze = pm_genpd_default_freeze;
+ genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
+ 
genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early; + genpd->dev_ops.thaw = pm_genpd_default_thaw; + mutex_lock(&gpd_list_lock); + list_add(&genpd->gpd_list_node, &gpd_list); + mutex_unlock(&gpd_list_lock); +} diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c new file mode 100644 index 00000000..28dee305 --- /dev/null +++ b/drivers/base/power/domain_governor.c @@ -0,0 +1,254 @@ +/* + * drivers/base/power/domain_governor.c - Governors for device PM domains. + * + * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. + * + * This file is released under the GPLv2. + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/pm_domain.h> +#include <linux/pm_qos.h> +#include <linux/hrtimer.h> + +#ifdef CONFIG_PM_RUNTIME + +static int dev_update_qos_constraint(struct device *dev, void *data) +{ + s64 *constraint_ns_p = data; + s32 constraint_ns = -1; + + if (dev->power.subsys_data && dev->power.subsys_data->domain_data) + constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns; + + if (constraint_ns < 0) { + constraint_ns = dev_pm_qos_read_value(dev); + constraint_ns *= NSEC_PER_USEC; + } + if (constraint_ns == 0) + return 0; + + /* + * constraint_ns cannot be negative here, because the device has been + * suspended. + */ + if (constraint_ns < *constraint_ns_p || *constraint_ns_p == 0) + *constraint_ns_p = constraint_ns; + + return 0; +} + +/** + * default_stop_ok - Default PM domain governor routine for stopping devices. + * @dev: Device to check. + */ +bool default_stop_ok(struct device *dev) +{ + struct gpd_timing_data *td = &dev_gpd_data(dev)->td; + unsigned long flags; + s64 constraint_ns; + + dev_dbg(dev, "%s()\n", __func__); + + spin_lock_irqsave(&dev->power.lock, flags); + + if (!td->constraint_changed) { + bool ret = td->cached_stop_ok; + + spin_unlock_irqrestore(&dev->power.lock, flags); + return ret; + } + td->constraint_changed = false; + td->cached_stop_ok = false; + td->effective_constraint_ns = -1; + constraint_ns = __dev_pm_qos_read_value(dev); + + spin_unlock_irqrestore(&dev->power.lock, flags); + + if (constraint_ns < 0) + return false; + + constraint_ns *= NSEC_PER_USEC; + /* + * We can walk the children without any additional locking, because + * they all have been suspended at this point and their + * effective_constraint_ns fields won't be modified in parallel with us. + */ + if (!dev->power.ignore_children) + device_for_each_child(dev, &constraint_ns, + dev_update_qos_constraint); + + if (constraint_ns > 0) { + constraint_ns -= td->start_latency_ns; + if (constraint_ns == 0) + return false; + } + td->effective_constraint_ns = constraint_ns; + td->cached_stop_ok = constraint_ns > td->stop_latency_ns || + constraint_ns == 0; + /* + * The children have been suspended already, so we don't need to take + * their stop latencies into account here. + */ + return td->cached_stop_ok; +} + +/** + * default_power_down_ok - Default generic PM domain power off governor routine. + * @pd: PM domain to check. + * + * This routine must be executed under the PM domain's lock. 
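+ *
+ * The decision is a break-even test: power-off is allowed only if every
+ * device and subdomain can stay off at least as long as the domain's own
+ * power_off_latency_ns + power_on_latency_ns. Illustrative numbers: with
+ * an off+on latency of 500000 ns, a device whose QoS constraint minus
+ * restore latency is 400000 ns vetoes the power-off, while one with a
+ * 2000000 ns budget permits it.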
+ */ +static bool default_power_down_ok(struct dev_pm_domain *pd) +{ + struct generic_pm_domain *genpd = pd_to_genpd(pd); + struct gpd_link *link; + struct pm_domain_data *pdd; + s64 min_off_time_ns; + s64 off_on_time_ns; + + if (genpd->max_off_time_changed) { + struct gpd_link *link; + + /* + * We have to invalidate the cached results for the masters, so + * use the observation that default_power_down_ok() is not + * going to be called for any master until this instance + * returns. + */ + list_for_each_entry(link, &genpd->slave_links, slave_node) + link->master->max_off_time_changed = true; + + genpd->max_off_time_changed = false; + genpd->cached_power_down_ok = false; + genpd->max_off_time_ns = -1; + } else { + return genpd->cached_power_down_ok; + } + + off_on_time_ns = genpd->power_off_latency_ns + + genpd->power_on_latency_ns; + /* + * It doesn't make sense to remove power from the domain if saving + * the state of all devices in it and the power off/power on operations + * take too much time. + * + * All devices in this domain have been stopped already at this point. + */ + list_for_each_entry(pdd, &genpd->dev_list, list_node) { + if (pdd->dev->driver) + off_on_time_ns += + to_gpd_data(pdd)->td.save_state_latency_ns; + } + + min_off_time_ns = -1; + /* + * Check if subdomains can be off for enough time. + * + * All subdomains have been powered off already at this point. + */ + list_for_each_entry(link, &genpd->master_links, master_node) { + struct generic_pm_domain *sd = link->slave; + s64 sd_max_off_ns = sd->max_off_time_ns; + + if (sd_max_off_ns < 0) + continue; + + /* + * Check if the subdomain is allowed to be off long enough for + * the current domain to turn off and on (that's how much time + * it will have to wait worst case). + */ + if (sd_max_off_ns <= off_on_time_ns) + return false; + + if (min_off_time_ns > sd_max_off_ns || min_off_time_ns < 0) + min_off_time_ns = sd_max_off_ns; + } + + /* + * Check if the devices in the domain can be off enough time. + */ + list_for_each_entry(pdd, &genpd->dev_list, list_node) { + struct gpd_timing_data *td; + s64 constraint_ns; + + if (!pdd->dev->driver) + continue; + + /* + * Check if the device is allowed to be off long enough for the + * domain to turn off and on (that's how much time it will + * have to wait worst case). + */ + td = &to_gpd_data(pdd)->td; + constraint_ns = td->effective_constraint_ns; + /* default_stop_ok() need not be called before us. */ + if (constraint_ns < 0) { + constraint_ns = dev_pm_qos_read_value(pdd->dev); + constraint_ns *= NSEC_PER_USEC; + } + if (constraint_ns == 0) + continue; + + /* + * constraint_ns cannot be negative here, because the device has + * been suspended. + */ + constraint_ns -= td->restore_state_latency_ns; + if (constraint_ns <= off_on_time_ns) + return false; + + if (min_off_time_ns > constraint_ns || min_off_time_ns < 0) + min_off_time_ns = constraint_ns; + } + + genpd->cached_power_down_ok = true; + + /* + * If the computed minimum device off time is negative, there are no + * latency constraints, so the domain can spend arbitrary time in the + * "off" state. + */ + if (min_off_time_ns < 0) + return true; + + /* + * The difference between the computed minimum subdomain or device off + * time and the time needed to turn the domain on is the maximum + * theoretical time this domain can spend in the "off" state. 
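+ * For example (illustrative numbers): min_off_time_ns = 3000000 and
+ * power_on_latency_ns = 1000000 leave max_off_time_ns = 2000000 for the
+ * governors of the masters to work with.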
+ */
+ genpd->max_off_time_ns = min_off_time_ns - genpd->power_on_latency_ns;
+ return true;
+}
+
+static bool always_on_power_down_ok(struct dev_pm_domain *domain)
+{
+ return false;
+}
+
+#else /* !CONFIG_PM_RUNTIME */
+
+bool default_stop_ok(struct device *dev)
+{
+ return false;
+}
+
+#define default_power_down_ok NULL
+#define always_on_power_down_ok NULL
+
+#endif /* !CONFIG_PM_RUNTIME */
+
+struct dev_power_governor simple_qos_governor = {
+ .stop_ok = default_stop_ok,
+ .power_down_ok = default_power_down_ok,
+};
+
+/**
+ * pm_domain_always_on_gov - A governor implementing an always-on policy
+ */
+struct dev_power_governor pm_domain_always_on_gov = {
+ .power_down_ok = always_on_power_down_ok,
+ .stop_ok = default_stop_ok,
+};
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
new file mode 100644
index 00000000..d03d290f
--- /dev/null
+++ b/drivers/base/power/generic_ops.c
@@ -0,0 +1,329 @@
+/*
+ * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
+ *
+ * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/export.h>
+
+#ifdef CONFIG_PM_RUNTIME
+/**
+ * pm_generic_runtime_idle - Generic runtime idle callback for subsystems.
+ * @dev: Device to handle.
+ *
+ * If PM operations are defined for the @dev's driver and they include
+ * ->runtime_idle(), execute it and return its error code, if nonzero.
+ * Otherwise, execute pm_runtime_suspend() for the device and return 0.
+ */
+int pm_generic_runtime_idle(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pm && pm->runtime_idle) {
+ int ret = pm->runtime_idle(dev);
+ if (ret)
+ return ret;
+ }
+
+ pm_runtime_suspend(dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_runtime_idle);
+
+/**
+ * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
+ * @dev: Device to suspend.
+ *
+ * If PM operations are defined for the @dev's driver and they include
+ * ->runtime_suspend(), execute it and return its error code. Otherwise,
+ * return 0.
+ */
+int pm_generic_runtime_suspend(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int ret;
+
+ ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
+
+/**
+ * pm_generic_runtime_resume - Generic runtime resume callback for subsystems.
+ * @dev: Device to resume.
+ *
+ * If PM operations are defined for the @dev's driver and they include
+ * ->runtime_resume(), execute it and return its error code. Otherwise,
+ * return 0.
+ */
+int pm_generic_runtime_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int ret;
+
+ ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * pm_generic_prepare - Generic routine preparing a device for power transition.
+ * @dev: Device to prepare.
+ *
+ * Prepare a device for a system-wide power transition.
+ */
+int pm_generic_prepare(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (drv && drv->pm && drv->pm->prepare)
+ ret = drv->pm->prepare(dev);
+
+ return ret;
+}
+
+/**
+ * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
+ * @dev: Device to suspend. + */ +int pm_generic_suspend_noirq(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq); + +/** + * pm_generic_suspend_late - Generic suspend_late callback for subsystems. + * @dev: Device to suspend. + */ +int pm_generic_suspend_late(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->suspend_late ? pm->suspend_late(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_suspend_late); + +/** + * pm_generic_suspend - Generic suspend callback for subsystems. + * @dev: Device to suspend. + */ +int pm_generic_suspend(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->suspend ? pm->suspend(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_suspend); + +/** + * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems. + * @dev: Device to freeze. + */ +int pm_generic_freeze_noirq(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq); + +/** + * pm_generic_freeze_late - Generic freeze_late callback for subsystems. + * @dev: Device to freeze. + */ +int pm_generic_freeze_late(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->freeze_late ? pm->freeze_late(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_freeze_late); + +/** + * pm_generic_freeze - Generic freeze callback for subsystems. + * @dev: Device to freeze. + */ +int pm_generic_freeze(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->freeze ? pm->freeze(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_freeze); + +/** + * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems. + * @dev: Device to handle. + */ +int pm_generic_poweroff_noirq(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq); + +/** + * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems. + * @dev: Device to handle. + */ +int pm_generic_poweroff_late(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_poweroff_late); + +/** + * pm_generic_poweroff - Generic poweroff callback for subsystems. + * @dev: Device to handle. + */ +int pm_generic_poweroff(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->poweroff ? pm->poweroff(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_poweroff); + +/** + * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems. + * @dev: Device to thaw. + */ +int pm_generic_thaw_noirq(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq); + +/** + * pm_generic_thaw_early - Generic thaw_early callback for subsystems. + * @dev: Device to thaw. + */ +int pm_generic_thaw_early(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL;
+
+ return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
+
+/**
+ * pm_generic_thaw - Generic thaw callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->thaw ? pm->thaw(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw);
+
+/**
+ * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume_noirq(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
+
+/**
+ * pm_generic_resume_early - Generic resume_early callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume_early(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->resume_early ? pm->resume_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume_early);
+
+/**
+ * pm_generic_resume - Generic resume callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->resume ? pm->resume(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume);
+
+/**
+ * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore_noirq(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
+
+/**
+ * pm_generic_restore_early - Generic restore_early callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore_early(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->restore_early ? pm->restore_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore_early);
+
+/**
+ * pm_generic_restore - Generic restore callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->restore ? pm->restore(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore);
+
+/**
+ * pm_generic_complete - Generic routine completing a device power transition.
+ * @dev: Device to handle.
+ *
+ * Complete a device power transition during a system-wide power transition.
+ */
+void pm_generic_complete(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+
+ if (drv && drv->pm && drv->pm->complete)
+ drv->pm->complete(dev);
+
+ /*
+ * Let runtime PM try to suspend devices that haven't been in use before
+ * going into the system-wide sleep state we're resuming from.
+ */
+ pm_runtime_idle(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
new file mode 100644
index 00000000..e153e03b
--- /dev/null
+++ b/drivers/base/power/main.c
@@ -0,0 +1,1409 @@
+/*
+ * drivers/base/power/main.c - Where the driver meets power management.
+ *
+ * Copyright (c) 2003 Patrick Mochel
+ * Copyright (c) 2003 Open Source Development Lab
+ *
+ * This file is released under the GPLv2
+ *
+ *
+ * The driver model core calls device_pm_add() when a device is registered.
+ * This will initialize the embedded device_pm_info object in the device + * and add it to the list of power-controlled devices. sysfs entries for + * controlling device power management will also be added. + * + * A separate list is used for keeping track of power info, because the power + * domain dependencies may differ from the ancestral dependencies that the + * subsystem list maintains. + */ + +#include <linux/device.h> +#include <linux/kallsyms.h> +#include <linux/export.h> +#include <linux/mutex.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> +#include <linux/resume-trace.h> +#include <linux/interrupt.h> +#include <linux/sched.h> +#include <linux/async.h> +#include <linux/suspend.h> +#include <linux/timer.h> + +#include "../base.h" +#include "power.h" +#include <mach/wmt_secure.h> + +typedef int (*pm_callback_t)(struct device *); + +/* + * The entries in the dpm_list list are in a depth first order, simply + * because children are guaranteed to be discovered after parents, and + * are inserted at the back of the list on discovery. + * + * Since device_pm_add() may be called with a device lock held, + * we must never try to acquire a device lock while holding + * dpm_list_mutex. + */ + +LIST_HEAD(dpm_list); +LIST_HEAD(dpm_prepared_list); +LIST_HEAD(dpm_suspended_list); +LIST_HEAD(dpm_late_early_list); +LIST_HEAD(dpm_noirq_list); + +struct suspend_stats suspend_stats; +static DEFINE_MUTEX(dpm_list_mtx); +static pm_message_t pm_transition; + +struct dpm_watchdog { + struct device *dev; + struct task_struct *tsk; + struct timer_list timer; +}; + +static int async_error; +extern unsigned int cpu_trustzone_enabled; +extern int console_printk[]; +#define console_loglevel (console_printk[0]) + +/** + * device_pm_init - Initialize the PM-related part of a device object. + * @dev: Device object being initialized. + */ +void device_pm_init(struct device *dev) +{ + dev->power.is_prepared = false; + dev->power.is_suspended = false; + init_completion(&dev->power.completion); + complete_all(&dev->power.completion); + dev->power.wakeup = NULL; + spin_lock_init(&dev->power.lock); + pm_runtime_init(dev); + INIT_LIST_HEAD(&dev->power.entry); + dev->power.power_state = PMSG_INVALID; +} + +/** + * device_pm_lock - Lock the list of active devices used by the PM core. + */ +void device_pm_lock(void) +{ + mutex_lock(&dpm_list_mtx); +} + +/** + * device_pm_unlock - Unlock the list of active devices used by the PM core. + */ +void device_pm_unlock(void) +{ + mutex_unlock(&dpm_list_mtx); +} + +/** + * device_pm_add - Add a device to the PM core's list of active devices. + * @dev: Device to add to the list. + */ +void device_pm_add(struct device *dev) +{ + pr_debug("PM: Adding info for %s:%s\n", + dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); + mutex_lock(&dpm_list_mtx); + if (dev->parent && dev->parent->power.is_prepared) + dev_warn(dev, "parent %s should not be sleeping\n", + dev_name(dev->parent)); + list_add_tail(&dev->power.entry, &dpm_list); + dev_pm_qos_constraints_init(dev); + mutex_unlock(&dpm_list_mtx); +} + +/** + * device_pm_remove - Remove a device from the PM core's list of active devices. + * @dev: Device to be removed from the list. + */ +void device_pm_remove(struct device *dev) +{ + pr_debug("PM: Removing info for %s:%s\n", + dev->bus ? 
dev->bus->name : "No Bus", dev_name(dev)); + complete_all(&dev->power.completion); + mutex_lock(&dpm_list_mtx); + dev_pm_qos_constraints_destroy(dev); + list_del_init(&dev->power.entry); + mutex_unlock(&dpm_list_mtx); + device_wakeup_disable(dev); + pm_runtime_remove(dev); +} + +/** + * device_pm_move_before - Move device in the PM core's list of active devices. + * @deva: Device to move in dpm_list. + * @devb: Device @deva should come before. + */ +void device_pm_move_before(struct device *deva, struct device *devb) +{ + pr_debug("PM: Moving %s:%s before %s:%s\n", + deva->bus ? deva->bus->name : "No Bus", dev_name(deva), + devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); + /* Delete deva from dpm_list and reinsert before devb. */ + list_move_tail(&deva->power.entry, &devb->power.entry); +} + +/** + * device_pm_move_after - Move device in the PM core's list of active devices. + * @deva: Device to move in dpm_list. + * @devb: Device @deva should come after. + */ +void device_pm_move_after(struct device *deva, struct device *devb) +{ + pr_debug("PM: Moving %s:%s after %s:%s\n", + deva->bus ? deva->bus->name : "No Bus", dev_name(deva), + devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); + /* Delete deva from dpm_list and reinsert after devb. */ + list_move(&deva->power.entry, &devb->power.entry); +} + +/** + * device_pm_move_last - Move device to end of the PM core's list of devices. + * @dev: Device to move in dpm_list. + */ +void device_pm_move_last(struct device *dev) +{ + pr_debug("PM: Moving %s:%s to end of list\n", + dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); + list_move_tail(&dev->power.entry, &dpm_list); +} + +static ktime_t initcall_debug_start(struct device *dev) +{ + ktime_t calltime = ktime_set(0, 0); + + if (initcall_debug) { + pr_info("calling %s+ @ %i, parent: %s\n", + dev_name(dev), task_pid_nr(current), + dev->parent ? dev_name(dev->parent) : "none"); + calltime = ktime_get(); + } + + return calltime; +} + +static void initcall_debug_report(struct device *dev, ktime_t calltime, + int error) +{ + ktime_t delta, rettime; + + if (initcall_debug) { + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), + error, (unsigned long long)ktime_to_ns(delta) >> 10); + } +} + +/** + * dpm_wait - Wait for a PM operation to complete. + * @dev: Device to wait for. + * @async: If unset, wait only if the device's power.async_suspend flag is set. + */ +static void dpm_wait(struct device *dev, bool async) +{ + if (!dev) + return; + + if (async || (pm_async_enabled && dev->power.async_suspend)) + wait_for_completion(&dev->power.completion); +} + +static int dpm_wait_fn(struct device *dev, void *async_ptr) +{ + dpm_wait(dev, *((bool *)async_ptr)); + return 0; +} + +static void dpm_wait_for_children(struct device *dev, bool async) +{ + device_for_each_child(dev, &async, dpm_wait_fn); +} + +/** + * pm_op - Return the PM operation appropriate for given PM event. + * @ops: PM operations to choose from. + * @state: PM transition of the system being carried out. 
+ */ +static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state) +{ + switch (state.event) { +#ifdef CONFIG_SUSPEND + case PM_EVENT_SUSPEND: + return ops->suspend; + case PM_EVENT_RESUME: + return ops->resume; +#endif /* CONFIG_SUSPEND */ +#ifdef CONFIG_HIBERNATE_CALLBACKS + case PM_EVENT_FREEZE: + case PM_EVENT_QUIESCE: + return ops->freeze; + case PM_EVENT_HIBERNATE: + return ops->poweroff; + case PM_EVENT_THAW: + case PM_EVENT_RECOVER: + return ops->thaw; + break; + case PM_EVENT_RESTORE: + return ops->restore; +#endif /* CONFIG_HIBERNATE_CALLBACKS */ + } + + return NULL; +} + +/** + * pm_late_early_op - Return the PM operation appropriate for given PM event. + * @ops: PM operations to choose from. + * @state: PM transition of the system being carried out. + * + * Runtime PM is disabled for @dev while this function is being executed. + */ +static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops, + pm_message_t state) +{ + switch (state.event) { +#ifdef CONFIG_SUSPEND + case PM_EVENT_SUSPEND: + return ops->suspend_late; + case PM_EVENT_RESUME: + return ops->resume_early; +#endif /* CONFIG_SUSPEND */ +#ifdef CONFIG_HIBERNATE_CALLBACKS + case PM_EVENT_FREEZE: + case PM_EVENT_QUIESCE: + return ops->freeze_late; + case PM_EVENT_HIBERNATE: + return ops->poweroff_late; + case PM_EVENT_THAW: + case PM_EVENT_RECOVER: + return ops->thaw_early; + case PM_EVENT_RESTORE: + return ops->restore_early; +#endif /* CONFIG_HIBERNATE_CALLBACKS */ + } + + return NULL; +} + +/** + * pm_noirq_op - Return the PM operation appropriate for given PM event. + * @ops: PM operations to choose from. + * @state: PM transition of the system being carried out. + * + * The driver of @dev will not receive interrupts while this function is being + * executed. + */ +static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state) +{ + switch (state.event) { +#ifdef CONFIG_SUSPEND + case PM_EVENT_SUSPEND: + return ops->suspend_noirq; + case PM_EVENT_RESUME: + return ops->resume_noirq; +#endif /* CONFIG_SUSPEND */ +#ifdef CONFIG_HIBERNATE_CALLBACKS + case PM_EVENT_FREEZE: + case PM_EVENT_QUIESCE: + return ops->freeze_noirq; + case PM_EVENT_HIBERNATE: + return ops->poweroff_noirq; + case PM_EVENT_THAW: + case PM_EVENT_RECOVER: + return ops->thaw_noirq; + case PM_EVENT_RESTORE: + return ops->restore_noirq; +#endif /* CONFIG_HIBERNATE_CALLBACKS */ + } + + return NULL; +} + +static char *pm_verb(int event) +{ + switch (event) { + case PM_EVENT_SUSPEND: + return "suspend"; + case PM_EVENT_RESUME: + return "resume"; + case PM_EVENT_FREEZE: + return "freeze"; + case PM_EVENT_QUIESCE: + return "quiesce"; + case PM_EVENT_HIBERNATE: + return "hibernate"; + case PM_EVENT_THAW: + return "thaw"; + case PM_EVENT_RESTORE: + return "restore"; + case PM_EVENT_RECOVER: + return "recover"; + default: + return "(unknown PM event)"; + } +} + +static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info) +{ + dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event), + ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ? 
+ ", may wakeup" : ""); +} + +static void pm_dev_err(struct device *dev, pm_message_t state, char *info, + int error) +{ + printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n", + dev_name(dev), pm_verb(state.event), info, error); +} + +static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) +{ + ktime_t calltime; + u64 usecs64; + int usecs; + + calltime = ktime_get(); + usecs64 = ktime_to_ns(ktime_sub(calltime, starttime)); + do_div(usecs64, NSEC_PER_USEC); + usecs = usecs64; + if (usecs == 0) + usecs = 1; + pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n", + info ?: "", info ? " " : "", pm_verb(state.event), + usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); +} + +static int dpm_run_callback(pm_callback_t cb, struct device *dev, + pm_message_t state, char *info) +{ + ktime_t calltime; + int error; + + if (!cb) + return 0; + + calltime = initcall_debug_start(dev); + + pm_dev_dbg(dev, state, info); + error = cb(dev); + suspend_report_result(cb, error); + + initcall_debug_report(dev, calltime, error); + + return error; +} + +/** + * dpm_wd_handler - Driver suspend / resume watchdog handler. + * + * Called when a driver has timed out suspending or resuming. + * There's not much we can do here to recover so BUG() out for + * a crash-dump + */ +static void dpm_wd_handler(unsigned long data) +{ + struct dpm_watchdog *wd = (void *)data; + struct device *dev = wd->dev; + struct task_struct *tsk = wd->tsk; + + dev_emerg(dev, "**** DPM device timeout ****\n"); + show_stack(tsk, NULL); + + BUG(); +} + +/** + * dpm_wd_set - Enable pm watchdog for given device. + * @wd: Watchdog. Must be allocated on the stack. + * @dev: Device to handle. + */ +static void dpm_wd_set(struct dpm_watchdog *wd, struct device *dev) +{ + struct timer_list *timer = &wd->timer; + + wd->dev = dev; + wd->tsk = get_current(); + + init_timer_on_stack(timer); + timer->expires = jiffies + HZ * 12; + timer->function = dpm_wd_handler; + timer->data = (unsigned long)wd; + add_timer(timer); +} + +/** + * dpm_wd_clear - Disable pm watchdog. + * @wd: Watchdog to disable. + */ +static void dpm_wd_clear(struct dpm_watchdog *wd) +{ + struct timer_list *timer = &wd->timer; + + del_timer_sync(timer); + destroy_timer_on_stack(timer); +} + +/*------------------------- Resume routines -------------------------*/ + +/** + * device_resume_noirq - Execute an "early resume" callback for given device. + * @dev: Device to handle. + * @state: PM transition of the system being carried out. + * + * The driver of @dev will not receive interrupts while this function is being + * executed. 
+ */ +static int device_resume_noirq(struct device *dev, pm_message_t state) +{ + pm_callback_t callback = NULL; + char *info = NULL; + int error = 0; + + TRACE_DEVICE(dev); + TRACE_RESUME(0); + + if (dev->pm_domain) { + info = "noirq power domain "; + callback = pm_noirq_op(&dev->pm_domain->ops, state); + } else if (dev->type && dev->type->pm) { + info = "noirq type "; + callback = pm_noirq_op(dev->type->pm, state); + } else if (dev->class && dev->class->pm) { + info = "noirq class "; + callback = pm_noirq_op(dev->class->pm, state); + } else if (dev->bus && dev->bus->pm) { + info = "noirq bus "; + callback = pm_noirq_op(dev->bus->pm, state); + } + + if (!callback && dev->driver && dev->driver->pm) { + info = "noirq driver "; + callback = pm_noirq_op(dev->driver->pm, state); + } + + error = dpm_run_callback(callback, dev, state, info); + + TRACE_RESUME(error); + return error; +} + +/** + * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. + * @state: PM transition of the system being carried out. + * + * Call the "noirq" resume handlers for all devices in dpm_noirq_list and + * enable device drivers to receive interrupts. + */ +static void dpm_resume_noirq(pm_message_t state) +{ + ktime_t starttime = ktime_get(); + + mutex_lock(&dpm_list_mtx); + while (!list_empty(&dpm_noirq_list)) { + struct device *dev = to_device(dpm_noirq_list.next); + int error; + + get_device(dev); + list_move_tail(&dev->power.entry, &dpm_late_early_list); + mutex_unlock(&dpm_list_mtx); + + error = device_resume_noirq(dev, state); + if (error) { + suspend_stats.failed_resume_noirq++; + dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, " noirq", error); + } + + mutex_lock(&dpm_list_mtx); + put_device(dev); + } + mutex_unlock(&dpm_list_mtx); + dpm_show_time(starttime, state, "noirq"); + resume_device_irqs(); +} + +/** + * device_resume_early - Execute an "early resume" callback for given device. + * @dev: Device to handle. + * @state: PM transition of the system being carried out. + * + * Runtime PM is disabled for @dev while this function is being executed. + */ +static int device_resume_early(struct device *dev, pm_message_t state) +{ + pm_callback_t callback = NULL; + char *info = NULL; + int error = 0; + + TRACE_DEVICE(dev); + TRACE_RESUME(0); + + if (dev->pm_domain) { + info = "early power domain "; + callback = pm_late_early_op(&dev->pm_domain->ops, state); + } else if (dev->type && dev->type->pm) { + info = "early type "; + callback = pm_late_early_op(dev->type->pm, state); + } else if (dev->class && dev->class->pm) { + info = "early class "; + callback = pm_late_early_op(dev->class->pm, state); + } else if (dev->bus && dev->bus->pm) { + info = "early bus "; + callback = pm_late_early_op(dev->bus->pm, state); + } + + if (!callback && dev->driver && dev->driver->pm) { + info = "early driver "; + callback = pm_late_early_op(dev->driver->pm, state); + } + + error = dpm_run_callback(callback, dev, state, info); + + TRACE_RESUME(error); + return error; +} + +/** + * dpm_resume_early - Execute "early resume" callbacks for all devices. + * @state: PM transition of the system being carried out. 
+ */ +static void dpm_resume_early(pm_message_t state) +{ + ktime_t starttime = ktime_get(); + + mutex_lock(&dpm_list_mtx); + while (!list_empty(&dpm_late_early_list)) { + struct device *dev = to_device(dpm_late_early_list.next); + int error; + + get_device(dev); + list_move_tail(&dev->power.entry, &dpm_suspended_list); + mutex_unlock(&dpm_list_mtx); + + error = device_resume_early(dev, state); + if (error) { + suspend_stats.failed_resume_early++; + dpm_save_failed_step(SUSPEND_RESUME_EARLY); + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, " early", error); + } + + mutex_lock(&dpm_list_mtx); + put_device(dev); + } + mutex_unlock(&dpm_list_mtx); + dpm_show_time(starttime, state, "early"); +} + +/** + * dpm_resume_start - Execute "noirq" and "early" device callbacks. + * @state: PM transition of the system being carried out. + */ +void dpm_resume_start(pm_message_t state) +{ + dpm_resume_noirq(state); + dpm_resume_early(state); +} +EXPORT_SYMBOL_GPL(dpm_resume_start); + +/** + * device_resume - Execute "resume" callbacks for given device. + * @dev: Device to handle. + * @state: PM transition of the system being carried out. + * @async: If true, the device is being resumed asynchronously. + */ +static int device_resume(struct device *dev, pm_message_t state, bool async) +{ + pm_callback_t callback = NULL; + char *info = NULL; + int error = 0; + bool put = false; + struct dpm_watchdog wd; + + TRACE_DEVICE(dev); + TRACE_RESUME(0); + + dpm_wait(dev->parent, async); + device_lock(dev); + + /* + * This is a fib. But we'll allow new children to be added below + * a resumed device, even if the device hasn't been completed yet. + */ + dev->power.is_prepared = false; + dpm_wd_set(&wd, dev); + + if (!dev->power.is_suspended) + goto Unlock; + + pm_runtime_enable(dev); + put = true; + + if (dev->pm_domain) { + info = "power domain "; + callback = pm_op(&dev->pm_domain->ops, state); + goto Driver; + } + + if (dev->type && dev->type->pm) { + info = "type "; + callback = pm_op(dev->type->pm, state); + goto Driver; + } + + if (dev->class) { + if (dev->class->pm) { + info = "class "; + callback = pm_op(dev->class->pm, state); + goto Driver; + } else if (dev->class->resume) { + info = "legacy class "; + callback = dev->class->resume; + goto End; + } + } + + if (dev->bus) { + if (dev->bus->pm) { + info = "bus "; + callback = pm_op(dev->bus->pm, state); + } else if (dev->bus->resume) { + info = "legacy bus "; + callback = dev->bus->resume; + goto End; + } + } + + Driver: + if (!callback && dev->driver && dev->driver->pm) { + info = "driver "; + callback = pm_op(dev->driver->pm, state); + } + + End: + error = dpm_run_callback(callback, dev, state, info); + dev->power.is_suspended = false; + + Unlock: + device_unlock(dev); + dpm_wd_clear(&wd); + complete_all(&dev->power.completion); + + TRACE_RESUME(error); + + if (put) + pm_runtime_put_sync(dev); + + return error; +} + +static void async_resume(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = device_resume(dev, pm_transition, true); + if (error) + pm_dev_err(dev, pm_transition, " async", error); + put_device(dev); +} + +static bool is_async(struct device *dev) +{ + return dev->power.async_suspend && pm_async_enabled + && !pm_trace_is_enabled(); +} + +/** + * dpm_resume - Execute "resume" callbacks for non-sysdev devices. + * @state: PM transition of the system being carried out. 
+ * + * Execute the appropriate "resume" callback for all devices whose status + * indicates that they are suspended. + */ +void dpm_resume(pm_message_t state) +{ + struct device *dev; + ktime_t starttime = ktime_get(); + int tmp; + + might_sleep(); + + mutex_lock(&dpm_list_mtx); + pm_transition = state; + async_error = 0; + + list_for_each_entry(dev, &dpm_suspended_list, power.entry) { + INIT_COMPLETION(dev->power.completion); + if (is_async(dev)) { + get_device(dev); + async_schedule(async_resume, dev); + } + } + + tmp = console_loglevel; + console_loglevel = 7; + while (!list_empty(&dpm_suspended_list)) { + dev = to_device(dpm_suspended_list.next); + get_device(dev); + if (!is_async(dev)) { + int error; + + mutex_unlock(&dpm_list_mtx); + + error = device_resume(dev, state, false); + if (error) { + suspend_stats.failed_resume++; + dpm_save_failed_step(SUSPEND_RESUME); + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, "", error); + } + + mutex_lock(&dpm_list_mtx); + } + if (!list_empty(&dev->power.entry)) + list_move_tail(&dev->power.entry, &dpm_prepared_list); + put_device(dev); + } + console_loglevel = tmp; + mutex_unlock(&dpm_list_mtx); + async_synchronize_full(); + dpm_show_time(starttime, state, NULL); +} + +/** + * device_complete - Complete a PM transition for given device. + * @dev: Device to handle. + * @state: PM transition of the system being carried out. + */ +static void device_complete(struct device *dev, pm_message_t state) +{ + void (*callback)(struct device *) = NULL; + char *info = NULL; + + device_lock(dev); + + if (dev->pm_domain) { + info = "completing power domain "; + callback = dev->pm_domain->ops.complete; + } else if (dev->type && dev->type->pm) { + info = "completing type "; + callback = dev->type->pm->complete; + } else if (dev->class && dev->class->pm) { + info = "completing class "; + callback = dev->class->pm->complete; + } else if (dev->bus && dev->bus->pm) { + info = "completing bus "; + callback = dev->bus->pm->complete; + } + + if (!callback && dev->driver && dev->driver->pm) { + info = "completing driver "; + callback = dev->driver->pm->complete; + } + + if (callback) { + pm_dev_dbg(dev, state, info); + callback(dev); + } + + device_unlock(dev); +} + +/** + * dpm_complete - Complete a PM transition for all non-sysdev devices. + * @state: PM transition of the system being carried out. + * + * Execute the ->complete() callbacks for all devices whose PM status is not + * DPM_ON (this allows new devices to be registered). + */ +void dpm_complete(pm_message_t state) +{ + struct list_head list; + + might_sleep(); + + INIT_LIST_HEAD(&list); + mutex_lock(&dpm_list_mtx); + while (!list_empty(&dpm_prepared_list)) { + struct device *dev = to_device(dpm_prepared_list.prev); + + get_device(dev); + dev->power.is_prepared = false; + list_move(&dev->power.entry, &list); + mutex_unlock(&dpm_list_mtx); + + device_complete(dev, state); + + mutex_lock(&dpm_list_mtx); + put_device(dev); + } + list_splice(&list, &dpm_list); + mutex_unlock(&dpm_list_mtx); +} + +/** + * dpm_resume_end - Execute "resume" callbacks and complete system transition. + * @state: PM transition of the system being carried out. + * + * Execute "resume" callbacks for all devices and complete the PM transition of + * the system. 
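+ *
+ * dpm_resume() runs first and executes the "resume" callbacks, then
+ * dpm_complete() runs the ->complete() callbacks; in this vendor tree the
+ * TrustZone firmware is also notified through wmt_smc() beforehand when
+ * cpu_trustzone_enabled is set.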
+ */ +void dpm_resume_end(pm_message_t state) +{ + if (cpu_trustzone_enabled == 1) { + wmt_smc(WMT_SMC_CMD_DEVICE_RESUME, 0x1001); + } + + dpm_resume(state); + dpm_complete(state); +} +EXPORT_SYMBOL_GPL(dpm_resume_end); + + +/*------------------------- Suspend routines -------------------------*/ + +/** + * resume_event - Return a "resume" message for given "suspend" sleep state. + * @sleep_state: PM message representing a sleep state. + * + * Return a PM message representing the resume event corresponding to given + * sleep state. + */ +static pm_message_t resume_event(pm_message_t sleep_state) +{ + switch (sleep_state.event) { + case PM_EVENT_SUSPEND: + return PMSG_RESUME; + case PM_EVENT_FREEZE: + case PM_EVENT_QUIESCE: + return PMSG_RECOVER; + case PM_EVENT_HIBERNATE: + return PMSG_RESTORE; + } + return PMSG_ON; +} + +/** + * device_suspend_noirq - Execute a "late suspend" callback for given device. + * @dev: Device to handle. + * @state: PM transition of the system being carried out. + * + * The driver of @dev will not receive interrupts while this function is being + * executed. + */ +static int device_suspend_noirq(struct device *dev, pm_message_t state) +{ + pm_callback_t callback = NULL; + char *info = NULL; + + if (dev->pm_domain) { + info = "noirq power domain "; + callback = pm_noirq_op(&dev->pm_domain->ops, state); + } else if (dev->type && dev->type->pm) { + info = "noirq type "; + callback = pm_noirq_op(dev->type->pm, state); + } else if (dev->class && dev->class->pm) { + info = "noirq class "; + callback = pm_noirq_op(dev->class->pm, state); + } else if (dev->bus && dev->bus->pm) { + info = "noirq bus "; + callback = pm_noirq_op(dev->bus->pm, state); + } + + if (!callback && dev->driver && dev->driver->pm) { + info = "noirq driver "; + callback = pm_noirq_op(dev->driver->pm, state); + } + + return dpm_run_callback(callback, dev, state, info); +} + +/** + * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices. + * @state: PM transition of the system being carried out. + * + * Prevent device drivers from receiving interrupts and call the "noirq" suspend + * handlers for all non-sysdev devices. + */ +static int dpm_suspend_noirq(pm_message_t state) +{ + ktime_t starttime = ktime_get(); + int error = 0; + + suspend_device_irqs(); + mutex_lock(&dpm_list_mtx); + while (!list_empty(&dpm_late_early_list)) { + struct device *dev = to_device(dpm_late_early_list.prev); + + get_device(dev); + mutex_unlock(&dpm_list_mtx); + + error = device_suspend_noirq(dev, state); + + mutex_lock(&dpm_list_mtx); + if (error) { + pm_dev_err(dev, state, " noirq", error); + suspend_stats.failed_suspend_noirq++; + dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); + dpm_save_failed_dev(dev_name(dev)); + put_device(dev); + break; + } + if (!list_empty(&dev->power.entry)) + list_move(&dev->power.entry, &dpm_noirq_list); + put_device(dev); + + if (pm_wakeup_pending()) { + error = -EBUSY; + break; + } + } + mutex_unlock(&dpm_list_mtx); + if (error) + dpm_resume_noirq(resume_event(state)); + else + dpm_show_time(starttime, state, "noirq"); + return error; +} + +/** + * device_suspend_late - Execute a "late suspend" callback for given device. + * @dev: Device to handle. + * @state: PM transition of the system being carried out. + * + * Runtime PM is disabled for @dev while this function is being executed. 
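+ *
+ * This is the mirror of device_resume_early() and runs between the
+ * regular "suspend" phase and the "noirq" phase.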
+ */ +static int device_suspend_late(struct device *dev, pm_message_t state) +{ + pm_callback_t callback = NULL; + char *info = NULL; + + if (dev->pm_domain) { + info = "late power domain "; + callback = pm_late_early_op(&dev->pm_domain->ops, state); + } else if (dev->type && dev->type->pm) { + info = "late type "; + callback = pm_late_early_op(dev->type->pm, state); + } else if (dev->class && dev->class->pm) { + info = "late class "; + callback = pm_late_early_op(dev->class->pm, state); + } else if (dev->bus && dev->bus->pm) { + info = "late bus "; + callback = pm_late_early_op(dev->bus->pm, state); + } + + if (!callback && dev->driver && dev->driver->pm) { + info = "late driver "; + callback = pm_late_early_op(dev->driver->pm, state); + } + + return dpm_run_callback(callback, dev, state, info); +} + +/** + * dpm_suspend_late - Execute "late suspend" callbacks for all devices. + * @state: PM transition of the system being carried out. + */ +static int dpm_suspend_late(pm_message_t state) +{ + ktime_t starttime = ktime_get(); + int error = 0; + + mutex_lock(&dpm_list_mtx); + while (!list_empty(&dpm_suspended_list)) { + struct device *dev = to_device(dpm_suspended_list.prev); + + get_device(dev); + mutex_unlock(&dpm_list_mtx); + + error = device_suspend_late(dev, state); + + mutex_lock(&dpm_list_mtx); + if (error) { + pm_dev_err(dev, state, " late", error); + suspend_stats.failed_suspend_late++; + dpm_save_failed_step(SUSPEND_SUSPEND_LATE); + dpm_save_failed_dev(dev_name(dev)); + put_device(dev); + break; + } + if (!list_empty(&dev->power.entry)) + list_move(&dev->power.entry, &dpm_late_early_list); + put_device(dev); + + if (pm_wakeup_pending()) { + error = -EBUSY; + break; + } + } + mutex_unlock(&dpm_list_mtx); + if (error) + dpm_resume_early(resume_event(state)); + else + dpm_show_time(starttime, state, "late"); + + return error; +} + +/** + * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks. + * @state: PM transition of the system being carried out. + */ +int dpm_suspend_end(pm_message_t state) +{ + int error = dpm_suspend_late(state); + if (error) + return error; + + error = dpm_suspend_noirq(state); + if (error) { + dpm_resume_early(state); + return error; + } + + return 0; +} +EXPORT_SYMBOL_GPL(dpm_suspend_end); + +/** + * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. + * @dev: Device to suspend. + * @state: PM transition of the system being carried out. + * @cb: Suspend callback to execute. + */ +static int legacy_suspend(struct device *dev, pm_message_t state, + int (*cb)(struct device *dev, pm_message_t state)) +{ + int error; + ktime_t calltime; + + calltime = initcall_debug_start(dev); + + error = cb(dev, state); + suspend_report_result(cb, error); + + initcall_debug_report(dev, calltime, error); + + return error; +} + +/** + * device_suspend - Execute "suspend" callbacks for given device. + * @dev: Device to handle. + * @state: PM transition of the system being carried out. + * @async: If true, the device is being suspended asynchronously. 
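+ *
+ * The function waits for the device's children first (children suspend
+ * before their parents), bails out early if a wakeup event is pending,
+ * and arms a stack-allocated dpm_watchdog so a callback that hangs is
+ * reported by dpm_wd_handler() instead of stalling silently.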
+ */ +static int __device_suspend(struct device *dev, pm_message_t state, bool async) +{ + pm_callback_t callback = NULL; + char *info = NULL; + int error = 0; + struct dpm_watchdog wd; + + dpm_wait_for_children(dev, async); + + if (async_error) + goto Complete; + + pm_runtime_get_noresume(dev); + if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) + pm_wakeup_event(dev, 0); + + if (pm_wakeup_pending()) { + pm_runtime_put_sync(dev); + async_error = -EBUSY; + goto Complete; + } + + dpm_wd_set(&wd, dev); + + device_lock(dev); + + if (dev->pm_domain) { + info = "power domain "; + callback = pm_op(&dev->pm_domain->ops, state); + goto Run; + } + + if (dev->type && dev->type->pm) { + info = "type "; + callback = pm_op(dev->type->pm, state); + goto Run; + } + + if (dev->class) { + if (dev->class->pm) { + info = "class "; + callback = pm_op(dev->class->pm, state); + goto Run; + } else if (dev->class->suspend) { + pm_dev_dbg(dev, state, "legacy class "); + error = legacy_suspend(dev, state, dev->class->suspend); + goto End; + } + } + + if (dev->bus) { + if (dev->bus->pm) { + info = "bus "; + callback = pm_op(dev->bus->pm, state); + } else if (dev->bus->suspend) { + pm_dev_dbg(dev, state, "legacy bus "); + error = legacy_suspend(dev, state, dev->bus->suspend); + goto End; + } + } + + Run: + if (!callback && dev->driver && dev->driver->pm) { + info = "driver "; + callback = pm_op(dev->driver->pm, state); + } + + error = dpm_run_callback(callback, dev, state, info); + + End: + if (!error) { + dev->power.is_suspended = true; + if (dev->power.wakeup_path + && dev->parent && !dev->parent->power.ignore_children) + dev->parent->power.wakeup_path = true; + } + + device_unlock(dev); + + dpm_wd_clear(&wd); + + Complete: + complete_all(&dev->power.completion); + + if (error) { + pm_runtime_put_sync(dev); + async_error = error; + } else if (dev->power.is_suspended) { + __pm_runtime_disable(dev, false); + } + + return error; +} + +static void async_suspend(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = __device_suspend(dev, pm_transition, true); + if (error) { + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, pm_transition, " async", error); + } + + put_device(dev); +} + +static int device_suspend(struct device *dev) +{ + INIT_COMPLETION(dev->power.completion); + + if (pm_async_enabled && dev->power.async_suspend) { + get_device(dev); + async_schedule(async_suspend, dev); + return 0; + } + + return __device_suspend(dev, pm_transition, false); +} + +/** + * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. + * @state: PM transition of the system being carried out. 
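+ *
+ * Devices are taken from the tail of dpm_prepared_list, i.e. in reverse
+ * order of registration; devices with async_suspend set are scheduled
+ * through async_schedule() and collected at the end with
+ * async_synchronize_full().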
+ */ +int dpm_suspend(pm_message_t state) +{ + ktime_t starttime = ktime_get(); + int error = 0; + int tmp; + + might_sleep(); + + mutex_lock(&dpm_list_mtx); + pm_transition = state; + async_error = 0; + + tmp = console_loglevel; + console_loglevel = 7; + while (!list_empty(&dpm_prepared_list)) { + struct device *dev = to_device(dpm_prepared_list.prev); + + get_device(dev); + mutex_unlock(&dpm_list_mtx); + + error = device_suspend(dev); + + mutex_lock(&dpm_list_mtx); + if (error) { + pm_dev_err(dev, state, "", error); + dpm_save_failed_dev(dev_name(dev)); + put_device(dev); + break; + } + if (!list_empty(&dev->power.entry)) + list_move(&dev->power.entry, &dpm_suspended_list); + put_device(dev); + if (async_error) + break; + } + console_loglevel = tmp; + mutex_unlock(&dpm_list_mtx); + async_synchronize_full(); + if (!error) + error = async_error; + if (error) { + suspend_stats.failed_suspend++; + dpm_save_failed_step(SUSPEND_SUSPEND); + } else + dpm_show_time(starttime, state, NULL); + return error; +} + +/** + * device_prepare - Prepare a device for system power transition. + * @dev: Device to handle. + * @state: PM transition of the system being carried out. + * + * Execute the ->prepare() callback(s) for given device. No new children of the + * device may be registered after this function has returned. + */ +static int device_prepare(struct device *dev, pm_message_t state) +{ + int (*callback)(struct device *) = NULL; + char *info = NULL; + int error = 0; + + device_lock(dev); + + dev->power.wakeup_path = device_may_wakeup(dev); + + if (dev->pm_domain) { + info = "preparing power domain "; + callback = dev->pm_domain->ops.prepare; + } else if (dev->type && dev->type->pm) { + info = "preparing type "; + callback = dev->type->pm->prepare; + } else if (dev->class && dev->class->pm) { + info = "preparing class "; + callback = dev->class->pm->prepare; + } else if (dev->bus && dev->bus->pm) { + info = "preparing bus "; + callback = dev->bus->pm->prepare; + } + + if (!callback && dev->driver && dev->driver->pm) { + info = "preparing driver "; + callback = dev->driver->pm->prepare; + } + + if (callback) { + error = callback(dev); + suspend_report_result(callback, error); + } + + device_unlock(dev); + + return error; +} + +/** + * dpm_prepare - Prepare all non-sysdev devices for a system PM transition. + * @state: PM transition of the system being carried out. + * + * Execute the ->prepare() callback(s) for all devices. + */ +int dpm_prepare(pm_message_t state) +{ + int error = 0; + + might_sleep(); + + mutex_lock(&dpm_list_mtx); + while (!list_empty(&dpm_list)) { + struct device *dev = to_device(dpm_list.next); + + get_device(dev); + mutex_unlock(&dpm_list_mtx); + + error = device_prepare(dev, state); + + mutex_lock(&dpm_list_mtx); + if (error) { + if (error == -EAGAIN) { + put_device(dev); + error = 0; + continue; + } + printk(KERN_INFO "PM: Device %s not prepared " + "for power transition: code %d\n", + dev_name(dev), error); + put_device(dev); + break; + } + dev->power.is_prepared = true; + if (!list_empty(&dev->power.entry)) + list_move_tail(&dev->power.entry, &dpm_prepared_list); + put_device(dev); + } + mutex_unlock(&dpm_list_mtx); + return error; +} + +/** + * dpm_suspend_start - Prepare devices for PM transition and suspend them. + * @state: PM transition of the system being carried out. + * + * Prepare all non-sysdev devices for system PM transition and execute "suspend" + * callbacks for them. 
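+ *
+ * A simplified sketch of how a platform suspend path pairs this with
+ * dpm_suspend_end() (error handling elided):
+ *
+ *	error = dpm_suspend_start(PMSG_SUSPEND);
+ *	if (!error)
+ *		error = dpm_suspend_end(PMSG_SUSPEND);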
+ */ +int dpm_suspend_start(pm_message_t state) +{ + int error; + + error = dpm_prepare(state); + if (error) { + suspend_stats.failed_prepare++; + dpm_save_failed_step(SUSPEND_PREPARE); + } else + error = dpm_suspend(state); + if (cpu_trustzone_enabled == 1) { + wmt_smc(WMT_SMC_CMD_DEVICE_SUSPEND, 0x1001); + } + return error; +} +EXPORT_SYMBOL_GPL(dpm_suspend_start); + +void __suspend_report_result(const char *function, void *fn, int ret) +{ + if (ret) + printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret); +} +EXPORT_SYMBOL_GPL(__suspend_report_result); + +/** + * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete. + * @dev: Device to wait for. + * @subordinate: Device that needs to wait for @dev. + */ +int device_pm_wait_for_dev(struct device *subordinate, struct device *dev) +{ + dpm_wait(dev, subordinate->power.async_suspend); + return async_error; +} +EXPORT_SYMBOL_GPL(device_pm_wait_for_dev); diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c new file mode 100644 index 00000000..ac993eaf --- /dev/null +++ b/drivers/base/power/opp.c @@ -0,0 +1,676 @@ +/* + * Generic OPP Interface + * + * Copyright (C) 2009-2010 Texas Instruments Incorporated. + * Nishanth Menon + * Romit Dasgupta + * Kevin Hilman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/cpufreq.h> +#include <linux/device.h> +#include <linux/list.h> +#include <linux/rculist.h> +#include <linux/rcupdate.h> +#include <linux/opp.h> + +/* + * Internal data structure organization with the OPP layer library is as + * follows: + * dev_opp_list (root) + * |- device 1 (represents voltage domain 1) + * | |- opp 1 (availability, freq, voltage) + * | |- opp 2 .. + * ... ... + * | `- opp n .. + * |- device 2 (represents the next voltage domain) + * ... + * `- device m (represents mth voltage domain) + * device 1, 2.. are represented by dev_opp structure while each opp + * is represented by the opp structure. + */ + +/** + * struct opp - Generic OPP description structure + * @node: opp list node. The nodes are maintained throughout the lifetime + * of boot. It is expected only an optimal set of OPPs are + * added to the library by the SoC framework. + * RCU usage: opp list is traversed with RCU locks. node + * modification is possible realtime, hence the modifications + * are protected by the dev_opp_list_lock for integrity. + * IMPORTANT: the opp nodes should be maintained in increasing + * order. + * @available: true/false - marks if this OPP as available or not + * @rate: Frequency in hertz + * @u_volt: Nominal voltage in microvolts corresponding to this OPP + * @dev_opp: points back to the device_opp struct this opp belongs to + * + * This structure stores the OPP information for a given device. + */ +struct opp { + struct list_head node; + + bool available; + unsigned long rate; + unsigned long u_volt; + + struct device_opp *dev_opp; +}; + +/** + * struct device_opp - Device opp structure + * @node: list node - contains the devices with OPPs that + * have been registered. Nodes once added are not modified in this + * list. 
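Before moving into the OPP library: device_pm_wait_for_dev(), exported a few lines up, is the hook an async-suspend driver can use to order itself against one specific other device. A hedged sketch; struct foo_priv and foo_reinit() are invented for illustration.

#include <linux/device.h>
#include <linux/pm.h>

struct foo_priv {
	struct device *companion;	/* the device we must not outrun */
};

static int foo_reinit(struct foo_priv *priv)
{
	return 0;	/* placeholder re-initialization */
}

static int foo_resume(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);
	int error;

	/* Block until the companion's own PM transition has completed. */
	error = device_pm_wait_for_dev(dev, priv->companion);
	if (error)
		return error;

	return foo_reinit(priv);
}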
+ * RCU usage: nodes are not modified in the list of device_opp, + * however addition is possible and is secured by dev_opp_list_lock + * @dev: device pointer + * @head: notifier head to notify the OPP availability changes. + * @opp_list: list of opps + * + * This is an internal data structure maintaining the link to opps attached to + * a device. This structure is not meant to be shared to users as it is + * meant for book keeping and private to OPP library + */ +struct device_opp { + struct list_head node; + + struct device *dev; + struct srcu_notifier_head head; + struct list_head opp_list; +}; + +/* + * The root of the list of all devices. All device_opp structures branch off + * from here, with each device_opp containing the list of opp it supports in + * various states of availability. + */ +static LIST_HEAD(dev_opp_list); +/* Lock to allow exclusive modification to the device and opp lists */ +static DEFINE_MUTEX(dev_opp_list_lock); + +/** + * find_device_opp() - find device_opp struct using device pointer + * @dev: device pointer used to lookup device OPPs + * + * Search list of device OPPs for one containing matching device. Does a RCU + * reader operation to grab the pointer needed. + * + * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or + * -EINVAL based on type of error. + * + * Locking: This function must be called under rcu_read_lock(). device_opp + * is a RCU protected pointer. This means that device_opp is valid as long + * as we are under RCU lock. + */ +static struct device_opp *find_device_opp(struct device *dev) +{ + struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); + + if (unlikely(IS_ERR_OR_NULL(dev))) { + pr_err("%s: Invalid parameters\n", __func__); + return ERR_PTR(-EINVAL); + } + + list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) { + if (tmp_dev_opp->dev == dev) { + dev_opp = tmp_dev_opp; + break; + } + } + + return dev_opp; +} + +/** + * opp_get_voltage() - Gets the voltage corresponding to an available opp + * @opp: opp for which voltage has to be returned for + * + * Return voltage in micro volt corresponding to the opp, else + * return 0 + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. This means that opp which could have been fetched by + * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are + * under RCU lock. The pointer returned by the opp_find_freq family must be + * used in the same section as the usage of this function with the pointer + * prior to unlocking with rcu_read_unlock() to maintain the integrity of the + * pointer. + */ +unsigned long opp_get_voltage(struct opp *opp) +{ + struct opp *tmp_opp; + unsigned long v = 0; + + tmp_opp = rcu_dereference(opp); + if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) + pr_err("%s: Invalid parameters\n", __func__); + else + v = tmp_opp->u_volt; + + return v; +} + +/** + * opp_get_freq() - Gets the frequency corresponding to an available opp + * @opp: opp for which frequency has to be returned for + * + * Return frequency in hertz corresponding to the opp, else + * return 0 + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. This means that opp which could have been fetched by + * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are + * under RCU lock. 
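The locking rules above are easiest to see as one complete call sequence. A minimal sketch, assuming only the opp.h API defined in this file: the scalar results remain valid after rcu_read_unlock(), the opp pointer does not.

#include <linux/err.h>
#include <linux/opp.h>
#include <linux/rcupdate.h>

static int foo_pick_opp(struct device *dev, unsigned long *hz,
			unsigned long *uv)
{
	struct opp *opp;
	unsigned long freq = *hz;

	rcu_read_lock();
	opp = opp_find_freq_ceil(dev, &freq);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	*hz = freq;			/* rounded-up rate, in Hz */
	*uv = opp_get_voltage(opp);	/* nominal microvolts */
	rcu_read_unlock();		/* opp must not be used past this */

	return 0;
}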
The pointer returned by the opp_find_freq family must be + * used in the same section as the usage of this function with the pointer + * prior to unlocking with rcu_read_unlock() to maintain the integrity of the + * pointer. + */ +unsigned long opp_get_freq(struct opp *opp) +{ + struct opp *tmp_opp; + unsigned long f = 0; + + tmp_opp = rcu_dereference(opp); + if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) + pr_err("%s: Invalid parameters\n", __func__); + else + f = tmp_opp->rate; + + return f; +} + +/** + * opp_get_opp_count() - Get number of opps available in the opp list + * @dev: device for which we do this operation + * + * This function returns the number of available opps if there are any, + * else returns 0 if none or the corresponding error value. + * + * Locking: This function must be called under rcu_read_lock(). This function + * internally references two RCU protected structures: device_opp and opp which + * are safe as long as we are under a common RCU locked section. + */ +int opp_get_opp_count(struct device *dev) +{ + struct device_opp *dev_opp; + struct opp *temp_opp; + int count = 0; + + dev_opp = find_device_opp(dev); + if (IS_ERR(dev_opp)) { + int r = PTR_ERR(dev_opp); + dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r); + return r; + } + + list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { + if (temp_opp->available) + count++; + } + + return count; +} + +/** + * opp_find_freq_exact() - search for an exact frequency + * @dev: device for which we do this operation + * @freq: frequency to search for + * @available: true/false - match for available opp + * + * Searches for exact match in the opp list and returns pointer to the matching + * opp if found, else returns ERR_PTR in case of error and should be handled + * using IS_ERR. + * + * Note: available is a modifier for the search. if available=true, then the + * match is for exact matching frequency and is available in the stored OPP + * table. if false, the match is for exact frequency which is not available. + * + * This provides a mechanism to enable an opp which is not available currently + * or the opposite as well. + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. The reason for the same is that the opp pointer which is + * returned will remain valid for use with opp_get_{voltage, freq} only while + * under the locked area. The pointer returned must be used prior to unlocking + * with rcu_read_unlock() to maintain the integrity of the pointer. + */ +struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, + bool available) +{ + struct device_opp *dev_opp; + struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); + + dev_opp = find_device_opp(dev); + if (IS_ERR(dev_opp)) { + int r = PTR_ERR(dev_opp); + dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r); + return ERR_PTR(r); + } + + list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { + if (temp_opp->available == available && + temp_opp->rate == freq) { + opp = temp_opp; + break; + } + } + + return opp; +} + +/** + * opp_find_freq_ceil() - Search for an rounded ceil freq + * @dev: device for which we do this operation + * @freq: Start frequency + * + * Search for the matching ceil *available* OPP from a starting freq + * for a device. + * + * Returns matching *opp and refreshes *freq accordingly, else returns + * ERR_PTR in case of error and should be handled using IS_ERR. + * + * Locking: This function must be called under rcu_read_lock(). 
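opp_find_freq_exact() with available=false is the one lookup that matches disabled entries. A short hedged sketch of using it to probe the table before re-enabling a rate:

#include <linux/err.h>
#include <linux/opp.h>
#include <linux/rcupdate.h>

static bool foo_rate_is_disabled(struct device *dev, unsigned long hz)
{
	struct opp *opp;

	rcu_read_lock();
	opp = opp_find_freq_exact(dev, hz, false);	/* disabled only */
	rcu_read_unlock();

	return !IS_ERR(opp);
}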
opp is a rcu + * protected pointer. The reason for the same is that the opp pointer which is + * returned will remain valid for use with opp_get_{voltage, freq} only while + * under the locked area. The pointer returned must be used prior to unlocking + * with rcu_read_unlock() to maintain the integrity of the pointer. + */ +struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) +{ + struct device_opp *dev_opp; + struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); + + if (!dev || !freq) { + dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); + return ERR_PTR(-EINVAL); + } + + dev_opp = find_device_opp(dev); + if (IS_ERR(dev_opp)) + return opp; + + list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { + if (temp_opp->available && temp_opp->rate >= *freq) { + opp = temp_opp; + *freq = opp->rate; + break; + } + } + + return opp; +} + +/** + * opp_find_freq_floor() - Search for a rounded floor freq + * @dev: device for which we do this operation + * @freq: Start frequency + * + * Search for the matching floor *available* OPP from a starting freq + * for a device. + * + * Returns matching *opp and refreshes *freq accordingly, else returns + * ERR_PTR in case of error and should be handled using IS_ERR. + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. The reason for the same is that the opp pointer which is + * returned will remain valid for use with opp_get_{voltage, freq} only while + * under the locked area. The pointer returned must be used prior to unlocking + * with rcu_read_unlock() to maintain the integrity of the pointer. + */ +struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) +{ + struct device_opp *dev_opp; + struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); + + if (!dev || !freq) { + dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); + return ERR_PTR(-EINVAL); + } + + dev_opp = find_device_opp(dev); + if (IS_ERR(dev_opp)) + return opp; + + list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { + if (temp_opp->available) { + /* go to the next node, before choosing prev */ + if (temp_opp->rate > *freq) + break; + else + opp = temp_opp; + } + } + if (!IS_ERR(opp)) + *freq = opp->rate; + + return opp; +} + +/** + * opp_add() - Add an OPP table from a table definitions + * @dev: device for which we do this operation + * @freq: Frequency in Hz for this OPP + * @u_volt: Voltage in uVolts for this OPP + * + * This function adds an opp definition to the opp list and returns status. + * The opp is made available by default and it can be controlled using + * opp_enable/disable functions. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. 
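The ceil search doubles as an iterator: restarting it from one past the last match walks the whole table in ascending frequency order. A sketch of that idiom, under the same RCU rules as above:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/opp.h>
#include <linux/rcupdate.h>

static void foo_dump_opps(struct device *dev)
{
	struct opp *opp;
	unsigned long freq;

	rcu_read_lock();
	for (freq = 0; !IS_ERR(opp = opp_find_freq_ceil(dev, &freq)); freq++)
		dev_info(dev, "OPP: %lu Hz, %lu uV\n",
			 freq, opp_get_voltage(opp));
	rcu_read_unlock();
}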
+ */ +int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) +{ + struct device_opp *dev_opp = NULL; + struct opp *opp, *new_opp; + struct list_head *head; + + /* allocate new OPP node */ + new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL); + if (!new_opp) { + dev_warn(dev, "%s: Unable to create new OPP node\n", __func__); + return -ENOMEM; + } + + /* Hold our list modification lock here */ + mutex_lock(&dev_opp_list_lock); + + /* Check for existing list for 'dev' */ + dev_opp = find_device_opp(dev); + if (IS_ERR(dev_opp)) { + /* + * Allocate a new device OPP table. In the infrequent case + * where a new device is needed to be added, we pay this + * penalty. + */ + dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL); + if (!dev_opp) { + mutex_unlock(&dev_opp_list_lock); + kfree(new_opp); + dev_warn(dev, + "%s: Unable to create device OPP structure\n", + __func__); + return -ENOMEM; + } + + dev_opp->dev = dev; + srcu_init_notifier_head(&dev_opp->head); + INIT_LIST_HEAD(&dev_opp->opp_list); + + /* Secure the device list modification */ + list_add_rcu(&dev_opp->node, &dev_opp_list); + } + + /* populate the opp table */ + new_opp->dev_opp = dev_opp; + new_opp->rate = freq; + new_opp->u_volt = u_volt; + new_opp->available = true; + + /* Insert new OPP in order of increasing frequency */ + head = &dev_opp->opp_list; + list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { + if (new_opp->rate < opp->rate) + break; + else + head = &opp->node; + } + + list_add_rcu(&new_opp->node, head); + mutex_unlock(&dev_opp_list_lock); + + /* + * Notify the changes in the availability of the operable + * frequency/voltage list. + */ + srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp); + return 0; +} + +/** + * opp_set_availability() - helper to set the availability of an opp + * @dev: device for which we do this operation + * @freq: OPP frequency to modify availability + * @availability_req: availability status requested for this opp + * + * Set the availability of an OPP with an RCU operation, opp_{enable,disable} + * share a common logic which is isolated here. + * + * Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the + * copy operation, returns 0 if no modifcation was done OR modification was + * successful. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks to + * keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex locking or synchronize_rcu() blocking calls cannot be used. + */ +static int opp_set_availability(struct device *dev, unsigned long freq, + bool availability_req) +{ + struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); + struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); + int r = 0; + + /* keep the node allocated */ + new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL); + if (!new_opp) { + dev_warn(dev, "%s: Unable to create OPP\n", __func__); + return -ENOMEM; + } + + mutex_lock(&dev_opp_list_lock); + + /* Find the device_opp */ + list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) { + if (dev == tmp_dev_opp->dev) { + dev_opp = tmp_dev_opp; + break; + } + } + if (IS_ERR(dev_opp)) { + r = PTR_ERR(dev_opp); + dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); + goto unlock; + } + + /* Do we have the frequency? 
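Registration with opp_add() is typically a few calls from SoC init code; since insertion sorts by increasing frequency, the order of the calls does not matter. The frequency/voltage pairs below are invented for illustration:

#include <linux/opp.h>

static int __init foo_soc_register_opps(struct device *dev)
{
	int ret;

	ret = opp_add(dev, 300000000, 1025000);		/* 300 MHz @ 1.025 V */
	if (!ret)
		ret = opp_add(dev, 600000000, 1200000);	/* 600 MHz @ 1.200 V */
	if (!ret)
		ret = opp_add(dev, 800000000, 1313000);	/* 800 MHz @ 1.313 V */
	return ret;
}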
*/ + list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) { + if (tmp_opp->rate == freq) { + opp = tmp_opp; + break; + } + } + if (IS_ERR(opp)) { + r = PTR_ERR(opp); + goto unlock; + } + + /* Is update really needed? */ + if (opp->available == availability_req) + goto unlock; + /* copy the old data over */ + *new_opp = *opp; + + /* plug in new node */ + new_opp->available = availability_req; + + list_replace_rcu(&opp->node, &new_opp->node); + mutex_unlock(&dev_opp_list_lock); + synchronize_rcu(); + + /* Notify the change of the OPP availability */ + if (availability_req) + srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE, + new_opp); + else + srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE, + new_opp); + + /* clean up old opp */ + new_opp = opp; + goto out; + +unlock: + mutex_unlock(&dev_opp_list_lock); +out: + kfree(new_opp); + return r; +} + +/** + * opp_enable() - Enable a specific OPP + * @dev: device for which we do this operation + * @freq: OPP frequency to enable + * + * Enables a provided opp. If the operation is valid, this returns 0, else the + * corresponding error value. It is meant to be used for users an OPP available + * after being temporarily made unavailable with opp_disable. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function indirectly uses RCU and mutex locks to keep the + * integrity of the internal data structures. Callers should ensure that + * this function is *NOT* called under RCU protection or in contexts where + * mutex locking or synchronize_rcu() blocking calls cannot be used. + */ +int opp_enable(struct device *dev, unsigned long freq) +{ + return opp_set_availability(dev, freq, true); +} + +/** + * opp_disable() - Disable a specific OPP + * @dev: device for which we do this operation + * @freq: OPP frequency to disable + * + * Disables a provided opp. If the operation is valid, this returns + * 0, else the corresponding error value. It is meant to be a temporary + * control by users to make this OPP not available until the circumstances are + * right to make it available again (with a call to opp_enable). + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function indirectly uses RCU and mutex locks to keep the + * integrity of the internal data structures. Callers should ensure that + * this function is *NOT* called under RCU protection or in contexts where + * mutex locking or synchronize_rcu() blocking calls cannot be used. + */ +int opp_disable(struct device *dev, unsigned long freq) +{ + return opp_set_availability(dev, freq, false); +} + +#ifdef CONFIG_CPU_FREQ +/** + * opp_init_cpufreq_table() - create a cpufreq table for a device + * @dev: device for which we do this operation + * @table: Cpufreq table returned back to caller + * + * Generate a cpufreq table for a provided device- this assumes that the + * opp list is already initialized and ready for usage. + * + * This function allocates required memory for the cpufreq table. It is + * expected that the caller does the required maintenance such as freeing + * the table as required. + * + * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM + * if no memory available for the operation (table is not populated), returns 0 + * if successful and table is populated. + * + * WARNING: It is important for the callers to ensure refreshing their copy of + * the table if any of the mentioned functions have been invoked in the interim. 
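A hedged example of the opp_enable()/opp_disable() pair in action, from a made-up thermal hook. Both calls can block (they take the list mutex, and disable/enable paths use synchronize_rcu()), so no atomic context here.

#include <linux/opp.h>

#define FOO_TURBO_HZ	800000000UL	/* hypothetical top OPP */

static void foo_thermal_event(struct device *dev, bool too_hot)
{
	if (too_hot)
		opp_disable(dev, FOO_TURBO_HZ);
	else
		opp_enable(dev, FOO_TURBO_HZ);
}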
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * To simplify the logic, we pretend we are an updater and hold the relevant
+ * mutex here.
+ * Callers should ensure that this function is *NOT* called under RCU protection
+ * or in contexts where mutex locking cannot be used.
+ */
+int opp_init_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ struct device_opp *dev_opp;
+ struct opp *opp;
+ struct cpufreq_frequency_table *freq_table;
+ int i = 0;
+
+ /* Pretend as if I am an updater */
+ mutex_lock(&dev_opp_list_lock);
+
+ dev_opp = find_device_opp(dev);
+ if (IS_ERR(dev_opp)) {
+ int r = PTR_ERR(dev_opp);
+ mutex_unlock(&dev_opp_list_lock);
+ dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+ return r;
+ }
+
+ freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
+ (opp_get_opp_count(dev) + 1), GFP_KERNEL);
+ if (!freq_table) {
+ mutex_unlock(&dev_opp_list_lock);
+ dev_warn(dev, "%s: Unable to allocate frequency table\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ list_for_each_entry(opp, &dev_opp->opp_list, node) {
+ if (opp->available) {
+ freq_table[i].index = i;
+ freq_table[i].frequency = opp->rate / 1000;
+ i++;
+ }
+ }
+ mutex_unlock(&dev_opp_list_lock);
+
+ freq_table[i].index = i;
+ freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+ *table = &freq_table[0];
+
+ return 0;
+}
+
+/**
+ * opp_free_cpufreq_table() - free the cpufreq table
+ * @dev: device for which we do this operation
+ * @table: table to free
+ *
+ * Free up the table allocated by opp_init_cpufreq_table.
+ */
+void opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ if (!table)
+ return;
+
+ kfree(*table);
+ *table = NULL;
+}
+#endif /* CONFIG_CPU_FREQ */
+
+/**
+ * opp_get_notifier() - find notifier_head of the device with opp
+ * @dev: device pointer used to lookup device OPPs. 
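Typical caller-side lifecycle for the generated table, sketched for a hypothetical cpufreq driver; foo_cpu_dev stands in for something like get_cpu_device(0). As the warning above says, the caller owns the allocation and must free it.

#include <linux/cpufreq.h>
#include <linux/opp.h>

static struct device *foo_cpu_dev;	/* hypothetical CPU device */
static struct cpufreq_frequency_table *foo_freq_table;

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	int ret;

	ret = opp_init_cpufreq_table(foo_cpu_dev, &foo_freq_table);
	if (ret)
		return ret;

	return cpufreq_frequency_table_cpuinfo(policy, foo_freq_table);
}

static int foo_cpufreq_exit(struct cpufreq_policy *policy)
{
	opp_free_cpufreq_table(foo_cpu_dev, &foo_freq_table);
	return 0;
}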
+ */ +struct srcu_notifier_head *opp_get_notifier(struct device *dev) +{ + struct device_opp *dev_opp = find_device_opp(dev); + + if (IS_ERR(dev_opp)) + return ERR_CAST(dev_opp); /* matching type */ + + return &dev_opp->head; +} diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h new file mode 100644 index 00000000..eeb4bff9 --- /dev/null +++ b/drivers/base/power/power.h @@ -0,0 +1,87 @@ +#include <linux/pm_qos.h> + +#ifdef CONFIG_PM_RUNTIME + +extern void pm_runtime_init(struct device *dev); +extern void pm_runtime_remove(struct device *dev); + +#else /* !CONFIG_PM_RUNTIME */ + +static inline void pm_runtime_init(struct device *dev) {} +static inline void pm_runtime_remove(struct device *dev) {} + +#endif /* !CONFIG_PM_RUNTIME */ + +#ifdef CONFIG_PM_SLEEP + +/* kernel/power/main.c */ +extern int pm_async_enabled; + +/* drivers/base/power/main.c */ +extern struct list_head dpm_list; /* The active device list */ + +static inline struct device *to_device(struct list_head *entry) +{ + return container_of(entry, struct device, power.entry); +} + +extern void device_pm_init(struct device *dev); +extern void device_pm_add(struct device *); +extern void device_pm_remove(struct device *); +extern void device_pm_move_before(struct device *, struct device *); +extern void device_pm_move_after(struct device *, struct device *); +extern void device_pm_move_last(struct device *); + +#else /* !CONFIG_PM_SLEEP */ + +static inline void device_pm_init(struct device *dev) +{ + spin_lock_init(&dev->power.lock); + dev->power.power_state = PMSG_INVALID; + pm_runtime_init(dev); +} + +static inline void device_pm_add(struct device *dev) +{ + dev_pm_qos_constraints_init(dev); +} + +static inline void device_pm_remove(struct device *dev) +{ + dev_pm_qos_constraints_destroy(dev); + pm_runtime_remove(dev); +} + +static inline void device_pm_move_before(struct device *deva, + struct device *devb) {} +static inline void device_pm_move_after(struct device *deva, + struct device *devb) {} +static inline void device_pm_move_last(struct device *dev) {} + +#endif /* !CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM + +/* + * sysfs.c + */ + +extern int dpm_sysfs_add(struct device *dev); +extern void dpm_sysfs_remove(struct device *dev); +extern void rpm_sysfs_remove(struct device *dev); +extern int wakeup_sysfs_add(struct device *dev); +extern void wakeup_sysfs_remove(struct device *dev); +extern int pm_qos_sysfs_add(struct device *dev); +extern void pm_qos_sysfs_remove(struct device *dev); + +#else /* CONFIG_PM */ + +static inline int dpm_sysfs_add(struct device *dev) { return 0; } +static inline void dpm_sysfs_remove(struct device *dev) {} +static inline void rpm_sysfs_remove(struct device *dev) {} +static inline int wakeup_sysfs_add(struct device *dev) { return 0; } +static inline void wakeup_sysfs_remove(struct device *dev) {} +static inline int pm_qos_sysfs_add(struct device *dev) { return 0; } +static inline void pm_qos_sysfs_remove(struct device *dev) {} + +#endif diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c new file mode 100644 index 00000000..fd849a2c --- /dev/null +++ b/drivers/base/power/qos.c @@ -0,0 +1,513 @@ +/* + * Devices PM QoS constraints management + * + * Copyright (C) 2011 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
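Returning to opp_get_notifier() at the top of this hunk: consumers watch availability changes through the SRCU head it returns, with OPP_EVENT_ADD/ENABLE/DISABLE raised by opp_add() and opp_set_availability(). A hedged sketch:

#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/opp.h>

static int foo_opp_event(struct notifier_block *nb, unsigned long event,
			 void *data)
{
	pr_info("OPP table changed, event %lu\n", event);
	return NOTIFY_OK;
}

static struct notifier_block foo_opp_nb = {
	.notifier_call	= foo_opp_event,
};

static int foo_watch_opps(struct device *dev)
{
	struct srcu_notifier_head *nh = opp_get_notifier(dev);

	if (IS_ERR(nh))
		return PTR_ERR(nh);

	return srcu_notifier_chain_register(nh, &foo_opp_nb);
}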
+ *
+ *
+ * This module exposes the interface to kernel space for specifying
+ * per-device PM QoS dependencies. It provides infrastructure for registration
+ * of:
+ *
+ * Dependents on a QoS value : register requests
+ * Watchers of QoS value : get notified when target QoS value changes
+ *
+ * This QoS design is best effort based. Dependents register their QoS needs.
+ * Watchers register to keep track of the current QoS needs of the system.
+ * Watchers can register different types of notification callbacks:
+ * . a per-device notification callback using the dev_pm_qos_*_notifier API.
+ * The notification chain data is stored in the per-device constraint
+ * data struct.
+ * . a system-wide notification callback using the dev_pm_qos_*_global_notifier
+ * API. The notification chain data is stored in a static variable.
+ *
+ * Note about the per-device constraint data struct allocation:
+ * . The per-device constraints data struct ptr is stored in the device's
+ * dev_pm_info.
+ * . To minimize the data usage by the per-device constraints, the data struct
+ * is only allocated at the first call to dev_pm_qos_add_request.
+ * . The data is later free'd when the device is removed from the system.
+ * . A global mutex protects the constraints users from the data being
+ * allocated and free'd.
+ */
+
+#include <linux/pm_qos.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/export.h>
+
+#include "power.h"
+
+static DEFINE_MUTEX(dev_pm_qos_mtx);
+
+static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
+
+/**
+ * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
+ * @dev: Device to get the PM QoS constraint value for.
+ *
+ * This routine must be called with dev->power.lock held.
+ */
+s32 __dev_pm_qos_read_value(struct device *dev)
+{
+ struct pm_qos_constraints *c = dev->power.constraints;
+
+ return c ? pm_qos_read_value(c) : 0;
+}
+
+/**
+ * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
+ * @dev: Device to get the PM QoS constraint value for. 
+ */ +s32 dev_pm_qos_read_value(struct device *dev) +{ + unsigned long flags; + s32 ret; + + spin_lock_irqsave(&dev->power.lock, flags); + ret = __dev_pm_qos_read_value(dev); + spin_unlock_irqrestore(&dev->power.lock, flags); + + return ret; +} + +/* + * apply_constraint + * @req: constraint request to apply + * @action: action to perform add/update/remove, of type enum pm_qos_req_action + * @value: defines the qos request + * + * Internal function to update the constraints list using the PM QoS core + * code and if needed call the per-device and the global notification + * callbacks + */ +static int apply_constraint(struct dev_pm_qos_request *req, + enum pm_qos_req_action action, int value) +{ + int ret, curr_value; + + ret = pm_qos_update_target(req->dev->power.constraints, + &req->node, action, value); + + if (ret) { + /* Call the global callbacks if needed */ + curr_value = pm_qos_read_value(req->dev->power.constraints); + blocking_notifier_call_chain(&dev_pm_notifiers, + (unsigned long)curr_value, + req); + } + + return ret; +} + +/* + * dev_pm_qos_constraints_allocate + * @dev: device to allocate data for + * + * Called at the first call to add_request, for constraint data allocation + * Must be called with the dev_pm_qos_mtx mutex held + */ +static int dev_pm_qos_constraints_allocate(struct device *dev) +{ + struct pm_qos_constraints *c; + struct blocking_notifier_head *n; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return -ENOMEM; + + n = kzalloc(sizeof(*n), GFP_KERNEL); + if (!n) { + kfree(c); + return -ENOMEM; + } + BLOCKING_INIT_NOTIFIER_HEAD(n); + + plist_head_init(&c->list); + c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; + c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; + c->type = PM_QOS_MIN; + c->notifiers = n; + + spin_lock_irq(&dev->power.lock); + dev->power.constraints = c; + spin_unlock_irq(&dev->power.lock); + + return 0; +} + +/** + * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer. + * @dev: target device + * + * Called from the device PM subsystem during device insertion under + * device_pm_lock(). + */ +void dev_pm_qos_constraints_init(struct device *dev) +{ + mutex_lock(&dev_pm_qos_mtx); + dev->power.constraints = NULL; + dev->power.power_state = PMSG_ON; + mutex_unlock(&dev_pm_qos_mtx); +} + +/** + * dev_pm_qos_constraints_destroy + * @dev: target device + * + * Called from the device PM subsystem on device removal under device_pm_lock(). + */ +void dev_pm_qos_constraints_destroy(struct device *dev) +{ + struct dev_pm_qos_request *req, *tmp; + struct pm_qos_constraints *c; + + /* + * If the device's PM QoS resume latency limit has been exposed to user + * space, it has to be hidden at this point. 
+ */ + dev_pm_qos_hide_latency_limit(dev); + + mutex_lock(&dev_pm_qos_mtx); + + dev->power.power_state = PMSG_INVALID; + c = dev->power.constraints; + if (!c) + goto out; + + /* Flush the constraints list for the device */ + plist_for_each_entry_safe(req, tmp, &c->list, node) { + /* + * Update constraints list and call the notification + * callbacks if needed + */ + apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); + memset(req, 0, sizeof(*req)); + } + + spin_lock_irq(&dev->power.lock); + dev->power.constraints = NULL; + spin_unlock_irq(&dev->power.lock); + + kfree(c->notifiers); + kfree(c); + + out: + mutex_unlock(&dev_pm_qos_mtx); +} + +/** + * dev_pm_qos_add_request - inserts new qos request into the list + * @dev: target device for the constraint + * @req: pointer to a preallocated handle + * @value: defines the qos request + * + * This function inserts a new entry in the device constraints list of + * requested qos performance characteristics. It recomputes the aggregate + * QoS expectations of parameters and initializes the dev_pm_qos_request + * handle. Caller needs to save this handle for later use in updates and + * removal. + * + * Returns 1 if the aggregated constraint value has changed, + * 0 if the aggregated constraint value has not changed, + * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory + * to allocate for data structures, -ENODEV if the device has just been removed + * from the system. + */ +int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, + s32 value) +{ + int ret = 0; + + if (!dev || !req) /*guard against callers passing in null */ + return -EINVAL; + + if (WARN(dev_pm_qos_request_active(req), + "%s() called for already added request\n", __func__)) + return -EINVAL; + + req->dev = dev; + + mutex_lock(&dev_pm_qos_mtx); + + if (!dev->power.constraints) { + if (dev->power.power_state.event == PM_EVENT_INVALID) { + /* The device has been removed from the system. */ + req->dev = NULL; + ret = -ENODEV; + goto out; + } else { + /* + * Allocate the constraints data on the first call to + * add_request, i.e. only if the data is not already + * allocated and if the device has not been removed. + */ + ret = dev_pm_qos_constraints_allocate(dev); + } + } + + if (!ret) + ret = apply_constraint(req, PM_QOS_ADD_REQ, value); + + out: + mutex_unlock(&dev_pm_qos_mtx); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_add_request); + +/** + * dev_pm_qos_update_request - modifies an existing qos request + * @req : handle to list element holding a dev_pm_qos request to use + * @new_value: defines the qos request + * + * Updates an existing dev PM qos request along with updating the + * target value. + * + * Attempts are made to make this code callable on hot code paths. 
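The whole request lifecycle, sketched with arbitrary latency values. The handle is caller-owned storage, as the kerneldoc above requires; update and remove are defined just below.

#include <linux/pm_qos.h>

static struct dev_pm_qos_request foo_lat_req;

static int foo_start_streaming(struct device *dev)
{
	/* Ask for no more than 100 us of resume latency on @dev. */
	return dev_pm_qos_add_request(dev, &foo_lat_req, 100);
}

static int foo_tighten_latency(struct device *dev)
{
	return dev_pm_qos_update_request(&foo_lat_req, 50);
}

static void foo_stop_streaming(struct device *dev)
{
	dev_pm_qos_remove_request(&foo_lat_req);
}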
+ * + * Returns 1 if the aggregated constraint value has changed, + * 0 if the aggregated constraint value has not changed, + * -EINVAL in case of wrong parameters, -ENODEV if the device has been + * removed from the system + */ +int dev_pm_qos_update_request(struct dev_pm_qos_request *req, + s32 new_value) +{ + int ret = 0; + + if (!req) /*guard against callers passing in null */ + return -EINVAL; + + if (WARN(!dev_pm_qos_request_active(req), + "%s() called for unknown object\n", __func__)) + return -EINVAL; + + mutex_lock(&dev_pm_qos_mtx); + + if (req->dev->power.constraints) { + if (new_value != req->node.prio) + ret = apply_constraint(req, PM_QOS_UPDATE_REQ, + new_value); + } else { + /* Return if the device has been removed */ + ret = -ENODEV; + } + + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); + +/** + * dev_pm_qos_remove_request - modifies an existing qos request + * @req: handle to request list element + * + * Will remove pm qos request from the list of constraints and + * recompute the current target value. Call this on slow code paths. + * + * Returns 1 if the aggregated constraint value has changed, + * 0 if the aggregated constraint value has not changed, + * -EINVAL in case of wrong parameters, -ENODEV if the device has been + * removed from the system + */ +int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) +{ + int ret = 0; + + if (!req) /*guard against callers passing in null */ + return -EINVAL; + + if (WARN(!dev_pm_qos_request_active(req), + "%s() called for unknown object\n", __func__)) + return -EINVAL; + + mutex_lock(&dev_pm_qos_mtx); + + if (req->dev->power.constraints) { + ret = apply_constraint(req, PM_QOS_REMOVE_REQ, + PM_QOS_DEFAULT_VALUE); + memset(req, 0, sizeof(*req)); + } else { + /* Return if the device has been removed */ + ret = -ENODEV; + } + + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request); + +/** + * dev_pm_qos_add_notifier - sets notification entry for changes to target value + * of per-device PM QoS constraints + * + * @dev: target device for the constraint + * @notifier: notifier block managed by caller. + * + * Will register the notifier into a notification chain that gets called + * upon changes to the target value for the device. + * + * If the device's constraints object doesn't exist when this routine is called, + * it will be created (or error code will be returned if that fails). + */ +int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) +{ + int ret = 0; + + mutex_lock(&dev_pm_qos_mtx); + + if (!dev->power.constraints) + ret = dev->power.power_state.event != PM_EVENT_INVALID ? + dev_pm_qos_constraints_allocate(dev) : -ENODEV; + + if (!ret) + ret = blocking_notifier_chain_register( + dev->power.constraints->notifiers, notifier); + + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier); + +/** + * dev_pm_qos_remove_notifier - deletes notification for changes to target value + * of per-device PM QoS constraints + * + * @dev: target device for the constraint + * @notifier: notifier block to be removed. + * + * Will remove the notifier from the notification chain that gets called + * upon changes to the target value. + */ +int dev_pm_qos_remove_notifier(struct device *dev, + struct notifier_block *notifier) +{ + int retval = 0; + + mutex_lock(&dev_pm_qos_mtx); + + /* Silently return if the constraints object is not present. 
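A hedged sketch of the per-device chain, assuming the PM QoS core's convention of passing the new aggregate target as the notifier's action argument (the global chain in apply_constraint() above follows the same convention):

#include <linux/notifier.h>
#include <linux/pm_qos.h>

static int foo_qos_notify(struct notifier_block *nb, unsigned long value,
			  void *data)
{
	pr_debug("device PM QoS target is now %lu\n", value);
	return NOTIFY_OK;
}

static struct notifier_block foo_qos_nb = {
	.notifier_call	= foo_qos_notify,
};

/* Pair dev_pm_qos_add_notifier(dev, &foo_qos_nb) at probe time with
 * dev_pm_qos_remove_notifier(dev, &foo_qos_nb) at remove time. */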
*/ + if (dev->power.constraints) + retval = blocking_notifier_chain_unregister( + dev->power.constraints->notifiers, + notifier); + + mutex_unlock(&dev_pm_qos_mtx); + return retval; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier); + +/** + * dev_pm_qos_add_global_notifier - sets notification entry for changes to + * target value of the PM QoS constraints for any device + * + * @notifier: notifier block managed by caller. + * + * Will register the notifier into a notification chain that gets called + * upon changes to the target value for any device. + */ +int dev_pm_qos_add_global_notifier(struct notifier_block *notifier) +{ + return blocking_notifier_chain_register(&dev_pm_notifiers, notifier); +} +EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier); + +/** + * dev_pm_qos_remove_global_notifier - deletes notification for changes to + * target value of PM QoS constraints for any device + * + * @notifier: notifier block to be removed. + * + * Will remove the notifier from the notification chain that gets called + * upon changes to the target value for any device. + */ +int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier) +{ + return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier); +} +EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier); + +/** + * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor. + * @dev: Device whose ancestor to add the request for. + * @req: Pointer to the preallocated handle. + * @value: Constraint latency value. + */ +int dev_pm_qos_add_ancestor_request(struct device *dev, + struct dev_pm_qos_request *req, s32 value) +{ + struct device *ancestor = dev->parent; + int error = -ENODEV; + + while (ancestor && !ancestor->power.ignore_children) + ancestor = ancestor->parent; + + if (ancestor) + error = dev_pm_qos_add_request(ancestor, req, value); + + if (error) + req->dev = NULL; + + return error; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request); + +#ifdef CONFIG_PM_RUNTIME +static void __dev_pm_qos_drop_user_request(struct device *dev) +{ + dev_pm_qos_remove_request(dev->power.pq_req); + dev->power.pq_req = 0; +} + +/** + * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space. + * @dev: Device whose PM QoS latency limit is to be exposed to user space. + * @value: Initial value of the latency limit. + */ +int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) +{ + struct dev_pm_qos_request *req; + int ret; + + if (!device_is_registered(dev) || value < 0) + return -EINVAL; + + if (dev->power.pq_req) + return -EEXIST; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + ret = dev_pm_qos_add_request(dev, req, value); + if (ret < 0) + return ret; + + dev->power.pq_req = req; + ret = pm_qos_sysfs_add(dev); + if (ret) + __dev_pm_qos_drop_user_request(dev); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); + +/** + * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space. + * @dev: Device whose PM QoS latency limit is to be hidden from user space. 
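Note that the ancestor walk in dev_pm_qos_add_ancestor_request() stops at the first parent whose ignore_children flag is set, i.e. the nearest ancestor actually making power decisions independently of @dev. A hedged usage sketch; the 200 us figure is arbitrary.

#include <linux/pm_qos.h>

static struct dev_pm_qos_request foo_ancestor_req;

static int foo_start_transfer(struct device *dev)
{
	/* Constrain the first ancestor that has ignore_children set. */
	return dev_pm_qos_add_ancestor_request(dev, &foo_ancestor_req, 200);
}

static void foo_end_transfer(struct device *dev)
{
	if (foo_ancestor_req.dev)	/* request was actually added */
		dev_pm_qos_remove_request(&foo_ancestor_req);
}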
+ */ +void dev_pm_qos_hide_latency_limit(struct device *dev) +{ + if (dev->power.pq_req) { + pm_qos_sysfs_remove(dev); + __dev_pm_qos_drop_user_request(dev); + } +} +EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); +#endif /* CONFIG_PM_RUNTIME */ diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c new file mode 100644 index 00000000..59894873 --- /dev/null +++ b/drivers/base/power/runtime.c @@ -0,0 +1,1317 @@ +/* + * drivers/base/power/runtime.c - Helper functions for device runtime PM + * + * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. + * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu> + * + * This file is released under the GPLv2. + */ + +#include <linux/sched.h> +#include <linux/export.h> +#include <linux/pm_runtime.h> +#include <trace/events/rpm.h> +#include "power.h" + +static int rpm_resume(struct device *dev, int rpmflags); +static int rpm_suspend(struct device *dev, int rpmflags); + +/** + * update_pm_runtime_accounting - Update the time accounting of power states + * @dev: Device to update the accounting for + * + * In order to be able to have time accounting of the various power states + * (as used by programs such as PowerTOP to show the effectiveness of runtime + * PM), we need to track the time spent in each state. + * update_pm_runtime_accounting must be called each time before the + * runtime_status field is updated, to account the time in the old state + * correctly. + */ +void update_pm_runtime_accounting(struct device *dev) +{ + unsigned long now = jiffies; + unsigned long delta; + + delta = now - dev->power.accounting_timestamp; + + dev->power.accounting_timestamp = now; + + if (dev->power.disable_depth > 0) + return; + + if (dev->power.runtime_status == RPM_SUSPENDED) + dev->power.suspended_jiffies += delta; + else + dev->power.active_jiffies += delta; +} + +static void __update_runtime_status(struct device *dev, enum rpm_status status) +{ + update_pm_runtime_accounting(dev); + dev->power.runtime_status = status; +} + +/** + * pm_runtime_deactivate_timer - Deactivate given device's suspend timer. + * @dev: Device to handle. + */ +static void pm_runtime_deactivate_timer(struct device *dev) +{ + if (dev->power.timer_expires > 0) { + del_timer(&dev->power.suspend_timer); + dev->power.timer_expires = 0; + } +} + +/** + * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests. + * @dev: Device to handle. + */ +static void pm_runtime_cancel_pending(struct device *dev) +{ + pm_runtime_deactivate_timer(dev); + /* + * In case there's a request pending, make sure its work function will + * return without doing anything. + */ + dev->power.request = RPM_REQ_NONE; +} + +/* + * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time. + * @dev: Device to handle. + * + * Compute the autosuspend-delay expiration time based on the device's + * power.last_busy time. If the delay has already expired or is disabled + * (negative) or the power.use_autosuspend flag isn't set, return 0. + * Otherwise return the expiration time in jiffies (adjusted to be nonzero). + * + * This function may be called either with or without dev->power.lock held. + * Either way it can be racy, since power.last_busy may be updated at any time. 
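For power.use_autosuspend and power.last_busy to matter at all, a driver opts in at probe time. A minimal sketch using the standard pm_runtime.h wrappers; the 2000 ms delay is arbitrary.

#include <linux/pm_runtime.h>

static int foo_probe(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* 2 s idle window */
	pm_runtime_use_autosuspend(dev);	/* sets power.use_autosuspend */
	pm_runtime_enable(dev);
	return 0;
}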
+ */ +unsigned long pm_runtime_autosuspend_expiration(struct device *dev) +{ + int autosuspend_delay; + long elapsed; + unsigned long last_busy; + unsigned long expires = 0; + + if (!dev->power.use_autosuspend) + goto out; + + autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay); + if (autosuspend_delay < 0) + goto out; + + last_busy = ACCESS_ONCE(dev->power.last_busy); + elapsed = jiffies - last_busy; + if (elapsed < 0) + goto out; /* jiffies has wrapped around. */ + + /* + * If the autosuspend_delay is >= 1 second, align the timer by rounding + * up to the nearest second. + */ + expires = last_busy + msecs_to_jiffies(autosuspend_delay); + if (autosuspend_delay >= 1000) + expires = round_jiffies(expires); + expires += !expires; + if (elapsed >= expires - last_busy) + expires = 0; /* Already expired. */ + + out: + return expires; +} +EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration); + +/** + * rpm_check_suspend_allowed - Test whether a device may be suspended. + * @dev: Device to test. + */ +static int rpm_check_suspend_allowed(struct device *dev) +{ + int retval = 0; + + if (dev->power.runtime_error) + retval = -EINVAL; + else if (dev->power.disable_depth > 0) + retval = -EACCES; + else if (atomic_read(&dev->power.usage_count) > 0) + retval = -EAGAIN; + else if (!pm_children_suspended(dev)) + retval = -EBUSY; + + /* Pending resume requests take precedence over suspends. */ + else if ((dev->power.deferred_resume + && dev->power.runtime_status == RPM_SUSPENDING) + || (dev->power.request_pending + && dev->power.request == RPM_REQ_RESUME)) + retval = -EAGAIN; + else if (dev->power.runtime_status == RPM_SUSPENDED) + retval = 1; + + return retval; +} + +/** + * __rpm_callback - Run a given runtime PM callback for a given device. + * @cb: Runtime PM callback to run. + * @dev: Device to run the callback for. + */ +static int __rpm_callback(int (*cb)(struct device *), struct device *dev) + __releases(&dev->power.lock) __acquires(&dev->power.lock) +{ + int retval; + + if (dev->power.irq_safe) + spin_unlock(&dev->power.lock); + else + spin_unlock_irq(&dev->power.lock); + + retval = cb(dev); + + if (dev->power.irq_safe) + spin_lock(&dev->power.lock); + else + spin_lock_irq(&dev->power.lock); + + return retval; +} + +/** + * rpm_idle - Notify device bus type if the device can be suspended. + * @dev: Device to notify the bus type about. + * @rpmflags: Flag bits. + * + * Check if the device's runtime PM status allows it to be suspended. If + * another idle notification has been started earlier, return immediately. If + * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise + * run the ->runtime_idle() callback directly. + * + * This function must be called under dev->power.lock with interrupts disabled. + */ +static int rpm_idle(struct device *dev, int rpmflags) +{ + int (*callback)(struct device *); + int retval; + + trace_rpm_idle(dev, rpmflags); + retval = rpm_check_suspend_allowed(dev); + if (retval < 0) + ; /* Conditions are wrong. */ + + /* Idle notifications are allowed only in the RPM_ACTIVE state. */ + else if (dev->power.runtime_status != RPM_ACTIVE) + retval = -EAGAIN; + + /* + * Any pending request other than an idle notification takes + * precedence over us, except that the timer may be running. + */ + else if (dev->power.request_pending && + dev->power.request > RPM_REQ_IDLE) + retval = -EAGAIN; + + /* Act as though RPM_NOWAIT is always set. 
*/ + else if (dev->power.idle_notification) + retval = -EINPROGRESS; + if (retval) + goto out; + + /* Pending requests need to be canceled. */ + dev->power.request = RPM_REQ_NONE; + + if (dev->power.no_callbacks) { + /* Assume ->runtime_idle() callback would have suspended. */ + retval = rpm_suspend(dev, rpmflags); + goto out; + } + + /* Carry out an asynchronous or a synchronous idle notification. */ + if (rpmflags & RPM_ASYNC) { + dev->power.request = RPM_REQ_IDLE; + if (!dev->power.request_pending) { + dev->power.request_pending = true; + queue_work(pm_wq, &dev->power.work); + } + goto out; + } + + dev->power.idle_notification = true; + + if (dev->pm_domain) + callback = dev->pm_domain->ops.runtime_idle; + else if (dev->type && dev->type->pm) + callback = dev->type->pm->runtime_idle; + else if (dev->class && dev->class->pm) + callback = dev->class->pm->runtime_idle; + else if (dev->bus && dev->bus->pm) + callback = dev->bus->pm->runtime_idle; + else + callback = NULL; + + if (!callback && dev->driver && dev->driver->pm) + callback = dev->driver->pm->runtime_idle; + + if (callback) + __rpm_callback(callback, dev); + + dev->power.idle_notification = false; + wake_up_all(&dev->power.wait_queue); + + out: + trace_rpm_return_int(dev, _THIS_IP_, retval); + return retval; +} + +/** + * rpm_callback - Run a given runtime PM callback for a given device. + * @cb: Runtime PM callback to run. + * @dev: Device to run the callback for. + */ +static int rpm_callback(int (*cb)(struct device *), struct device *dev) +{ + int retval; + + if (!cb) + return -ENOSYS; + + retval = __rpm_callback(cb, dev); + + dev->power.runtime_error = retval; + return retval != -EACCES ? retval : -EIO; +} + +/** + * rpm_suspend - Carry out runtime suspend of given device. + * @dev: Device to suspend. + * @rpmflags: Flag bits. + * + * Check if the device's runtime PM status allows it to be suspended. + * Cancel a pending idle notification, autosuspend or suspend. If + * another suspend has been started earlier, either return immediately + * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC + * flags. If the RPM_ASYNC flag is set then queue a suspend request; + * otherwise run the ->runtime_suspend() callback directly. When + * ->runtime_suspend succeeded, if a deferred resume was requested while + * the callback was running then carry it out, otherwise send an idle + * notification for its parent (if the suspend succeeded and both + * ignore_children of parent->power and irq_safe of dev->power are not set). + * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO + * flag is set and the next autosuspend-delay expiration time is in the + * future, schedule another autosuspend attempt. + * + * This function must be called under dev->power.lock with interrupts disabled. + */ +static int rpm_suspend(struct device *dev, int rpmflags) + __releases(&dev->power.lock) __acquires(&dev->power.lock) +{ + int (*callback)(struct device *); + struct device *parent = NULL; + int retval; + + trace_rpm_suspend(dev, rpmflags); + + repeat: + retval = rpm_check_suspend_allowed(dev); + + if (retval < 0) + ; /* Conditions are wrong. */ + + /* Synchronous suspends are not allowed in the RPM_RESUMING state. */ + else if (dev->power.runtime_status == RPM_RESUMING && + !(rpmflags & RPM_ASYNC)) + retval = -EAGAIN; + if (retval) + goto out; + + /* If the autosuspend_delay time hasn't expired yet, reschedule. 
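Driver-side counterparts that the callback lookup chain above (domain, then type, class, bus, finally driver) can land on, sketched with empty bodies:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* Gate clocks / cut power; the device is quiescent here. */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* Restore power and clocks before I/O restarts. */
	return 0;
}

static int foo_runtime_idle(struct device *dev)
{
	/* Nothing to flush: ask the core to suspend right away. */
	return pm_runtime_suspend(dev);
}

static const struct dev_pm_ops foo_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
			   foo_runtime_idle)
};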
*/ + if ((rpmflags & RPM_AUTO) + && dev->power.runtime_status != RPM_SUSPENDING) { + unsigned long expires = pm_runtime_autosuspend_expiration(dev); + + if (expires != 0) { + /* Pending requests need to be canceled. */ + dev->power.request = RPM_REQ_NONE; + + /* + * Optimization: If the timer is already running and is + * set to expire at or before the autosuspend delay, + * avoid the overhead of resetting it. Just let it + * expire; pm_suspend_timer_fn() will take care of the + * rest. + */ + if (!(dev->power.timer_expires && time_before_eq( + dev->power.timer_expires, expires))) { + dev->power.timer_expires = expires; + mod_timer(&dev->power.suspend_timer, expires); + } + dev->power.timer_autosuspends = 1; + goto out; + } + } + + /* Other scheduled or pending requests need to be canceled. */ + pm_runtime_cancel_pending(dev); + + if (dev->power.runtime_status == RPM_SUSPENDING) { + DEFINE_WAIT(wait); + + if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) { + retval = -EINPROGRESS; + goto out; + } + + if (dev->power.irq_safe) { + spin_unlock(&dev->power.lock); + + cpu_relax(); + + spin_lock(&dev->power.lock); + goto repeat; + } + + /* Wait for the other suspend running in parallel with us. */ + for (;;) { + prepare_to_wait(&dev->power.wait_queue, &wait, + TASK_UNINTERRUPTIBLE); + if (dev->power.runtime_status != RPM_SUSPENDING) + break; + + spin_unlock_irq(&dev->power.lock); + + schedule(); + + spin_lock_irq(&dev->power.lock); + } + finish_wait(&dev->power.wait_queue, &wait); + goto repeat; + } + + dev->power.deferred_resume = false; + if (dev->power.no_callbacks) + goto no_callback; /* Assume success. */ + + /* Carry out an asynchronous or a synchronous suspend. */ + if (rpmflags & RPM_ASYNC) { + dev->power.request = (rpmflags & RPM_AUTO) ? + RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND; + if (!dev->power.request_pending) { + dev->power.request_pending = true; + queue_work(pm_wq, &dev->power.work); + } + goto out; + } + + if (__dev_pm_qos_read_value(dev) < 0) { + /* Negative PM QoS constraint means "never suspend". */ + retval = -EPERM; + goto out; + } + + __update_runtime_status(dev, RPM_SUSPENDING); + + if (dev->pm_domain) + callback = dev->pm_domain->ops.runtime_suspend; + else if (dev->type && dev->type->pm) + callback = dev->type->pm->runtime_suspend; + else if (dev->class && dev->class->pm) + callback = dev->class->pm->runtime_suspend; + else if (dev->bus && dev->bus->pm) + callback = dev->bus->pm->runtime_suspend; + else + callback = NULL; + + if (!callback && dev->driver && dev->driver->pm) + callback = dev->driver->pm->runtime_suspend; + + retval = rpm_callback(callback, dev); + if (retval) + goto fail; + + no_callback: + __update_runtime_status(dev, RPM_SUSPENDED); + pm_runtime_deactivate_timer(dev); + + if (dev->parent) { + parent = dev->parent; + atomic_add_unless(&parent->power.child_count, -1, 0); + } + wake_up_all(&dev->power.wait_queue); + + if (dev->power.deferred_resume) { + rpm_resume(dev, 0); + retval = -EAGAIN; + goto out; + } + + /* Maybe the parent is now able to suspend. 
*/ + if (parent && !parent->power.ignore_children && !dev->power.irq_safe) { + spin_unlock(&dev->power.lock); + + spin_lock(&parent->power.lock); + rpm_idle(parent, RPM_ASYNC); + spin_unlock(&parent->power.lock); + + spin_lock(&dev->power.lock); + } + + out: + trace_rpm_return_int(dev, _THIS_IP_, retval); + + return retval; + + fail: + __update_runtime_status(dev, RPM_ACTIVE); + dev->power.deferred_resume = false; + wake_up_all(&dev->power.wait_queue); + + if (retval == -EAGAIN || retval == -EBUSY) { + dev->power.runtime_error = 0; + + /* + * If the callback routine failed an autosuspend, and + * if the last_busy time has been updated so that there + * is a new autosuspend expiration time, automatically + * reschedule another autosuspend. + */ + if ((rpmflags & RPM_AUTO) && + pm_runtime_autosuspend_expiration(dev) != 0) + goto repeat; + } else { + pm_runtime_cancel_pending(dev); + } + goto out; +} + +/** + * rpm_resume - Carry out runtime resume of given device. + * @dev: Device to resume. + * @rpmflags: Flag bits. + * + * Check if the device's runtime PM status allows it to be resumed. Cancel + * any scheduled or pending requests. If another resume has been started + * earlier, either return immediately or wait for it to finish, depending on the + * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in + * parallel with this function, either tell the other process to resume after + * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC + * flag is set then queue a resume request; otherwise run the + * ->runtime_resume() callback directly. Queue an idle notification for the + * device if the resume succeeded. + * + * This function must be called under dev->power.lock with interrupts disabled. + */ +static int rpm_resume(struct device *dev, int rpmflags) + __releases(&dev->power.lock) __acquires(&dev->power.lock) +{ + int (*callback)(struct device *); + struct device *parent = NULL; + int retval = 0; + + trace_rpm_resume(dev, rpmflags); + + repeat: + if (dev->power.runtime_error) + retval = -EINVAL; + else if (dev->power.disable_depth > 0) + retval = -EACCES; + if (retval) + goto out; + + /* + * Other scheduled or pending requests need to be canceled. Small + * optimization: If an autosuspend timer is running, leave it running + * rather than cancelling it now only to restart it again in the near + * future. + */ + dev->power.request = RPM_REQ_NONE; + if (!dev->power.timer_autosuspends) + pm_runtime_deactivate_timer(dev); + + if (dev->power.runtime_status == RPM_ACTIVE) { + retval = 1; + goto out; + } + + if (dev->power.runtime_status == RPM_RESUMING + || dev->power.runtime_status == RPM_SUSPENDING) { + DEFINE_WAIT(wait); + + if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) { + if (dev->power.runtime_status == RPM_SUSPENDING) + dev->power.deferred_resume = true; + else + retval = -EINPROGRESS; + goto out; + } + + if (dev->power.irq_safe) { + spin_unlock(&dev->power.lock); + + cpu_relax(); + + spin_lock(&dev->power.lock); + goto repeat; + } + + /* Wait for the operation carried out in parallel with us. */ + for (;;) { + prepare_to_wait(&dev->power.wait_queue, &wait, + TASK_UNINTERRUPTIBLE); + if (dev->power.runtime_status != RPM_RESUMING + && dev->power.runtime_status != RPM_SUSPENDING) + break; + + spin_unlock_irq(&dev->power.lock); + + schedule(); + + spin_lock_irq(&dev->power.lock); + } + finish_wait(&dev->power.wait_queue, &wait); + goto repeat; + } + + /* + * See if we can skip waking up the parent. 
This is safe only if + * power.no_callbacks is set, because otherwise we don't know whether + * the resume will actually succeed. + */ + if (dev->power.no_callbacks && !parent && dev->parent) { + spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING); + if (dev->parent->power.disable_depth > 0 + || dev->parent->power.ignore_children + || dev->parent->power.runtime_status == RPM_ACTIVE) { + atomic_inc(&dev->parent->power.child_count); + spin_unlock(&dev->parent->power.lock); + goto no_callback; /* Assume success. */ + } + spin_unlock(&dev->parent->power.lock); + } + + /* Carry out an asynchronous or a synchronous resume. */ + if (rpmflags & RPM_ASYNC) { + dev->power.request = RPM_REQ_RESUME; + if (!dev->power.request_pending) { + dev->power.request_pending = true; + queue_work(pm_wq, &dev->power.work); + } + retval = 0; + goto out; + } + + if (!parent && dev->parent) { + /* + * Increment the parent's usage counter and resume it if + * necessary. Not needed if dev is irq-safe; then the + * parent is permanently resumed. + */ + parent = dev->parent; + if (dev->power.irq_safe) + goto skip_parent; + spin_unlock(&dev->power.lock); + + pm_runtime_get_noresume(parent); + + spin_lock(&parent->power.lock); + /* + * We can resume if the parent's runtime PM is disabled or it + * is set to ignore children. + */ + if (!parent->power.disable_depth + && !parent->power.ignore_children) { + rpm_resume(parent, 0); + if (parent->power.runtime_status != RPM_ACTIVE) + retval = -EBUSY; + } + spin_unlock(&parent->power.lock); + + spin_lock(&dev->power.lock); + if (retval) + goto out; + goto repeat; + } + skip_parent: + + if (dev->power.no_callbacks) + goto no_callback; /* Assume success. */ + + __update_runtime_status(dev, RPM_RESUMING); + + if (dev->pm_domain) + callback = dev->pm_domain->ops.runtime_resume; + else if (dev->type && dev->type->pm) + callback = dev->type->pm->runtime_resume; + else if (dev->class && dev->class->pm) + callback = dev->class->pm->runtime_resume; + else if (dev->bus && dev->bus->pm) + callback = dev->bus->pm->runtime_resume; + else + callback = NULL; + + if (!callback && dev->driver && dev->driver->pm) + callback = dev->driver->pm->runtime_resume; + + retval = rpm_callback(callback, dev); + if (retval) { + __update_runtime_status(dev, RPM_SUSPENDED); + pm_runtime_cancel_pending(dev); + } else { + no_callback: + __update_runtime_status(dev, RPM_ACTIVE); + if (parent) + atomic_inc(&parent->power.child_count); + } + wake_up_all(&dev->power.wait_queue); + + if (!retval) + rpm_idle(dev, RPM_ASYNC); + + out: + if (parent && !dev->power.irq_safe) { + spin_unlock_irq(&dev->power.lock); + + pm_runtime_put(parent); + + spin_lock_irq(&dev->power.lock); + } + + trace_rpm_return_int(dev, _THIS_IP_, retval); + + return retval; +} + +/** + * pm_runtime_work - Universal runtime PM work function. + * @work: Work structure used for scheduling the execution of this function. + * + * Use @work to get the device object the work is to be done for, determine what + * is to be done and execute the appropriate runtime PM function. 
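The irq_safe special-casing threaded through rpm_suspend() and rpm_resume() (plain spin_lock instead of spin_lock_irq, a cpu_relax() busy-wait instead of sleeping) exists so that, once pm_runtime_irq_safe(dev) has been called, the synchronous entry points become legal in atomic context. A hedged sketch:

#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

static irqreturn_t foo_irq(int irq, void *data)
{
	struct device *dev = data;

	/* Legal in hardirq context only because of pm_runtime_irq_safe(). */
	pm_runtime_get_sync(dev);

	/* ... acknowledge and service the hardware ... */

	pm_runtime_put(dev);	/* asynchronous put: never sleeps */
	return IRQ_HANDLED;
}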
+ */
+static void pm_runtime_work(struct work_struct *work)
+{
+	struct device *dev = container_of(work, struct device, power.work);
+	enum rpm_request req;
+
+	spin_lock_irq(&dev->power.lock);
+
+	if (!dev->power.request_pending)
+		goto out;
+
+	req = dev->power.request;
+	dev->power.request = RPM_REQ_NONE;
+	dev->power.request_pending = false;
+
+	switch (req) {
+	case RPM_REQ_NONE:
+		break;
+	case RPM_REQ_IDLE:
+		rpm_idle(dev, RPM_NOWAIT);
+		break;
+	case RPM_REQ_SUSPEND:
+		rpm_suspend(dev, RPM_NOWAIT);
+		break;
+	case RPM_REQ_AUTOSUSPEND:
+		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
+		break;
+	case RPM_REQ_RESUME:
+		rpm_resume(dev, RPM_NOWAIT);
+		break;
+	}
+
+ out:
+	spin_unlock_irq(&dev->power.lock);
+}
+
+/**
+ * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
+ * @data: Device pointer passed by pm_schedule_suspend().
+ *
+ * Check if the time is right and queue a suspend request.
+ */
+static void pm_suspend_timer_fn(unsigned long data)
+{
+	struct device *dev = (struct device *)data;
+	unsigned long flags;
+	unsigned long expires;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	expires = dev->power.timer_expires;
+	/* If 'expires' is after 'jiffies' we've been called too early. */
+	if (expires > 0 && !time_after(expires, jiffies)) {
+		dev->power.timer_expires = 0;
+		rpm_suspend(dev, dev->power.timer_autosuspends ?
+		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
+	}
+
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+
+/**
+ * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
+ * @dev: Device to suspend.
+ * @delay: Time to wait before submitting a suspend request, in milliseconds.
+ */
+int pm_schedule_suspend(struct device *dev, unsigned int delay)
+{
+	unsigned long flags;
+	int retval;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	if (!delay) {
+		retval = rpm_suspend(dev, RPM_ASYNC);
+		goto out;
+	}
+
+	retval = rpm_check_suspend_allowed(dev);
+	if (retval)
+		goto out;
+
+	/* Other scheduled or pending requests need to be canceled. */
+	pm_runtime_cancel_pending(dev);
+
+	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
+	dev->power.timer_expires += !dev->power.timer_expires;
+	dev->power.timer_autosuspends = 0;
+	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
+
+ out:
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(pm_schedule_suspend);
+
+/**
+ * __pm_runtime_idle - Entry point for runtime idle operations.
+ * @dev: Device to send idle notification for.
+ * @rpmflags: Flag bits.
+ *
+ * If the RPM_GET_PUT flag is set, decrement the device's usage count and
+ * return immediately if it is larger than zero. Then carry out an idle
+ * notification, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
+ */
+int __pm_runtime_idle(struct device *dev, int rpmflags)
+{
+	unsigned long flags;
+	int retval;
+
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
+	if (rpmflags & RPM_GET_PUT) {
+		if (!atomic_dec_and_test(&dev->power.usage_count))
+			return 0;
+	}
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	retval = rpm_idle(dev, rpmflags);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_idle);
+
+/**
+ * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
+ * @dev: Device to suspend.
+ * @rpmflags: Flag bits.
+ * + * If the RPM_GET_PUT flag is set, decrement the device's usage count and + * return immediately if it is larger than zero. Then carry out a suspend, + * either synchronous or asynchronous. + * + * This routine may be called in atomic context if the RPM_ASYNC flag is set, + * or if pm_runtime_irq_safe() has been called. + */ +int __pm_runtime_suspend(struct device *dev, int rpmflags) +{ + unsigned long flags; + int retval; + + might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); + + if (rpmflags & RPM_GET_PUT) { + if (!atomic_dec_and_test(&dev->power.usage_count)) + return 0; + } + + spin_lock_irqsave(&dev->power.lock, flags); + retval = rpm_suspend(dev, rpmflags); + spin_unlock_irqrestore(&dev->power.lock, flags); + + return retval; +} +EXPORT_SYMBOL_GPL(__pm_runtime_suspend); + +/** + * __pm_runtime_resume - Entry point for runtime resume operations. + * @dev: Device to resume. + * @rpmflags: Flag bits. + * + * If the RPM_GET_PUT flag is set, increment the device's usage count. Then + * carry out a resume, either synchronous or asynchronous. + * + * This routine may be called in atomic context if the RPM_ASYNC flag is set, + * or if pm_runtime_irq_safe() has been called. + */ +int __pm_runtime_resume(struct device *dev, int rpmflags) +{ + unsigned long flags; + int retval; + + might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); + + if (rpmflags & RPM_GET_PUT) + atomic_inc(&dev->power.usage_count); + + spin_lock_irqsave(&dev->power.lock, flags); + retval = rpm_resume(dev, rpmflags); + spin_unlock_irqrestore(&dev->power.lock, flags); + + return retval; +} +EXPORT_SYMBOL_GPL(__pm_runtime_resume); + +/** + * __pm_runtime_set_status - Set runtime PM status of a device. + * @dev: Device to handle. + * @status: New runtime PM status of the device. + * + * If runtime PM of the device is disabled or its power.runtime_error field is + * different from zero, the status may be changed either to RPM_ACTIVE, or to + * RPM_SUSPENDED, as long as that reflects the actual state of the device. + * However, if the device has a parent and the parent is not active, and the + * parent's power.ignore_children flag is unset, the device's status cannot be + * set to RPM_ACTIVE, so -EBUSY is returned in that case. + * + * If successful, __pm_runtime_set_status() clears the power.runtime_error field + * and the device parent's counter of unsuspended children is modified to + * reflect the new status. If the new status is RPM_SUSPENDED, an idle + * notification request for the parent is submitted. + */ +int __pm_runtime_set_status(struct device *dev, unsigned int status) +{ + struct device *parent = dev->parent; + unsigned long flags; + bool notify_parent = false; + int error = 0; + + if (status != RPM_ACTIVE && status != RPM_SUSPENDED) + return -EINVAL; + + spin_lock_irqsave(&dev->power.lock, flags); + + if (!dev->power.runtime_error && !dev->power.disable_depth) { + error = -EAGAIN; + goto out; + } + + if (dev->power.runtime_status == status) + goto out_set; + + if (status == RPM_SUSPENDED) { + /* It always is possible to set the status to 'suspended'. */ + if (parent) { + atomic_add_unless(&parent->power.child_count, -1, 0); + notify_parent = !parent->power.ignore_children; + } + goto out_set; + } + + if (parent) { + spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING); + + /* + * It is invalid to put an active child under a parent that is + * not active, has runtime PM enabled and the + * 'power.ignore_children' flag unset. 
+		 */
+		if (!parent->power.disable_depth
+		    && !parent->power.ignore_children
+		    && parent->power.runtime_status != RPM_ACTIVE)
+			error = -EBUSY;
+		else if (dev->power.runtime_status == RPM_SUSPENDED)
+			atomic_inc(&parent->power.child_count);
+
+		spin_unlock(&parent->power.lock);
+
+		if (error)
+			goto out;
+	}
+
+ out_set:
+	__update_runtime_status(dev, status);
+	dev->power.runtime_error = 0;
+ out:
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	if (notify_parent)
+		pm_request_idle(parent);
+
+	return error;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
+
+/**
+ * __pm_runtime_barrier - Cancel pending requests and wait for completions.
+ * @dev: Device to handle.
+ *
+ * Flush all pending requests for the device from pm_wq and wait for all
+ * runtime PM operations involving the device in progress to complete.
+ *
+ * Should be called under dev->power.lock with interrupts disabled.
+ */
+static void __pm_runtime_barrier(struct device *dev)
+{
+	pm_runtime_deactivate_timer(dev);
+
+	if (dev->power.request_pending) {
+		dev->power.request = RPM_REQ_NONE;
+		spin_unlock_irq(&dev->power.lock);
+
+		cancel_work_sync(&dev->power.work);
+
+		spin_lock_irq(&dev->power.lock);
+		dev->power.request_pending = false;
+	}
+
+	if (dev->power.runtime_status == RPM_SUSPENDING
+	    || dev->power.runtime_status == RPM_RESUMING
+	    || dev->power.idle_notification) {
+		DEFINE_WAIT(wait);
+
+		/* Suspend, wake-up or idle notification in progress. */
+		for (;;) {
+			prepare_to_wait(&dev->power.wait_queue, &wait,
+					TASK_UNINTERRUPTIBLE);
+			if (dev->power.runtime_status != RPM_SUSPENDING
+			    && dev->power.runtime_status != RPM_RESUMING
+			    && !dev->power.idle_notification)
+				break;
+			spin_unlock_irq(&dev->power.lock);
+
+			schedule();
+
+			spin_lock_irq(&dev->power.lock);
+		}
+		finish_wait(&dev->power.wait_queue, &wait);
+	}
+}
+
+/**
+ * pm_runtime_barrier - Flush pending requests and wait for completions.
+ * @dev: Device to handle.
+ *
+ * Prevent the device from being suspended by incrementing its usage counter and
+ * if there's a pending resume request for the device, wake the device up.
+ * Next, make sure that all pending requests for the device have been flushed
+ * from pm_wq and wait for all runtime PM operations involving the device in
+ * progress to complete.
+ *
+ * Return value:
+ * 1, if there was a resume request pending and the device had to be woken up,
+ * 0, otherwise
+ */
+int pm_runtime_barrier(struct device *dev)
+{
+	int retval = 0;
+
+	pm_runtime_get_noresume(dev);
+	spin_lock_irq(&dev->power.lock);
+
+	if (dev->power.request_pending
+	    && dev->power.request == RPM_REQ_RESUME) {
+		rpm_resume(dev, 0);
+		retval = 1;
+	}
+
+	__pm_runtime_barrier(dev);
+
+	spin_unlock_irq(&dev->power.lock);
+	pm_runtime_put_noidle(dev);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_barrier);
+
+/**
+ * __pm_runtime_disable - Disable runtime PM of a device.
+ * @dev: Device to handle.
+ * @check_resume: If set, check if there's a resume request for the device.
+ *
+ * Increment power.disable_depth for the device and if it was zero previously,
+ * cancel all pending runtime PM requests for the device and wait for all
+ * operations in progress to complete. The device can be either active or
+ * suspended after its runtime PM has been disabled.
+ *
+ * If @check_resume is set and there's a resume request pending when
+ * __pm_runtime_disable() is called and power.disable_depth is zero, the
+ * function will wake up the device before disabling its runtime PM.
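+ *
+ * A minimal usage sketch (hypothetical "foo" driver): drivers normally call
+ * the pm_runtime_disable() wrapper, which is __pm_runtime_disable(dev, true),
+ * from their remove path rather than calling this function directly:
+ *
+ *	static int foo_remove(struct platform_device *pdev)
+ *	{
+ *		pm_runtime_disable(&pdev->dev);
+ *		return 0;
+ *	}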
+ */ +void __pm_runtime_disable(struct device *dev, bool check_resume) +{ + spin_lock_irq(&dev->power.lock); + + if (dev->power.disable_depth > 0) { + dev->power.disable_depth++; + goto out; + } + + /* + * Wake up the device if there's a resume request pending, because that + * means there probably is some I/O to process and disabling runtime PM + * shouldn't prevent the device from processing the I/O. + */ + if (check_resume && dev->power.request_pending + && dev->power.request == RPM_REQ_RESUME) { + /* + * Prevent suspends and idle notifications from being carried + * out after we have woken up the device. + */ + pm_runtime_get_noresume(dev); + + rpm_resume(dev, 0); + + pm_runtime_put_noidle(dev); + } + + if (!dev->power.disable_depth++) + __pm_runtime_barrier(dev); + + out: + spin_unlock_irq(&dev->power.lock); +} +EXPORT_SYMBOL_GPL(__pm_runtime_disable); + +/** + * pm_runtime_enable - Enable runtime PM of a device. + * @dev: Device to handle. + */ +void pm_runtime_enable(struct device *dev) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->power.lock, flags); + + if (dev->power.disable_depth > 0) + dev->power.disable_depth--; + else + dev_warn(dev, "Unbalanced %s!\n", __func__); + + spin_unlock_irqrestore(&dev->power.lock, flags); +} +EXPORT_SYMBOL_GPL(pm_runtime_enable); + +/** + * pm_runtime_forbid - Block runtime PM of a device. + * @dev: Device to handle. + * + * Increase the device's usage count and clear its power.runtime_auto flag, + * so that it cannot be suspended at run time until pm_runtime_allow() is called + * for it. + */ +void pm_runtime_forbid(struct device *dev) +{ + spin_lock_irq(&dev->power.lock); + if (!dev->power.runtime_auto) + goto out; + + dev->power.runtime_auto = false; + atomic_inc(&dev->power.usage_count); + rpm_resume(dev, 0); + + out: + spin_unlock_irq(&dev->power.lock); +} +EXPORT_SYMBOL_GPL(pm_runtime_forbid); + +/** + * pm_runtime_allow - Unblock runtime PM of a device. + * @dev: Device to handle. + * + * Decrease the device's usage count and set its power.runtime_auto flag. + */ +void pm_runtime_allow(struct device *dev) +{ + spin_lock_irq(&dev->power.lock); + if (dev->power.runtime_auto) + goto out; + + dev->power.runtime_auto = true; + if (atomic_dec_and_test(&dev->power.usage_count)) + rpm_idle(dev, RPM_AUTO); + + out: + spin_unlock_irq(&dev->power.lock); +} +EXPORT_SYMBOL_GPL(pm_runtime_allow); + +/** + * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device. + * @dev: Device to handle. + * + * Set the power.no_callbacks flag, which tells the PM core that this + * device is power-managed through its parent and has no runtime PM + * callbacks of its own. The runtime sysfs attributes will be removed. + */ +void pm_runtime_no_callbacks(struct device *dev) +{ + spin_lock_irq(&dev->power.lock); + dev->power.no_callbacks = 1; + spin_unlock_irq(&dev->power.lock); + if (device_is_registered(dev)) + rpm_sysfs_remove(dev); +} +EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks); + +/** + * pm_runtime_irq_safe - Leave interrupts disabled during callbacks. + * @dev: Device to handle + * + * Set the power.irq_safe flag, which tells the PM core that the + * ->runtime_suspend() and ->runtime_resume() callbacks for this device should + * always be invoked with the spinlock held and interrupts disabled. It also + * causes the parent's usage counter to be permanently incremented, preventing + * the parent from runtime suspending -- otherwise an irq-safe child might have + * to wait for a non-irq-safe parent. 
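+ *
+ * A minimal usage sketch (hypothetical "foo" driver): call this once during
+ * probe, after which synchronous calls such as pm_runtime_get_sync() and
+ * pm_runtime_put() may be used from the driver's interrupt handler:
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		pm_runtime_irq_safe(&pdev->dev);
+ *		pm_runtime_enable(&pdev->dev);
+ *		return 0;
+ *	}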
+ */ +void pm_runtime_irq_safe(struct device *dev) +{ + if (dev->parent) + pm_runtime_get_sync(dev->parent); + spin_lock_irq(&dev->power.lock); + dev->power.irq_safe = 1; + spin_unlock_irq(&dev->power.lock); +} +EXPORT_SYMBOL_GPL(pm_runtime_irq_safe); + +/** + * update_autosuspend - Handle a change to a device's autosuspend settings. + * @dev: Device to handle. + * @old_delay: The former autosuspend_delay value. + * @old_use: The former use_autosuspend value. + * + * Prevent runtime suspend if the new delay is negative and use_autosuspend is + * set; otherwise allow it. Send an idle notification if suspends are allowed. + * + * This function must be called under dev->power.lock with interrupts disabled. + */ +static void update_autosuspend(struct device *dev, int old_delay, int old_use) +{ + int delay = dev->power.autosuspend_delay; + + /* Should runtime suspend be prevented now? */ + if (dev->power.use_autosuspend && delay < 0) { + + /* If it used to be allowed then prevent it. */ + if (!old_use || old_delay >= 0) { + atomic_inc(&dev->power.usage_count); + rpm_resume(dev, 0); + } + } + + /* Runtime suspend should be allowed now. */ + else { + + /* If it used to be prevented then allow it. */ + if (old_use && old_delay < 0) + atomic_dec(&dev->power.usage_count); + + /* Maybe we can autosuspend now. */ + rpm_idle(dev, RPM_AUTO); + } +} + +/** + * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value. + * @dev: Device to handle. + * @delay: Value of the new delay in milliseconds. + * + * Set the device's power.autosuspend_delay value. If it changes to negative + * and the power.use_autosuspend flag is set, prevent runtime suspends. If it + * changes the other way, allow runtime suspends. + */ +void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) +{ + int old_delay, old_use; + + spin_lock_irq(&dev->power.lock); + old_delay = dev->power.autosuspend_delay; + old_use = dev->power.use_autosuspend; + dev->power.autosuspend_delay = delay; + update_autosuspend(dev, old_delay, old_use); + spin_unlock_irq(&dev->power.lock); +} +EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay); + +/** + * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag. + * @dev: Device to handle. + * @use: New value for use_autosuspend. + * + * Set the device's power.use_autosuspend flag, and allow or prevent runtime + * suspends as needed. + */ +void __pm_runtime_use_autosuspend(struct device *dev, bool use) +{ + int old_delay, old_use; + + spin_lock_irq(&dev->power.lock); + old_delay = dev->power.autosuspend_delay; + old_use = dev->power.use_autosuspend; + dev->power.use_autosuspend = use; + update_autosuspend(dev, old_delay, old_use); + spin_unlock_irq(&dev->power.lock); +} +EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend); + +/** + * pm_runtime_init - Initialize runtime PM fields in given device object. + * @dev: Device object to initialize. 
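+ *
+ * Note that runtime PM starts out disabled here (power.disable_depth is set
+ * to 1) and the status is RPM_SUSPENDED; a bus type or driver has to call
+ * pm_runtime_enable() before the core will carry out any runtime PM
+ * operations for the device.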
+ */
+void pm_runtime_init(struct device *dev)
+{
+	dev->power.runtime_status = RPM_SUSPENDED;
+	dev->power.idle_notification = false;
+
+	dev->power.disable_depth = 1;
+	atomic_set(&dev->power.usage_count, 0);
+
+	dev->power.runtime_error = 0;
+
+	atomic_set(&dev->power.child_count, 0);
+	pm_suspend_ignore_children(dev, false);
+	dev->power.runtime_auto = true;
+
+	dev->power.request_pending = false;
+	dev->power.request = RPM_REQ_NONE;
+	dev->power.deferred_resume = false;
+	dev->power.accounting_timestamp = jiffies;
+	INIT_WORK(&dev->power.work, pm_runtime_work);
+
+	dev->power.timer_expires = 0;
+	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
+			(unsigned long)dev);
+
+	init_waitqueue_head(&dev->power.wait_queue);
+}
+
+/**
+ * pm_runtime_remove - Prepare for removing a device from device hierarchy.
+ * @dev: Device object being removed from device hierarchy.
+ */
+void pm_runtime_remove(struct device *dev)
+{
+	__pm_runtime_disable(dev, false);
+
+	/* Change the status back to 'suspended' to match the initial status. */
+	if (dev->power.runtime_status == RPM_ACTIVE)
+		pm_runtime_set_suspended(dev);
+	if (dev->power.irq_safe && dev->parent)
+		pm_runtime_put_sync(dev->parent);
+}
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
new file mode 100644
index 00000000..48be2ad4
--- /dev/null
+++ b/drivers/base/power/sysfs.c
@@ -0,0 +1,634 @@
+/*
+ * drivers/base/power/sysfs.c - sysfs entries for device PM
+ */
+
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/export.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+#include <linux/atomic.h>
+#include <linux/jiffies.h>
+#include "power.h"
+
+/*
+ * control - Report/change current runtime PM setting of the device
+ *
+ * Runtime power management of a device can be blocked with the help of
+ * this attribute. All devices have one of the following two values for
+ * the power/control file:
+ *
+ * + "auto\n" to allow the device to be power managed at run time;
+ * + "on\n" to prevent the device from being power managed at run time;
+ *
+ * The default for all devices is "auto", which means that devices may be
+ * subject to automatic power management, depending on their drivers.
+ * Changing this attribute to "on" prevents the driver from power managing
+ * the device at run time. Doing that while the device is suspended causes
+ * it to be woken up.
+ *
+ * wakeup - Report/change current wakeup option for device
+ *
+ * Some devices support "wakeup" events, which are hardware signals
+ * used to activate devices from suspended or low power states. Such
+ * devices have one of three values for the sysfs power/wakeup file:
+ *
+ * + "enabled\n" to issue the events;
+ * + "disabled\n" not to do so; or
+ * + "\n" for temporary or permanent inability to issue wakeup.
+ *
+ * (For example, unconfigured USB devices can't issue wakeups.)
+ *
+ * Familiar examples of devices that can issue wakeup events include
+ * keyboards and mice (both PS2 and USB styles), power buttons, modems,
+ * "Wake-On-LAN" Ethernet links, GPIO lines, and more. Some events
+ * will wake the entire system from a suspend state; others may just
+ * wake up the device (if the system as a whole is already active).
+ * Some wakeup events use normal IRQ lines; others use special
+ * out-of-band signaling.
+ *
+ * It is the responsibility of device drivers to enable (or disable)
+ * wakeup signaling as part of changing device power states, respecting
+ * the policy choices provided through the driver model.
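+ *
+ * For example, from user space (the device path is illustrative):
+ *
+ *	# echo disabled > /sys/devices/.../power/wakeup
+ *	# cat /sys/devices/.../power/wakeup
+ *	disabled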
+ *
+ * Devices may not be able to generate wakeup events from all power
+ * states. Also, the events may be ignored in some configurations;
+ * for example, they might need help from other devices that aren't
+ * active, or which may have wakeup disabled. Some drivers rely on
+ * wakeup events internally (unless they are disabled), keeping
+ * their hardware in low power modes whenever they're unused. This
+ * saves runtime power, without requiring system-wide sleep states.
+ *
+ * async - Report/change current async suspend setting for the device
+ *
+ * Asynchronous suspend and resume of the device during system-wide power
+ * state transitions can be enabled by writing "enabled" to this file.
+ * Analogously, if "disabled" is written to this file, the device will be
+ * suspended and resumed synchronously.
+ *
+ * All devices have one of the following two values for power/async:
+ *
+ * + "enabled\n" to permit the asynchronous suspend/resume of the device;
+ * + "disabled\n" to forbid it;
+ *
+ * NOTE: It generally is unsafe to permit the asynchronous suspend/resume
+ * of a device unless it is certain that all of the PM dependencies of the
+ * device are known to the PM core. However, for some devices this
+ * attribute is set to "enabled" by bus type code or device drivers and in
+ * that case it should be safe to leave the default value.
+ *
+ * autosuspend_delay_ms - Report/change a device's autosuspend_delay value
+ *
+ * Some drivers don't want to carry out a runtime suspend as soon as a
+ * device becomes idle; they want it always to remain idle for some period
+ * of time before suspending it. This period is the autosuspend_delay
+ * value (expressed in milliseconds) and it can be controlled by the user.
+ * If the value is negative then the device will never be runtime
+ * suspended.
+ *
+ * NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay
+ * value are used only if the driver calls pm_runtime_use_autosuspend().
+ *
+ * wakeup_count - Report the number of wakeup events related to the device
+ */
+
+static const char enabled[] = "enabled";
+static const char disabled[] = "disabled";
+
+const char power_group_name[] = "power";
+EXPORT_SYMBOL_GPL(power_group_name);
+
+#ifdef CONFIG_PM_RUNTIME
+static const char ctrl_auto[] = "auto";
+static const char ctrl_on[] = "on";
+
+static ssize_t control_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	return sprintf(buf, "%s\n",
+				dev->power.runtime_auto ?
ctrl_auto : ctrl_on); +} + +static ssize_t control_store(struct device * dev, struct device_attribute *attr, + const char * buf, size_t n) +{ + char *cp; + int len = n; + + cp = memchr(buf, '\n', n); + if (cp) + len = cp - buf; + device_lock(dev); + if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0) + pm_runtime_allow(dev); + else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0) + pm_runtime_forbid(dev); + else + n = -EINVAL; + device_unlock(dev); + return n; +} + +static DEVICE_ATTR(control, 0644, control_show, control_store); + +static ssize_t rtpm_active_time_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret; + spin_lock_irq(&dev->power.lock); + update_pm_runtime_accounting(dev); + ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies)); + spin_unlock_irq(&dev->power.lock); + return ret; +} + +static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL); + +static ssize_t rtpm_suspended_time_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret; + spin_lock_irq(&dev->power.lock); + update_pm_runtime_accounting(dev); + ret = sprintf(buf, "%i\n", + jiffies_to_msecs(dev->power.suspended_jiffies)); + spin_unlock_irq(&dev->power.lock); + return ret; +} + +static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL); + +static ssize_t rtpm_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + const char *p; + + if (dev->power.runtime_error) { + p = "error\n"; + } else if (dev->power.disable_depth) { + p = "unsupported\n"; + } else { + switch (dev->power.runtime_status) { + case RPM_SUSPENDED: + p = "suspended\n"; + break; + case RPM_SUSPENDING: + p = "suspending\n"; + break; + case RPM_RESUMING: + p = "resuming\n"; + break; + case RPM_ACTIVE: + p = "active\n"; + break; + default: + return -EIO; + } + } + return sprintf(buf, p); +} + +static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL); + +static ssize_t autosuspend_delay_ms_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + if (!dev->power.use_autosuspend) + return -EIO; + return sprintf(buf, "%d\n", dev->power.autosuspend_delay); +} + +static ssize_t autosuspend_delay_ms_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t n) +{ + long delay; + + if (!dev->power.use_autosuspend) + return -EIO; + + if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay) + return -EINVAL; + + device_lock(dev); + pm_runtime_set_autosuspend_delay(dev, delay); + device_unlock(dev); + return n; +} + +static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, + autosuspend_delay_ms_store); + +static ssize_t pm_qos_latency_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", dev->power.pq_req->node.prio); +} + +static ssize_t pm_qos_latency_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t n) +{ + s32 value; + int ret; + + if (kstrtos32(buf, 0, &value)) + return -EINVAL; + + if (value < 0) + return -EINVAL; + + ret = dev_pm_qos_update_request(dev->power.pq_req, value); + return ret < 0 ? ret : n; +} + +static DEVICE_ATTR(pm_qos_resume_latency_us, 0644, + pm_qos_latency_show, pm_qos_latency_store); +#endif /* CONFIG_PM_RUNTIME */ + +#ifdef CONFIG_PM_SLEEP +static ssize_t +wake_show(struct device * dev, struct device_attribute *attr, char * buf) +{ + return sprintf(buf, "%s\n", device_can_wakeup(dev) + ? 
(device_may_wakeup(dev) ? enabled : disabled) + : ""); +} + +static ssize_t +wake_store(struct device * dev, struct device_attribute *attr, + const char * buf, size_t n) +{ + char *cp; + int len = n; + + if (!device_can_wakeup(dev)) + return -EINVAL; + + cp = memchr(buf, '\n', n); + if (cp) + len = cp - buf; + if (len == sizeof enabled - 1 + && strncmp(buf, enabled, sizeof enabled - 1) == 0) + device_set_wakeup_enable(dev, 1); + else if (len == sizeof disabled - 1 + && strncmp(buf, disabled, sizeof disabled - 1) == 0) + device_set_wakeup_enable(dev, 0); + else + return -EINVAL; + return n; +} + +static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); + +static ssize_t wakeup_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long count = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + count = dev->power.wakeup->event_count; + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL); + +static ssize_t wakeup_active_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long count = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + count = dev->power.wakeup->active_count; + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL); + +static ssize_t wakeup_abort_count_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long count = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + count = dev->power.wakeup->wakeup_count; + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_abort_count, 0444, wakeup_abort_count_show, NULL); + +static ssize_t wakeup_expire_count_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long count = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + count = dev->power.wakeup->expire_count; + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_expire_count, 0444, wakeup_expire_count_show, NULL); + +static ssize_t wakeup_active_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned int active = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + active = dev->power.wakeup->active; + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_active, 0444, wakeup_active_show, NULL); + +static ssize_t wakeup_total_time_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + s64 msec = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + msec = ktime_to_ms(dev->power.wakeup->total_time); + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? 
sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_total_time_ms, 0444, wakeup_total_time_show, NULL); + +static ssize_t wakeup_max_time_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + s64 msec = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + msec = ktime_to_ms(dev->power.wakeup->max_time); + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_max_time_ms, 0444, wakeup_max_time_show, NULL); + +static ssize_t wakeup_last_time_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + s64 msec = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + msec = ktime_to_ms(dev->power.wakeup->last_time); + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL); + +#ifdef CONFIG_PM_AUTOSLEEP +static ssize_t wakeup_prevent_sleep_time_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + s64 msec = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time); + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_prevent_sleep_time_ms, 0444, + wakeup_prevent_sleep_time_show, NULL); +#endif /* CONFIG_PM_AUTOSLEEP */ +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM_ADVANCED_DEBUG +#ifdef CONFIG_PM_RUNTIME + +static ssize_t rtpm_usagecount_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count)); +} + +static ssize_t rtpm_children_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", dev->power.ignore_children ? + 0 : atomic_read(&dev->power.child_count)); +} + +static ssize_t rtpm_enabled_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + if ((dev->power.disable_depth) && (dev->power.runtime_auto == false)) + return sprintf(buf, "disabled & forbidden\n"); + else if (dev->power.disable_depth) + return sprintf(buf, "disabled\n"); + else if (dev->power.runtime_auto == false) + return sprintf(buf, "forbidden\n"); + return sprintf(buf, "enabled\n"); +} + +static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL); +static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL); +static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL); + +#endif + +static ssize_t async_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%s\n", + device_async_suspend_enabled(dev) ? 
enabled : disabled); +} + +static ssize_t async_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t n) +{ + char *cp; + int len = n; + + cp = memchr(buf, '\n', n); + if (cp) + len = cp - buf; + if (len == sizeof enabled - 1 && strncmp(buf, enabled, len) == 0) + device_enable_async_suspend(dev); + else if (len == sizeof disabled - 1 && strncmp(buf, disabled, len) == 0) + device_disable_async_suspend(dev); + else + return -EINVAL; + return n; +} + +static DEVICE_ATTR(async, 0644, async_show, async_store); +#endif /* CONFIG_PM_ADVANCED_DEBUG */ + +static struct attribute *power_attrs[] = { +#ifdef CONFIG_PM_ADVANCED_DEBUG +#ifdef CONFIG_PM_SLEEP + &dev_attr_async.attr, +#endif +#ifdef CONFIG_PM_RUNTIME + &dev_attr_runtime_status.attr, + &dev_attr_runtime_usage.attr, + &dev_attr_runtime_active_kids.attr, + &dev_attr_runtime_enabled.attr, +#endif +#endif /* CONFIG_PM_ADVANCED_DEBUG */ + NULL, +}; +static struct attribute_group pm_attr_group = { + .name = power_group_name, + .attrs = power_attrs, +}; + +static struct attribute *wakeup_attrs[] = { +#ifdef CONFIG_PM_SLEEP + &dev_attr_wakeup.attr, + &dev_attr_wakeup_count.attr, + &dev_attr_wakeup_active_count.attr, + &dev_attr_wakeup_abort_count.attr, + &dev_attr_wakeup_expire_count.attr, + &dev_attr_wakeup_active.attr, + &dev_attr_wakeup_total_time_ms.attr, + &dev_attr_wakeup_max_time_ms.attr, + &dev_attr_wakeup_last_time_ms.attr, +#ifdef CONFIG_PM_AUTOSLEEP + &dev_attr_wakeup_prevent_sleep_time_ms.attr, +#endif +#endif + NULL, +}; +static struct attribute_group pm_wakeup_attr_group = { + .name = power_group_name, + .attrs = wakeup_attrs, +}; + +static struct attribute *runtime_attrs[] = { +#ifdef CONFIG_PM_RUNTIME +#ifndef CONFIG_PM_ADVANCED_DEBUG + &dev_attr_runtime_status.attr, +#endif + &dev_attr_control.attr, + &dev_attr_runtime_suspended_time.attr, + &dev_attr_runtime_active_time.attr, + &dev_attr_autosuspend_delay_ms.attr, +#endif /* CONFIG_PM_RUNTIME */ + NULL, +}; +static struct attribute_group pm_runtime_attr_group = { + .name = power_group_name, + .attrs = runtime_attrs, +}; + +static struct attribute *pm_qos_attrs[] = { +#ifdef CONFIG_PM_RUNTIME + &dev_attr_pm_qos_resume_latency_us.attr, +#endif /* CONFIG_PM_RUNTIME */ + NULL, +}; +static struct attribute_group pm_qos_attr_group = { + .name = power_group_name, + .attrs = pm_qos_attrs, +}; + +int dpm_sysfs_add(struct device *dev) +{ + int rc; + + rc = sysfs_create_group(&dev->kobj, &pm_attr_group); + if (rc) + return rc; + + if (pm_runtime_callbacks_present(dev)) { + rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group); + if (rc) + goto err_out; + } + + if (device_can_wakeup(dev)) { + rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); + if (rc) { + if (pm_runtime_callbacks_present(dev)) + sysfs_unmerge_group(&dev->kobj, + &pm_runtime_attr_group); + goto err_out; + } + } + return 0; + + err_out: + sysfs_remove_group(&dev->kobj, &pm_attr_group); + return rc; +} + +int wakeup_sysfs_add(struct device *dev) +{ + return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); +} + +void wakeup_sysfs_remove(struct device *dev) +{ + sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); +} + +int pm_qos_sysfs_add(struct device *dev) +{ + return sysfs_merge_group(&dev->kobj, &pm_qos_attr_group); +} + +void pm_qos_sysfs_remove(struct device *dev) +{ + sysfs_unmerge_group(&dev->kobj, &pm_qos_attr_group); +} + +void rpm_sysfs_remove(struct device *dev) +{ + sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); +} + +void dpm_sysfs_remove(struct device 
*dev)
+{
+	rpm_sysfs_remove(dev);
+	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+	sysfs_remove_group(&dev->kobj, &pm_attr_group);
+}
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
new file mode 100644
index 00000000..d94a1f51
--- /dev/null
+++ b/drivers/base/power/trace.c
@@ -0,0 +1,266 @@
+/*
+ * drivers/base/power/trace.c
+ *
+ * Copyright (C) 2006 Linus Torvalds
+ *
+ * Trace facility for suspend/resume problems, when none of the
+ * devices may be working.
+ */
+
+#include <linux/resume-trace.h>
+#include <linux/export.h>
+#include <linux/rtc.h>
+
+#include <asm/rtc.h>
+
+#include "power.h"
+
+/*
+ * Horrid, horrid, horrid.
+ *
+ * It turns out that the _only_ piece of hardware that actually
+ * keeps its value across a hard boot (and, more importantly, the
+ * POST init sequence) is literally the realtime clock.
+ *
+ * Never mind that an RTC chip has 114 bytes (and often a whole
+ * other bank of an additional 128 bytes) of nice SRAM that is
+ * _designed_ to keep data - the POST will clear it. So we literally
+ * can just use the few bytes of actual time data, which means that
+ * we're really limited.
+ *
+ * It means, for example, that we can't use the seconds at all
+ * (since the time between the hang and the boot might be more
+ * than a minute), and we'd better not depend on the low bits of
+ * the minutes either.
+ *
+ * There are the wday fields etc, but I wouldn't guarantee those
+ * are dependable either. And if the date isn't valid, either the
+ * hw or POST will do strange things.
+ *
+ * So we're left with:
+ *	- year: 0-99
+ *	- month: 0-11
+ *	- day-of-month: 1-28
+ *	- hour: 0-23
+ *	- min: (0-19)*3 (i.e. twenty 3-minute steps, matching the code below)
+ *
+ * Giving us a total range of 0-16128000 (0xf61800), i.e. less
+ * than 24 bits of actual data we can save across reboots.
+ *
+ * And if your box can't boot in less than three minutes,
+ * you're screwed.
+ *
+ * Now, almost 24 bits of data is pitifully small, so we need
+ * to be pretty dense if we want to use it for anything nice.
+ * What we do is that instead of saving off nice readable info,
+ * we save off _hashes_ of information that we can hopefully
+ * regenerate after the reboot.
+ *
+ * In particular, this means that we might be unlucky, and hit
+ * a case where we have a hash collision, and we end up not
+ * being able to tell for certain exactly which case happened.
+ * But that's hopefully unlikely.
+ *
+ * What we do is to take the bits we can fit, and split them
+ * into three parts (16*997*1009 = 16095568), and use the values
+ * for:
+ *	- 0-15: user-settable
+ *	- 0-996: file + line number
+ *	- 0-1008: device
+ */
+#define USERHASH (16)
+#define FILEHASH (997)
+#define DEVHASH (1009)
+
+#define DEVSEED (7919)
+
+static unsigned int dev_hash_value;
+
+static int set_magic_time(unsigned int user, unsigned int file, unsigned int device)
+{
+	unsigned int n = user + USERHASH*(file + FILEHASH*device);
+
+	/* June 7th, 2006 */
+	static struct rtc_time time = {
+		.tm_sec = 0,
+		.tm_min = 0,
+		.tm_hour = 0,
+		.tm_mday = 7,
+		.tm_mon = 5,	/* June - counting from zero */
+		.tm_year = 106,
+		.tm_wday = 3,
+		.tm_yday = 160,
+		.tm_isdst = 1
+	};
+
+	time.tm_year = (n % 100);
+	n /= 100;
+	time.tm_mon = (n % 12);
+	n /= 12;
+	time.tm_mday = (n % 28) + 1;
+	n /= 28;
+	time.tm_hour = (n % 24);
+	n /= 24;
+	time.tm_min = (n % 20) * 3;
+	n /= 20;
+	set_rtc_time(&time);
+	return n ?
-1 : 0; +} + +static unsigned int read_magic_time(void) +{ + struct rtc_time time; + unsigned int val; + + get_rtc_time(&time); + pr_info("RTC time: %2d:%02d:%02d, date: %02d/%02d/%02d\n", + time.tm_hour, time.tm_min, time.tm_sec, + time.tm_mon + 1, time.tm_mday, time.tm_year % 100); + val = time.tm_year; /* 100 years */ + if (val > 100) + val -= 100; + val += time.tm_mon * 100; /* 12 months */ + val += (time.tm_mday-1) * 100 * 12; /* 28 month-days */ + val += time.tm_hour * 100 * 12 * 28; /* 24 hours */ + val += (time.tm_min / 3) * 100 * 12 * 28 * 24; /* 20 3-minute intervals */ + return val; +} + +/* + * This is just the sdbm hash function with a user-supplied + * seed and final size parameter. + */ +static unsigned int hash_string(unsigned int seed, const char *data, unsigned int mod) +{ + unsigned char c; + while ((c = *data++) != 0) { + seed = (seed << 16) + (seed << 6) - seed + c; + } + return seed % mod; +} + +void set_trace_device(struct device *dev) +{ + dev_hash_value = hash_string(DEVSEED, dev_name(dev), DEVHASH); +} +EXPORT_SYMBOL(set_trace_device); + +/* + * We could just take the "tracedata" index into the .tracedata + * section instead. Generating a hash of the data gives us a + * chance to work across kernel versions, and perhaps more + * importantly it also gives us valid/invalid check (ie we will + * likely not give totally bogus reports - if the hash matches, + * it's not any guarantee, but it's a high _likelihood_ that + * the match is valid). + */ +void generate_resume_trace(const void *tracedata, unsigned int user) +{ + unsigned short lineno = *(unsigned short *)tracedata; + const char *file = *(const char **)(tracedata + 2); + unsigned int user_hash_value, file_hash_value; + + user_hash_value = user % USERHASH; + file_hash_value = hash_string(lineno, file, FILEHASH); + set_magic_time(user_hash_value, file_hash_value, dev_hash_value); +} +EXPORT_SYMBOL(generate_resume_trace); + +extern char __tracedata_start, __tracedata_end; +static int show_file_hash(unsigned int value) +{ + int match; + char *tracedata; + + match = 0; + for (tracedata = &__tracedata_start ; tracedata < &__tracedata_end ; + tracedata += 2 + sizeof(unsigned long)) { + unsigned short lineno = *(unsigned short *)tracedata; + const char *file = *(const char **)(tracedata + 2); + unsigned int hash = hash_string(lineno, file, FILEHASH); + if (hash != value) + continue; + pr_info(" hash matches %s:%u\n", file, lineno); + match++; + } + return match; +} + +static int show_dev_hash(unsigned int value) +{ + int match = 0; + struct list_head *entry; + + device_pm_lock(); + entry = dpm_list.prev; + while (entry != &dpm_list) { + struct device * dev = to_device(entry); + unsigned int hash = hash_string(DEVSEED, dev_name(dev), DEVHASH); + if (hash == value) { + dev_info(dev, "hash matches\n"); + match++; + } + entry = entry->prev; + } + device_pm_unlock(); + return match; +} + +static unsigned int hash_value_early_read; + +int show_trace_dev_match(char *buf, size_t size) +{ + unsigned int value = hash_value_early_read / (USERHASH * FILEHASH); + int ret = 0; + struct list_head *entry; + + /* + * It's possible that multiple devices will match the hash and we can't + * tell which is the culprit, so it's best to output them all. 
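+	 * Each matching driver name is copied out on its own line until the
+	 * supplied buffer is full, as the loop below shows.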
+ */ + device_pm_lock(); + entry = dpm_list.prev; + while (size && entry != &dpm_list) { + struct device *dev = to_device(entry); + unsigned int hash = hash_string(DEVSEED, dev_name(dev), + DEVHASH); + if (hash == value) { + int len = snprintf(buf, size, "%s\n", + dev_driver_string(dev)); + if (len > size) + len = size; + buf += len; + ret += len; + size -= len; + } + entry = entry->prev; + } + device_pm_unlock(); + return ret; +} + +static int early_resume_init(void) +{ + hash_value_early_read = read_magic_time(); + return 0; +} + +static int late_resume_init(void) +{ + unsigned int val = hash_value_early_read; + unsigned int user, file, dev; + + user = val % USERHASH; + val = val / USERHASH; + file = val % FILEHASH; + val = val / FILEHASH; + dev = val /* % DEVHASH */; + + pr_info(" Magic number: %d:%d:%d\n", user, file, dev); + show_file_hash(file); + show_dev_hash(dev); + return 0; +} + +core_initcall(early_resume_init); +late_initcall(late_resume_init); diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c new file mode 100644 index 00000000..ba06da42 --- /dev/null +++ b/drivers/base/power/wakeup.c @@ -0,0 +1,997 @@ +/* + * drivers/base/power/wakeup.c - System wakeup events framework + * + * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. + * + * This file is released under the GPLv2. + */ + +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/sched.h> +#include <linux/capability.h> +#include <linux/export.h> +#include <linux/suspend.h> +#include <linux/seq_file.h> +#include <linux/debugfs.h> +#include <trace/events/power.h> + +#include "power.h" + +/* + * If set, the suspend/hibernate code will abort transitions to a sleep state + * if wakeup events are registered during or immediately before the transition. + */ +bool events_check_enabled __read_mostly; + +/* + * Combined counters of registered wakeup events and wakeup events in progress. + * They need to be modified together atomically, so it's better to use one + * atomic variable to hold them both. + */ +static atomic_t combined_event_count = ATOMIC_INIT(0); + +#define IN_PROGRESS_BITS (sizeof(int) * 4) +#define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1) + +static void split_counters(unsigned int *cnt, unsigned int *inpr) +{ + unsigned int comb = atomic_read(&combined_event_count); + + *cnt = (comb >> IN_PROGRESS_BITS); + *inpr = comb & MAX_IN_PROGRESS; +} + +/* A preserved old value of the events counter. */ +static unsigned int saved_count; + +static DEFINE_SPINLOCK(events_lock); + +static void pm_wakeup_timer_fn(unsigned long data); + +static LIST_HEAD(wakeup_sources); + +static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue); + +/** + * wakeup_source_prepare - Prepare a new wakeup source for initialization. + * @ws: Wakeup source to prepare. + * @name: Pointer to the name of the new wakeup source. + * + * Callers must ensure that the @name string won't be freed when @ws is still in + * use. + */ +void wakeup_source_prepare(struct wakeup_source *ws, const char *name) +{ + if (ws) { + memset(ws, 0, sizeof(*ws)); + ws->name = name; + } +} +EXPORT_SYMBOL_GPL(wakeup_source_prepare); + +/** + * wakeup_source_create - Create a struct wakeup_source object. + * @name: Name of the new wakeup source. + */ +struct wakeup_source *wakeup_source_create(const char *name) +{ + struct wakeup_source *ws; + + ws = kmalloc(sizeof(*ws), GFP_KERNEL); + if (!ws) + return NULL; + + wakeup_source_prepare(ws, name ? 
kstrdup(name, GFP_KERNEL) : NULL);
+	return ws;
+}
+EXPORT_SYMBOL_GPL(wakeup_source_create);
+
+/**
+ * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
+ * @ws: Wakeup source to prepare for destruction.
+ *
+ * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
+ * be run in parallel with this function for the same wakeup source object.
+ */
+void wakeup_source_drop(struct wakeup_source *ws)
+{
+	if (!ws)
+		return;
+
+	del_timer_sync(&ws->timer);
+	__pm_relax(ws);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_drop);
+
+/**
+ * wakeup_source_destroy - Destroy a struct wakeup_source object.
+ * @ws: Wakeup source to destroy.
+ *
+ * Use only for wakeup source objects created with wakeup_source_create().
+ */
+void wakeup_source_destroy(struct wakeup_source *ws)
+{
+	if (!ws)
+		return;
+
+	wakeup_source_drop(ws);
+	kfree(ws->name);
+	kfree(ws);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_destroy);
+
+/**
+ * wakeup_source_add - Add given object to the list of wakeup sources.
+ * @ws: Wakeup source object to add to the list.
+ */
+void wakeup_source_add(struct wakeup_source *ws)
+{
+	unsigned long flags;
+
+	if (WARN_ON(!ws))
+		return;
+
+	spin_lock_init(&ws->lock);
+	setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
+	ws->active = false;
+	ws->last_time = ktime_get();
+
+	spin_lock_irqsave(&events_lock, flags);
+	list_add_rcu(&ws->entry, &wakeup_sources);
+	spin_unlock_irqrestore(&events_lock, flags);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_add);
+
+/**
+ * wakeup_source_remove - Remove given object from the wakeup sources list.
+ * @ws: Wakeup source object to remove from the list.
+ */
+void wakeup_source_remove(struct wakeup_source *ws)
+{
+	unsigned long flags;
+
+	if (WARN_ON(!ws))
+		return;
+
+	spin_lock_irqsave(&events_lock, flags);
+	list_del_rcu(&ws->entry);
+	spin_unlock_irqrestore(&events_lock, flags);
+	synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(wakeup_source_remove);
+
+/**
+ * wakeup_source_register - Create wakeup source and add it to the list.
+ * @name: Name of the wakeup source to register.
+ */
+struct wakeup_source *wakeup_source_register(const char *name)
+{
+	struct wakeup_source *ws;
+
+	ws = wakeup_source_create(name);
+	if (ws)
+		wakeup_source_add(ws);
+
+	return ws;
+}
+EXPORT_SYMBOL_GPL(wakeup_source_register);
+
+/**
+ * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
+ * @ws: Wakeup source object to unregister.
+ */
+void wakeup_source_unregister(struct wakeup_source *ws)
+{
+	if (ws) {
+		wakeup_source_remove(ws);
+		wakeup_source_destroy(ws);
+	}
+}
+EXPORT_SYMBOL_GPL(wakeup_source_unregister);
+
+/**
+ * device_wakeup_attach - Attach a wakeup source object to a device object.
+ * @dev: Device to handle.
+ * @ws: Wakeup source object to attach to @dev.
+ *
+ * This causes @dev to be treated as a wakeup device.
+ */
+static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
+{
+	spin_lock_irq(&dev->power.lock);
+	if (dev->power.wakeup) {
+		spin_unlock_irq(&dev->power.lock);
+		return -EEXIST;
+	}
+	dev->power.wakeup = ws;
+	spin_unlock_irq(&dev->power.lock);
+	return 0;
+}
+
+/**
+ * device_wakeup_enable - Enable given device to be a wakeup source.
+ * @dev: Device to handle.
+ *
+ * Create a wakeup source object, register it and attach it to @dev.
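+ *
+ * A minimal usage sketch (hypothetical "foo" driver): drivers usually reach
+ * this function through device_init_wakeup(), which also sets the
+ * power.can_wakeup flag first:
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		device_init_wakeup(&pdev->dev, true);
+ *		return 0;
+ *	}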
+ */ +int device_wakeup_enable(struct device *dev) +{ + struct wakeup_source *ws; + int ret; + + if (!dev || !dev->power.can_wakeup) + return -EINVAL; + + ws = wakeup_source_register(dev_name(dev)); + if (!ws) + return -ENOMEM; + + ret = device_wakeup_attach(dev, ws); + if (ret) + wakeup_source_unregister(ws); + + return ret; +} +EXPORT_SYMBOL_GPL(device_wakeup_enable); + +/** + * device_wakeup_detach - Detach a device's wakeup source object from it. + * @dev: Device to detach the wakeup source object from. + * + * After it returns, @dev will not be treated as a wakeup device any more. + */ +static struct wakeup_source *device_wakeup_detach(struct device *dev) +{ + struct wakeup_source *ws; + + spin_lock_irq(&dev->power.lock); + ws = dev->power.wakeup; + dev->power.wakeup = NULL; + spin_unlock_irq(&dev->power.lock); + return ws; +} + +/** + * device_wakeup_disable - Do not regard a device as a wakeup source any more. + * @dev: Device to handle. + * + * Detach the @dev's wakeup source object from it, unregister this wakeup source + * object and destroy it. + */ +int device_wakeup_disable(struct device *dev) +{ + struct wakeup_source *ws; + + if (!dev || !dev->power.can_wakeup) + return -EINVAL; + + ws = device_wakeup_detach(dev); + if (ws) + wakeup_source_unregister(ws); + + return 0; +} +EXPORT_SYMBOL_GPL(device_wakeup_disable); + +/** + * device_set_wakeup_capable - Set/reset device wakeup capability flag. + * @dev: Device to handle. + * @capable: Whether or not @dev is capable of waking up the system from sleep. + * + * If @capable is set, set the @dev's power.can_wakeup flag and add its + * wakeup-related attributes to sysfs. Otherwise, unset the @dev's + * power.can_wakeup flag and remove its wakeup-related attributes from sysfs. + * + * This function may sleep and it can't be called from any context where + * sleeping is not allowed. + */ +void device_set_wakeup_capable(struct device *dev, bool capable) +{ + if (!!dev->power.can_wakeup == !!capable) + return; + + if (device_is_registered(dev) && !list_empty(&dev->power.entry)) { + if (capable) { + if (wakeup_sysfs_add(dev)) + return; + } else { + wakeup_sysfs_remove(dev); + } + } + dev->power.can_wakeup = capable; +} +EXPORT_SYMBOL_GPL(device_set_wakeup_capable); + +/** + * device_init_wakeup - Device wakeup initialization. + * @dev: Device to handle. + * @enable: Whether or not to enable @dev as a wakeup device. + * + * By default, most devices should leave wakeup disabled. The exceptions are + * devices that everyone expects to be wakeup sources: keyboards, power buttons, + * possibly network interfaces, etc. Also, devices that don't generate their + * own wakeup requests but merely forward requests from one bus to another + * (like PCI bridges) should have wakeup enabled by default. + */ +int device_init_wakeup(struct device *dev, bool enable) +{ + int ret = 0; + + if (enable) { + device_set_wakeup_capable(dev, true); + ret = device_wakeup_enable(dev); + } else { + device_set_wakeup_capable(dev, false); + } + + return ret; +} +EXPORT_SYMBOL_GPL(device_init_wakeup); + +/** + * device_set_wakeup_enable - Enable or disable a device to wake up the system. + * @dev: Device to handle. + */ +int device_set_wakeup_enable(struct device *dev, bool enable) +{ + if (!dev || !dev->power.can_wakeup) + return -EINVAL; + + return enable ? 
device_wakeup_enable(dev) : device_wakeup_disable(dev);
+}
+EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
+
+/*
+ * The functions below use the observation that each wakeup event starts a
+ * period in which the system should not be suspended. The moment this period
+ * will end depends on how the wakeup event is going to be processed after being
+ * detected and all of the possible cases can be divided into two distinct
+ * groups.
+ *
+ * First, a wakeup event may be detected by the same functional unit that will
+ * carry out the entire processing of it and possibly will pass it to user space
+ * for further processing. In that case the functional unit that has detected
+ * the event may later "close" the "no suspend" period associated with it
+ * directly as soon as it has been dealt with. The pair of pm_stay_awake() and
+ * pm_relax(), balanced with each other, is supposed to be used in such
+ * situations.
+ *
+ * Second, a wakeup event may be detected by one functional unit and processed
+ * by another one. In that case the unit that has detected it cannot really
+ * "close" the "no suspend" period associated with it, unless it knows in
+ * advance what's going to happen to the event during processing. This
+ * knowledge, however, may not be available to it, so it can simply specify time
+ * to wait before the system can be suspended and pass it as the second
+ * argument of pm_wakeup_event().
+ *
+ * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
+ * "no suspend" period will be ended either by the pm_relax(), or by the timer
+ * function executed when the timer expires, whichever comes first.
+ */
+
+/**
+ * wakeup_source_activate - Mark given wakeup source as active.
+ * @ws: Wakeup source to handle.
+ *
+ * Update the @ws' statistics and, if @ws has just been activated, notify the PM
+ * core of the event by incrementing the counter of wakeup events being
+ * processed.
+ */
+static void wakeup_source_activate(struct wakeup_source *ws)
+{
+	unsigned int cec;
+
+	ws->active = true;
+	ws->active_count++;
+	ws->last_time = ktime_get();
+	if (ws->autosleep_enabled)
+		ws->start_prevent_time = ws->last_time;
+
+	/* Increment the counter of events in progress. */
+	cec = atomic_inc_return(&combined_event_count);
+
+	trace_wakeup_source_activate(ws->name, cec);
+}
+
+/**
+ * wakeup_source_report_event - Report wakeup event using the given source.
+ * @ws: Wakeup source to report the event for.
+ */
+static void wakeup_source_report_event(struct wakeup_source *ws)
+{
+	ws->event_count++;
+	/* This is racy, but the counter is approximate anyway. */
+	if (events_check_enabled)
+		ws->wakeup_count++;
+
+	if (!ws->active)
+		wakeup_source_activate(ws);
+}
+
+/**
+ * __pm_stay_awake - Notify the PM core of a wakeup event.
+ * @ws: Wakeup source object associated with the source of the event.
+ *
+ * It is safe to call this function from interrupt context.
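+ *
+ * A minimal usage sketch (hypothetical "foo" driver and helper), pairing this
+ * function with __pm_relax() as described in the comment block above:
+ *
+ *	static void foo_handle_event(struct foo *foo)
+ *	{
+ *		__pm_stay_awake(foo->ws);
+ *		foo_process_event(foo);
+ *		__pm_relax(foo->ws);
+ *	}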
+ */
+
+/*
+ * Added by kevin for wakeup lock debugging. Not compiled by default.
+ * Usage:
+ *	1: #define WAKE_TRACE_ENABLE 1
+ *	2: add wake_trace_show(); in kernel/power/main.c/state_show()
+ *	3: cat /sys/power/state
+ */
+#define WAKE_TRACE_ENABLE 0
+
+#if WAKE_TRACE_ENABLE
+#define WAKE_TRACE_NUMBER 100
+typedef struct {
+	char name[64];
+	int val;
+} WAKE_TRACE;
+WAKE_TRACE wake_trace[WAKE_TRACE_NUMBER];
+int wake_trace_init;
+
+void wake_trace_lock(struct wakeup_source *ws)
+{
+	int i;
+
+	if (wake_trace_init == 0) {
+		wake_trace_init = 1;
+		memset(wake_trace, 0, sizeof(wake_trace));
+	}
+	/* Find the entry for this source, or else the first free slot. */
+	for (i = 0; i < WAKE_TRACE_NUMBER; i++) {
+		if (strlen(wake_trace[i].name) > 0 &&
+		    !strcmp(wake_trace[i].name, ws->name))
+			break;
+		if (strlen(wake_trace[i].name) <= 0)
+			break;
+	}
+	if (i >= WAKE_TRACE_NUMBER) {
+		printk("%s %d %s error\n", __func__, __LINE__, ws->name);
+	} else {
+		if (strlen(wake_trace[i].name) <= 0) {
+			printk("%s %d insert %s\n", __func__, __LINE__,
+			       ws->name);
+			strcpy(wake_trace[i].name, ws->name);
+		}
+		wake_trace[i].val++;
+	}
+}
+
+void wake_trace_unlock(struct wakeup_source *ws)
+{
+	int i;
+
+	for (i = 0; i < WAKE_TRACE_NUMBER; i++) {
+		if (strlen(wake_trace[i].name) > 0 &&
+		    !strcmp(wake_trace[i].name, ws->name))
+			break;
+	}
+	if (i >= WAKE_TRACE_NUMBER)
+		printk("%s %d %s error\n", __func__, __LINE__, ws->name);
+	else
+		wake_trace[i].val--;
+}
+
+int wake_trace_show(void)
+{
+	int i;
+
+	for (i = 0; i < WAKE_TRACE_NUMBER; i++) {
+		if (strlen(wake_trace[i].name) > 0)
+			printk("%d %s %d\n", i, wake_trace[i].name,
+			       wake_trace[i].val);
+	}
+	return 0;	/* a return value was missing here */
+}
+EXPORT_SYMBOL_GPL(wake_trace_show);
+#endif
+void __pm_stay_awake(struct wakeup_source *ws)
+{
+	unsigned long flags;
+
+	if (!ws)
+		return;
+
+	spin_lock_irqsave(&ws->lock, flags);
+
+#if WAKE_TRACE_ENABLE
+	wake_trace_lock(ws);
+#endif
+
+	wakeup_source_report_event(ws);
+	del_timer(&ws->timer);
+	ws->timer_expires = 0;
+
+	spin_unlock_irqrestore(&ws->lock, flags);
+}
+EXPORT_SYMBOL_GPL(__pm_stay_awake);
+
+/**
+ * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
+ * @dev: Device the wakeup event is related to.
+ *
+ * Notify the PM core of a wakeup event (signaled by @dev) by calling
+ * __pm_stay_awake for the @dev's wakeup source object.
+ *
+ * Call this function after detecting a wakeup event if pm_relax() is going
+ * to be called directly after processing the event (and possibly passing it to
+ * user space for further processing).
+ */
+void pm_stay_awake(struct device *dev)
+{
+	unsigned long flags;
+
+	if (!dev)
+		return;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	__pm_stay_awake(dev->power.wakeup);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_stay_awake);
+
+#ifdef CONFIG_PM_AUTOSLEEP
+static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
+{
+	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
+	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
+}
+#else
+static inline void update_prevent_sleep_time(struct wakeup_source *ws,
+					     ktime_t now) {}
+#endif
+
+/**
+ * wakeup_source_deactivate - Mark given wakeup source as inactive.
+ * @ws: Wakeup source to handle.
+ *
+ * Update the @ws' statistics and notify the PM core that the wakeup source has
+ * become inactive by decrementing the counter of wakeup events being processed
+ * and incrementing the counter of registered wakeup events.
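+ *
+ * Once no wakeup events are in progress any more after the update, waiters on
+ * wakeup_count_wait_queue are woken up, as the split_counters() check at the
+ * end of the function below shows.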
+
+/**
+ * __pm_stay_awake - Notify the PM core of a wakeup event.
+ * @ws: Wakeup source object associated with the source of the event.
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void __pm_stay_awake(struct wakeup_source *ws)
+{
+	unsigned long flags;
+
+	if (!ws)
+		return;
+
+	spin_lock_irqsave(&ws->lock, flags);
+
+#if WAKE_TRACE_ENABLE
+	wake_trace_lock(ws);
+#endif
+
+	wakeup_source_report_event(ws);
+	del_timer(&ws->timer);
+	ws->timer_expires = 0;
+
+	spin_unlock_irqrestore(&ws->lock, flags);
+}
+EXPORT_SYMBOL_GPL(__pm_stay_awake);
+
+/**
+ * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
+ * @dev: Device the wakeup event is related to.
+ *
+ * Notify the PM core of a wakeup event (signaled by @dev) by calling
+ * __pm_stay_awake for the @dev's wakeup source object.
+ *
+ * Call this function after detecting a wakeup event if pm_relax() is going
+ * to be called directly after processing the event (and possibly passing it
+ * to user space for further processing).
+ */
+void pm_stay_awake(struct device *dev)
+{
+	unsigned long flags;
+
+	if (!dev)
+		return;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	__pm_stay_awake(dev->power.wakeup);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_stay_awake);
+
+#ifdef CONFIG_PM_AUTOSLEEP
+static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
+{
+	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
+	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
+}
+#else
+static inline void update_prevent_sleep_time(struct wakeup_source *ws,
+					     ktime_t now) {}
+#endif
+
+/**
+ * wakeup_source_deactivate - Mark given wakeup source as inactive.
+ * @ws: Wakeup source to handle.
+ *
+ * Update the @ws' statistics and notify the PM core that the wakeup source
+ * has become inactive by decrementing the counter of wakeup events being
+ * processed and incrementing the counter of registered wakeup events.
+ */
+static void wakeup_source_deactivate(struct wakeup_source *ws)
+{
+	unsigned int cnt, inpr, cec;
+	ktime_t duration;
+	ktime_t now;
+
+	ws->relax_count++;
+	/*
+	 * __pm_relax() may be called directly or from a timer function.
+	 * If it is called directly right after the timer function has been
+	 * started, but before the timer function calls __pm_relax(), it is
+	 * possible that __pm_stay_awake() will be called in the meantime and
+	 * will set ws->active.  Then, ws->active may be cleared immediately
+	 * by the __pm_relax() called from the timer function, but in such a
+	 * case ws->relax_count will be different from ws->active_count.
+	 */
+	if (ws->relax_count != ws->active_count) {
+		ws->relax_count--;
+		return;
+	}
+
+	ws->active = false;
+
+	now = ktime_get();
+	duration = ktime_sub(now, ws->last_time);
+	ws->total_time = ktime_add(ws->total_time, duration);
+	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
+		ws->max_time = duration;
+
+	ws->last_time = now;
+	del_timer(&ws->timer);
+	ws->timer_expires = 0;
+
+	if (ws->autosleep_enabled)
+		update_prevent_sleep_time(ws, now);
+
+	/*
+	 * Increment the counter of registered wakeup events and decrement the
+	 * counter of wakeup events in progress simultaneously.
+	 */
+	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
+	trace_wakeup_source_deactivate(ws->name, cec);
+
+	split_counters(&cnt, &inpr);
+	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
+		wake_up(&wakeup_count_wait_queue);
+}
+
+/**
+ * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
+ * @ws: Wakeup source object associated with the source of the event.
+ *
+ * Call this function for wakeup events whose processing started with calling
+ * __pm_stay_awake().
+ *
+ * It is safe to call it from interrupt context.
+ */
+void __pm_relax(struct wakeup_source *ws)
+{
+	unsigned long flags;
+
+	if (!ws)
+		return;
+
+	spin_lock_irqsave(&ws->lock, flags);
+#if WAKE_TRACE_ENABLE
+	wake_trace_unlock(ws);
+#endif
+	if (ws->active)
+		wakeup_source_deactivate(ws);
+	spin_unlock_irqrestore(&ws->lock, flags);
+}
+EXPORT_SYMBOL_GPL(__pm_relax);
+
+/**
+ * pm_relax - Notify the PM core that processing of a wakeup event has ended.
+ * @dev: Device that signaled the event.
+ *
+ * Execute __pm_relax() for the @dev's wakeup source object.
+ */
+void pm_relax(struct device *dev)
+{
+	unsigned long flags;
+
+	if (!dev)
+		return;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	__pm_relax(dev->power.wakeup);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_relax);
+
+/**
+ * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
+ * @data: Address of the wakeup source object associated with the event source.
+ *
+ * Call wakeup_source_deactivate() for the wakeup source whose address is
+ * stored in @data if it is currently active, its timer has not been canceled
+ * and the expiration time of the timer is not in the future.
+ */
+static void pm_wakeup_timer_fn(unsigned long data)
+{
+	struct wakeup_source *ws = (struct wakeup_source *)data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ws->lock, flags);
+
+	if (ws->active && ws->timer_expires
+	    && time_after_eq(jiffies, ws->timer_expires)) {
+		wakeup_source_deactivate(ws);
+		ws->expire_count++;
+	}
+
+	spin_unlock_irqrestore(&ws->lock, flags);
+}
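+
+/*
+ * Illustrative sketch (not part of the original file) of the second pattern
+ * described near the top of this block: a hypothetical interrupt handler
+ * that only detects the event and leaves up to 100 ms for another unit to
+ * process it.  All bar_* names are assumptions made for the example:
+ *
+ *	static irqreturn_t bar_irq_handler(int irq, void *data)
+ *	{
+ *		struct bar_device *bar = data;
+ *
+ *		// Keep the system awake for ~100 ms unless pm_relax() is
+ *		// called earlier by whoever consumes the event.
+ *		pm_wakeup_event(bar->dev, 100);
+ *		bar_forward_event(bar);
+ *		return IRQ_HANDLED;
+ *	}
+ */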
+
+/**
+ * __pm_wakeup_event - Notify the PM core of a wakeup event.
+ * @ws: Wakeup source object associated with the event source.
+ * @msec: Anticipated event processing time (in milliseconds).
+ *
+ * Notify the PM core of a wakeup event whose source is @ws that will take
+ * approximately @msec milliseconds to be processed by the kernel.  If @ws is
+ * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
+ * execute pm_wakeup_timer_fn() in the future.
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
+{
+	unsigned long flags;
+	unsigned long expires;
+
+	if (!ws)
+		return;
+
+	spin_lock_irqsave(&ws->lock, flags);
+
+#if WAKE_TRACE_ENABLE
+	wake_trace_lock(ws);
+#endif
+
+	wakeup_source_report_event(ws);
+
+	if (!msec) {
+		wakeup_source_deactivate(ws);
+		goto unlock;
+	}
+
+	expires = jiffies + msecs_to_jiffies(msec);
+	if (!expires)
+		expires = 1;
+
+	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
+		mod_timer(&ws->timer, expires);
+		ws->timer_expires = expires;
+	}
+
+ unlock:
+	spin_unlock_irqrestore(&ws->lock, flags);
+}
+EXPORT_SYMBOL_GPL(__pm_wakeup_event);
+
+/**
+ * pm_wakeup_event - Notify the PM core of a wakeup event.
+ * @dev: Device the wakeup event is related to.
+ * @msec: Anticipated event processing time (in milliseconds).
+ *
+ * Call __pm_wakeup_event() for the @dev's wakeup source object.
+ */
+void pm_wakeup_event(struct device *dev, unsigned int msec)
+{
+	unsigned long flags;
+
+	if (!dev)
+		return;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	__pm_wakeup_event(dev->power.wakeup, msec);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_wakeup_event);
+
+extern int wmt_getsyspara(char *varname, unsigned char *varval, int *varlen);
+
+/* Check the "wmt.init.rc" boot parameter for the RDA5991 init script. */
+static int is_rda5991(void)
+{
+	int retval;
+	unsigned char buf[80];
+	int varlen = 80;
+
+	memset(buf, 0, sizeof(buf));
+	retval = wmt_getsyspara("wmt.init.rc", buf, &varlen);
+	if (retval == 0) {
+		if (!strcmp((char *)buf, "init.rda5991.rc")) {
+			printk("is rda5991\n");
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static void print_active_wakeup_sources(void)
+{
+	struct wakeup_source *ws;
+	int active = 0;
+	struct wakeup_source *last_activity_ws = NULL;
+
+	printk("\n...in %s\n", __func__);
+	if (is_rda5991()) {
+		printk("skip!\n");
+		return;
+	}
+	rcu_read_lock();
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+		/* Defensive NULL check added by kevin. */
+		if (!ws)
+			break;
+		if (ws->active) {
+			printk("active wakeup source: %s\n", ws->name);
+			active = 1;
+		} else if (!active &&
+			   (!last_activity_ws ||
+			    ktime_to_ns(ws->last_time) >
+			    ktime_to_ns(last_activity_ws->last_time))) {
+			last_activity_ws = ws;
+		}
+	}
+	if (!active && last_activity_ws)
+		printk("last active wakeup source: %s\n",
+		       last_activity_ws->name);
+	rcu_read_unlock();
+}
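+
+/*
+ * Illustrative sketch (not part of the original file) of how a suspend
+ * sequence might consult pm_wakeup_pending(), defined below, to abort a
+ * transition in progress; the surrounding error handling is an assumption:
+ *
+ *	if (pm_wakeup_pending()) {
+ *		error = -EBUSY;	// a wakeup event arrived, abort the suspend
+ *		goto out;
+ *	}
+ */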
+
+/**
+ * pm_wakeup_pending - Check if power transition in progress should be aborted.
+ *
+ * Compare the current number of registered wakeup events with its preserved
+ * value from the past and return true if new wakeup events have been
+ * registered since the old value was stored.  Also return true if the current
+ * number of wakeup events being processed is nonzero.
+ */
+bool pm_wakeup_pending(void)
+{
+	unsigned long flags;
+	bool ret = false;
+
+	spin_lock_irqsave(&events_lock, flags);
+	if (events_check_enabled) {
+		unsigned int cnt, inpr;
+
+		split_counters(&cnt, &inpr);
+		ret = (cnt != saved_count || inpr > 0);
+		events_check_enabled = !ret;
+	}
+	spin_unlock_irqrestore(&events_lock, flags);
+
+	if (ret)
+		print_active_wakeup_sources();
+
+	return ret;
+}
+
+/**
+ * pm_get_wakeup_count - Read the number of registered wakeup events.
+ * @count: Address to store the value at.
+ * @block: Whether or not to block.
+ *
+ * Store the number of registered wakeup events at the address in @count.  If
+ * @block is set, block until the current number of wakeup events being
+ * processed is zero.
+ *
+ * Return 'false' if the current number of wakeup events being processed is
+ * nonzero.  Otherwise return 'true'.
+ */
+bool pm_get_wakeup_count(unsigned int *count, bool block)
+{
+	unsigned int cnt, inpr;
+
+	if (block) {
+		DEFINE_WAIT(wait);
+
+		for (;;) {
+			prepare_to_wait(&wakeup_count_wait_queue, &wait,
+					TASK_INTERRUPTIBLE);
+			split_counters(&cnt, &inpr);
+			if (inpr == 0 || signal_pending(current))
+				break;
+
+			schedule();
+		}
+		finish_wait(&wakeup_count_wait_queue, &wait);
+	}
+
+	split_counters(&cnt, &inpr);
+	*count = cnt;
+	return !inpr;
+}
+
+/**
+ * pm_save_wakeup_count - Save the current number of registered wakeup events.
+ * @count: Value to compare with the current number of registered wakeup events.
+ *
+ * If @count is equal to the current number of registered wakeup events and the
+ * current number of wakeup events being processed is zero, store @count as the
+ * old number of registered wakeup events for pm_wakeup_pending(), enable
+ * wakeup events detection and return 'true'.  Otherwise disable wakeup events
+ * detection and return 'false'.
+ */
+bool pm_save_wakeup_count(unsigned int count)
+{
+	unsigned int cnt, inpr;
+	unsigned long flags;
+
+	events_check_enabled = false;
+	spin_lock_irqsave(&events_lock, flags);
+	split_counters(&cnt, &inpr);
+	if (cnt == count && inpr == 0) {
+		saved_count = count;
+		events_check_enabled = true;
+	}
+	spin_unlock_irqrestore(&events_lock, flags);
+	return events_check_enabled;
+}
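+
+/*
+ * Illustrative sketch (not part of the original file) of the user-space
+ * protocol backed by the two helpers above through /sys/power/wakeup_count:
+ * a suspend daemon reads the count, writes it back, and only then initiates
+ * suspend, so that a wakeup event occurring in between makes the write (or
+ * the suspend itself) fail:
+ *
+ *	count=$(cat /sys/power/wakeup_count)	# calls pm_get_wakeup_count()
+ *	echo "$count" > /sys/power/wakeup_count	# calls pm_save_wakeup_count()
+ *	echo mem > /sys/power/state
+ */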
+
+#ifdef CONFIG_PM_AUTOSLEEP
+/**
+ * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
+ * @set: Whether to set or to clear the autosleep_enabled flags.
+ */
+void pm_wakep_autosleep_enabled(bool set)
+{
+	struct wakeup_source *ws;
+	ktime_t now = ktime_get();
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+		spin_lock_irq(&ws->lock);
+		if (ws->autosleep_enabled != set) {
+			ws->autosleep_enabled = set;
+			if (ws->active) {
+				if (set)
+					ws->start_prevent_time = now;
+				else
+					update_prevent_sleep_time(ws, now);
+			}
+		}
+		spin_unlock_irq(&ws->lock);
+	}
+	rcu_read_unlock();
+}
+#endif /* CONFIG_PM_AUTOSLEEP */
+
+static struct dentry *wakeup_sources_stats_dentry;
+
+/**
+ * print_wakeup_source_stats - Print wakeup source statistics information.
+ * @m: seq_file to print the statistics into.
+ * @ws: Wakeup source object to print the statistics for.
+ */
+static int print_wakeup_source_stats(struct seq_file *m,
+				     struct wakeup_source *ws)
+{
+	unsigned long flags;
+	ktime_t total_time;
+	ktime_t max_time;
+	unsigned long active_count;
+	ktime_t active_time;
+	ktime_t prevent_sleep_time;
+	int ret;
+
+	spin_lock_irqsave(&ws->lock, flags);
+
+	total_time = ws->total_time;
+	max_time = ws->max_time;
+	prevent_sleep_time = ws->prevent_sleep_time;
+	active_count = ws->active_count;
+	if (ws->active) {
+		ktime_t now = ktime_get();
+
+		active_time = ktime_sub(now, ws->last_time);
+		total_time = ktime_add(total_time, active_time);
+		if (active_time.tv64 > max_time.tv64)
+			max_time = active_time;
+
+		if (ws->autosleep_enabled)
+			prevent_sleep_time = ktime_add(prevent_sleep_time,
+				ktime_sub(now, ws->start_prevent_time));
+	} else {
+		active_time = ktime_set(0, 0);
+	}
+
+	ret = seq_printf(m, "%-20s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t"
+			"%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
+			ws->name, active_count, ws->event_count,
+			ws->wakeup_count, ws->expire_count,
+			ktime_to_ms(active_time), ktime_to_ms(total_time),
+			ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
+			ktime_to_ms(prevent_sleep_time));
+
+	spin_unlock_irqrestore(&ws->lock, flags);
+
+	return ret;
+}
+
+/**
+ * wakeup_sources_stats_show - Print wakeup sources statistics information.
+ * @m: seq_file to print the statistics into.
+ */
+static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
+{
+	struct wakeup_source *ws;
+
+	seq_puts(m, "name\t\t\tactive_count\tevent_count\twakeup_count\t"
+		"expire_count\tactive_since\ttotal_time\tmax_time\t"
+		"last_change\tprevent_suspend_time\n");
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+		print_wakeup_source_stats(m, ws);
+	rcu_read_unlock();
+
+	return 0;
+}
+
+static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, wakeup_sources_stats_show, NULL);
+}
+
+static const struct file_operations wakeup_sources_stats_fops = {
+	.owner = THIS_MODULE,
+	.open = wakeup_sources_stats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int __init wakeup_sources_debugfs_init(void)
+{
+	wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
+			S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
+	return 0;
+}
+
+postcore_initcall(wakeup_sources_debugfs_init);
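+
+/*
+ * Note (not part of the original file): with debugfs mounted, the statistics
+ * registered above can be read from user space, e.g.:
+ *
+ *	cat /sys/kernel/debug/wakeup_sources
+ *
+ * which prints one line per wakeup source in the column format produced by
+ * print_wakeup_source_stats().
+ */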