summaryrefslogtreecommitdiff
path: root/ANDROID_3.4.5/drivers/base
diff options
context:
space:
mode:
Diffstat (limited to 'ANDROID_3.4.5/drivers/base')
-rw-r--r--ANDROID_3.4.5/drivers/base/Kconfig221
-rw-r--r--ANDROID_3.4.5/drivers/base/Makefile28
-rw-r--r--ANDROID_3.4.5/drivers/base/attribute_container.c441
-rw-r--r--ANDROID_3.4.5/drivers/base/base.h140
-rw-r--r--ANDROID_3.4.5/drivers/base/bus.c1295
-rw-r--r--ANDROID_3.4.5/drivers/base/class.c609
-rw-r--r--ANDROID_3.4.5/drivers/base/core.c1904
-rw-r--r--ANDROID_3.4.5/drivers/base/cpu.c337
-rw-r--r--ANDROID_3.4.5/drivers/base/dd.c577
-rw-r--r--ANDROID_3.4.5/drivers/base/devres.c651
-rw-r--r--ANDROID_3.4.5/drivers/base/devtmpfs.c455
-rw-r--r--ANDROID_3.4.5/drivers/base/dma-buf.c470
-rw-r--r--ANDROID_3.4.5/drivers/base/dma-coherent.c178
-rw-r--r--ANDROID_3.4.5/drivers/base/dma-mapping.c220
-rw-r--r--ANDROID_3.4.5/drivers/base/driver.c236
-rw-r--r--ANDROID_3.4.5/drivers/base/firmware.c27
-rw-r--r--ANDROID_3.4.5/drivers/base/firmware_class.c730
-rw-r--r--ANDROID_3.4.5/drivers/base/hypervisor.c25
-rw-r--r--ANDROID_3.4.5/drivers/base/init.c36
-rw-r--r--ANDROID_3.4.5/drivers/base/isa.c183
-rw-r--r--ANDROID_3.4.5/drivers/base/map.c155
-rw-r--r--ANDROID_3.4.5/drivers/base/memory.c705
-rw-r--r--ANDROID_3.4.5/drivers/base/module.c93
-rw-r--r--ANDROID_3.4.5/drivers/base/node.c668
-rw-r--r--ANDROID_3.4.5/drivers/base/platform.c1167
-rw-r--r--ANDROID_3.4.5/drivers/base/power/Makefile9
-rw-r--r--ANDROID_3.4.5/drivers/base/power/clock_ops.c487
-rw-r--r--ANDROID_3.4.5/drivers/base/power/common.c87
-rw-r--r--ANDROID_3.4.5/drivers/base/power/domain.c1815
-rw-r--r--ANDROID_3.4.5/drivers/base/power/domain_governor.c254
-rw-r--r--ANDROID_3.4.5/drivers/base/power/generic_ops.c329
-rw-r--r--ANDROID_3.4.5/drivers/base/power/main.c1409
-rw-r--r--ANDROID_3.4.5/drivers/base/power/opp.c676
-rw-r--r--ANDROID_3.4.5/drivers/base/power/power.h87
-rw-r--r--ANDROID_3.4.5/drivers/base/power/qos.c513
-rw-r--r--ANDROID_3.4.5/drivers/base/power/runtime.c1317
-rw-r--r--ANDROID_3.4.5/drivers/base/power/sysfs.c634
-rw-r--r--ANDROID_3.4.5/drivers/base/power/trace.c266
-rw-r--r--ANDROID_3.4.5/drivers/base/power/wakeup.c997
-rw-r--r--ANDROID_3.4.5/drivers/base/regmap/Kconfig18
-rw-r--r--ANDROID_3.4.5/drivers/base/regmap/Makefile6
-rw-r--r--ANDROID_3.4.5/drivers/base/regmap/internal.h130
-rw-r--r--ANDROID_3.4.5/drivers/base/regmap/regcache-lzo.c379
-rw-r--r--ANDROID_3.4.5/drivers/base/regmap/regcache-rbtree.c430
-rw-r--r--ANDROID_3.4.5/drivers/base/regmap/regcache.c491
-rw-r--r--ANDROID_3.4.5/drivers/base/regmap/regmap-debugfs.c286
-rw-r--r--ANDROID_3.4.5/drivers/base/regmap/regmap-i2c.c131
-rw-r--r--ANDROID_3.4.5/drivers/base/regmap/regmap-irq.c303
-rw-r--r--ANDROID_3.4.5/drivers/base/regmap/regmap-spi.c90
-rw-r--r--ANDROID_3.4.5/drivers/base/regmap/regmap.c936
-rw-r--r--ANDROID_3.4.5/drivers/base/soc.c181
-rw-r--r--ANDROID_3.4.5/drivers/base/sw_sync.c262
-rw-r--r--ANDROID_3.4.5/drivers/base/sync.c1046
-rw-r--r--ANDROID_3.4.5/drivers/base/syscore.c127
-rw-r--r--ANDROID_3.4.5/drivers/base/topology.c196
-rw-r--r--ANDROID_3.4.5/drivers/base/transport_class.c280
56 files changed, 0 insertions, 25723 deletions
diff --git a/ANDROID_3.4.5/drivers/base/Kconfig b/ANDROID_3.4.5/drivers/base/Kconfig
deleted file mode 100644
index 1131dd73..00000000
--- a/ANDROID_3.4.5/drivers/base/Kconfig
+++ /dev/null
@@ -1,221 +0,0 @@
-menu "Generic Driver Options"
-
-config UEVENT_HELPER_PATH
- string "path to uevent helper"
- depends on HOTPLUG
- default ""
- help
- Path to uevent helper program forked by the kernel for
- every uevent.
- Before the switch to the netlink-based uevent source, this was
- used to hook hotplug scripts into kernel device events. It
- usually pointed to a shell script at /sbin/hotplug.
- This should not be used today, because usual systems create
- many events at bootup or device discovery in a very short time
- frame. One forked process per event can create so many processes
- that it creates a high system load, or on smaller systems
- it is known to create out-of-memory situations during bootup.
-
- To disable user space helper program execution at early boot
- time specify an empty string here. This setting can be altered
- via /proc/sys/kernel/hotplug or via /sys/kernel/uevent_helper
- later at runtime.
-
-config DEVTMPFS
- bool "Maintain a devtmpfs filesystem to mount at /dev"
- depends on HOTPLUG
- help
- This creates a tmpfs/ramfs filesystem instance early at bootup.
- In this filesystem, the kernel driver core maintains device
- nodes with their default names and permissions for all
- registered devices with an assigned major/minor number.
- Userspace can modify the filesystem content as needed, add
- symlinks, and apply needed permissions.
- It provides a fully functional /dev directory, where usually
- udev runs on top, managing permissions and adding meaningful
- symlinks.
- In very limited environments, it may provide a sufficient
- functional /dev without any further help. It also allows simple
- rescue systems, and reliably handles dynamic major/minor numbers.
-
- Notice: if CONFIG_TMPFS isn't enabled, the simpler ramfs
- file system will be used instead.
-
-config DEVTMPFS_MOUNT
- bool "Automount devtmpfs at /dev, after the kernel mounted the rootfs"
- depends on DEVTMPFS
- help
- This will instruct the kernel to automatically mount the
- devtmpfs filesystem at /dev, directly after the kernel has
- mounted the root filesystem. The behavior can be overridden
- with the commandline parameter: devtmpfs.mount=0|1.
- This option does not affect initramfs based booting, here
- the devtmpfs filesystem always needs to be mounted manually
- after the roots is mounted.
- With this option enabled, it allows to bring up a system in
- rescue mode with init=/bin/sh, even when the /dev directory
- on the rootfs is completely empty.
-
-config STANDALONE
- bool "Select only drivers that don't need compile-time external firmware" if EXPERIMENTAL
- default y
- help
- Select this option if you don't have magic firmware for drivers that
- need it.
-
- If unsure, say Y.
-
-config PREVENT_FIRMWARE_BUILD
- bool "Prevent firmware from being built"
- default y
- help
- Say yes to avoid building firmware. Firmware is usually shipped
- with the driver and only when updating the firmware should a
- rebuild be made.
- If unsure, say Y here.
-
-config FW_LOADER
- tristate "Userspace firmware loading support" if EXPERT
- default y
- ---help---
- This option is provided for the case where none of the in-tree modules
- require userspace firmware loading support, but a module built
- out-of-tree does.
-
-config FIRMWARE_IN_KERNEL
- bool "Include in-kernel firmware blobs in kernel binary"
- depends on FW_LOADER
- default y
- help
- The kernel source tree includes a number of firmware 'blobs'
- that are used by various drivers. The recommended way to
- use these is to run "make firmware_install", which, after
- converting ihex files to binary, copies all of the needed
- binary files in firmware/ to /lib/firmware/ on your system so
- that they can be loaded by userspace helpers on request.
-
- Enabling this option will build each required firmware blob
- into the kernel directly, where request_firmware() will find
- them without having to call out to userspace. This may be
- useful if your root file system requires a device that uses
- such firmware and do not wish to use an initrd.
-
- This single option controls the inclusion of firmware for
- every driver that uses request_firmware() and ships its
- firmware in the kernel source tree, which avoids a
- proliferation of 'Include firmware for xxx device' options.
-
- Say 'N' and let firmware be loaded from userspace.
-
-config EXTRA_FIRMWARE
- string "External firmware blobs to build into the kernel binary"
- depends on FW_LOADER
- help
- This option allows firmware to be built into the kernel for the case
- where the user either cannot or doesn't want to provide it from
- userspace at runtime (for example, when the firmware in question is
- required for accessing the boot device, and the user doesn't want to
- use an initrd).
-
- This option is a string and takes the (space-separated) names of the
- firmware files -- the same names that appear in MODULE_FIRMWARE()
- and request_firmware() in the source. These files should exist under
- the directory specified by the EXTRA_FIRMWARE_DIR option, which is
- by default the firmware subdirectory of the kernel source tree.
-
- For example, you might set CONFIG_EXTRA_FIRMWARE="usb8388.bin", copy
- the usb8388.bin file into the firmware directory, and build the kernel.
- Then any request_firmware("usb8388.bin") will be satisfied internally
- without needing to call out to userspace.
-
- WARNING: If you include additional firmware files into your binary
- kernel image that are not available under the terms of the GPL,
- then it may be a violation of the GPL to distribute the resulting
- image since it combines both GPL and non-GPL work. You should
- consult a lawyer of your own before distributing such an image.
-
-config EXTRA_FIRMWARE_DIR
- string "Firmware blobs root directory"
- depends on EXTRA_FIRMWARE != ""
- default "firmware"
- help
- This option controls the directory in which the kernel build system
- looks for the firmware files listed in the EXTRA_FIRMWARE option.
- The default is firmware/ in the kernel source tree, but by changing
- this option you can point it elsewhere, such as /lib/firmware/ or
- some other directory containing the firmware files.
-
-config DEBUG_DRIVER
- bool "Driver Core verbose debug messages"
- depends on DEBUG_KERNEL
- help
- Say Y here if you want the Driver core to produce a bunch of
- debug messages to the system log. Select this if you are having a
- problem with the driver core and want to see more of what is
- going on.
-
- If you are unsure about this, say N here.
-
-config DEBUG_DEVRES
- bool "Managed device resources verbose debug messages"
- depends on DEBUG_KERNEL
- help
- This option enables kernel parameter devres.log. If set to
- non-zero, devres debug messages are printed. Select this if
- you are having a problem with devres or want to debug
- resource management for a managed device. devres.log can be
- switched on and off from sysfs node.
-
- If you are unsure about this, Say N here.
-
-config SYS_HYPERVISOR
- bool
- default n
-
-config GENERIC_CPU_DEVICES
- bool
- default n
-
-config SOC_BUS
- bool
-
-source "drivers/base/regmap/Kconfig"
-
-config DMA_SHARED_BUFFER
- bool
- default n
- select ANON_INODES
- depends on EXPERIMENTAL
- help
- This option enables the framework for buffer-sharing between
- multiple drivers. A buffer is associated with a file using driver
- APIs extension; the file's descriptor can then be passed on to other
- driver.
-
-config SYNC
- bool "Synchronization framework"
- default n
- select ANON_INODES
- help
- This option enables the framework for synchronization between multiple
- drivers. Sync implementations can take advantage of hardware
- synchronization built into devices like GPUs.
-
-config SW_SYNC
- bool "Software synchronization objects"
- default n
- depends on SYNC
- help
- A sync object driver that uses a 32bit counter to coordinate
- syncrhronization. Useful when there is no hardware primitive backing
- the synchronization.
-
-config SW_SYNC_USER
- bool "Userspace API for SW_SYNC"
- default n
- depends on SW_SYNC
- help
- Provides a user space API to the sw sync object.
- *WARNING* improper use of this can result in deadlocking kernel
- drivers from userspace.
-endmenu
diff --git a/ANDROID_3.4.5/drivers/base/Makefile b/ANDROID_3.4.5/drivers/base/Makefile
deleted file mode 100644
index 0e4d3dad..00000000
--- a/ANDROID_3.4.5/drivers/base/Makefile
+++ /dev/null
@@ -1,28 +0,0 @@
-# Makefile for the Linux device tree
-
-obj-y := core.o bus.o dd.o syscore.o \
- driver.o class.o platform.o \
- cpu.o firmware.o init.o map.o devres.o \
- attribute_container.o transport_class.o \
- topology.o
-obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
-obj-y += power/
-obj-$(CONFIG_HAS_DMA) += dma-mapping.o
-obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
-obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o
-obj-$(CONFIG_ISA) += isa.o
-obj-$(CONFIG_FW_LOADER) += firmware_class.o
-obj-$(CONFIG_NUMA) += node.o
-obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
-ifeq ($(CONFIG_SYSFS),y)
-obj-$(CONFIG_MODULES) += module.o
-endif
-obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
-obj-$(CONFIG_REGMAP) += regmap/
-obj-$(CONFIG_SOC_BUS) += soc.o
-
-obj-$(CONFIG_SYNC) += sync.o
-obj-$(CONFIG_SW_SYNC) += sw_sync.o
-
-ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
-
diff --git a/ANDROID_3.4.5/drivers/base/attribute_container.c b/ANDROID_3.4.5/drivers/base/attribute_container.c
deleted file mode 100644
index 8fc200b2..00000000
--- a/ANDROID_3.4.5/drivers/base/attribute_container.c
+++ /dev/null
@@ -1,441 +0,0 @@
-/*
- * attribute_container.c - implementation of a simple container for classes
- *
- * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
- *
- * This file is licensed under GPLv2
- *
- * The basic idea here is to enable a device to be attached to an
- * aritrary numer of classes without having to allocate storage for them.
- * Instead, the contained classes select the devices they need to attach
- * to via a matching function.
- */
-
-#include <linux/attribute_container.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-
-#include "base.h"
-
-/* This is a private structure used to tie the classdev and the
- * container .. it should never be visible outside this file */
-struct internal_container {
- struct klist_node node;
- struct attribute_container *cont;
- struct device classdev;
-};
-
-static void internal_container_klist_get(struct klist_node *n)
-{
- struct internal_container *ic =
- container_of(n, struct internal_container, node);
- get_device(&ic->classdev);
-}
-
-static void internal_container_klist_put(struct klist_node *n)
-{
- struct internal_container *ic =
- container_of(n, struct internal_container, node);
- put_device(&ic->classdev);
-}
-
-
-/**
- * attribute_container_classdev_to_container - given a classdev, return the container
- *
- * @classdev: the class device created by attribute_container_add_device.
- *
- * Returns the container associated with this classdev.
- */
-struct attribute_container *
-attribute_container_classdev_to_container(struct device *classdev)
-{
- struct internal_container *ic =
- container_of(classdev, struct internal_container, classdev);
- return ic->cont;
-}
-EXPORT_SYMBOL_GPL(attribute_container_classdev_to_container);
-
-static LIST_HEAD(attribute_container_list);
-
-static DEFINE_MUTEX(attribute_container_mutex);
-
-/**
- * attribute_container_register - register an attribute container
- *
- * @cont: The container to register. This must be allocated by the
- * callee and should also be zeroed by it.
- */
-int
-attribute_container_register(struct attribute_container *cont)
-{
- INIT_LIST_HEAD(&cont->node);
- klist_init(&cont->containers,internal_container_klist_get,
- internal_container_klist_put);
-
- mutex_lock(&attribute_container_mutex);
- list_add_tail(&cont->node, &attribute_container_list);
- mutex_unlock(&attribute_container_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(attribute_container_register);
-
-/**
- * attribute_container_unregister - remove a container registration
- *
- * @cont: previously registered container to remove
- */
-int
-attribute_container_unregister(struct attribute_container *cont)
-{
- int retval = -EBUSY;
- mutex_lock(&attribute_container_mutex);
- spin_lock(&cont->containers.k_lock);
- if (!list_empty(&cont->containers.k_list))
- goto out;
- retval = 0;
- list_del(&cont->node);
- out:
- spin_unlock(&cont->containers.k_lock);
- mutex_unlock(&attribute_container_mutex);
- return retval;
-
-}
-EXPORT_SYMBOL_GPL(attribute_container_unregister);
-
-/* private function used as class release */
-static void attribute_container_release(struct device *classdev)
-{
- struct internal_container *ic
- = container_of(classdev, struct internal_container, classdev);
- struct device *dev = classdev->parent;
-
- kfree(ic);
- put_device(dev);
-}
-
-/**
- * attribute_container_add_device - see if any container is interested in dev
- *
- * @dev: device to add attributes to
- * @fn: function to trigger addition of class device.
- *
- * This function allocates storage for the class device(s) to be
- * attached to dev (one for each matching attribute_container). If no
- * fn is provided, the code will simply register the class device via
- * device_add. If a function is provided, it is expected to add
- * the class device at the appropriate time. One of the things that
- * might be necessary is to allocate and initialise the classdev and
- * then add it a later time. To do this, call this routine for
- * allocation and initialisation and then use
- * attribute_container_device_trigger() to call device_add() on
- * it. Note: after this, the class device contains a reference to dev
- * which is not relinquished until the release of the classdev.
- */
-void
-attribute_container_add_device(struct device *dev,
- int (*fn)(struct attribute_container *,
- struct device *,
- struct device *))
-{
- struct attribute_container *cont;
-
- mutex_lock(&attribute_container_mutex);
- list_for_each_entry(cont, &attribute_container_list, node) {
- struct internal_container *ic;
-
- if (attribute_container_no_classdevs(cont))
- continue;
-
- if (!cont->match(cont, dev))
- continue;
-
- ic = kzalloc(sizeof(*ic), GFP_KERNEL);
- if (!ic) {
- dev_printk(KERN_ERR, dev, "failed to allocate class container\n");
- continue;
- }
-
- ic->cont = cont;
- device_initialize(&ic->classdev);
- ic->classdev.parent = get_device(dev);
- ic->classdev.class = cont->class;
- cont->class->dev_release = attribute_container_release;
- dev_set_name(&ic->classdev, dev_name(dev));
- if (fn)
- fn(cont, dev, &ic->classdev);
- else
- attribute_container_add_class_device(&ic->classdev);
- klist_add_tail(&ic->node, &cont->containers);
- }
- mutex_unlock(&attribute_container_mutex);
-}
-
-/* FIXME: can't break out of this unless klist_iter_exit is also
- * called before doing the break
- */
-#define klist_for_each_entry(pos, head, member, iter) \
- for (klist_iter_init(head, iter); (pos = ({ \
- struct klist_node *n = klist_next(iter); \
- n ? container_of(n, typeof(*pos), member) : \
- ({ klist_iter_exit(iter) ; NULL; }); \
- }) ) != NULL; )
-
-
-/**
- * attribute_container_remove_device - make device eligible for removal.
- *
- * @dev: The generic device
- * @fn: A function to call to remove the device
- *
- * This routine triggers device removal. If fn is NULL, then it is
- * simply done via device_unregister (note that if something
- * still has a reference to the classdev, then the memory occupied
- * will not be freed until the classdev is released). If you want a
- * two phase release: remove from visibility and then delete the
- * device, then you should use this routine with a fn that calls
- * device_del() and then use attribute_container_device_trigger()
- * to do the final put on the classdev.
- */
-void
-attribute_container_remove_device(struct device *dev,
- void (*fn)(struct attribute_container *,
- struct device *,
- struct device *))
-{
- struct attribute_container *cont;
-
- mutex_lock(&attribute_container_mutex);
- list_for_each_entry(cont, &attribute_container_list, node) {
- struct internal_container *ic;
- struct klist_iter iter;
-
- if (attribute_container_no_classdevs(cont))
- continue;
-
- if (!cont->match(cont, dev))
- continue;
-
- klist_for_each_entry(ic, &cont->containers, node, &iter) {
- if (dev != ic->classdev.parent)
- continue;
- klist_del(&ic->node);
- if (fn)
- fn(cont, dev, &ic->classdev);
- else {
- attribute_container_remove_attrs(&ic->classdev);
- device_unregister(&ic->classdev);
- }
- }
- }
- mutex_unlock(&attribute_container_mutex);
-}
-
-/**
- * attribute_container_device_trigger - execute a trigger for each matching classdev
- *
- * @dev: The generic device to run the trigger for
- * @fn the function to execute for each classdev.
- *
- * This funcion is for executing a trigger when you need to know both
- * the container and the classdev. If you only care about the
- * container, then use attribute_container_trigger() instead.
- */
-void
-attribute_container_device_trigger(struct device *dev,
- int (*fn)(struct attribute_container *,
- struct device *,
- struct device *))
-{
- struct attribute_container *cont;
-
- mutex_lock(&attribute_container_mutex);
- list_for_each_entry(cont, &attribute_container_list, node) {
- struct internal_container *ic;
- struct klist_iter iter;
-
- if (!cont->match(cont, dev))
- continue;
-
- if (attribute_container_no_classdevs(cont)) {
- fn(cont, dev, NULL);
- continue;
- }
-
- klist_for_each_entry(ic, &cont->containers, node, &iter) {
- if (dev == ic->classdev.parent)
- fn(cont, dev, &ic->classdev);
- }
- }
- mutex_unlock(&attribute_container_mutex);
-}
-
-/**
- * attribute_container_trigger - trigger a function for each matching container
- *
- * @dev: The generic device to activate the trigger for
- * @fn: the function to trigger
- *
- * This routine triggers a function that only needs to know the
- * matching containers (not the classdev) associated with a device.
- * It is more lightweight than attribute_container_device_trigger, so
- * should be used in preference unless the triggering function
- * actually needs to know the classdev.
- */
-void
-attribute_container_trigger(struct device *dev,
- int (*fn)(struct attribute_container *,
- struct device *))
-{
- struct attribute_container *cont;
-
- mutex_lock(&attribute_container_mutex);
- list_for_each_entry(cont, &attribute_container_list, node) {
- if (cont->match(cont, dev))
- fn(cont, dev);
- }
- mutex_unlock(&attribute_container_mutex);
-}
-
-/**
- * attribute_container_add_attrs - add attributes
- *
- * @classdev: The class device
- *
- * This simply creates all the class device sysfs files from the
- * attributes listed in the container
- */
-int
-attribute_container_add_attrs(struct device *classdev)
-{
- struct attribute_container *cont =
- attribute_container_classdev_to_container(classdev);
- struct device_attribute **attrs = cont->attrs;
- int i, error;
-
- BUG_ON(attrs && cont->grp);
-
- if (!attrs && !cont->grp)
- return 0;
-
- if (cont->grp)
- return sysfs_create_group(&classdev->kobj, cont->grp);
-
- for (i = 0; attrs[i]; i++) {
- sysfs_attr_init(&attrs[i]->attr);
- error = device_create_file(classdev, attrs[i]);
- if (error)
- return error;
- }
-
- return 0;
-}
-
-/**
- * attribute_container_add_class_device - same function as device_add
- *
- * @classdev: the class device to add
- *
- * This performs essentially the same function as device_add except for
- * attribute containers, namely add the classdev to the system and then
- * create the attribute files
- */
-int
-attribute_container_add_class_device(struct device *classdev)
-{
- int error = device_add(classdev);
- if (error)
- return error;
- return attribute_container_add_attrs(classdev);
-}
-
-/**
- * attribute_container_add_class_device_adapter - simple adapter for triggers
- *
- * This function is identical to attribute_container_add_class_device except
- * that it is designed to be called from the triggers
- */
-int
-attribute_container_add_class_device_adapter(struct attribute_container *cont,
- struct device *dev,
- struct device *classdev)
-{
- return attribute_container_add_class_device(classdev);
-}
-
-/**
- * attribute_container_remove_attrs - remove any attribute files
- *
- * @classdev: The class device to remove the files from
- *
- */
-void
-attribute_container_remove_attrs(struct device *classdev)
-{
- struct attribute_container *cont =
- attribute_container_classdev_to_container(classdev);
- struct device_attribute **attrs = cont->attrs;
- int i;
-
- if (!attrs && !cont->grp)
- return;
-
- if (cont->grp) {
- sysfs_remove_group(&classdev->kobj, cont->grp);
- return ;
- }
-
- for (i = 0; attrs[i]; i++)
- device_remove_file(classdev, attrs[i]);
-}
-
-/**
- * attribute_container_class_device_del - equivalent of class_device_del
- *
- * @classdev: the class device
- *
- * This function simply removes all the attribute files and then calls
- * device_del.
- */
-void
-attribute_container_class_device_del(struct device *classdev)
-{
- attribute_container_remove_attrs(classdev);
- device_del(classdev);
-}
-
-/**
- * attribute_container_find_class_device - find the corresponding class_device
- *
- * @cont: the container
- * @dev: the generic device
- *
- * Looks up the device in the container's list of class devices and returns
- * the corresponding class_device.
- */
-struct device *
-attribute_container_find_class_device(struct attribute_container *cont,
- struct device *dev)
-{
- struct device *cdev = NULL;
- struct internal_container *ic;
- struct klist_iter iter;
-
- klist_for_each_entry(ic, &cont->containers, node, &iter) {
- if (ic->classdev.parent == dev) {
- cdev = &ic->classdev;
- /* FIXME: must exit iterator then break */
- klist_iter_exit(&iter);
- break;
- }
- }
-
- return cdev;
-}
-EXPORT_SYMBOL_GPL(attribute_container_find_class_device);
diff --git a/ANDROID_3.4.5/drivers/base/base.h b/ANDROID_3.4.5/drivers/base/base.h
deleted file mode 100644
index 6ee17bb3..00000000
--- a/ANDROID_3.4.5/drivers/base/base.h
+++ /dev/null
@@ -1,140 +0,0 @@
-#include <linux/notifier.h>
-
-/**
- * struct subsys_private - structure to hold the private to the driver core portions of the bus_type/class structure.
- *
- * @subsys - the struct kset that defines this subsystem
- * @devices_kset - the subsystem's 'devices' directory
- * @interfaces - list of subsystem interfaces associated
- * @mutex - protect the devices, and interfaces lists.
- *
- * @drivers_kset - the list of drivers associated
- * @klist_devices - the klist to iterate over the @devices_kset
- * @klist_drivers - the klist to iterate over the @drivers_kset
- * @bus_notifier - the bus notifier list for anything that cares about things
- * on this bus.
- * @bus - pointer back to the struct bus_type that this structure is associated
- * with.
- *
- * @glue_dirs - "glue" directory to put in-between the parent device to
- * avoid namespace conflicts
- * @class - pointer back to the struct class that this structure is associated
- * with.
- *
- * This structure is the one that is the actual kobject allowing struct
- * bus_type/class to be statically allocated safely. Nothing outside of the
- * driver core should ever touch these fields.
- */
-struct subsys_private {
- struct kset subsys;
- struct kset *devices_kset;
- struct list_head interfaces;
- struct mutex mutex;
-
- struct kset *drivers_kset;
- struct klist klist_devices;
- struct klist klist_drivers;
- struct blocking_notifier_head bus_notifier;
- unsigned int drivers_autoprobe:1;
- struct bus_type *bus;
-
- struct kset glue_dirs;
- struct class *class;
-};
-#define to_subsys_private(obj) container_of(obj, struct subsys_private, subsys.kobj)
-
-struct driver_private {
- struct kobject kobj;
- struct klist klist_devices;
- struct klist_node knode_bus;
- struct module_kobject *mkobj;
- struct device_driver *driver;
-};
-#define to_driver(obj) container_of(obj, struct driver_private, kobj)
-
-/**
- * struct device_private - structure to hold the private to the driver core portions of the device structure.
- *
- * @klist_children - klist containing all children of this device
- * @knode_parent - node in sibling list
- * @knode_driver - node in driver list
- * @knode_bus - node in bus list
- * @deferred_probe - entry in deferred_probe_list which is used to retry the
- * binding of drivers which were unable to get all the resources needed by
- * the device; typically because it depends on another driver getting
- * probed first.
- * @driver_data - private pointer for driver specific info. Will turn into a
- * list soon.
- * @device - pointer back to the struct class that this structure is
- * associated with.
- *
- * Nothing outside of the driver core should ever touch these fields.
- */
-struct device_private {
- struct klist klist_children;
- struct klist_node knode_parent;
- struct klist_node knode_driver;
- struct klist_node knode_bus;
- struct list_head deferred_probe;
- void *driver_data;
- struct device *device;
-};
-#define to_device_private_parent(obj) \
- container_of(obj, struct device_private, knode_parent)
-#define to_device_private_driver(obj) \
- container_of(obj, struct device_private, knode_driver)
-#define to_device_private_bus(obj) \
- container_of(obj, struct device_private, knode_bus)
-
-extern int device_private_init(struct device *dev);
-
-/* initialisation functions */
-extern int devices_init(void);
-extern int buses_init(void);
-extern int classes_init(void);
-extern int firmware_init(void);
-#ifdef CONFIG_SYS_HYPERVISOR
-extern int hypervisor_init(void);
-#else
-static inline int hypervisor_init(void) { return 0; }
-#endif
-extern int platform_bus_init(void);
-extern void cpu_dev_init(void);
-
-extern int bus_add_device(struct device *dev);
-extern void bus_probe_device(struct device *dev);
-extern void bus_remove_device(struct device *dev);
-
-extern int bus_add_driver(struct device_driver *drv);
-extern void bus_remove_driver(struct device_driver *drv);
-
-extern void driver_detach(struct device_driver *drv);
-extern int driver_probe_device(struct device_driver *drv, struct device *dev);
-extern void driver_deferred_probe_del(struct device *dev);
-static inline int driver_match_device(struct device_driver *drv,
- struct device *dev)
-{
- return drv->bus->match ? drv->bus->match(dev, drv) : 1;
-}
-
-extern char *make_class_name(const char *name, struct kobject *kobj);
-
-extern int devres_release_all(struct device *dev);
-
-/* /sys/devices directory */
-extern struct kset *devices_kset;
-
-#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
-extern void module_add_driver(struct module *mod, struct device_driver *drv);
-extern void module_remove_driver(struct device_driver *drv);
-#else
-static inline void module_add_driver(struct module *mod,
- struct device_driver *drv) { }
-static inline void module_remove_driver(struct device_driver *drv) { }
-#endif
-
-#ifdef CONFIG_DEVTMPFS
-extern int devtmpfs_init(void);
-#else
-static inline int devtmpfs_init(void) { return 0; }
-#endif
diff --git a/ANDROID_3.4.5/drivers/base/bus.c b/ANDROID_3.4.5/drivers/base/bus.c
deleted file mode 100644
index 26a06b80..00000000
--- a/ANDROID_3.4.5/drivers/base/bus.c
+++ /dev/null
@@ -1,1295 +0,0 @@
-/*
- * bus.c - bus driver management
- *
- * Copyright (c) 2002-3 Patrick Mochel
- * Copyright (c) 2002-3 Open Source Development Labs
- * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
- * Copyright (c) 2007 Novell Inc.
- *
- * This file is released under the GPLv2
- *
- */
-
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/mutex.h>
-#include "base.h"
-#include "power/power.h"
-
-/* /sys/devices/system */
-/* FIXME: make static after drivers/base/sys.c is deleted */
-struct kset *system_kset;
-
-#define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr)
-
-/*
- * sysfs bindings for drivers
- */
-
-#define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr)
-
-
-static int __must_check bus_rescan_devices_helper(struct device *dev,
- void *data);
-
-static struct bus_type *bus_get(struct bus_type *bus)
-{
- if (bus) {
- kset_get(&bus->p->subsys);
- return bus;
- }
- return NULL;
-}
-
-static void bus_put(struct bus_type *bus)
-{
- if (bus)
- kset_put(&bus->p->subsys);
-}
-
-static ssize_t drv_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct driver_attribute *drv_attr = to_drv_attr(attr);
- struct driver_private *drv_priv = to_driver(kobj);
- ssize_t ret = -EIO;
-
- if (drv_attr->show)
- ret = drv_attr->show(drv_priv->driver, buf);
- return ret;
-}
-
-static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
- struct driver_attribute *drv_attr = to_drv_attr(attr);
- struct driver_private *drv_priv = to_driver(kobj);
- ssize_t ret = -EIO;
-
- if (drv_attr->store)
- ret = drv_attr->store(drv_priv->driver, buf, count);
- return ret;
-}
-
-static const struct sysfs_ops driver_sysfs_ops = {
- .show = drv_attr_show,
- .store = drv_attr_store,
-};
-
-static void driver_release(struct kobject *kobj)
-{
- struct driver_private *drv_priv = to_driver(kobj);
-
- pr_debug("driver: '%s': %s\n", kobject_name(kobj), __func__);
- kfree(drv_priv);
-}
-
-static struct kobj_type driver_ktype = {
- .sysfs_ops = &driver_sysfs_ops,
- .release = driver_release,
-};
-
-/*
- * sysfs bindings for buses
- */
-static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct bus_attribute *bus_attr = to_bus_attr(attr);
- struct subsys_private *subsys_priv = to_subsys_private(kobj);
- ssize_t ret = 0;
-
- if (bus_attr->show)
- ret = bus_attr->show(subsys_priv->bus, buf);
- return ret;
-}
-
-static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
- struct bus_attribute *bus_attr = to_bus_attr(attr);
- struct subsys_private *subsys_priv = to_subsys_private(kobj);
- ssize_t ret = 0;
-
- if (bus_attr->store)
- ret = bus_attr->store(subsys_priv->bus, buf, count);
- return ret;
-}
-
-static const struct sysfs_ops bus_sysfs_ops = {
- .show = bus_attr_show,
- .store = bus_attr_store,
-};
-
-int bus_create_file(struct bus_type *bus, struct bus_attribute *attr)
-{
- int error;
- if (bus_get(bus)) {
- error = sysfs_create_file(&bus->p->subsys.kobj, &attr->attr);
- bus_put(bus);
- } else
- error = -EINVAL;
- return error;
-}
-EXPORT_SYMBOL_GPL(bus_create_file);
-
-void bus_remove_file(struct bus_type *bus, struct bus_attribute *attr)
-{
- if (bus_get(bus)) {
- sysfs_remove_file(&bus->p->subsys.kobj, &attr->attr);
- bus_put(bus);
- }
-}
-EXPORT_SYMBOL_GPL(bus_remove_file);
-
-static struct kobj_type bus_ktype = {
- .sysfs_ops = &bus_sysfs_ops,
-};
-
-static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
-{
- struct kobj_type *ktype = get_ktype(kobj);
-
- if (ktype == &bus_ktype)
- return 1;
- return 0;
-}
-
-static const struct kset_uevent_ops bus_uevent_ops = {
- .filter = bus_uevent_filter,
-};
-
-static struct kset *bus_kset;
-
-
-#ifdef CONFIG_HOTPLUG
-/* Manually detach a device from its associated driver. */
-static ssize_t driver_unbind(struct device_driver *drv,
- const char *buf, size_t count)
-{
- struct bus_type *bus = bus_get(drv->bus);
- struct device *dev;
- int err = -ENODEV;
-
- dev = bus_find_device_by_name(bus, NULL, buf);
- if (dev && dev->driver == drv) {
- if (dev->parent) /* Needed for USB */
- device_lock(dev->parent);
- device_release_driver(dev);
- if (dev->parent)
- device_unlock(dev->parent);
- err = count;
- }
- put_device(dev);
- bus_put(bus);
- return err;
-}
-static DRIVER_ATTR(unbind, S_IWUSR, NULL, driver_unbind);
-
-/*
- * Manually attach a device to a driver.
- * Note: the driver must want to bind to the device,
- * it is not possible to override the driver's id table.
- */
-static ssize_t driver_bind(struct device_driver *drv,
- const char *buf, size_t count)
-{
- struct bus_type *bus = bus_get(drv->bus);
- struct device *dev;
- int err = -ENODEV;
-
- dev = bus_find_device_by_name(bus, NULL, buf);
- if (dev && dev->driver == NULL && driver_match_device(drv, dev)) {
- if (dev->parent) /* Needed for USB */
- device_lock(dev->parent);
- device_lock(dev);
- err = driver_probe_device(drv, dev);
- device_unlock(dev);
- if (dev->parent)
- device_unlock(dev->parent);
-
- if (err > 0) {
- /* success */
- err = count;
- } else if (err == 0) {
- /* driver didn't accept device */
- err = -ENODEV;
- }
- }
- put_device(dev);
- bus_put(bus);
- return err;
-}
-static DRIVER_ATTR(bind, S_IWUSR, NULL, driver_bind);
-
-static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf)
-{
- return sprintf(buf, "%d\n", bus->p->drivers_autoprobe);
-}
-
-static ssize_t store_drivers_autoprobe(struct bus_type *bus,
- const char *buf, size_t count)
-{
- if (buf[0] == '0')
- bus->p->drivers_autoprobe = 0;
- else
- bus->p->drivers_autoprobe = 1;
- return count;
-}
-
-static ssize_t store_drivers_probe(struct bus_type *bus,
- const char *buf, size_t count)
-{
- struct device *dev;
-
- dev = bus_find_device_by_name(bus, NULL, buf);
- if (!dev)
- return -ENODEV;
- if (bus_rescan_devices_helper(dev, NULL) != 0)
- return -EINVAL;
- return count;
-}
-#endif
-
-static struct device *next_device(struct klist_iter *i)
-{
- struct klist_node *n = klist_next(i);
- struct device *dev = NULL;
- struct device_private *dev_prv;
-
- if (n) {
- dev_prv = to_device_private_bus(n);
- dev = dev_prv->device;
- }
- return dev;
-}
-
-/**
- * bus_for_each_dev - device iterator.
- * @bus: bus type.
- * @start: device to start iterating from.
- * @data: data for the callback.
- * @fn: function to be called for each device.
- *
- * Iterate over @bus's list of devices, and call @fn for each,
- * passing it @data. If @start is not NULL, we use that device to
- * begin iterating from.
- *
- * We check the return of @fn each time. If it returns anything
- * other than 0, we break out and return that value.
- *
- * NOTE: The device that returns a non-zero value is not retained
- * in any way, nor is its refcount incremented. If the caller needs
- * to retain this data, it should do so, and increment the reference
- * count in the supplied callback.
- */
-int bus_for_each_dev(struct bus_type *bus, struct device *start,
- void *data, int (*fn)(struct device *, void *))
-{
- struct klist_iter i;
- struct device *dev;
- int error = 0;
-
- if (!bus)
- return -EINVAL;
-
- klist_iter_init_node(&bus->p->klist_devices, &i,
- (start ? &start->p->knode_bus : NULL));
- while ((dev = next_device(&i)) && !error)
- error = fn(dev, data);
- klist_iter_exit(&i);
- return error;
-}
-EXPORT_SYMBOL_GPL(bus_for_each_dev);
-
-/**
- * bus_find_device - device iterator for locating a particular device.
- * @bus: bus type
- * @start: Device to begin with
- * @data: Data to pass to match function
- * @match: Callback function to check device
- *
- * This is similar to the bus_for_each_dev() function above, but it
- * returns a reference to a device that is 'found' for later use, as
- * determined by the @match callback.
- *
- * The callback should return 0 if the device doesn't match and non-zero
- * if it does. If the callback returns non-zero, this function will
- * return to the caller and not iterate over any more devices.
- */
-struct device *bus_find_device(struct bus_type *bus,
- struct device *start, void *data,
- int (*match)(struct device *dev, void *data))
-{
- struct klist_iter i;
- struct device *dev;
-
- if (!bus)
- return NULL;
-
- klist_iter_init_node(&bus->p->klist_devices, &i,
- (start ? &start->p->knode_bus : NULL));
- while ((dev = next_device(&i)))
- if (match(dev, data) && get_device(dev))
- break;
- klist_iter_exit(&i);
- return dev;
-}
-EXPORT_SYMBOL_GPL(bus_find_device);
-
-static int match_name(struct device *dev, void *data)
-{
- const char *name = data;
-
- return sysfs_streq(name, dev_name(dev));
-}
-
-/**
- * bus_find_device_by_name - device iterator for locating a particular device of a specific name
- * @bus: bus type
- * @start: Device to begin with
- * @name: name of the device to match
- *
- * This is similar to the bus_find_device() function above, but it handles
- * searching by a name automatically, no need to write another strcmp matching
- * function.
- */
-struct device *bus_find_device_by_name(struct bus_type *bus,
- struct device *start, const char *name)
-{
- return bus_find_device(bus, start, (void *)name, match_name);
-}
-EXPORT_SYMBOL_GPL(bus_find_device_by_name);
-
-/**
- * subsys_find_device_by_id - find a device with a specific enumeration number
- * @subsys: subsystem
- * @id: index 'id' in struct device
- * @hint: device to check first
- *
- * Check the hint's next object and if it is a match return it directly,
- * otherwise, fall back to a full list search. Either way a reference for
- * the returned object is taken.
- */
-struct device *subsys_find_device_by_id(struct bus_type *subsys, unsigned int id,
- struct device *hint)
-{
- struct klist_iter i;
- struct device *dev;
-
- if (!subsys)
- return NULL;
-
- if (hint) {
- klist_iter_init_node(&subsys->p->klist_devices, &i, &hint->p->knode_bus);
- dev = next_device(&i);
- if (dev && dev->id == id && get_device(dev)) {
- klist_iter_exit(&i);
- return dev;
- }
- klist_iter_exit(&i);
- }
-
- klist_iter_init_node(&subsys->p->klist_devices, &i, NULL);
- while ((dev = next_device(&i))) {
- if (dev->id == id && get_device(dev)) {
- klist_iter_exit(&i);
- return dev;
- }
- }
- klist_iter_exit(&i);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(subsys_find_device_by_id);
-
-static struct device_driver *next_driver(struct klist_iter *i)
-{
- struct klist_node *n = klist_next(i);
- struct driver_private *drv_priv;
-
- if (n) {
- drv_priv = container_of(n, struct driver_private, knode_bus);
- return drv_priv->driver;
- }
- return NULL;
-}
-
-/**
- * bus_for_each_drv - driver iterator
- * @bus: bus we're dealing with.
- * @start: driver to start iterating on.
- * @data: data to pass to the callback.
- * @fn: function to call for each driver.
- *
- * This is nearly identical to the device iterator above.
- * We iterate over each driver that belongs to @bus, and call
- * @fn for each. If @fn returns anything but 0, we break out
- * and return it. If @start is not NULL, we use it as the head
- * of the list.
- *
- * NOTE: we don't return the driver that returns a non-zero
- * value, nor do we leave the reference count incremented for that
- * driver. If the caller needs to know that info, it must set it
- * in the callback. It must also be sure to increment the refcount
- * so it doesn't disappear before returning to the caller.
- */
-int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
- void *data, int (*fn)(struct device_driver *, void *))
-{
- struct klist_iter i;
- struct device_driver *drv;
- int error = 0;
-
- if (!bus)
- return -EINVAL;
-
- klist_iter_init_node(&bus->p->klist_drivers, &i,
- start ? &start->p->knode_bus : NULL);
- while ((drv = next_driver(&i)) && !error)
- error = fn(drv, data);
- klist_iter_exit(&i);
- return error;
-}
-EXPORT_SYMBOL_GPL(bus_for_each_drv);
-
-static int device_add_attrs(struct bus_type *bus, struct device *dev)
-{
- int error = 0;
- int i;
-
- if (!bus->dev_attrs)
- return 0;
-
- for (i = 0; attr_name(bus->dev_attrs[i]); i++) {
- error = device_create_file(dev, &bus->dev_attrs[i]);
- if (error) {
- while (--i >= 0)
- device_remove_file(dev, &bus->dev_attrs[i]);
- break;
- }
- }
- return error;
-}
-
-static void device_remove_attrs(struct bus_type *bus, struct device *dev)
-{
- int i;
-
- if (bus->dev_attrs) {
- for (i = 0; attr_name(bus->dev_attrs[i]); i++)
- device_remove_file(dev, &bus->dev_attrs[i]);
- }
-}
-
-/**
- * bus_add_device - add device to bus
- * @dev: device being added
- *
- * - Add device's bus attributes.
- * - Create links to device's bus.
- * - Add the device to its bus's list of devices.
- */
-int bus_add_device(struct device *dev)
-{
- struct bus_type *bus = bus_get(dev->bus);
- int error = 0;
-
- if (bus) {
- pr_debug("bus: '%s': add device %s\n", bus->name, dev_name(dev));
- error = device_add_attrs(bus, dev);
- if (error)
- goto out_put;
- error = sysfs_create_link(&bus->p->devices_kset->kobj,
- &dev->kobj, dev_name(dev));
- if (error)
- goto out_id;
- error = sysfs_create_link(&dev->kobj,
- &dev->bus->p->subsys.kobj, "subsystem");
- if (error)
- goto out_subsys;
- klist_add_tail(&dev->p->knode_bus, &bus->p->klist_devices);
- }
- return 0;
-
-out_subsys:
- sysfs_remove_link(&bus->p->devices_kset->kobj, dev_name(dev));
-out_id:
- device_remove_attrs(bus, dev);
-out_put:
- bus_put(dev->bus);
- return error;
-}
-
-/**
- * bus_probe_device - probe drivers for a new device
- * @dev: device to probe
- *
- * - Automatically probe for a driver if the bus allows it.
- */
-void bus_probe_device(struct device *dev)
-{
- struct bus_type *bus = dev->bus;
- struct subsys_interface *sif;
- int ret;
-
- if (!bus)
- return;
-
- if (bus->p->drivers_autoprobe) {
- ret = device_attach(dev);
- WARN_ON(ret < 0);
- }
-
- mutex_lock(&bus->p->mutex);
- list_for_each_entry(sif, &bus->p->interfaces, node)
- if (sif->add_dev)
- sif->add_dev(dev, sif);
- mutex_unlock(&bus->p->mutex);
-}
-
-/**
- * bus_remove_device - remove device from bus
- * @dev: device to be removed
- *
- * - Remove device from all interfaces.
- * - Remove symlink from bus' directory.
- * - Delete device from bus's list.
- * - Detach from its driver.
- * - Drop reference taken in bus_add_device().
- */
-void bus_remove_device(struct device *dev)
-{
- struct bus_type *bus = dev->bus;
- struct subsys_interface *sif;
-
- if (!bus)
- return;
-
- mutex_lock(&bus->p->mutex);
- list_for_each_entry(sif, &bus->p->interfaces, node)
- if (sif->remove_dev)
- sif->remove_dev(dev, sif);
- mutex_unlock(&bus->p->mutex);
-
- sysfs_remove_link(&dev->kobj, "subsystem");
- sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
- dev_name(dev));
- device_remove_attrs(dev->bus, dev);
- if (klist_node_attached(&dev->p->knode_bus))
- klist_del(&dev->p->knode_bus);
-
- pr_debug("bus: '%s': remove device %s\n",
- dev->bus->name, dev_name(dev));
- device_release_driver(dev);
- bus_put(dev->bus);
-}
-
-static int driver_add_attrs(struct bus_type *bus, struct device_driver *drv)
-{
- int error = 0;
- int i;
-
- if (bus->drv_attrs) {
- for (i = 0; attr_name(bus->drv_attrs[i]); i++) {
- error = driver_create_file(drv, &bus->drv_attrs[i]);
- if (error)
- goto err;
- }
- }
-done:
- return error;
-err:
- while (--i >= 0)
- driver_remove_file(drv, &bus->drv_attrs[i]);
- goto done;
-}
-
-static void driver_remove_attrs(struct bus_type *bus,
- struct device_driver *drv)
-{
- int i;
-
- if (bus->drv_attrs) {
- for (i = 0; attr_name(bus->drv_attrs[i]); i++)
- driver_remove_file(drv, &bus->drv_attrs[i]);
- }
-}
-
-#ifdef CONFIG_HOTPLUG
-/*
- * Thanks to drivers making their tables __devinit, we can't allow manual
- * bind and unbind from userspace unless CONFIG_HOTPLUG is enabled.
- */
-static int __must_check add_bind_files(struct device_driver *drv)
-{
- int ret;
-
- ret = driver_create_file(drv, &driver_attr_unbind);
- if (ret == 0) {
- ret = driver_create_file(drv, &driver_attr_bind);
- if (ret)
- driver_remove_file(drv, &driver_attr_unbind);
- }
- return ret;
-}
-
-static void remove_bind_files(struct device_driver *drv)
-{
- driver_remove_file(drv, &driver_attr_bind);
- driver_remove_file(drv, &driver_attr_unbind);
-}
-
-static BUS_ATTR(drivers_probe, S_IWUSR, NULL, store_drivers_probe);
-static BUS_ATTR(drivers_autoprobe, S_IWUSR | S_IRUGO,
- show_drivers_autoprobe, store_drivers_autoprobe);
-
-static int add_probe_files(struct bus_type *bus)
-{
- int retval;
-
- retval = bus_create_file(bus, &bus_attr_drivers_probe);
- if (retval)
- goto out;
-
- retval = bus_create_file(bus, &bus_attr_drivers_autoprobe);
- if (retval)
- bus_remove_file(bus, &bus_attr_drivers_probe);
-out:
- return retval;
-}
-
-static void remove_probe_files(struct bus_type *bus)
-{
- bus_remove_file(bus, &bus_attr_drivers_autoprobe);
- bus_remove_file(bus, &bus_attr_drivers_probe);
-}
-#else
-static inline int add_bind_files(struct device_driver *drv) { return 0; }
-static inline void remove_bind_files(struct device_driver *drv) {}
-static inline int add_probe_files(struct bus_type *bus) { return 0; }
-static inline void remove_probe_files(struct bus_type *bus) {}
-#endif
-
-static ssize_t driver_uevent_store(struct device_driver *drv,
- const char *buf, size_t count)
-{
- enum kobject_action action;
-
- if (kobject_action_type(buf, count, &action) == 0)
- kobject_uevent(&drv->p->kobj, action);
- return count;
-}
-static DRIVER_ATTR(uevent, S_IWUSR, NULL, driver_uevent_store);
-
-/**
- * bus_add_driver - Add a driver to the bus.
- * @drv: driver.
- */
-int bus_add_driver(struct device_driver *drv)
-{
- struct bus_type *bus;
- struct driver_private *priv;
- int error = 0;
-
- bus = bus_get(drv->bus);
- if (!bus)
- return -EINVAL;
-
- pr_debug("bus: '%s': add driver %s\n", bus->name, drv->name);
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- error = -ENOMEM;
- goto out_put_bus;
- }
- klist_init(&priv->klist_devices, NULL, NULL);
- priv->driver = drv;
- drv->p = priv;
- priv->kobj.kset = bus->p->drivers_kset;
- error = kobject_init_and_add(&priv->kobj, &driver_ktype, NULL,
- "%s", drv->name);
- if (error)
- goto out_unregister;
-
- if (drv->bus->p->drivers_autoprobe) {
- error = driver_attach(drv);
- if (error)
- goto out_unregister;
- }
- klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers);
- module_add_driver(drv->owner, drv);
-
- error = driver_create_file(drv, &driver_attr_uevent);
- if (error) {
- printk(KERN_ERR "%s: uevent attr (%s) failed\n",
- __func__, drv->name);
- }
- error = driver_add_attrs(bus, drv);
- if (error) {
- /* How the hell do we get out of this pickle? Give up */
- printk(KERN_ERR "%s: driver_add_attrs(%s) failed\n",
- __func__, drv->name);
- }
-
- if (!drv->suppress_bind_attrs) {
- error = add_bind_files(drv);
- if (error) {
- /* Ditto */
- printk(KERN_ERR "%s: add_bind_files(%s) failed\n",
- __func__, drv->name);
- }
- }
-
- kobject_uevent(&priv->kobj, KOBJ_ADD);
- return 0;
-
-out_unregister:
- kobject_put(&priv->kobj);
- kfree(drv->p);
- drv->p = NULL;
-out_put_bus:
- bus_put(bus);
- return error;
-}
-
-/**
- * bus_remove_driver - delete driver from bus's knowledge.
- * @drv: driver.
- *
- * Detach the driver from the devices it controls, and remove
- * it from its bus's list of drivers. Finally, we drop the reference
- * to the bus we took in bus_add_driver().
- */
-void bus_remove_driver(struct device_driver *drv)
-{
- if (!drv->bus)
- return;
-
- if (!drv->suppress_bind_attrs)
- remove_bind_files(drv);
- driver_remove_attrs(drv->bus, drv);
- driver_remove_file(drv, &driver_attr_uevent);
- klist_remove(&drv->p->knode_bus);
- pr_debug("bus: '%s': remove driver %s\n", drv->bus->name, drv->name);
- driver_detach(drv);
- module_remove_driver(drv);
- kobject_put(&drv->p->kobj);
- bus_put(drv->bus);
-}
-
-/* Helper for bus_rescan_devices's iter */
-static int __must_check bus_rescan_devices_helper(struct device *dev,
- void *data)
-{
- int ret = 0;
-
- if (!dev->driver) {
- if (dev->parent) /* Needed for USB */
- device_lock(dev->parent);
- ret = device_attach(dev);
- if (dev->parent)
- device_unlock(dev->parent);
- }
- return ret < 0 ? ret : 0;
-}
-
-/**
- * bus_rescan_devices - rescan devices on the bus for possible drivers
- * @bus: the bus to scan.
- *
- * This function will look for devices on the bus with no driver
- * attached and rescan it against existing drivers to see if it matches
- * any by calling device_attach() for the unbound devices.
- */
-int bus_rescan_devices(struct bus_type *bus)
-{
- return bus_for_each_dev(bus, NULL, NULL, bus_rescan_devices_helper);
-}
-EXPORT_SYMBOL_GPL(bus_rescan_devices);
-
-/**
- * device_reprobe - remove driver for a device and probe for a new driver
- * @dev: the device to reprobe
- *
- * This function detaches the attached driver (if any) for the given
- * device and restarts the driver probing process. It is intended
- * to use if probing criteria changed during a devices lifetime and
- * driver attachment should change accordingly.
- */
-int device_reprobe(struct device *dev)
-{
- if (dev->driver) {
- if (dev->parent) /* Needed for USB */
- device_lock(dev->parent);
- device_release_driver(dev);
- if (dev->parent)
- device_unlock(dev->parent);
- }
- return bus_rescan_devices_helper(dev, NULL);
-}
-EXPORT_SYMBOL_GPL(device_reprobe);
-
-/**
- * find_bus - locate bus by name.
- * @name: name of bus.
- *
- * Call kset_find_obj() to iterate over list of buses to
- * find a bus by name. Return bus if found.
- *
- * Note that kset_find_obj increments bus' reference count.
- */
-#if 0
-struct bus_type *find_bus(char *name)
-{
- struct kobject *k = kset_find_obj(bus_kset, name);
- return k ? to_bus(k) : NULL;
-}
-#endif /* 0 */
-
-
-/**
- * bus_add_attrs - Add default attributes for this bus.
- * @bus: Bus that has just been registered.
- */
-
-static int bus_add_attrs(struct bus_type *bus)
-{
- int error = 0;
- int i;
-
- if (bus->bus_attrs) {
- for (i = 0; attr_name(bus->bus_attrs[i]); i++) {
- error = bus_create_file(bus, &bus->bus_attrs[i]);
- if (error)
- goto err;
- }
- }
-done:
- return error;
-err:
- while (--i >= 0)
- bus_remove_file(bus, &bus->bus_attrs[i]);
- goto done;
-}
-
-static void bus_remove_attrs(struct bus_type *bus)
-{
- int i;
-
- if (bus->bus_attrs) {
- for (i = 0; attr_name(bus->bus_attrs[i]); i++)
- bus_remove_file(bus, &bus->bus_attrs[i]);
- }
-}
-
-static void klist_devices_get(struct klist_node *n)
-{
- struct device_private *dev_prv = to_device_private_bus(n);
- struct device *dev = dev_prv->device;
-
- get_device(dev);
-}
-
-static void klist_devices_put(struct klist_node *n)
-{
- struct device_private *dev_prv = to_device_private_bus(n);
- struct device *dev = dev_prv->device;
-
- put_device(dev);
-}
-
-static ssize_t bus_uevent_store(struct bus_type *bus,
- const char *buf, size_t count)
-{
- enum kobject_action action;
-
- if (kobject_action_type(buf, count, &action) == 0)
- kobject_uevent(&bus->p->subsys.kobj, action);
- return count;
-}
-static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store);
-
-/**
- * __bus_register - register a driver-core subsystem
- * @bus: bus to register
- * @key: lockdep class key
- *
- * Once we have that, we register the bus with the kobject
- * infrastructure, then register the children subsystems it has:
- * the devices and drivers that belong to the subsystem.
- */
-int __bus_register(struct bus_type *bus, struct lock_class_key *key)
-{
- int retval;
- struct subsys_private *priv;
-
- priv = kzalloc(sizeof(struct subsys_private), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->bus = bus;
- bus->p = priv;
-
- BLOCKING_INIT_NOTIFIER_HEAD(&priv->bus_notifier);
-
- retval = kobject_set_name(&priv->subsys.kobj, "%s", bus->name);
- if (retval)
- goto out;
-
- priv->subsys.kobj.kset = bus_kset;
- priv->subsys.kobj.ktype = &bus_ktype;
- priv->drivers_autoprobe = 1;
-
- retval = kset_register(&priv->subsys);
- if (retval)
- goto out;
-
- retval = bus_create_file(bus, &bus_attr_uevent);
- if (retval)
- goto bus_uevent_fail;
-
- priv->devices_kset = kset_create_and_add("devices", NULL,
- &priv->subsys.kobj);
- if (!priv->devices_kset) {
- retval = -ENOMEM;
- goto bus_devices_fail;
- }
-
- priv->drivers_kset = kset_create_and_add("drivers", NULL,
- &priv->subsys.kobj);
- if (!priv->drivers_kset) {
- retval = -ENOMEM;
- goto bus_drivers_fail;
- }
-
- INIT_LIST_HEAD(&priv->interfaces);
- __mutex_init(&priv->mutex, "subsys mutex", key);
- klist_init(&priv->klist_devices, klist_devices_get, klist_devices_put);
- klist_init(&priv->klist_drivers, NULL, NULL);
-
- retval = add_probe_files(bus);
- if (retval)
- goto bus_probe_files_fail;
-
- retval = bus_add_attrs(bus);
- if (retval)
- goto bus_attrs_fail;
-
- pr_debug("bus: '%s': registered\n", bus->name);
- return 0;
-
-bus_attrs_fail:
- remove_probe_files(bus);
-bus_probe_files_fail:
- kset_unregister(bus->p->drivers_kset);
-bus_drivers_fail:
- kset_unregister(bus->p->devices_kset);
-bus_devices_fail:
- bus_remove_file(bus, &bus_attr_uevent);
-bus_uevent_fail:
- kset_unregister(&bus->p->subsys);
-out:
- kfree(bus->p);
- bus->p = NULL;
- return retval;
-}
-EXPORT_SYMBOL_GPL(__bus_register);
-
-/**
- * bus_unregister - remove a bus from the system
- * @bus: bus.
- *
- * Unregister the child subsystems and the bus itself.
- * Finally, we call bus_put() to release the refcount
- */
-void bus_unregister(struct bus_type *bus)
-{
- pr_debug("bus: '%s': unregistering\n", bus->name);
- if (bus->dev_root)
- device_unregister(bus->dev_root);
- bus_remove_attrs(bus);
- remove_probe_files(bus);
- kset_unregister(bus->p->drivers_kset);
- kset_unregister(bus->p->devices_kset);
- bus_remove_file(bus, &bus_attr_uevent);
- kset_unregister(&bus->p->subsys);
- kfree(bus->p);
- bus->p = NULL;
-}
-EXPORT_SYMBOL_GPL(bus_unregister);
-
-int bus_register_notifier(struct bus_type *bus, struct notifier_block *nb)
-{
- return blocking_notifier_chain_register(&bus->p->bus_notifier, nb);
-}
-EXPORT_SYMBOL_GPL(bus_register_notifier);
-
-int bus_unregister_notifier(struct bus_type *bus, struct notifier_block *nb)
-{
- return blocking_notifier_chain_unregister(&bus->p->bus_notifier, nb);
-}
-EXPORT_SYMBOL_GPL(bus_unregister_notifier);
-
-struct kset *bus_get_kset(struct bus_type *bus)
-{
- return &bus->p->subsys;
-}
-EXPORT_SYMBOL_GPL(bus_get_kset);
-
-struct klist *bus_get_device_klist(struct bus_type *bus)
-{
- return &bus->p->klist_devices;
-}
-EXPORT_SYMBOL_GPL(bus_get_device_klist);
-
-/*
- * Yes, this forcibly breaks the klist abstraction temporarily. It
- * just wants to sort the klist, not change reference counts and
- * take/drop locks rapidly in the process. It does all this while
- * holding the lock for the list, so objects can't otherwise be
- * added/removed while we're swizzling.
- */
-static void device_insertion_sort_klist(struct device *a, struct list_head *list,
- int (*compare)(const struct device *a,
- const struct device *b))
-{
- struct list_head *pos;
- struct klist_node *n;
- struct device_private *dev_prv;
- struct device *b;
-
- list_for_each(pos, list) {
- n = container_of(pos, struct klist_node, n_node);
- dev_prv = to_device_private_bus(n);
- b = dev_prv->device;
- if (compare(a, b) <= 0) {
- list_move_tail(&a->p->knode_bus.n_node,
- &b->p->knode_bus.n_node);
- return;
- }
- }
- list_move_tail(&a->p->knode_bus.n_node, list);
-}
-
-void bus_sort_breadthfirst(struct bus_type *bus,
- int (*compare)(const struct device *a,
- const struct device *b))
-{
- LIST_HEAD(sorted_devices);
- struct list_head *pos, *tmp;
- struct klist_node *n;
- struct device_private *dev_prv;
- struct device *dev;
- struct klist *device_klist;
-
- device_klist = bus_get_device_klist(bus);
-
- spin_lock(&device_klist->k_lock);
- list_for_each_safe(pos, tmp, &device_klist->k_list) {
- n = container_of(pos, struct klist_node, n_node);
- dev_prv = to_device_private_bus(n);
- dev = dev_prv->device;
- device_insertion_sort_klist(dev, &sorted_devices, compare);
- }
- list_splice(&sorted_devices, &device_klist->k_list);
- spin_unlock(&device_klist->k_lock);
-}
-EXPORT_SYMBOL_GPL(bus_sort_breadthfirst);
-
-/**
- * subsys_dev_iter_init - initialize subsys device iterator
- * @iter: subsys iterator to initialize
- * @subsys: the subsys we wanna iterate over
- * @start: the device to start iterating from, if any
- * @type: device_type of the devices to iterate over, NULL for all
- *
- * Initialize subsys iterator @iter such that it iterates over devices
- * of @subsys. If @start is set, the list iteration will start there,
- * otherwise if it is NULL, the iteration starts at the beginning of
- * the list.
- */
-void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct bus_type *subsys,
- struct device *start, const struct device_type *type)
-{
- struct klist_node *start_knode = NULL;
-
- if (start)
- start_knode = &start->p->knode_bus;
- klist_iter_init_node(&subsys->p->klist_devices, &iter->ki, start_knode);
- iter->type = type;
-}
-EXPORT_SYMBOL_GPL(subsys_dev_iter_init);
-
-/**
- * subsys_dev_iter_next - iterate to the next device
- * @iter: subsys iterator to proceed
- *
- * Proceed @iter to the next device and return it. Returns NULL if
- * iteration is complete.
- *
- * The returned device is referenced and won't be released till
- * iterator is proceed to the next device or exited. The caller is
- * free to do whatever it wants to do with the device including
- * calling back into subsys code.
- */
-struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter)
-{
- struct klist_node *knode;
- struct device *dev;
-
- for (;;) {
- knode = klist_next(&iter->ki);
- if (!knode)
- return NULL;
- dev = container_of(knode, struct device_private, knode_bus)->device;
- if (!iter->type || iter->type == dev->type)
- return dev;
- }
-}
-EXPORT_SYMBOL_GPL(subsys_dev_iter_next);
-
-/**
- * subsys_dev_iter_exit - finish iteration
- * @iter: subsys iterator to finish
- *
- * Finish an iteration. Always call this function after iteration is
- * complete whether the iteration ran till the end or not.
- */
-void subsys_dev_iter_exit(struct subsys_dev_iter *iter)
-{
- klist_iter_exit(&iter->ki);
-}
-EXPORT_SYMBOL_GPL(subsys_dev_iter_exit);
-
-int subsys_interface_register(struct subsys_interface *sif)
-{
- struct bus_type *subsys;
- struct subsys_dev_iter iter;
- struct device *dev;
-
- if (!sif || !sif->subsys)
- return -ENODEV;
-
- subsys = bus_get(sif->subsys);
- if (!subsys)
- return -EINVAL;
-
- mutex_lock(&subsys->p->mutex);
- list_add_tail(&sif->node, &subsys->p->interfaces);
- if (sif->add_dev) {
- subsys_dev_iter_init(&iter, subsys, NULL, NULL);
- while ((dev = subsys_dev_iter_next(&iter)))
- sif->add_dev(dev, sif);
- subsys_dev_iter_exit(&iter);
- }
- mutex_unlock(&subsys->p->mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(subsys_interface_register);
-
-void subsys_interface_unregister(struct subsys_interface *sif)
-{
- struct bus_type *subsys;
- struct subsys_dev_iter iter;
- struct device *dev;
-
- if (!sif || !sif->subsys)
- return;
-
- subsys = sif->subsys;
-
- mutex_lock(&subsys->p->mutex);
- list_del_init(&sif->node);
- if (sif->remove_dev) {
- subsys_dev_iter_init(&iter, subsys, NULL, NULL);
- while ((dev = subsys_dev_iter_next(&iter)))
- sif->remove_dev(dev, sif);
- subsys_dev_iter_exit(&iter);
- }
- mutex_unlock(&subsys->p->mutex);
-
- bus_put(subsys);
-}
-EXPORT_SYMBOL_GPL(subsys_interface_unregister);
-
-static void system_root_device_release(struct device *dev)
-{
- kfree(dev);
-}
-/**
- * subsys_system_register - register a subsystem at /sys/devices/system/
- * @subsys: system subsystem
- * @groups: default attributes for the root device
- *
- * All 'system' subsystems have a /sys/devices/system/<name> root device
- * with the name of the subsystem. The root device can carry subsystem-
- * wide attributes. All registered devices are below this single root
- * device and are named after the subsystem with a simple enumeration
- * number appended. The registered devices are not explicitely named;
- * only 'id' in the device needs to be set.
- *
- * Do not use this interface for anything new, it exists for compatibility
- * with bad ideas only. New subsystems should use plain subsystems; and
- * add the subsystem-wide attributes should be added to the subsystem
- * directory itself and not some create fake root-device placed in
- * /sys/devices/system/<name>.
- */
-int subsys_system_register(struct bus_type *subsys,
- const struct attribute_group **groups)
-{
- struct device *dev;
- int err;
-
- err = bus_register(subsys);
- if (err < 0)
- return err;
-
- dev = kzalloc(sizeof(struct device), GFP_KERNEL);
- if (!dev) {
- err = -ENOMEM;
- goto err_dev;
- }
-
- err = dev_set_name(dev, "%s", subsys->name);
- if (err < 0)
- goto err_name;
-
- dev->kobj.parent = &system_kset->kobj;
- dev->groups = groups;
- dev->release = system_root_device_release;
-
- err = device_register(dev);
- if (err < 0)
- goto err_dev_reg;
-
- subsys->dev_root = dev;
- return 0;
-
-err_dev_reg:
- put_device(dev);
- dev = NULL;
-err_name:
- kfree(dev);
-err_dev:
- bus_unregister(subsys);
- return err;
-}
-EXPORT_SYMBOL_GPL(subsys_system_register);
-
-int __init buses_init(void)
-{
- bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL);
- if (!bus_kset)
- return -ENOMEM;
-
- system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj);
- if (!system_kset)
- return -ENOMEM;
-
- return 0;
-}
diff --git a/ANDROID_3.4.5/drivers/base/class.c b/ANDROID_3.4.5/drivers/base/class.c
deleted file mode 100644
index 03243d40..00000000
--- a/ANDROID_3.4.5/drivers/base/class.c
+++ /dev/null
@@ -1,609 +0,0 @@
-/*
- * class.c - basic device class management
- *
- * Copyright (c) 2002-3 Patrick Mochel
- * Copyright (c) 2002-3 Open Source Development Labs
- * Copyright (c) 2003-2004 Greg Kroah-Hartman
- * Copyright (c) 2003-2004 IBM Corp.
- *
- * This file is released under the GPLv2
- *
- */
-
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/kdev_t.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/genhd.h>
-#include <linux/mutex.h>
-#include "base.h"
-
-#define to_class_attr(_attr) container_of(_attr, struct class_attribute, attr)
-
-static ssize_t class_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct class_attribute *class_attr = to_class_attr(attr);
- struct subsys_private *cp = to_subsys_private(kobj);
- ssize_t ret = -EIO;
-
- if (class_attr->show)
- ret = class_attr->show(cp->class, class_attr, buf);
- return ret;
-}
-
-static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
- struct class_attribute *class_attr = to_class_attr(attr);
- struct subsys_private *cp = to_subsys_private(kobj);
- ssize_t ret = -EIO;
-
- if (class_attr->store)
- ret = class_attr->store(cp->class, class_attr, buf, count);
- return ret;
-}
-
-static const void *class_attr_namespace(struct kobject *kobj,
- const struct attribute *attr)
-{
- struct class_attribute *class_attr = to_class_attr(attr);
- struct subsys_private *cp = to_subsys_private(kobj);
- const void *ns = NULL;
-
- if (class_attr->namespace)
- ns = class_attr->namespace(cp->class, class_attr);
- return ns;
-}
-
-static void class_release(struct kobject *kobj)
-{
- struct subsys_private *cp = to_subsys_private(kobj);
- struct class *class = cp->class;
-
- pr_debug("class '%s': release.\n", class->name);
-
- if (class->class_release)
- class->class_release(class);
- else
- pr_debug("class '%s' does not have a release() function, "
- "be careful\n", class->name);
-
- kfree(cp);
-}
-
-static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject *kobj)
-{
- struct subsys_private *cp = to_subsys_private(kobj);
- struct class *class = cp->class;
-
- return class->ns_type;
-}
-
-static const struct sysfs_ops class_sysfs_ops = {
- .show = class_attr_show,
- .store = class_attr_store,
- .namespace = class_attr_namespace,
-};
-
-static struct kobj_type class_ktype = {
- .sysfs_ops = &class_sysfs_ops,
- .release = class_release,
- .child_ns_type = class_child_ns_type,
-};
-
-/* Hotplug events for classes go to the class subsys */
-static struct kset *class_kset;
-
-
-int class_create_file(struct class *cls, const struct class_attribute *attr)
-{
- int error;
- if (cls)
- error = sysfs_create_file(&cls->p->subsys.kobj,
- &attr->attr);
- else
- error = -EINVAL;
- return error;
-}
-
-void class_remove_file(struct class *cls, const struct class_attribute *attr)
-{
- if (cls)
- sysfs_remove_file(&cls->p->subsys.kobj, &attr->attr);
-}
-
-static struct class *class_get(struct class *cls)
-{
- if (cls)
- kset_get(&cls->p->subsys);
- return cls;
-}
-
-static void class_put(struct class *cls)
-{
- if (cls)
- kset_put(&cls->p->subsys);
-}
-
-static int add_class_attrs(struct class *cls)
-{
- int i;
- int error = 0;
-
- if (cls->class_attrs) {
- for (i = 0; attr_name(cls->class_attrs[i]); i++) {
- error = class_create_file(cls, &cls->class_attrs[i]);
- if (error)
- goto error;
- }
- }
-done:
- return error;
-error:
- while (--i >= 0)
- class_remove_file(cls, &cls->class_attrs[i]);
- goto done;
-}
-
-static void remove_class_attrs(struct class *cls)
-{
- int i;
-
- if (cls->class_attrs) {
- for (i = 0; attr_name(cls->class_attrs[i]); i++)
- class_remove_file(cls, &cls->class_attrs[i]);
- }
-}
-
-static void klist_class_dev_get(struct klist_node *n)
-{
- struct device *dev = container_of(n, struct device, knode_class);
-
- get_device(dev);
-}
-
-static void klist_class_dev_put(struct klist_node *n)
-{
- struct device *dev = container_of(n, struct device, knode_class);
-
- put_device(dev);
-}
-
-int __class_register(struct class *cls, struct lock_class_key *key)
-{
- struct subsys_private *cp;
- int error;
-
- pr_debug("device class '%s': registering\n", cls->name);
-
- cp = kzalloc(sizeof(*cp), GFP_KERNEL);
- if (!cp)
- return -ENOMEM;
- klist_init(&cp->klist_devices, klist_class_dev_get, klist_class_dev_put);
- INIT_LIST_HEAD(&cp->interfaces);
- kset_init(&cp->glue_dirs);
- __mutex_init(&cp->mutex, "subsys mutex", key);
- error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name);
- if (error) {
- kfree(cp);
- return error;
- }
-
- /* set the default /sys/dev directory for devices of this class */
- if (!cls->dev_kobj)
- cls->dev_kobj = sysfs_dev_char_kobj;
-
-#if defined(CONFIG_BLOCK)
- /* let the block class directory show up in the root of sysfs */
- if (!sysfs_deprecated || cls != &block_class)
- cp->subsys.kobj.kset = class_kset;
-#else
- cp->subsys.kobj.kset = class_kset;
-#endif
- cp->subsys.kobj.ktype = &class_ktype;
- cp->class = cls;
- cls->p = cp;
-
- error = kset_register(&cp->subsys);
- if (error) {
- kfree(cp);
- return error;
- }
- error = add_class_attrs(class_get(cls));
- class_put(cls);
- return error;
-}
-EXPORT_SYMBOL_GPL(__class_register);
-
-void class_unregister(struct class *cls)
-{
- pr_debug("device class '%s': unregistering\n", cls->name);
- remove_class_attrs(cls);
- kset_unregister(&cls->p->subsys);
-}
-
-static void class_create_release(struct class *cls)
-{
- pr_debug("%s called for %s\n", __func__, cls->name);
- kfree(cls);
-}
-
-/**
- * class_create - create a struct class structure
- * @owner: pointer to the module that is to "own" this struct class
- * @name: pointer to a string for the name of this class.
- * @key: the lock_class_key for this class; used by mutex lock debugging
- *
- * This is used to create a struct class pointer that can then be used
- * in calls to device_create().
- *
- * Returns &struct class pointer on success, or ERR_PTR() on error.
- *
- * Note, the pointer created here is to be destroyed when finished by
- * making a call to class_destroy().
- */
-struct class *__class_create(struct module *owner, const char *name,
- struct lock_class_key *key)
-{
- struct class *cls;
- int retval;
-
- cls = kzalloc(sizeof(*cls), GFP_KERNEL);
- if (!cls) {
- retval = -ENOMEM;
- goto error;
- }
-
- cls->name = name;
- cls->owner = owner;
- cls->class_release = class_create_release;
-
- retval = __class_register(cls, key);
- if (retval)
- goto error;
-
- return cls;
-
-error:
- kfree(cls);
- return ERR_PTR(retval);
-}
-EXPORT_SYMBOL_GPL(__class_create);
-
-/**
- * class_destroy - destroys a struct class structure
- * @cls: pointer to the struct class that is to be destroyed
- *
- * Note, the pointer to be destroyed must have been created with a call
- * to class_create().
- */
-void class_destroy(struct class *cls)
-{
- if ((cls == NULL) || (IS_ERR(cls)))
- return;
-
- class_unregister(cls);
-}
-
-/**
- * class_dev_iter_init - initialize class device iterator
- * @iter: class iterator to initialize
- * @class: the class we wanna iterate over
- * @start: the device to start iterating from, if any
- * @type: device_type of the devices to iterate over, NULL for all
- *
- * Initialize class iterator @iter such that it iterates over devices
- * of @class. If @start is set, the list iteration will start there,
- * otherwise if it is NULL, the iteration starts at the beginning of
- * the list.
- */
-void class_dev_iter_init(struct class_dev_iter *iter, struct class *class,
- struct device *start, const struct device_type *type)
-{
- struct klist_node *start_knode = NULL;
-
- if (start)
- start_knode = &start->knode_class;
- klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode);
- iter->type = type;
-}
-EXPORT_SYMBOL_GPL(class_dev_iter_init);
-
-/**
- * class_dev_iter_next - iterate to the next device
- * @iter: class iterator to proceed
- *
- * Proceed @iter to the next device and return it. Returns NULL if
- * iteration is complete.
- *
- * The returned device is referenced and won't be released till
- * iterator is proceed to the next device or exited. The caller is
- * free to do whatever it wants to do with the device including
- * calling back into class code.
- */
-struct device *class_dev_iter_next(struct class_dev_iter *iter)
-{
- struct klist_node *knode;
- struct device *dev;
-
- while (1) {
- knode = klist_next(&iter->ki);
- if (!knode)
- return NULL;
- dev = container_of(knode, struct device, knode_class);
- if (!iter->type || iter->type == dev->type)
- return dev;
- }
-}
-EXPORT_SYMBOL_GPL(class_dev_iter_next);
-
-/**
- * class_dev_iter_exit - finish iteration
- * @iter: class iterator to finish
- *
- * Finish an iteration. Always call this function after iteration is
- * complete whether the iteration ran till the end or not.
- */
-void class_dev_iter_exit(struct class_dev_iter *iter)
-{
- klist_iter_exit(&iter->ki);
-}
-EXPORT_SYMBOL_GPL(class_dev_iter_exit);
-
-/**
- * class_for_each_device - device iterator
- * @class: the class we're iterating
- * @start: the device to start with in the list, if any.
- * @data: data for the callback
- * @fn: function to be called for each device
- *
- * Iterate over @class's list of devices, and call @fn for each,
- * passing it @data. If @start is set, the list iteration will start
- * there, otherwise if it is NULL, the iteration starts at the
- * beginning of the list.
- *
- * We check the return of @fn each time. If it returns anything
- * other than 0, we break out and return that value.
- *
- * @fn is allowed to do anything including calling back into class
- * code. There's no locking restriction.
- */
-int class_for_each_device(struct class *class, struct device *start,
- void *data, int (*fn)(struct device *, void *))
-{
- struct class_dev_iter iter;
- struct device *dev;
- int error = 0;
-
- if (!class)
- return -EINVAL;
- if (!class->p) {
- WARN(1, "%s called for class '%s' before it was initialized",
- __func__, class->name);
- return -EINVAL;
- }
-
- class_dev_iter_init(&iter, class, start, NULL);
- while ((dev = class_dev_iter_next(&iter))) {
- error = fn(dev, data);
- if (error)
- break;
- }
- class_dev_iter_exit(&iter);
-
- return error;
-}
-EXPORT_SYMBOL_GPL(class_for_each_device);
-
-/**
- * class_find_device - device iterator for locating a particular device
- * @class: the class we're iterating
- * @start: Device to begin with
- * @data: data for the match function
- * @match: function to check device
- *
- * This is similar to the class_for_each_dev() function above, but it
- * returns a reference to a device that is 'found' for later use, as
- * determined by the @match callback.
- *
- * The callback should return 0 if the device doesn't match and non-zero
- * if it does. If the callback returns non-zero, this function will
- * return to the caller and not iterate over any more devices.
- *
- * Note, you will need to drop the reference with put_device() after use.
- *
- * @fn is allowed to do anything including calling back into class
- * code. There's no locking restriction.
- */
-struct device *class_find_device(struct class *class, struct device *start,
- void *data,
- int (*match)(struct device *, void *))
-{
- struct class_dev_iter iter;
- struct device *dev;
-
- if (!class)
- return NULL;
- if (!class->p) {
- WARN(1, "%s called for class '%s' before it was initialized",
- __func__, class->name);
- return NULL;
- }
-
- class_dev_iter_init(&iter, class, start, NULL);
- while ((dev = class_dev_iter_next(&iter))) {
- if (match(dev, data)) {
- get_device(dev);
- break;
- }
- }
- class_dev_iter_exit(&iter);
-
- return dev;
-}
-EXPORT_SYMBOL_GPL(class_find_device);
-
-int class_interface_register(struct class_interface *class_intf)
-{
- struct class *parent;
- struct class_dev_iter iter;
- struct device *dev;
-
- if (!class_intf || !class_intf->class)
- return -ENODEV;
-
- parent = class_get(class_intf->class);
- if (!parent)
- return -EINVAL;
-
- mutex_lock(&parent->p->mutex);
- list_add_tail(&class_intf->node, &parent->p->interfaces);
- if (class_intf->add_dev) {
- class_dev_iter_init(&iter, parent, NULL, NULL);
- while ((dev = class_dev_iter_next(&iter)))
- class_intf->add_dev(dev, class_intf);
- class_dev_iter_exit(&iter);
- }
- mutex_unlock(&parent->p->mutex);
-
- return 0;
-}
-
-void class_interface_unregister(struct class_interface *class_intf)
-{
- struct class *parent = class_intf->class;
- struct class_dev_iter iter;
- struct device *dev;
-
- if (!parent)
- return;
-
- mutex_lock(&parent->p->mutex);
- list_del_init(&class_intf->node);
- if (class_intf->remove_dev) {
- class_dev_iter_init(&iter, parent, NULL, NULL);
- while ((dev = class_dev_iter_next(&iter)))
- class_intf->remove_dev(dev, class_intf);
- class_dev_iter_exit(&iter);
- }
- mutex_unlock(&parent->p->mutex);
-
- class_put(parent);
-}
-
-ssize_t show_class_attr_string(struct class *class,
- struct class_attribute *attr, char *buf)
-{
- struct class_attribute_string *cs;
- cs = container_of(attr, struct class_attribute_string, attr);
- return snprintf(buf, PAGE_SIZE, "%s\n", cs->str);
-}
-
-EXPORT_SYMBOL_GPL(show_class_attr_string);
-
-struct class_compat {
- struct kobject *kobj;
-};
-
-/**
- * class_compat_register - register a compatibility class
- * @name: the name of the class
- *
- * Compatibility class are meant as a temporary user-space compatibility
- * workaround when converting a family of class devices to a bus devices.
- */
-struct class_compat *class_compat_register(const char *name)
-{
- struct class_compat *cls;
-
- cls = kmalloc(sizeof(struct class_compat), GFP_KERNEL);
- if (!cls)
- return NULL;
- cls->kobj = kobject_create_and_add(name, &class_kset->kobj);
- if (!cls->kobj) {
- kfree(cls);
- return NULL;
- }
- return cls;
-}
-EXPORT_SYMBOL_GPL(class_compat_register);
-
-/**
- * class_compat_unregister - unregister a compatibility class
- * @cls: the class to unregister
- */
-void class_compat_unregister(struct class_compat *cls)
-{
- kobject_put(cls->kobj);
- kfree(cls);
-}
-EXPORT_SYMBOL_GPL(class_compat_unregister);
-
-/**
- * class_compat_create_link - create a compatibility class device link to
- * a bus device
- * @cls: the compatibility class
- * @dev: the target bus device
- * @device_link: an optional device to which a "device" link should be created
- */
-int class_compat_create_link(struct class_compat *cls, struct device *dev,
- struct device *device_link)
-{
- int error;
-
- error = sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
- if (error)
- return error;
-
- /*
- * Optionally add a "device" link (typically to the parent), as a
- * class device would have one and we want to provide as much
- * backwards compatibility as possible.
- */
- if (device_link) {
- error = sysfs_create_link(&dev->kobj, &device_link->kobj,
- "device");
- if (error)
- sysfs_remove_link(cls->kobj, dev_name(dev));
- }
-
- return error;
-}
-EXPORT_SYMBOL_GPL(class_compat_create_link);
-
-/**
- * class_compat_remove_link - remove a compatibility class device link to
- * a bus device
- * @cls: the compatibility class
- * @dev: the target bus device
- * @device_link: an optional device to which a "device" link was previously
- * created
- */
-void class_compat_remove_link(struct class_compat *cls, struct device *dev,
- struct device *device_link)
-{
- if (device_link)
- sysfs_remove_link(&dev->kobj, "device");
- sysfs_remove_link(cls->kobj, dev_name(dev));
-}
-EXPORT_SYMBOL_GPL(class_compat_remove_link);
-
-int __init classes_init(void)
-{
- class_kset = kset_create_and_add("class", NULL, NULL);
- if (!class_kset)
- return -ENOMEM;
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(class_create_file);
-EXPORT_SYMBOL_GPL(class_remove_file);
-EXPORT_SYMBOL_GPL(class_unregister);
-EXPORT_SYMBOL_GPL(class_destroy);
-
-EXPORT_SYMBOL_GPL(class_interface_register);
-EXPORT_SYMBOL_GPL(class_interface_unregister);
diff --git a/ANDROID_3.4.5/drivers/base/core.c b/ANDROID_3.4.5/drivers/base/core.c
deleted file mode 100644
index e28ce989..00000000
--- a/ANDROID_3.4.5/drivers/base/core.c
+++ /dev/null
@@ -1,1904 +0,0 @@
-/*
- * drivers/base/core.c - core driver model code (device registration, etc)
- *
- * Copyright (c) 2002-3 Patrick Mochel
- * Copyright (c) 2002-3 Open Source Development Labs
- * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de>
- * Copyright (c) 2006 Novell, Inc.
- *
- * This file is released under the GPLv2
- *
- */
-
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/kdev_t.h>
-#include <linux/notifier.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/genhd.h>
-#include <linux/kallsyms.h>
-#include <linux/mutex.h>
-#include <linux/async.h>
-#include <linux/pm_runtime.h>
-
-#include "base.h"
-#include "power/power.h"
-
-#ifdef CONFIG_SYSFS_DEPRECATED
-#ifdef CONFIG_SYSFS_DEPRECATED_V2
-long sysfs_deprecated = 1;
-#else
-long sysfs_deprecated = 0;
-#endif
-static __init int sysfs_deprecated_setup(char *arg)
-{
- return strict_strtol(arg, 10, &sysfs_deprecated);
-}
-early_param("sysfs.deprecated", sysfs_deprecated_setup);
-#endif
-
-int (*platform_notify)(struct device *dev) = NULL;
-int (*platform_notify_remove)(struct device *dev) = NULL;
-static struct kobject *dev_kobj;
-struct kobject *sysfs_dev_char_kobj;
-struct kobject *sysfs_dev_block_kobj;
-
-#ifdef CONFIG_BLOCK
-static inline int device_is_not_partition(struct device *dev)
-{
- return !(dev->type == &part_type);
-}
-#else
-static inline int device_is_not_partition(struct device *dev)
-{
- return 1;
-}
-#endif
-
-/**
- * dev_driver_string - Return a device's driver name, if at all possible
- * @dev: struct device to get the name of
- *
- * Will return the device's driver's name if it is bound to a device. If
- * the device is not bound to a device, it will return the name of the bus
- * it is attached to. If it is not attached to a bus either, an empty
- * string will be returned.
- */
-const char *dev_driver_string(const struct device *dev)
-{
- struct device_driver *drv;
-
- /* dev->driver can change to NULL underneath us because of unbinding,
- * so be careful about accessing it. dev->bus and dev->class should
- * never change once they are set, so they don't need special care.
- */
- drv = ACCESS_ONCE(dev->driver);
- return drv ? drv->name :
- (dev->bus ? dev->bus->name :
- (dev->class ? dev->class->name : ""));
-}
-EXPORT_SYMBOL(dev_driver_string);
-
-#define to_dev(obj) container_of(obj, struct device, kobj)
-#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
-
-static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct device_attribute *dev_attr = to_dev_attr(attr);
- struct device *dev = to_dev(kobj);
- ssize_t ret = -EIO;
-
- if (dev_attr->show)
- ret = dev_attr->show(dev, dev_attr, buf);
- if (ret >= (ssize_t)PAGE_SIZE) {
- print_symbol("dev_attr_show: %s returned bad count\n",
- (unsigned long)dev_attr->show);
- }
- return ret;
-}
-
-static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
- struct device_attribute *dev_attr = to_dev_attr(attr);
- struct device *dev = to_dev(kobj);
- ssize_t ret = -EIO;
-
- if (dev_attr->store)
- ret = dev_attr->store(dev, dev_attr, buf, count);
- return ret;
-}
-
-static const struct sysfs_ops dev_sysfs_ops = {
- .show = dev_attr_show,
- .store = dev_attr_store,
-};
-
-#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
-
-ssize_t device_store_ulong(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct dev_ext_attribute *ea = to_ext_attr(attr);
- char *end;
- unsigned long new = simple_strtoul(buf, &end, 0);
- if (end == buf)
- return -EINVAL;
- *(unsigned long *)(ea->var) = new;
- /* Always return full write size even if we didn't consume all */
- return size;
-}
-EXPORT_SYMBOL_GPL(device_store_ulong);
-
-ssize_t device_show_ulong(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct dev_ext_attribute *ea = to_ext_attr(attr);
- return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var));
-}
-EXPORT_SYMBOL_GPL(device_show_ulong);
-
-ssize_t device_store_int(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct dev_ext_attribute *ea = to_ext_attr(attr);
- char *end;
- long new = simple_strtol(buf, &end, 0);
- if (end == buf || new > INT_MAX || new < INT_MIN)
- return -EINVAL;
- *(int *)(ea->var) = new;
- /* Always return full write size even if we didn't consume all */
- return size;
-}
-EXPORT_SYMBOL_GPL(device_store_int);
-
-ssize_t device_show_int(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct dev_ext_attribute *ea = to_ext_attr(attr);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var));
-}
-EXPORT_SYMBOL_GPL(device_show_int);
-
-/**
- * device_release - free device structure.
- * @kobj: device's kobject.
- *
- * This is called once the reference count for the object
- * reaches 0. We forward the call to the device's release
- * method, which should handle actually freeing the structure.
- */
-static void device_release(struct kobject *kobj)
-{
- struct device *dev = to_dev(kobj);
- struct device_private *p = dev->p;
-
- if (dev->release)
- dev->release(dev);
- else if (dev->type && dev->type->release)
- dev->type->release(dev);
- else if (dev->class && dev->class->dev_release)
- dev->class->dev_release(dev);
- else
- WARN(1, KERN_ERR "Device '%s' does not have a release() "
- "function, it is broken and must be fixed.\n",
- dev_name(dev));
- kfree(p);
-}
-
-static const void *device_namespace(struct kobject *kobj)
-{
- struct device *dev = to_dev(kobj);
- const void *ns = NULL;
-
- if (dev->class && dev->class->ns_type)
- ns = dev->class->namespace(dev);
-
- return ns;
-}
-
-static struct kobj_type device_ktype = {
- .release = device_release,
- .sysfs_ops = &dev_sysfs_ops,
- .namespace = device_namespace,
-};
-
-
-static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
-{
- struct kobj_type *ktype = get_ktype(kobj);
-
- if (ktype == &device_ktype) {
- struct device *dev = to_dev(kobj);
- if (dev->bus)
- return 1;
- if (dev->class)
- return 1;
- }
- return 0;
-}
-
-static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
-{
- struct device *dev = to_dev(kobj);
-
- if (dev->bus)
- return dev->bus->name;
- if (dev->class)
- return dev->class->name;
- return NULL;
-}
-
-static int dev_uevent(struct kset *kset, struct kobject *kobj,
- struct kobj_uevent_env *env)
-{
- struct device *dev = to_dev(kobj);
- int retval = 0;
-
- /* add device node properties if present */
- if (MAJOR(dev->devt)) {
- const char *tmp;
- const char *name;
- umode_t mode = 0;
-
- add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
- add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
- name = device_get_devnode(dev, &mode, &tmp);
- if (name) {
- add_uevent_var(env, "DEVNAME=%s", name);
- kfree(tmp);
- if (mode)
- add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
- }
- }
-
- if (dev->type && dev->type->name)
- add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
-
- if (dev->driver)
- add_uevent_var(env, "DRIVER=%s", dev->driver->name);
-
- /* Add common DT information about the device */
- of_device_uevent(dev, env);
-
- /* have the bus specific function add its stuff */
- if (dev->bus && dev->bus->uevent) {
- retval = dev->bus->uevent(dev, env);
- if (retval)
- pr_debug("device: '%s': %s: bus uevent() returned %d\n",
- dev_name(dev), __func__, retval);
- }
-
- /* have the class specific function add its stuff */
- if (dev->class && dev->class->dev_uevent) {
- retval = dev->class->dev_uevent(dev, env);
- if (retval)
- pr_debug("device: '%s': %s: class uevent() "
- "returned %d\n", dev_name(dev),
- __func__, retval);
- }
-
- /* have the device type specific function add its stuff */
- if (dev->type && dev->type->uevent) {
- retval = dev->type->uevent(dev, env);
- if (retval)
- pr_debug("device: '%s': %s: dev_type uevent() "
- "returned %d\n", dev_name(dev),
- __func__, retval);
- }
-
- return retval;
-}
-
-static const struct kset_uevent_ops device_uevent_ops = {
- .filter = dev_uevent_filter,
- .name = dev_uevent_name,
- .uevent = dev_uevent,
-};
-
-static ssize_t show_uevent(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct kobject *top_kobj;
- struct kset *kset;
- struct kobj_uevent_env *env = NULL;
- int i;
- size_t count = 0;
- int retval;
-
- /* search the kset, the device belongs to */
- top_kobj = &dev->kobj;
- while (!top_kobj->kset && top_kobj->parent)
- top_kobj = top_kobj->parent;
- if (!top_kobj->kset)
- goto out;
-
- kset = top_kobj->kset;
- if (!kset->uevent_ops || !kset->uevent_ops->uevent)
- goto out;
-
- /* respect filter */
- if (kset->uevent_ops && kset->uevent_ops->filter)
- if (!kset->uevent_ops->filter(kset, &dev->kobj))
- goto out;
-
- env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
- if (!env)
- return -ENOMEM;
-
- /* let the kset specific function add its keys */
- retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
- if (retval)
- goto out;
-
- /* copy keys to file */
- for (i = 0; i < env->envp_idx; i++)
- count += sprintf(&buf[count], "%s\n", env->envp[i]);
-out:
- kfree(env);
- return count;
-}
-
-static ssize_t store_uevent(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- enum kobject_action action;
-
- if (kobject_action_type(buf, count, &action) == 0)
- kobject_uevent(&dev->kobj, action);
- else
- dev_err(dev, "uevent: unknown action-string\n");
- return count;
-}
-
-static struct device_attribute uevent_attr =
- __ATTR(uevent, S_IRUGO | S_IWUSR, show_uevent, store_uevent);
-
-static int device_add_attributes(struct device *dev,
- struct device_attribute *attrs)
-{
- int error = 0;
- int i;
-
- if (attrs) {
- for (i = 0; attr_name(attrs[i]); i++) {
- error = device_create_file(dev, &attrs[i]);
- if (error)
- break;
- }
- if (error)
- while (--i >= 0)
- device_remove_file(dev, &attrs[i]);
- }
- return error;
-}
-
-static void device_remove_attributes(struct device *dev,
- struct device_attribute *attrs)
-{
- int i;
-
- if (attrs)
- for (i = 0; attr_name(attrs[i]); i++)
- device_remove_file(dev, &attrs[i]);
-}
-
-static int device_add_bin_attributes(struct device *dev,
- struct bin_attribute *attrs)
-{
- int error = 0;
- int i;
-
- if (attrs) {
- for (i = 0; attr_name(attrs[i]); i++) {
- error = device_create_bin_file(dev, &attrs[i]);
- if (error)
- break;
- }
- if (error)
- while (--i >= 0)
- device_remove_bin_file(dev, &attrs[i]);
- }
- return error;
-}
-
-static void device_remove_bin_attributes(struct device *dev,
- struct bin_attribute *attrs)
-{
- int i;
-
- if (attrs)
- for (i = 0; attr_name(attrs[i]); i++)
- device_remove_bin_file(dev, &attrs[i]);
-}
-
-static int device_add_groups(struct device *dev,
- const struct attribute_group **groups)
-{
- int error = 0;
- int i;
-
- if (groups) {
- for (i = 0; groups[i]; i++) {
- error = sysfs_create_group(&dev->kobj, groups[i]);
- if (error) {
- while (--i >= 0)
- sysfs_remove_group(&dev->kobj,
- groups[i]);
- break;
- }
- }
- }
- return error;
-}
-
-static void device_remove_groups(struct device *dev,
- const struct attribute_group **groups)
-{
- int i;
-
- if (groups)
- for (i = 0; groups[i]; i++)
- sysfs_remove_group(&dev->kobj, groups[i]);
-}
-
-static int device_add_attrs(struct device *dev)
-{
- struct class *class = dev->class;
- const struct device_type *type = dev->type;
- int error;
-
- if (class) {
- error = device_add_attributes(dev, class->dev_attrs);
- if (error)
- return error;
- error = device_add_bin_attributes(dev, class->dev_bin_attrs);
- if (error)
- goto err_remove_class_attrs;
- }
-
- if (type) {
- error = device_add_groups(dev, type->groups);
- if (error)
- goto err_remove_class_bin_attrs;
- }
-
- error = device_add_groups(dev, dev->groups);
- if (error)
- goto err_remove_type_groups;
-
- return 0;
-
- err_remove_type_groups:
- if (type)
- device_remove_groups(dev, type->groups);
- err_remove_class_bin_attrs:
- if (class)
- device_remove_bin_attributes(dev, class->dev_bin_attrs);
- err_remove_class_attrs:
- if (class)
- device_remove_attributes(dev, class->dev_attrs);
-
- return error;
-}
-
-static void device_remove_attrs(struct device *dev)
-{
- struct class *class = dev->class;
- const struct device_type *type = dev->type;
-
- device_remove_groups(dev, dev->groups);
-
- if (type)
- device_remove_groups(dev, type->groups);
-
- if (class) {
- device_remove_attributes(dev, class->dev_attrs);
- device_remove_bin_attributes(dev, class->dev_bin_attrs);
- }
-}
-
-
-static ssize_t show_dev(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return print_dev_t(buf, dev->devt);
-}
-
-static struct device_attribute devt_attr =
- __ATTR(dev, S_IRUGO, show_dev, NULL);
-
-/* /sys/devices/ */
-struct kset *devices_kset;
-
-/**
- * device_create_file - create sysfs attribute file for device.
- * @dev: device.
- * @attr: device attribute descriptor.
- */
-int device_create_file(struct device *dev,
- const struct device_attribute *attr)
-{
- int error = 0;
- if (dev)
- error = sysfs_create_file(&dev->kobj, &attr->attr);
- return error;
-}
-
-/**
- * device_remove_file - remove sysfs attribute file.
- * @dev: device.
- * @attr: device attribute descriptor.
- */
-void device_remove_file(struct device *dev,
- const struct device_attribute *attr)
-{
- if (dev)
- sysfs_remove_file(&dev->kobj, &attr->attr);
-}
-
-/**
- * device_create_bin_file - create sysfs binary attribute file for device.
- * @dev: device.
- * @attr: device binary attribute descriptor.
- */
-int device_create_bin_file(struct device *dev,
- const struct bin_attribute *attr)
-{
- int error = -EINVAL;
- if (dev)
- error = sysfs_create_bin_file(&dev->kobj, attr);
- return error;
-}
-EXPORT_SYMBOL_GPL(device_create_bin_file);
-
-/**
- * device_remove_bin_file - remove sysfs binary attribute file
- * @dev: device.
- * @attr: device binary attribute descriptor.
- */
-void device_remove_bin_file(struct device *dev,
- const struct bin_attribute *attr)
-{
- if (dev)
- sysfs_remove_bin_file(&dev->kobj, attr);
-}
-EXPORT_SYMBOL_GPL(device_remove_bin_file);
-
-/**
- * device_schedule_callback_owner - helper to schedule a callback for a device
- * @dev: device.
- * @func: callback function to invoke later.
- * @owner: module owning the callback routine
- *
- * Attribute methods must not unregister themselves or their parent device
- * (which would amount to the same thing). Attempts to do so will deadlock,
- * since unregistration is mutually exclusive with driver callbacks.
- *
- * Instead methods can call this routine, which will attempt to allocate
- * and schedule a workqueue request to call back @func with @dev as its
- * argument in the workqueue's process context. @dev will be pinned until
- * @func returns.
- *
- * This routine is usually called via the inline device_schedule_callback(),
- * which automatically sets @owner to THIS_MODULE.
- *
- * Returns 0 if the request was submitted, -ENOMEM if storage could not
- * be allocated, -ENODEV if a reference to @owner isn't available.
- *
- * NOTE: This routine won't work if CONFIG_SYSFS isn't set! It uses an
- * underlying sysfs routine (since it is intended for use by attribute
- * methods), and if sysfs isn't available you'll get nothing but -ENOSYS.
- */
-int device_schedule_callback_owner(struct device *dev,
- void (*func)(struct device *), struct module *owner)
-{
- return sysfs_schedule_callback(&dev->kobj,
- (void (*)(void *)) func, dev, owner);
-}
-EXPORT_SYMBOL_GPL(device_schedule_callback_owner);
-
-static void klist_children_get(struct klist_node *n)
-{
- struct device_private *p = to_device_private_parent(n);
- struct device *dev = p->device;
-
- get_device(dev);
-}
-
-static void klist_children_put(struct klist_node *n)
-{
- struct device_private *p = to_device_private_parent(n);
- struct device *dev = p->device;
-
- put_device(dev);
-}
-
-/**
- * device_initialize - init device structure.
- * @dev: device.
- *
- * This prepares the device for use by other layers by initializing
- * its fields.
- * It is the first half of device_register(), if called by
- * that function, though it can also be called separately, so one
- * may use @dev's fields. In particular, get_device()/put_device()
- * may be used for reference counting of @dev after calling this
- * function.
- *
- * All fields in @dev must be initialized by the caller to 0, except
- * for those explicitly set to some other value. The simplest
- * approach is to use kzalloc() to allocate the structure containing
- * @dev.
- *
- * NOTE: Use put_device() to give up your reference instead of freeing
- * @dev directly once you have called this function.
- */
-void device_initialize(struct device *dev)
-{
- dev->kobj.kset = devices_kset;
- kobject_init(&dev->kobj, &device_ktype);
- INIT_LIST_HEAD(&dev->dma_pools);
- mutex_init(&dev->mutex);
- lockdep_set_novalidate_class(&dev->mutex);
- spin_lock_init(&dev->devres_lock);
- INIT_LIST_HEAD(&dev->devres_head);
- device_pm_init(dev);
- set_dev_node(dev, -1);
-}
-
-static struct kobject *virtual_device_parent(struct device *dev)
-{
- static struct kobject *virtual_dir = NULL;
-
- if (!virtual_dir)
- virtual_dir = kobject_create_and_add("virtual",
- &devices_kset->kobj);
-
- return virtual_dir;
-}
-
-struct class_dir {
- struct kobject kobj;
- struct class *class;
-};
-
-#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
-
-static void class_dir_release(struct kobject *kobj)
-{
- struct class_dir *dir = to_class_dir(kobj);
- kfree(dir);
-}
-
-static const
-struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
-{
- struct class_dir *dir = to_class_dir(kobj);
- return dir->class->ns_type;
-}
-
-static struct kobj_type class_dir_ktype = {
- .release = class_dir_release,
- .sysfs_ops = &kobj_sysfs_ops,
- .child_ns_type = class_dir_child_ns_type
-};
-
-static struct kobject *
-class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
-{
- struct class_dir *dir;
- int retval;
-
- dir = kzalloc(sizeof(*dir), GFP_KERNEL);
- if (!dir)
- return NULL;
-
- dir->class = class;
- kobject_init(&dir->kobj, &class_dir_ktype);
-
- dir->kobj.kset = &class->p->glue_dirs;
-
- retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
- if (retval < 0) {
- kobject_put(&dir->kobj);
- return NULL;
- }
- return &dir->kobj;
-}
-
-
-static struct kobject *get_device_parent(struct device *dev,
- struct device *parent)
-{
- if (dev->class) {
- static DEFINE_MUTEX(gdp_mutex);
- struct kobject *kobj = NULL;
- struct kobject *parent_kobj;
- struct kobject *k;
-
-#ifdef CONFIG_BLOCK
- /* block disks show up in /sys/block */
- if (sysfs_deprecated && dev->class == &block_class) {
- if (parent && parent->class == &block_class)
- return &parent->kobj;
- return &block_class.p->subsys.kobj;
- }
-#endif
-
- /*
- * If we have no parent, we live in "virtual".
- * Class-devices with a non class-device as parent, live
- * in a "glue" directory to prevent namespace collisions.
- */
- if (parent == NULL)
- parent_kobj = virtual_device_parent(dev);
- else if (parent->class && !dev->class->ns_type)
- return &parent->kobj;
- else
- parent_kobj = &parent->kobj;
-
- mutex_lock(&gdp_mutex);
-
- /* find our class-directory at the parent and reference it */
- spin_lock(&dev->class->p->glue_dirs.list_lock);
- list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
- if (k->parent == parent_kobj) {
- kobj = kobject_get(k);
- break;
- }
- spin_unlock(&dev->class->p->glue_dirs.list_lock);
- if (kobj) {
- mutex_unlock(&gdp_mutex);
- return kobj;
- }
-
- /* or create a new class-directory at the parent device */
- k = class_dir_create_and_add(dev->class, parent_kobj);
- /* do not emit an uevent for this simple "glue" directory */
- mutex_unlock(&gdp_mutex);
- return k;
- }
-
- /* subsystems can specify a default root directory for their devices */
- if (!parent && dev->bus && dev->bus->dev_root)
- return &dev->bus->dev_root->kobj;
-
- if (parent)
- return &parent->kobj;
- return NULL;
-}
-
-static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
-{
- /* see if we live in a "glue" directory */
- if (!glue_dir || !dev->class ||
- glue_dir->kset != &dev->class->p->glue_dirs)
- return;
-
- kobject_put(glue_dir);
-}
-
-static void cleanup_device_parent(struct device *dev)
-{
- cleanup_glue_dir(dev, dev->kobj.parent);
-}
-
-static int device_add_class_symlinks(struct device *dev)
-{
- int error;
-
- if (!dev->class)
- return 0;
-
- error = sysfs_create_link(&dev->kobj,
- &dev->class->p->subsys.kobj,
- "subsystem");
- if (error)
- goto out;
-
- if (dev->parent && device_is_not_partition(dev)) {
- error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
- "device");
- if (error)
- goto out_subsys;
- }
-
-#ifdef CONFIG_BLOCK
- /* /sys/block has directories and does not need symlinks */
- if (sysfs_deprecated && dev->class == &block_class)
- return 0;
-#endif
-
- /* link in the class directory pointing to the device */
- error = sysfs_create_link(&dev->class->p->subsys.kobj,
- &dev->kobj, dev_name(dev));
- if (error)
- goto out_device;
-
- return 0;
-
-out_device:
- sysfs_remove_link(&dev->kobj, "device");
-
-out_subsys:
- sysfs_remove_link(&dev->kobj, "subsystem");
-out:
- return error;
-}
-
-static void device_remove_class_symlinks(struct device *dev)
-{
- if (!dev->class)
- return;
-
- if (dev->parent && device_is_not_partition(dev))
- sysfs_remove_link(&dev->kobj, "device");
- sysfs_remove_link(&dev->kobj, "subsystem");
-#ifdef CONFIG_BLOCK
- if (sysfs_deprecated && dev->class == &block_class)
- return;
-#endif
- sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
-}
-
-/**
- * dev_set_name - set a device name
- * @dev: device
- * @fmt: format string for the device's name
- */
-int dev_set_name(struct device *dev, const char *fmt, ...)
-{
- va_list vargs;
- int err;
-
- va_start(vargs, fmt);
- err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
- va_end(vargs);
- return err;
-}
-EXPORT_SYMBOL_GPL(dev_set_name);
-
-/**
- * device_to_dev_kobj - select a /sys/dev/ directory for the device
- * @dev: device
- *
- * By default we select char/ for new entries. Setting class->dev_obj
- * to NULL prevents an entry from being created. class->dev_kobj must
- * be set (or cleared) before any devices are registered to the class
- * otherwise device_create_sys_dev_entry() and
- * device_remove_sys_dev_entry() will disagree about the the presence
- * of the link.
- */
-static struct kobject *device_to_dev_kobj(struct device *dev)
-{
- struct kobject *kobj;
-
- if (dev->class)
- kobj = dev->class->dev_kobj;
- else
- kobj = sysfs_dev_char_kobj;
-
- return kobj;
-}
-
-static int device_create_sys_dev_entry(struct device *dev)
-{
- struct kobject *kobj = device_to_dev_kobj(dev);
- int error = 0;
- char devt_str[15];
-
- if (kobj) {
- format_dev_t(devt_str, dev->devt);
- error = sysfs_create_link(kobj, &dev->kobj, devt_str);
- }
-
- return error;
-}
-
-static void device_remove_sys_dev_entry(struct device *dev)
-{
- struct kobject *kobj = device_to_dev_kobj(dev);
- char devt_str[15];
-
- if (kobj) {
- format_dev_t(devt_str, dev->devt);
- sysfs_remove_link(kobj, devt_str);
- }
-}
-
-int device_private_init(struct device *dev)
-{
- dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
- if (!dev->p)
- return -ENOMEM;
- dev->p->device = dev;
- klist_init(&dev->p->klist_children, klist_children_get,
- klist_children_put);
- INIT_LIST_HEAD(&dev->p->deferred_probe);
- return 0;
-}
-
-/**
- * device_add - add device to device hierarchy.
- * @dev: device.
- *
- * This is part 2 of device_register(), though may be called
- * separately _iff_ device_initialize() has been called separately.
- *
- * This adds @dev to the kobject hierarchy via kobject_add(), adds it
- * to the global and sibling lists for the device, then
- * adds it to the other relevant subsystems of the driver model.
- *
- * Do not call this routine or device_register() more than once for
- * any device structure. The driver model core is not designed to work
- * with devices that get unregistered and then spring back to life.
- * (Among other things, it's very hard to guarantee that all references
- * to the previous incarnation of @dev have been dropped.) Allocate
- * and register a fresh new struct device instead.
- *
- * NOTE: _Never_ directly free @dev after calling this function, even
- * if it returned an error! Always use put_device() to give up your
- * reference instead.
- */
-int device_add(struct device *dev)
-{
- struct device *parent = NULL;
- struct kobject *kobj;
- struct class_interface *class_intf;
- int error = -EINVAL;
-
- dev = get_device(dev);
- if (!dev)
- goto done;
-
- if (!dev->p) {
- error = device_private_init(dev);
- if (error)
- goto done;
- }
-
- /*
- * for statically allocated devices, which should all be converted
- * some day, we need to initialize the name. We prevent reading back
- * the name, and force the use of dev_name()
- */
- if (dev->init_name) {
- dev_set_name(dev, "%s", dev->init_name);
- dev->init_name = NULL;
- }
-
- /* subsystems can specify simple device enumeration */
- if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
- dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);
-
- if (!dev_name(dev)) {
- error = -EINVAL;
- goto name_error;
- }
-
- pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
-
- parent = get_device(dev->parent);
- kobj = get_device_parent(dev, parent);
- if (kobj)
- dev->kobj.parent = kobj;
-
- /* use parent numa_node */
- if (parent)
- set_dev_node(dev, dev_to_node(parent));
-
- /* first, register with generic layer. */
- /* we require the name to be set before, and pass NULL */
- error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
- if (error)
- goto Error;
-
- /* notify platform of device entry */
- if (platform_notify)
- platform_notify(dev);
-
- error = device_create_file(dev, &uevent_attr);
- if (error)
- goto attrError;
-
- if (MAJOR(dev->devt)) {
- error = device_create_file(dev, &devt_attr);
- if (error)
- goto ueventattrError;
-
- error = device_create_sys_dev_entry(dev);
- if (error)
- goto devtattrError;
-
- devtmpfs_create_node(dev);
- }
-
- error = device_add_class_symlinks(dev);
- if (error)
- goto SymlinkError;
- error = device_add_attrs(dev);
- if (error)
- goto AttrsError;
- error = bus_add_device(dev);
- if (error)
- goto BusError;
- error = dpm_sysfs_add(dev);
- if (error)
- goto DPMError;
- device_pm_add(dev);
-
- /* Notify clients of device addition. This call must come
- * after dpm_sysfs_add() and before kobject_uevent().
- */
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_ADD_DEVICE, dev);
-
- kobject_uevent(&dev->kobj, KOBJ_ADD);
- bus_probe_device(dev);
- if (parent)
- klist_add_tail(&dev->p->knode_parent,
- &parent->p->klist_children);
-
- if (dev->class) {
- mutex_lock(&dev->class->p->mutex);
- /* tie the class to the device */
- klist_add_tail(&dev->knode_class,
- &dev->class->p->klist_devices);
-
- /* notify any interfaces that the device is here */
- list_for_each_entry(class_intf,
- &dev->class->p->interfaces, node)
- if (class_intf->add_dev)
- class_intf->add_dev(dev, class_intf);
- mutex_unlock(&dev->class->p->mutex);
- }
-done:
- put_device(dev);
- return error;
- DPMError:
- bus_remove_device(dev);
- BusError:
- device_remove_attrs(dev);
- AttrsError:
- device_remove_class_symlinks(dev);
- SymlinkError:
- if (MAJOR(dev->devt))
- devtmpfs_delete_node(dev);
- if (MAJOR(dev->devt))
- device_remove_sys_dev_entry(dev);
- devtattrError:
- if (MAJOR(dev->devt))
- device_remove_file(dev, &devt_attr);
- ueventattrError:
- device_remove_file(dev, &uevent_attr);
- attrError:
- kobject_uevent(&dev->kobj, KOBJ_REMOVE);
- kobject_del(&dev->kobj);
- Error:
- cleanup_device_parent(dev);
- if (parent)
- put_device(parent);
-name_error:
- kfree(dev->p);
- dev->p = NULL;
- goto done;
-}
-
-/**
- * device_register - register a device with the system.
- * @dev: pointer to the device structure
- *
- * This happens in two clean steps - initialize the device
- * and add it to the system. The two steps can be called
- * separately, but this is the easiest and most common.
- * I.e. you should only call the two helpers separately if
- * have a clearly defined need to use and refcount the device
- * before it is added to the hierarchy.
- *
- * For more information, see the kerneldoc for device_initialize()
- * and device_add().
- *
- * NOTE: _Never_ directly free @dev after calling this function, even
- * if it returned an error! Always use put_device() to give up the
- * reference initialized in this function instead.
- */
-int device_register(struct device *dev)
-{
- device_initialize(dev);
- return device_add(dev);
-}
-
-/**
- * get_device - increment reference count for device.
- * @dev: device.
- *
- * This simply forwards the call to kobject_get(), though
- * we do take care to provide for the case that we get a NULL
- * pointer passed in.
- */
-struct device *get_device(struct device *dev)
-{
- return dev ? to_dev(kobject_get(&dev->kobj)) : NULL;
-}
-
-/**
- * put_device - decrement reference count.
- * @dev: device in question.
- */
-void put_device(struct device *dev)
-{
- /* might_sleep(); */
- if (dev)
- kobject_put(&dev->kobj);
-}
-
-/**
- * device_del - delete device from system.
- * @dev: device.
- *
- * This is the first part of the device unregistration
- * sequence. This removes the device from the lists we control
- * from here, has it removed from the other driver model
- * subsystems it was added to in device_add(), and removes it
- * from the kobject hierarchy.
- *
- * NOTE: this should be called manually _iff_ device_add() was
- * also called manually.
- */
-void device_del(struct device *dev)
-{
- struct device *parent = dev->parent;
- struct class_interface *class_intf;
-
- /* Notify clients of device removal. This call must come
- * before dpm_sysfs_remove().
- */
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_DEL_DEVICE, dev);
- device_pm_remove(dev);
- dpm_sysfs_remove(dev);
- if (parent)
- klist_del(&dev->p->knode_parent);
- if (MAJOR(dev->devt)) {
- devtmpfs_delete_node(dev);
- device_remove_sys_dev_entry(dev);
- device_remove_file(dev, &devt_attr);
- }
- if (dev->class) {
- device_remove_class_symlinks(dev);
-
- mutex_lock(&dev->class->p->mutex);
- /* notify any interfaces that the device is now gone */
- list_for_each_entry(class_intf,
- &dev->class->p->interfaces, node)
- if (class_intf->remove_dev)
- class_intf->remove_dev(dev, class_intf);
- /* remove the device from the class list */
- klist_del(&dev->knode_class);
- mutex_unlock(&dev->class->p->mutex);
- }
- device_remove_file(dev, &uevent_attr);
- device_remove_attrs(dev);
- bus_remove_device(dev);
- driver_deferred_probe_del(dev);
-
- /*
- * Some platform devices are driven without driver attached
- * and managed resources may have been acquired. Make sure
- * all resources are released.
- */
- devres_release_all(dev);
-
- /* Notify the platform of the removal, in case they
- * need to do anything...
- */
- if (platform_notify_remove)
- platform_notify_remove(dev);
- kobject_uevent(&dev->kobj, KOBJ_REMOVE);
- cleanup_device_parent(dev);
- kobject_del(&dev->kobj);
- put_device(parent);
-}
-
-/**
- * device_unregister - unregister device from system.
- * @dev: device going away.
- *
- * We do this in two parts, like we do device_register(). First,
- * we remove it from all the subsystems with device_del(), then
- * we decrement the reference count via put_device(). If that
- * is the final reference count, the device will be cleaned up
- * via device_release() above. Otherwise, the structure will
- * stick around until the final reference to the device is dropped.
- */
-void device_unregister(struct device *dev)
-{
- pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
- device_del(dev);
- put_device(dev);
-}
-
-static struct device *next_device(struct klist_iter *i)
-{
- struct klist_node *n = klist_next(i);
- struct device *dev = NULL;
- struct device_private *p;
-
- if (n) {
- p = to_device_private_parent(n);
- dev = p->device;
- }
- return dev;
-}
-
-/**
- * device_get_devnode - path of device node file
- * @dev: device
- * @mode: returned file access mode
- * @tmp: possibly allocated string
- *
- * Return the relative path of a possible device node.
- * Non-default names may need to allocate a memory to compose
- * a name. This memory is returned in tmp and needs to be
- * freed by the caller.
- */
-const char *device_get_devnode(struct device *dev,
- umode_t *mode, const char **tmp)
-{
- char *s;
-
- *tmp = NULL;
-
- /* the device type may provide a specific name */
- if (dev->type && dev->type->devnode)
- *tmp = dev->type->devnode(dev, mode);
- if (*tmp)
- return *tmp;
-
- /* the class may provide a specific name */
- if (dev->class && dev->class->devnode)
- *tmp = dev->class->devnode(dev, mode);
- if (*tmp)
- return *tmp;
-
- /* return name without allocation, tmp == NULL */
- if (strchr(dev_name(dev), '!') == NULL)
- return dev_name(dev);
-
- /* replace '!' in the name with '/' */
- *tmp = kstrdup(dev_name(dev), GFP_KERNEL);
- if (!*tmp)
- return NULL;
- while ((s = strchr(*tmp, '!')))
- s[0] = '/';
- return *tmp;
-}
-
-/**
- * device_for_each_child - device child iterator.
- * @parent: parent struct device.
- * @data: data for the callback.
- * @fn: function to be called for each device.
- *
- * Iterate over @parent's child devices, and call @fn for each,
- * passing it @data.
- *
- * We check the return of @fn each time. If it returns anything
- * other than 0, we break out and return that value.
- */
-int device_for_each_child(struct device *parent, void *data,
- int (*fn)(struct device *dev, void *data))
-{
- struct klist_iter i;
- struct device *child;
- int error = 0;
-
- if (!parent->p)
- return 0;
-
- klist_iter_init(&parent->p->klist_children, &i);
- while ((child = next_device(&i)) && !error)
- error = fn(child, data);
- klist_iter_exit(&i);
- return error;
-}
-
-/**
- * device_find_child - device iterator for locating a particular device.
- * @parent: parent struct device
- * @data: Data to pass to match function
- * @match: Callback function to check device
- *
- * This is similar to the device_for_each_child() function above, but it
- * returns a reference to a device that is 'found' for later use, as
- * determined by the @match callback.
- *
- * The callback should return 0 if the device doesn't match and non-zero
- * if it does. If the callback returns non-zero and a reference to the
- * current device can be obtained, this function will return to the caller
- * and not iterate over any more devices.
- */
-struct device *device_find_child(struct device *parent, void *data,
- int (*match)(struct device *dev, void *data))
-{
- struct klist_iter i;
- struct device *child;
-
- if (!parent)
- return NULL;
-
- klist_iter_init(&parent->p->klist_children, &i);
- while ((child = next_device(&i)))
- if (match(child, data) && get_device(child))
- break;
- klist_iter_exit(&i);
- return child;
-}
-
-int __init devices_init(void)
-{
- devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
- if (!devices_kset)
- return -ENOMEM;
- dev_kobj = kobject_create_and_add("dev", NULL);
- if (!dev_kobj)
- goto dev_kobj_err;
- sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
- if (!sysfs_dev_block_kobj)
- goto block_kobj_err;
- sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
- if (!sysfs_dev_char_kobj)
- goto char_kobj_err;
-
- return 0;
-
- char_kobj_err:
- kobject_put(sysfs_dev_block_kobj);
- block_kobj_err:
- kobject_put(dev_kobj);
- dev_kobj_err:
- kset_unregister(devices_kset);
- return -ENOMEM;
-}
-
-EXPORT_SYMBOL_GPL(device_for_each_child);
-EXPORT_SYMBOL_GPL(device_find_child);
-
-EXPORT_SYMBOL_GPL(device_initialize);
-EXPORT_SYMBOL_GPL(device_add);
-EXPORT_SYMBOL_GPL(device_register);
-
-EXPORT_SYMBOL_GPL(device_del);
-EXPORT_SYMBOL_GPL(device_unregister);
-EXPORT_SYMBOL_GPL(get_device);
-EXPORT_SYMBOL_GPL(put_device);
-
-EXPORT_SYMBOL_GPL(device_create_file);
-EXPORT_SYMBOL_GPL(device_remove_file);
-
-struct root_device {
- struct device dev;
- struct module *owner;
-};
-
-inline struct root_device *to_root_device(struct device *d)
-{
- return container_of(d, struct root_device, dev);
-}
-
-static void root_device_release(struct device *dev)
-{
- kfree(to_root_device(dev));
-}
-
-/**
- * __root_device_register - allocate and register a root device
- * @name: root device name
- * @owner: owner module of the root device, usually THIS_MODULE
- *
- * This function allocates a root device and registers it
- * using device_register(). In order to free the returned
- * device, use root_device_unregister().
- *
- * Root devices are dummy devices which allow other devices
- * to be grouped under /sys/devices. Use this function to
- * allocate a root device and then use it as the parent of
- * any device which should appear under /sys/devices/{name}
- *
- * The /sys/devices/{name} directory will also contain a
- * 'module' symlink which points to the @owner directory
- * in sysfs.
- *
- * Returns &struct device pointer on success, or ERR_PTR() on error.
- *
- * Note: You probably want to use root_device_register().
- */
-struct device *__root_device_register(const char *name, struct module *owner)
-{
- struct root_device *root;
- int err = -ENOMEM;
-
- root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
- if (!root)
- return ERR_PTR(err);
-
- err = dev_set_name(&root->dev, "%s", name);
- if (err) {
- kfree(root);
- return ERR_PTR(err);
- }
-
- root->dev.release = root_device_release;
-
- err = device_register(&root->dev);
- if (err) {
- put_device(&root->dev);
- return ERR_PTR(err);
- }
-
-#ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */
- if (owner) {
- struct module_kobject *mk = &owner->mkobj;
-
- err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
- if (err) {
- device_unregister(&root->dev);
- return ERR_PTR(err);
- }
- root->owner = owner;
- }
-#endif
-
- return &root->dev;
-}
-EXPORT_SYMBOL_GPL(__root_device_register);
-
-/**
- * root_device_unregister - unregister and free a root device
- * @dev: device going away
- *
- * This function unregisters and cleans up a device that was created by
- * root_device_register().
- */
-void root_device_unregister(struct device *dev)
-{
- struct root_device *root = to_root_device(dev);
-
- if (root->owner)
- sysfs_remove_link(&root->dev.kobj, "module");
-
- device_unregister(dev);
-}
-EXPORT_SYMBOL_GPL(root_device_unregister);
-
-
-static void device_create_release(struct device *dev)
-{
- pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
- kfree(dev);
-}
-
-/**
- * device_create_vargs - creates a device and registers it with sysfs
- * @class: pointer to the struct class that this device should be registered to
- * @parent: pointer to the parent struct device of this new device, if any
- * @devt: the dev_t for the char device to be added
- * @drvdata: the data to be added to the device for callbacks
- * @fmt: string for the device's name
- * @args: va_list for the device's name
- *
- * This function can be used by char device classes. A struct device
- * will be created in sysfs, registered to the specified class.
- *
- * A "dev" file will be created, showing the dev_t for the device, if
- * the dev_t is not 0,0.
- * If a pointer to a parent struct device is passed in, the newly created
- * struct device will be a child of that device in sysfs.
- * The pointer to the struct device will be returned from the call.
- * Any further sysfs files that might be required can be created using this
- * pointer.
- *
- * Returns &struct device pointer on success, or ERR_PTR() on error.
- *
- * Note: the struct class passed to this function must have previously
- * been created with a call to class_create().
- */
-struct device *device_create_vargs(struct class *class, struct device *parent,
- dev_t devt, void *drvdata, const char *fmt,
- va_list args)
-{
- struct device *dev = NULL;
- int retval = -ENODEV;
-
- if (class == NULL || IS_ERR(class))
- goto error;
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- retval = -ENOMEM;
- goto error;
- }
-
- dev->devt = devt;
- dev->class = class;
- dev->parent = parent;
- dev->release = device_create_release;
- dev_set_drvdata(dev, drvdata);
-
- retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
- if (retval)
- goto error;
-
- retval = device_register(dev);
- if (retval)
- goto error;
-
- return dev;
-
-error:
- put_device(dev);
- return ERR_PTR(retval);
-}
-EXPORT_SYMBOL_GPL(device_create_vargs);
-
-/**
- * device_create - creates a device and registers it with sysfs
- * @class: pointer to the struct class that this device should be registered to
- * @parent: pointer to the parent struct device of this new device, if any
- * @devt: the dev_t for the char device to be added
- * @drvdata: the data to be added to the device for callbacks
- * @fmt: string for the device's name
- *
- * This function can be used by char device classes. A struct device
- * will be created in sysfs, registered to the specified class.
- *
- * A "dev" file will be created, showing the dev_t for the device, if
- * the dev_t is not 0,0.
- * If a pointer to a parent struct device is passed in, the newly created
- * struct device will be a child of that device in sysfs.
- * The pointer to the struct device will be returned from the call.
- * Any further sysfs files that might be required can be created using this
- * pointer.
- *
- * Returns &struct device pointer on success, or ERR_PTR() on error.
- *
- * Note: the struct class passed to this function must have previously
- * been created with a call to class_create().
- */
-struct device *device_create(struct class *class, struct device *parent,
- dev_t devt, void *drvdata, const char *fmt, ...)
-{
- va_list vargs;
- struct device *dev;
-
- va_start(vargs, fmt);
- dev = device_create_vargs(class, parent, devt, drvdata, fmt, vargs);
- va_end(vargs);
- return dev;
-}
-EXPORT_SYMBOL_GPL(device_create);
-
-static int __match_devt(struct device *dev, void *data)
-{
- dev_t *devt = data;
-
- return dev->devt == *devt;
-}
-
-/**
- * device_destroy - removes a device that was created with device_create()
- * @class: pointer to the struct class that this device was registered with
- * @devt: the dev_t of the device that was previously registered
- *
- * This call unregisters and cleans up a device that was created with a
- * call to device_create().
- */
-void device_destroy(struct class *class, dev_t devt)
-{
- struct device *dev;
-
- dev = class_find_device(class, NULL, &devt, __match_devt);
- if (dev) {
- put_device(dev);
- device_unregister(dev);
- }
-}
-EXPORT_SYMBOL_GPL(device_destroy);
-
-/**
- * device_rename - renames a device
- * @dev: the pointer to the struct device to be renamed
- * @new_name: the new name of the device
- *
- * It is the responsibility of the caller to provide mutual
- * exclusion between two different calls of device_rename
- * on the same device to ensure that new_name is valid and
- * won't conflict with other devices.
- *
- * Note: Don't call this function. Currently, the networking layer calls this
- * function, but that will change. The following text from Kay Sievers offers
- * some insight:
- *
- * Renaming devices is racy at many levels, symlinks and other stuff are not
- * replaced atomically, and you get a "move" uevent, but it's not easy to
- * connect the event to the old and new device. Device nodes are not renamed at
- * all, there isn't even support for that in the kernel now.
- *
- * In the meantime, during renaming, your target name might be taken by another
- * driver, creating conflicts. Or the old name is taken directly after you
- * renamed it -- then you get events for the same DEVPATH, before you even see
- * the "move" event. It's just a mess, and nothing new should ever rely on
- * kernel device renaming. Besides that, it's not even implemented now for
- * other things than (driver-core wise very simple) network devices.
- *
- * We are currently about to change network renaming in udev to completely
- * disallow renaming of devices in the same namespace as the kernel uses,
- * because we can't solve the problems properly, that arise with swapping names
- * of multiple interfaces without races. Means, renaming of eth[0-9]* will only
- * be allowed to some other name than eth[0-9]*, for the aforementioned
- * reasons.
- *
- * Make up a "real" name in the driver before you register anything, or add
- * some other attributes for userspace to find the device, or use udev to add
- * symlinks -- but never rename kernel devices later, it's a complete mess. We
- * don't even want to get into that and try to implement the missing pieces in
- * the core. We really have other pieces to fix in the driver core mess. :)
- */
-int device_rename(struct device *dev, const char *new_name)
-{
- char *old_class_name = NULL;
- char *new_class_name = NULL;
- char *old_device_name = NULL;
- int error;
-
- dev = get_device(dev);
- if (!dev)
- return -EINVAL;
-
- pr_debug("device: '%s': %s: renaming to '%s'\n", dev_name(dev),
- __func__, new_name);
-
- old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
- if (!old_device_name) {
- error = -ENOMEM;
- goto out;
- }
-
- if (dev->class) {
- error = sysfs_rename_link(&dev->class->p->subsys.kobj,
- &dev->kobj, old_device_name, new_name);
- if (error)
- goto out;
- }
-
- error = kobject_rename(&dev->kobj, new_name);
- if (error)
- goto out;
-
-out:
- put_device(dev);
-
- kfree(new_class_name);
- kfree(old_class_name);
- kfree(old_device_name);
-
- return error;
-}
-EXPORT_SYMBOL_GPL(device_rename);
-
-static int device_move_class_links(struct device *dev,
- struct device *old_parent,
- struct device *new_parent)
-{
- int error = 0;
-
- if (old_parent)
- sysfs_remove_link(&dev->kobj, "device");
- if (new_parent)
- error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
- "device");
- return error;
-}
-
-/**
- * device_move - moves a device to a new parent
- * @dev: the pointer to the struct device to be moved
- * @new_parent: the new parent of the device (can by NULL)
- * @dpm_order: how to reorder the dpm_list
- */
-int device_move(struct device *dev, struct device *new_parent,
- enum dpm_order dpm_order)
-{
- int error;
- struct device *old_parent;
- struct kobject *new_parent_kobj;
-
- dev = get_device(dev);
- if (!dev)
- return -EINVAL;
-
- device_pm_lock();
- new_parent = get_device(new_parent);
- new_parent_kobj = get_device_parent(dev, new_parent);
-
- pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
- __func__, new_parent ? dev_name(new_parent) : "<NULL>");
- error = kobject_move(&dev->kobj, new_parent_kobj);
- if (error) {
- cleanup_glue_dir(dev, new_parent_kobj);
- put_device(new_parent);
- goto out;
- }
- old_parent = dev->parent;
- dev->parent = new_parent;
- if (old_parent)
- klist_remove(&dev->p->knode_parent);
- if (new_parent) {
- klist_add_tail(&dev->p->knode_parent,
- &new_parent->p->klist_children);
- set_dev_node(dev, dev_to_node(new_parent));
- }
-
- if (!dev->class)
- goto out_put;
- error = device_move_class_links(dev, old_parent, new_parent);
- if (error) {
- /* We ignore errors on cleanup since we're hosed anyway... */
- device_move_class_links(dev, new_parent, old_parent);
- if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
- if (new_parent)
- klist_remove(&dev->p->knode_parent);
- dev->parent = old_parent;
- if (old_parent) {
- klist_add_tail(&dev->p->knode_parent,
- &old_parent->p->klist_children);
- set_dev_node(dev, dev_to_node(old_parent));
- }
- }
- cleanup_glue_dir(dev, new_parent_kobj);
- put_device(new_parent);
- goto out;
- }
- switch (dpm_order) {
- case DPM_ORDER_NONE:
- break;
- case DPM_ORDER_DEV_AFTER_PARENT:
- device_pm_move_after(dev, new_parent);
- break;
- case DPM_ORDER_PARENT_BEFORE_DEV:
- device_pm_move_before(new_parent, dev);
- break;
- case DPM_ORDER_DEV_LAST:
- device_pm_move_last(dev);
- break;
- }
-out_put:
- put_device(old_parent);
-out:
- device_pm_unlock();
- put_device(dev);
- return error;
-}
-EXPORT_SYMBOL_GPL(device_move);
-
-/**
- * device_shutdown - call ->shutdown() on each device to shutdown.
- */
-void device_shutdown(void)
-{
- struct device *dev;
-
- spin_lock(&devices_kset->list_lock);
- /*
- * Walk the devices list backward, shutting down each in turn.
- * Beware that device unplug events may also start pulling
- * devices offline, even as the system is shutting down.
- */
- while (!list_empty(&devices_kset->list)) {
- dev = list_entry(devices_kset->list.prev, struct device,
- kobj.entry);
- get_device(dev);
- /*
- * Make sure the device is off the kset list, in the
- * event that dev->*->shutdown() doesn't remove it.
- */
- list_del_init(&dev->kobj.entry);
- spin_unlock(&devices_kset->list_lock);
-
- /* Don't allow any more runtime suspends */
- pm_runtime_get_noresume(dev);
- pm_runtime_barrier(dev);
-
- if (dev->bus && dev->bus->shutdown) {
- dev_dbg(dev, "shutdown\n");
- dev->bus->shutdown(dev);
- } else if (dev->driver && dev->driver->shutdown) {
- dev_dbg(dev, "shutdown\n");
- dev->driver->shutdown(dev);
- }
- put_device(dev);
-
- spin_lock(&devices_kset->list_lock);
- }
- spin_unlock(&devices_kset->list_lock);
- async_synchronize_full();
-}
-
-/*
- * Device logging functions
- */
-
-#ifdef CONFIG_PRINTK
-
-int __dev_printk(const char *level, const struct device *dev,
- struct va_format *vaf)
-{
- if (!dev)
- return printk("%s(NULL device *): %pV", level, vaf);
-
- return printk("%s%s %s: %pV",
- level, dev_driver_string(dev), dev_name(dev), vaf);
-}
-EXPORT_SYMBOL(__dev_printk);
-
-int dev_printk(const char *level, const struct device *dev,
- const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
- int r;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- r = __dev_printk(level, dev, &vaf);
- va_end(args);
-
- return r;
-}
-EXPORT_SYMBOL(dev_printk);
-
-#define define_dev_printk_level(func, kern_level) \
-int func(const struct device *dev, const char *fmt, ...) \
-{ \
- struct va_format vaf; \
- va_list args; \
- int r; \
- \
- va_start(args, fmt); \
- \
- vaf.fmt = fmt; \
- vaf.va = &args; \
- \
- r = __dev_printk(kern_level, dev, &vaf); \
- va_end(args); \
- \
- return r; \
-} \
-EXPORT_SYMBOL(func);
-
-define_dev_printk_level(dev_emerg, KERN_EMERG);
-define_dev_printk_level(dev_alert, KERN_ALERT);
-define_dev_printk_level(dev_crit, KERN_CRIT);
-define_dev_printk_level(dev_err, KERN_ERR);
-define_dev_printk_level(dev_warn, KERN_WARNING);
-define_dev_printk_level(dev_notice, KERN_NOTICE);
-define_dev_printk_level(_dev_info, KERN_INFO);
-
-#endif
diff --git a/ANDROID_3.4.5/drivers/base/cpu.c b/ANDROID_3.4.5/drivers/base/cpu.c
deleted file mode 100644
index adf937bf..00000000
--- a/ANDROID_3.4.5/drivers/base/cpu.c
+++ /dev/null
@@ -1,337 +0,0 @@
-/*
- * CPU subsystem support
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/cpu.h>
-#include <linux/topology.h>
-#include <linux/device.h>
-#include <linux/node.h>
-#include <linux/gfp.h>
-#include <linux/slab.h>
-#include <linux/percpu.h>
-
-#include "base.h"
-
-struct bus_type cpu_subsys = {
- .name = "cpu",
- .dev_name = "cpu",
-};
-EXPORT_SYMBOL_GPL(cpu_subsys);
-
-static DEFINE_PER_CPU(struct device *, cpu_sys_devices);
-
-#ifdef CONFIG_HOTPLUG_CPU
-/* sysfs "online" show handler: "1" if this CPU is online, else "0". */
-static ssize_t show_online(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
-
- return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id));
-}
-
-/*
- * sysfs "online" store handler: writing '0' offlines the CPU via
- * cpu_down(), '1' onlines it via cpu_up(); anything else is -EINVAL.
- * A KOBJ_OFFLINE/KOBJ_ONLINE uevent is emitted on success so userspace
- * can observe the transition.  The whole operation runs under the
- * cpu_hotplug_driver lock.
- */
-static ssize_t __ref store_online(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- ssize_t ret;
-
- cpu_hotplug_driver_lock();
- switch (buf[0]) {
- case '0':
- ret = cpu_down(cpu->dev.id);
- if (!ret)
- kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
- break;
- case '1':
- ret = cpu_up(cpu->dev.id);
- if (!ret)
- kobject_uevent(&dev->kobj, KOBJ_ONLINE);
- break;
- default:
- ret = -EINVAL;
- }
- cpu_hotplug_driver_unlock();
-
- /* On success report the whole input as consumed. */
- if (ret >= 0)
- ret = count;
- return ret;
-}
-static DEVICE_ATTR(online, 0644, show_online, store_online);
-
-/* Expose the "online" control file for a hotpluggable CPU device. */
-static void __cpuinit register_cpu_control(struct cpu *cpu)
-{
- device_create_file(&cpu->dev, &dev_attr_online);
-}
-/*
- * unregister_cpu - tear down the sysfs device for a CPU.
- * @cpu: CPU being unregistered.
- *
- * Reverses register_cpu(): drops the node association, removes the
- * "online" control file, unregisters the device and clears the
- * per-cpu device cache entry.
- */
-void unregister_cpu(struct cpu *cpu)
-{
- int logical_cpu = cpu->dev.id;
-
- unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
-
- device_remove_file(&cpu->dev, &dev_attr_online);
-
- device_unregister(&cpu->dev);
- per_cpu(cpu_sys_devices, logical_cpu) = NULL;
- return;
-}
-
-#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
-static ssize_t cpu_probe_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- return arch_cpu_probe(buf, count);
-}
-
-static ssize_t cpu_release_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- return arch_cpu_release(buf, count);
-}
-
-static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
-static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
-#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
-
-#else /* ... !CONFIG_HOTPLUG_CPU */
-static inline void register_cpu_control(struct cpu *cpu)
-{
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
-#ifdef CONFIG_KEXEC
-#include <linux/kexec.h>
-
-/*
- * sysfs "crash_notes" show handler: report the physical address of
- * this CPU's crash notes buffer as a hex string (used by kexec/kdump
- * tooling).
- */
-static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- ssize_t rc;
- unsigned long long addr;
- int cpunum;
-
- cpunum = cpu->dev.id;
-
- /*
- * Might be reading other cpu's data based on which cpu read thread
- * has been scheduled. But cpu data (memory) is allocated once during
- * boot up and this data does not change there after. Hence this
- * operation should be safe. No locking required.
- */
- addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
- rc = sprintf(buf, "%Lx\n", addr);
- return rc;
-}
-static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);
-#endif
-
-/*
- * Print cpu online, possible, present, and system maps
- */
-
-struct cpu_attr {
- struct device_attribute attr;
- const struct cpumask *const * const map;
-};
-
-/*
- * Shared show handler for the online/possible/present map attributes:
- * render the cpumask referenced by the containing cpu_attr as a
- * newline-terminated CPU list.
- */
-static ssize_t show_cpus_attr(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
- int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map));
-
- buf[n++] = '\n';
- buf[n] = '\0';
- return n;
-}
-
-#define _CPU_ATTR(name, map) \
- { __ATTR(name, 0444, show_cpus_attr, NULL), map }
-
-/* Keep in sync with cpu_subsys_attrs */
-static struct cpu_attr cpu_attrs[] = {
- _CPU_ATTR(online, &cpu_online_mask),
- _CPU_ATTR(possible, &cpu_possible_mask),
- _CPU_ATTR(present, &cpu_present_mask),
-};
-
-/*
- * Print values for NR_CPUS and offlined cpus
- */
-static ssize_t print_cpus_kernel_max(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
- return n;
-}
-static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
-
-/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
-unsigned int total_cpus;
-
-/*
- * sysfs "offline" show handler: list CPUs that are possible but not
- * online, followed (when the arch publishes total_cpus) by the range
- * of CPU ids beyond nr_cpu_ids that the kernel cannot use.
- */
-static ssize_t print_cpus_offline(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int n = 0, len = PAGE_SIZE-2;
- cpumask_var_t offline;
-
- /* display offline cpus < nr_cpu_ids */
- if (!alloc_cpumask_var(&offline, GFP_KERNEL))
- return -ENOMEM;
- cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
- n = cpulist_scnprintf(buf, len, offline);
- free_cpumask_var(offline);
-
- /* display offline cpus >= nr_cpu_ids */
- if (total_cpus && nr_cpu_ids < total_cpus) {
- if (n && n < len)
- buf[n++] = ',';
-
- if (nr_cpu_ids == total_cpus-1)
- n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
- else
- n += snprintf(&buf[n], len - n, "%d-%d",
- nr_cpu_ids, total_cpus-1);
- }
-
- n += snprintf(&buf[n], len - n, "\n");
- return n;
-}
-static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
-
-static void cpu_device_release(struct device *dev)
-{
- /*
- * This is an empty function to prevent the driver core from spitting a
- * warning at us. Yes, I know this is directly opposite of what the
- * documentation for the driver core and kobjects say, and the author
- * of this code has already been publicly ridiculed for doing
- * something as foolish as this. However, at this point in time, it is
- * the only way to handle the issue of statically allocated cpu
- * devices. The different architectures will have their cpu device
- * code reworked to properly handle this in the near future, so this
- * function will then be changed to correctly free up the memory held
- * by the cpu device.
- *
- * Never copy this way of doing things, or you too will be made fun of
- * on the linux-kernel list, you have been warned.
- */
-}
-
-/*
- * register_cpu - Setup a sysfs device for a CPU.
- * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
- * sysfs for this CPU.
- * @num - CPU number to use when creating the device.
- *
- * Initialize and register the CPU device.
- */
-int __cpuinit register_cpu(struct cpu *cpu, int num)
-{
- int error;
-
- cpu->node_id = cpu_to_node(num);
- memset(&cpu->dev, 0x00, sizeof(struct device));
- cpu->dev.id = num;
- cpu->dev.bus = &cpu_subsys;
- cpu->dev.release = cpu_device_release;
-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
- /* Arch supplies modalias uevents; hook them into the shared bus. */
- cpu->dev.bus->uevent = arch_cpu_uevent;
-#endif
- error = device_register(&cpu->dev);
- if (!error && cpu->hotpluggable)
- register_cpu_control(cpu);
- if (!error)
- per_cpu(cpu_sys_devices, num) = &cpu->dev;
- if (!error)
- register_cpu_under_node(num, cpu_to_node(num));
-
-#ifdef CONFIG_KEXEC
- /*
- * NOTE(review): a crash_notes file-creation failure is returned to
- * the caller but leaves the device registered.
- */
- if (!error)
- error = device_create_file(&cpu->dev, &dev_attr_crash_notes);
-#endif
- return error;
-}
-
-/*
- * Return the registered sysfs device for @cpu, or NULL if @cpu is out
- * of range, not possible, or not (yet) registered.
- */
-struct device *get_cpu_device(unsigned cpu)
-{
- if (cpu < nr_cpu_ids && cpu_possible(cpu))
- return per_cpu(cpu_sys_devices, cpu);
- else
- return NULL;
-}
-EXPORT_SYMBOL_GPL(get_cpu_device);
-
-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
-static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
-#endif
-
-static struct attribute *cpu_root_attrs[] = {
-#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
- &dev_attr_probe.attr,
- &dev_attr_release.attr,
-#endif
- &cpu_attrs[0].attr.attr,
- &cpu_attrs[1].attr.attr,
- &cpu_attrs[2].attr.attr,
- &dev_attr_kernel_max.attr,
- &dev_attr_offline.attr,
-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
- &dev_attr_modalias.attr,
-#endif
- NULL
-};
-
-static struct attribute_group cpu_root_attr_group = {
- .attrs = cpu_root_attrs,
-};
-
-static const struct attribute_group *cpu_root_attr_groups[] = {
- &cpu_root_attr_group,
- NULL,
-};
-
-/* True when @cpu has a registered device flagged as hotpluggable. */
-bool cpu_is_hotpluggable(unsigned cpu)
-{
- struct device *dev = get_cpu_device(cpu);
- return dev && container_of(dev, struct cpu, dev)->hotpluggable;
-}
-EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
-
-#ifdef CONFIG_GENERIC_CPU_DEVICES
-static DEFINE_PER_CPU(struct cpu, cpu_devices);
-#endif
-
-/*
- * When the arch opts into CONFIG_GENERIC_CPU_DEVICES, register the
- * statically allocated per-cpu device for every possible CPU.  A
- * registration failure at this point is fatal.
- */
-static void __init cpu_dev_register_generic(void)
-{
-#ifdef CONFIG_GENERIC_CPU_DEVICES
- int i;
-
- for_each_possible_cpu(i) {
- if (register_cpu(&per_cpu(cpu_devices, i), i))
- panic("Failed to register CPU device");
- }
-#endif
-}
-
-/*
- * Boot-time setup of the "cpu" subsystem: register the subsystem with
- * its root attribute groups, register per-CPU devices for the generic
- * case, and (with SCHED_MC/SCHED_SMT) create the scheduler
- * power-savings sysfs entries.  Failures are fatal.
- */
-void __init cpu_dev_init(void)
-{
- if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
- panic("Failed to register CPU subsystem");
-
- cpu_dev_register_generic();
-
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- sched_create_sysfs_power_savings_entries(cpu_subsys.dev_root);
-#endif
-}
diff --git a/ANDROID_3.4.5/drivers/base/dd.c b/ANDROID_3.4.5/drivers/base/dd.c
deleted file mode 100644
index 1b1cbb57..00000000
--- a/ANDROID_3.4.5/drivers/base/dd.c
+++ /dev/null
@@ -1,577 +0,0 @@
-/*
- * drivers/base/dd.c - The core device/driver interactions.
- *
- * This file contains the (sometimes tricky) code that controls the
- * interactions between devices and drivers, which primarily includes
- * driver binding and unbinding.
- *
- * All of this code used to exist in drivers/base/bus.c, but was
- * relocated to here in the name of compartmentalization (since it wasn't
- * strictly code just for the 'struct bus_type').
- *
- * Copyright (c) 2002-5 Patrick Mochel
- * Copyright (c) 2002-3 Open Source Development Labs
- * Copyright (c) 2007-2009 Greg Kroah-Hartman <gregkh@suse.de>
- * Copyright (c) 2007-2009 Novell Inc.
- *
- * This file is released under the GPLv2
- */
-
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/kthread.h>
-#include <linux/wait.h>
-#include <linux/async.h>
-#include <linux/pm_runtime.h>
-
-#include "base.h"
-#include "power/power.h"
-
-/*
- * Deferred Probe infrastructure.
- *
- * Sometimes driver probe order matters, but the kernel doesn't always have
- * dependency information which means some drivers will get probed before a
- * resource it depends on is available. For example, an SDHCI driver may
- * first need a GPIO line from an i2c GPIO controller before it can be
- * initialized. If a required resource is not available yet, a driver can
- * request probing to be deferred by returning -EPROBE_DEFER from its probe hook
- *
- * Deferred probe maintains two lists of devices, a pending list and an active
- * list. A driver returning -EPROBE_DEFER causes the device to be added to the
- * pending list. A successful driver probe will trigger moving all devices
- * from the pending to the active list so that the workqueue will eventually
- * retry them.
- *
- * The deferred_probe_mutex must be held any time the deferred_probe_*_list
- * of the (struct device*)->p->deferred_probe pointers are manipulated
- */
-static DEFINE_MUTEX(deferred_probe_mutex);
-static LIST_HEAD(deferred_probe_pending_list);
-static LIST_HEAD(deferred_probe_active_list);
-static struct workqueue_struct *deferred_wq;
-
-/**
- * deferred_probe_work_func() - Retry probing devices in the active list.
- */
-static void deferred_probe_work_func(struct work_struct *work)
-{
- struct device *dev;
- struct device_private *private;
- /*
- * This block processes every device in the deferred 'active' list.
- * Each device is removed from the active list and passed to
- * bus_probe_device() to re-attempt the probe. The loop continues
- * until every device in the active list is removed and retried.
- *
- * Note: Once the device is removed from the list and the mutex is
- * released, it is possible for the device to get freed by another
- * thread and cause an illegal pointer dereference. This code uses
- * get/put_device() to ensure the device structure cannot disappear
- * from under our feet.
- */
- mutex_lock(&deferred_probe_mutex);
- while (!list_empty(&deferred_probe_active_list)) {
- private = list_first_entry(&deferred_probe_active_list,
- typeof(*dev->p), deferred_probe);
- dev = private->device;
- list_del_init(&private->deferred_probe);
-
- get_device(dev);
-
- /*
- * Drop the mutex while probing each device; the probe path may
- * manipulate the deferred list
- */
- mutex_unlock(&deferred_probe_mutex);
- dev_dbg(dev, "Retrying from deferred list\n");
- bus_probe_device(dev);
- mutex_lock(&deferred_probe_mutex);
-
- put_device(dev);
- }
- mutex_unlock(&deferred_probe_mutex);
-}
-
-/* Queue @dev on the deferred-probe pending list (no-op if already listed). */
-static void driver_deferred_probe_add(struct device *dev)
-{
- mutex_lock(&deferred_probe_mutex);
- if (list_empty(&dev->p->deferred_probe)) {
- dev_dbg(dev, "Added to deferred list\n");
- list_add(&dev->p->deferred_probe, &deferred_probe_pending_list);
- }
- mutex_unlock(&deferred_probe_mutex);
-}
-
-/* Remove @dev from whichever deferred-probe list it sits on, if any. */
-void driver_deferred_probe_del(struct device *dev)
-{
- mutex_lock(&deferred_probe_mutex);
- if (!list_empty(&dev->p->deferred_probe)) {
- dev_dbg(dev, "Removed from deferred list\n");
- list_del_init(&dev->p->deferred_probe);
- }
- mutex_unlock(&deferred_probe_mutex);
-}
-
-static bool driver_deferred_probe_enable = false;
-/**
- * driver_deferred_probe_trigger() - Kick off re-probing deferred devices
- *
- * This functions moves all devices from the pending list to the active
- * list and schedules the deferred probe workqueue to process them. It
- * should be called anytime a driver is successfully bound to a device.
- */
-static void driver_deferred_probe_trigger(void)
-{
- if (!driver_deferred_probe_enable)
- return;
-
- /*
- * A successful probe means that all the devices in the pending list
- * should be triggered to be reprobed. Move all the deferred devices
- * into the active list so they can be retried by the workqueue
- */
- mutex_lock(&deferred_probe_mutex);
- list_splice_tail_init(&deferred_probe_pending_list,
- &deferred_probe_active_list);
- mutex_unlock(&deferred_probe_mutex);
-
- /*
- * Kick the re-probe thread. It may already be scheduled, but it is
- * safe to kick it again.
- */
- queue_work(deferred_wq, &deferred_probe_work);
-}
-
-/**
- * deferred_probe_initcall() - Enable probing of deferred devices
- *
- * We don't want to get in the way when the bulk of drivers are getting probed.
- * Instead, this initcall makes sure that deferred probing is delayed until
- * late_initcall time.
- */
-static int deferred_probe_initcall(void)
-{
- deferred_wq = create_singlethread_workqueue("deferwq");
- if (WARN_ON(!deferred_wq))
- return -ENOMEM;
-
- driver_deferred_probe_enable = true;
- driver_deferred_probe_trigger();
- return 0;
-}
-late_initcall(deferred_probe_initcall);
-
-/*
- * Record that @dev is now bound to dev->driver: add it to the driver's
- * device klist, take it off the deferred-probe lists, trigger a retry
- * of the pending deferred devices, and send BUS_NOTIFY_BOUND_DRIVER to
- * the bus notifier chain.  Warns and bails if the device is already on
- * a driver's klist.
- */
-static void driver_bound(struct device *dev)
-{
- if (klist_node_attached(&dev->p->knode_driver)) {
- printk(KERN_WARNING "%s: device %s already bound\n",
- __func__, kobject_name(&dev->kobj));
- return;
- }
-
- pr_debug("driver: '%s': %s: bound to device '%s'\n", dev_name(dev),
- __func__, dev->driver->name);
-
- klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
-
- /*
- * Make sure the device is no longer in one of the deferred lists and
- * kick off retrying all pending devices
- */
- driver_deferred_probe_del(dev);
- driver_deferred_probe_trigger();
-
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_BOUND_DRIVER, dev);
-}
-
-/*
- * Send BUS_NOTIFY_BIND_DRIVER and create the mutual sysfs links
- * between @dev and its driver.  If the device-side "driver" link
- * fails, the driver-side link is rolled back.  Returns 0 or the
- * sysfs_create_link() error.
- */
-static int driver_sysfs_add(struct device *dev)
-{
- int ret;
-
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_BIND_DRIVER, dev);
-
- ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
- kobject_name(&dev->kobj));
- if (ret == 0) {
- ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
- "driver");
- if (ret)
- sysfs_remove_link(&dev->driver->p->kobj,
- kobject_name(&dev->kobj));
- }
- return ret;
-}
-
-/* Tear down the sysfs links created by driver_sysfs_add(), if bound. */
-static void driver_sysfs_remove(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
-
- if (drv) {
- sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
- sysfs_remove_link(&dev->kobj, "driver");
- }
-}
-
-/**
- * device_bind_driver - bind a driver to one device.
- * @dev: device.
- *
- * Allow manual attachment of a driver to a device.
- * Caller must have already set @dev->driver.
- *
- * Note that this does not modify the bus reference count
- * nor take the bus's rwsem. Please verify those are accounted
- * for before calling this. (It is ok to call with no other effort
- * from a driver's probe() method.)
- *
- * This function must be called with the device lock held.
- */
-int device_bind_driver(struct device *dev)
-{
- int ret;
-
- ret = driver_sysfs_add(dev);
- if (!ret)
- driver_bound(dev);
- return ret;
-}
-EXPORT_SYMBOL_GPL(device_bind_driver);
-
-static atomic_t probe_count = ATOMIC_INIT(0);
-static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
-
-/*
- * Attempt to bind @dev to @drv: set dev->driver, create the sysfs
- * links, then call the bus probe hook (preferred) or the driver's own
- * probe.  Returns 1 on successful binding, 0 otherwise.  Probe errors
- * are absorbed (after logging or scheduling a deferred retry for
- * -EPROBE_DEFER) so that other drivers can still try the device.  The
- * probe_count/probe_waitqueue pair lets wait_for_device_probe() wait
- * for in-flight probes.
- */
-static int really_probe(struct device *dev, struct device_driver *drv)
-{
- int ret = 0;
-
- atomic_inc(&probe_count);
- pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
- drv->bus->name, __func__, drv->name, dev_name(dev));
- WARN_ON(!list_empty(&dev->devres_head));
-
- dev->driver = drv;
- if (driver_sysfs_add(dev)) {
- printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
- __func__, dev_name(dev));
- goto probe_failed;
- }
-
- if (dev->bus->probe) {
- ret = dev->bus->probe(dev);
- if (ret)
- goto probe_failed;
- } else if (drv->probe) {
- ret = drv->probe(dev);
- if (ret)
- goto probe_failed;
- }
-
- driver_bound(dev);
- ret = 1;
- pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
- drv->bus->name, __func__, dev_name(dev), drv->name);
- goto done;
-
-probe_failed:
- /* Undo partial binding: release devres, drop sysfs links, unbind. */
- devres_release_all(dev);
- driver_sysfs_remove(dev);
- dev->driver = NULL;
-
- if (ret == -EPROBE_DEFER) {
- /* Driver requested deferred probing */
- dev_info(dev, "Driver %s requests probe deferral\n", drv->name);
- driver_deferred_probe_add(dev);
- } else if (ret != -ENODEV && ret != -ENXIO) {
- /* driver matched but the probe failed */
- printk(KERN_WARNING
- "%s: probe of %s failed with error %d\n",
- drv->name, dev_name(dev), ret);
- } else {
- pr_debug("%s: probe of %s rejects match %d\n",
- drv->name, dev_name(dev), ret);
- }
- /*
- * Ignore errors returned by ->probe so that the next driver can try
- * its luck.
- */
- ret = 0;
-done:
- atomic_dec(&probe_count);
- wake_up(&probe_waitqueue);
- return ret;
-}
-
-/**
- * driver_probe_done
- * Determine if the probe sequence is finished or not.
- *
- * Should somehow figure out how to use a semaphore, not an atomic variable...
- */
-int driver_probe_done(void)
-{
- pr_debug("%s: probe_count = %d\n", __func__,
- atomic_read(&probe_count));
- if (atomic_read(&probe_count))
- return -EBUSY;
- return 0;
-}
-
-/**
- * wait_for_device_probe
- * Wait for device probing to be completed.
- */
-void wait_for_device_probe(void)
-{
- /* wait for the known devices to complete their probing */
- wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
- async_synchronize_full();
-}
-EXPORT_SYMBOL_GPL(wait_for_device_probe);
-
-/**
- * driver_probe_device - attempt to bind device & driver together
- * @drv: driver to bind a device to
- * @dev: device to try to bind to the driver
- *
- * This function returns -ENODEV if the device is not registered,
- * 1 if the device is bound successfully and 0 otherwise.
- *
- * This function must be called with @dev lock held. When called for a
- * USB interface, @dev->parent lock must be held as well.
- */
-int driver_probe_device(struct device_driver *drv, struct device *dev)
-{
- int ret = 0;
-
- if (!device_is_registered(dev))
- return -ENODEV;
-
- pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
- drv->bus->name, __func__, dev_name(dev), drv->name);
-
- pm_runtime_get_noresume(dev);
- pm_runtime_barrier(dev);
- ret = really_probe(dev, drv);
- pm_runtime_put_sync(dev);
-
- return ret;
-}
-
-/*
- * bus_for_each_drv() callback for device_attach(): probe the device
- * (@data) against @drv if the bus match succeeds.
- */
-static int __device_attach(struct device_driver *drv, void *data)
-{
- struct device *dev = data;
-
- if (!driver_match_device(drv, dev))
- return 0;
-
- return driver_probe_device(drv, dev);
-}
-
-/**
- * device_attach - try to attach device to a driver.
- * @dev: device.
- *
- * Walk the list of drivers that the bus has and call
- * driver_probe_device() for each pair. If a compatible
- * pair is found, break out and return.
- *
- * Returns 1 if the device was bound to a driver;
- * 0 if no matching driver was found;
- * -ENODEV if the device is not registered.
- *
- * When called for a USB interface, @dev->parent lock must be held.
- */
-int device_attach(struct device *dev)
-{
- int ret = 0;
-
- device_lock(dev);
- if (dev->driver) {
- /* A driver was already assigned; just (re)bind it. */
- if (klist_node_attached(&dev->p->knode_driver)) {
- ret = 1;
- goto out_unlock;
- }
- ret = device_bind_driver(dev);
- if (ret == 0)
- ret = 1;
- else {
- dev->driver = NULL;
- ret = 0;
- }
- } else {
- /* Block runtime suspend while walking the bus's driver list. */
- pm_runtime_get_noresume(dev);
- ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach);
- pm_runtime_put_sync(dev);
- }
-out_unlock:
- device_unlock(dev);
- return ret;
-}
-EXPORT_SYMBOL_GPL(device_attach);
-
-/* bus_for_each_dev() callback for driver_attach(). */
-static int __driver_attach(struct device *dev, void *data)
-{
- struct device_driver *drv = data;
-
- /*
- * Lock device and try to bind to it. We drop the error
- * here and always return 0, because we need to keep trying
- * to bind to devices and some drivers will return an error
- * simply if it didn't support the device.
- *
- * driver_probe_device() will spit a warning if there
- * is an error.
- */
-
- if (!driver_match_device(drv, dev))
- return 0;
-
- if (dev->parent) /* Needed for USB */
- device_lock(dev->parent);
- device_lock(dev);
- if (!dev->driver)
- driver_probe_device(drv, dev);
- device_unlock(dev);
- if (dev->parent)
- device_unlock(dev->parent);
-
- return 0;
-}
-
-/**
- * driver_attach - try to bind driver to devices.
- * @drv: driver.
- *
- * Walk the list of devices that the bus has on it and try to
- * match the driver with each one. If driver_probe_device()
- * returns 0 and the @dev->driver is set, we've found a
- * compatible pair.
- */
-int driver_attach(struct device_driver *drv)
-{
- return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
-}
-EXPORT_SYMBOL_GPL(driver_attach);
-
-/*
- * __device_release_driver() must be called with @dev lock held.
- * When called for a USB interface, @dev->parent lock must be held as well.
- */
-/*
- * __device_release_driver() must be called with @dev lock held.
- * When called for a USB interface, @dev->parent lock must be held as well.
- *
- * Unbinds @dev from its driver: removes the sysfs links, sends
- * BUS_NOTIFY_UNBIND_DRIVER, runs the bus (preferred) or driver remove
- * hook, releases all devres entries, clears dev->driver and finally
- * sends BUS_NOTIFY_UNBOUND_DRIVER.  The device is runtime-resumed
- * while the unbind notification is delivered.
- */
-static void __device_release_driver(struct device *dev)
-{
- struct device_driver *drv;
-
- drv = dev->driver;
- if (drv) {
- pm_runtime_get_sync(dev);
-
- driver_sysfs_remove(dev);
-
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_UNBIND_DRIVER,
- dev);
-
- pm_runtime_put_sync(dev);
-
- if (dev->bus && dev->bus->remove)
- dev->bus->remove(dev);
- else if (drv->remove)
- drv->remove(dev);
- devres_release_all(dev);
- dev->driver = NULL;
- klist_remove(&dev->p->knode_driver);
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_UNBOUND_DRIVER,
- dev);
-
- }
-}
-
-/**
- * device_release_driver - manually detach device from driver.
- * @dev: device.
- *
- * Manually detach device from driver.
- * When called for a USB interface, @dev->parent lock must be held.
- */
-void device_release_driver(struct device *dev)
-{
- /*
- * If anyone calls device_release_driver() recursively from
- * within their ->remove callback for the same device, they
- * will deadlock right here.
- */
- device_lock(dev);
- __device_release_driver(dev);
- device_unlock(dev);
-}
-EXPORT_SYMBOL_GPL(device_release_driver);
-
-/**
- * driver_detach - detach driver from all devices it controls.
- * @drv: driver.
- */
-/**
- * driver_detach - detach driver from all devices it controls.
- * @drv: driver.
- *
- * Repeatedly takes the last device on the driver's klist, pins it with
- * get_device() and drops the klist spinlock before taking the device
- * (and, for USB, parent) locks to release the driver.
- */
-void driver_detach(struct device_driver *drv)
-{
- struct device_private *dev_prv;
- struct device *dev;
-
- for (;;) {
- spin_lock(&drv->p->klist_devices.k_lock);
- if (list_empty(&drv->p->klist_devices.k_list)) {
- spin_unlock(&drv->p->klist_devices.k_lock);
- break;
- }
- dev_prv = list_entry(drv->p->klist_devices.k_list.prev,
- struct device_private,
- knode_driver.n_node);
- dev = dev_prv->device;
- get_device(dev);
- spin_unlock(&drv->p->klist_devices.k_lock);
-
- if (dev->parent) /* Needed for USB */
- device_lock(dev->parent);
- device_lock(dev);
- /* Re-check under the lock; the device may have been rebound. */
- if (dev->driver == drv)
- __device_release_driver(dev);
- device_unlock(dev);
- if (dev->parent)
- device_unlock(dev->parent);
- put_device(dev);
- }
-}
-
-/*
- * These exports can't be _GPL due to .h files using this within them, and it
- * might break something that was previously working...
- */
-/* Return the driver_data stored in dev->p, or NULL when unset. */
-void *dev_get_drvdata(const struct device *dev)
-{
- if (dev && dev->p)
- return dev->p->driver_data;
- return NULL;
-}
-EXPORT_SYMBOL(dev_get_drvdata);
-
-/*
- * Store @data as the device's driver_data, lazily allocating the
- * device's private struct on first use.  Returns 0 on success or the
- * device_private_init() error.
- */
-int dev_set_drvdata(struct device *dev, void *data)
-{
- int error;
-
- if (!dev->p) {
- error = device_private_init(dev);
- if (error)
- return error;
- }
- dev->p->driver_data = data;
- return 0;
-}
-EXPORT_SYMBOL(dev_set_drvdata);
diff --git a/ANDROID_3.4.5/drivers/base/devres.c b/ANDROID_3.4.5/drivers/base/devres.c
deleted file mode 100644
index 524bf96c..00000000
--- a/ANDROID_3.4.5/drivers/base/devres.c
+++ /dev/null
@@ -1,651 +0,0 @@
-/*
- * drivers/base/devres.c - device resource management
- *
- * Copyright (c) 2006 SUSE Linux Products GmbH
- * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
- *
- * This file is released under the GPLv2.
- */
-
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include "base.h"
-
-struct devres_node {
- struct list_head entry;
- dr_release_t release;
-#ifdef CONFIG_DEBUG_DEVRES
- const char *name;
- size_t size;
-#endif
-};
-
-struct devres {
- struct devres_node node;
- /* -- 3 pointers */
- unsigned long long data[]; /* guarantee ull alignment */
-};
-
-struct devres_group {
- struct devres_node node[2];
- void *id;
- int color;
- /* -- 8 pointers */
-};
-
-#ifdef CONFIG_DEBUG_DEVRES
-static int log_devres = 0;
-module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);
-
-static void set_node_dbginfo(struct devres_node *node, const char *name,
- size_t size)
-{
- node->name = name;
- node->size = size;
-}
-
-static void devres_log(struct device *dev, struct devres_node *node,
- const char *op)
-{
- if (unlikely(log_devres))
- dev_printk(KERN_ERR, dev, "DEVRES %3s %p %s (%lu bytes)\n",
- op, node, node->name, (unsigned long)node->size);
-}
-#else /* CONFIG_DEBUG_DEVRES */
-#define set_node_dbginfo(node, n, s) do {} while (0)
-#define devres_log(dev, node, op) do {} while (0)
-#endif /* CONFIG_DEBUG_DEVRES */
-
-/*
- * Release functions for devres group. These callbacks are used only
- * for identification.
- */
-static void group_open_release(struct device *dev, void *res)
-{
- /* noop */
-}
-
-static void group_close_release(struct device *dev, void *res)
-{
- /* noop */
-}
-
-/*
- * Map a devres_node back to its enclosing devres_group by recognizing
- * the group marker release callbacks; returns NULL for ordinary
- * (non-group) nodes.
- */
-static struct devres_group * node_to_group(struct devres_node *node)
-{
- if (node->release == &group_open_release)
- return container_of(node, struct devres_group, node[0]);
- if (node->release == &group_close_release)
- return container_of(node, struct devres_group, node[1]);
- return NULL;
-}
-
-/*
- * Allocate a zeroed devres with @size payload bytes, associated with
- * the @release callback.  Returns NULL on allocation failure.
- */
-static __always_inline struct devres * alloc_dr(dr_release_t release,
- size_t size, gfp_t gfp)
-{
- size_t tot_size = sizeof(struct devres) + size;
- struct devres *dr;
-
- dr = kmalloc_track_caller(tot_size, gfp);
- if (unlikely(!dr))
- return NULL;
-
- memset(dr, 0, tot_size);
- INIT_LIST_HEAD(&dr->node.entry);
- dr->node.release = release;
- return dr;
-}
-
-/*
- * Append @node to the device's devres list.  Callers hold
- * dev->devres_lock; the node must not already be on a list.
- */
-static void add_dr(struct device *dev, struct devres_node *node)
-{
- devres_log(dev, node, "ADD");
- BUG_ON(!list_empty(&node->entry));
- list_add_tail(&node->entry, &dev->devres_head);
-}
-
-#ifdef CONFIG_DEBUG_DEVRES
-void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
- const char *name)
-{
- struct devres *dr;
-
- dr = alloc_dr(release, size, gfp);
- if (unlikely(!dr))
- return NULL;
- set_node_dbginfo(&dr->node, name, size);
- return dr->data;
-}
-EXPORT_SYMBOL_GPL(__devres_alloc);
-#else
-/**
- * devres_alloc - Allocate device resource data
- * @release: Release function devres will be associated with
- * @size: Allocation size
- * @gfp: Allocation flags
- *
- * Allocate devres of @size bytes. The allocated area is zeroed, then
- * associated with @release. The returned pointer can be passed to
- * other devres_*() functions.
- *
- * RETURNS:
- * Pointer to allocated devres on success, NULL on failure.
- */
-void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
-{
- struct devres *dr;
-
- dr = alloc_dr(release, size, gfp);
- if (unlikely(!dr))
- return NULL;
- return dr->data;
-}
-EXPORT_SYMBOL_GPL(devres_alloc);
-#endif
-
-/**
- * devres_free - Free device resource data
- * @res: Pointer to devres data to free
- *
- * Free devres created with devres_alloc().
- */
-void devres_free(void *res)
-{
- if (res) {
- struct devres *dr = container_of(res, struct devres, data);
-
- BUG_ON(!list_empty(&dr->node.entry));
- kfree(dr);
- }
-}
-EXPORT_SYMBOL_GPL(devres_free);
-
-/**
- * devres_add - Register device resource
- * @dev: Device to add resource to
- * @res: Resource to register
- *
- * Register devres @res to @dev. @res should have been allocated
- * using devres_alloc(). On driver detach, the associated release
- * function will be invoked and devres will be freed automatically.
- */
-void devres_add(struct device *dev, void *res)
-{
- struct devres *dr = container_of(res, struct devres, data);
- unsigned long flags;
-
- spin_lock_irqsave(&dev->devres_lock, flags);
- add_dr(dev, &dr->node);
- spin_unlock_irqrestore(&dev->devres_lock, flags);
-}
-EXPORT_SYMBOL_GPL(devres_add);
-
-/*
- * Find the most recently added devres of @dev associated with @release
- * for which @match (if non-NULL) returns true.  The list is walked in
- * reverse so the newest entry wins; callers hold dev->devres_lock.
- */
-static struct devres *find_dr(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data)
-{
- struct devres_node *node;
-
- list_for_each_entry_reverse(node, &dev->devres_head, entry) {
- struct devres *dr = container_of(node, struct devres, node);
-
- if (node->release != release)
- continue;
- if (match && !match(dev, dr->data, match_data))
- continue;
- return dr;
- }
-
- return NULL;
-}
-
-/**
- * devres_find - Find device resource
- * @dev: Device to lookup resource from
- * @release: Look for resources associated with this release function
- * @match: Match function (optional)
- * @match_data: Data for the match function
- *
- * Find the latest devres of @dev which is associated with @release
- * and for which @match returns 1. If @match is NULL, it's considered
- * to match all.
- *
- * RETURNS:
- * Pointer to found devres, NULL if not found.
- */
-void * devres_find(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data)
-{
- struct devres *dr;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->devres_lock, flags);
- dr = find_dr(dev, release, match, match_data);
- spin_unlock_irqrestore(&dev->devres_lock, flags);
-
- if (dr)
- return dr->data;
- return NULL;
-}
-EXPORT_SYMBOL_GPL(devres_find);
-
-/**
- * devres_get - Find devres, if non-existent, add one atomically
- * @dev: Device to lookup or add devres for
- * @new_res: Pointer to new initialized devres to add if not found
- * @match: Match function (optional)
- * @match_data: Data for the match function
- *
- * Find the latest devres of @dev which has the same release function
- * as @new_res and for which @match return 1. If found, @new_res is
- * freed; otherwise, @new_res is added atomically.
- *
- * RETURNS:
- * Pointer to found or added devres.
- */
-void * devres_get(struct device *dev, void *new_res,
- dr_match_t match, void *match_data)
-{
- struct devres *new_dr = container_of(new_res, struct devres, data);
- struct devres *dr;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->devres_lock, flags);
- dr = find_dr(dev, new_dr->node.release, match, match_data);
- if (!dr) {
- add_dr(dev, &new_dr->node);
- dr = new_dr;
- new_dr = NULL;
- }
- spin_unlock_irqrestore(&dev->devres_lock, flags);
- devres_free(new_dr);
-
- return dr->data;
-}
-EXPORT_SYMBOL_GPL(devres_get);
-
-/**
- * devres_remove - Find a device resource and remove it
- * @dev: Device to find resource from
- * @release: Look for resources associated with this release function
- * @match: Match function (optional)
- * @match_data: Data for the match function
- *
- * Find the latest devres of @dev associated with @release and for
- * which @match returns 1. If @match is NULL, it's considered to
- * match all. If found, the resource is removed atomically and
- * returned.
- *
- * RETURNS:
- * Pointer to removed devres on success, NULL if not found.
- */
-void * devres_remove(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data)
-{
- struct devres *dr;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->devres_lock, flags);
- dr = find_dr(dev, release, match, match_data);
- if (dr) {
- list_del_init(&dr->node.entry);
- devres_log(dev, &dr->node, "REM");
- }
- spin_unlock_irqrestore(&dev->devres_lock, flags);
-
- if (dr)
- return dr->data;
- return NULL;
-}
-EXPORT_SYMBOL_GPL(devres_remove);
-
-/**
- * devres_destroy - Find a device resource and destroy it
- * @dev: Device to find resource from
- * @release: Look for resources associated with this release function
- * @match: Match function (optional)
- * @match_data: Data for the match function
- *
- * Find the latest devres of @dev associated with @release and for
- * which @match returns 1. If @match is NULL, it's considered to
- * match all. If found, the resource is removed atomically and freed.
- *
- * RETURNS:
- * 0 if devres is found and freed, -ENOENT if not found.
- */
-int devres_destroy(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data)
-{
- void *res;
-
- res = devres_remove(dev, release, match, match_data);
- if (unlikely(!res))
- return -ENOENT;
-
- devres_free(res);
- return 0;
-}
-EXPORT_SYMBOL_GPL(devres_destroy);
-
/*
 * Unlink every devres entry in [@first, @end) onto @todo, together with
 * any devres group that lies wholly inside the range.  Called with
 * dev->devres_lock held.  Returns the number of regular (non-group)
 * entries moved; open groups overlapping the range boundary are left in
 * place.
 */
static int remove_nodes(struct device *dev,
                        struct list_head *first, struct list_head *end,
                        struct list_head *todo)
{
        int cnt = 0, nr_groups = 0;
        struct list_head *cur;

        /* First pass - move normal devres entries to @todo and clear
         * devres_group colors.
         */
        cur = first;
        while (cur != end) {
                struct devres_node *node;
                struct devres_group *grp;

                node = list_entry(cur, struct devres_node, entry);
                cur = cur->next;

                grp = node_to_group(node);
                if (grp) {
                        /* clear color of group markers in the first pass */
                        grp->color = 0;
                        nr_groups++;
                } else {
                        /* regular devres entry */
                        if (&node->entry == first)
                                first = first->next;    /* keep @first valid for pass 2 */
                        list_move_tail(&node->entry, todo);
                        cnt++;
                }
        }

        if (!nr_groups)
                return cnt;

        /* Second pass - Scan groups and color them.  A group gets
         * color value of two iff the group is wholly contained in
         * [cur, end).  That is, for a closed group, both opening and
         * closing markers should be in the range, while just the
         * opening marker is enough for an open group.
         */
        cur = first;
        while (cur != end) {
                struct devres_node *node;
                struct devres_group *grp;

                node = list_entry(cur, struct devres_node, entry);
                cur = cur->next;

                grp = node_to_group(node);
                /* only group markers remain in the range after pass 1 */
                BUG_ON(!grp || list_empty(&grp->node[0].entry));

                grp->color++;
                if (list_empty(&grp->node[1].entry))
                        grp->color++;   /* open group: opening marker suffices */

                BUG_ON(grp->color <= 0 || grp->color > 2);
                if (grp->color == 2) {
                        /* No need to update cur or end.  The removed
                         * nodes are always before both.
                         */
                        list_move_tail(&grp->node[0].entry, todo);
                        list_del_init(&grp->node[1].entry);
                }
        }

        return cnt;
}
-
/*
 * Remove every entry in [@first, @end) and invoke its release callback.
 * Entered with dev->devres_lock held (irqs saved in @flags); the lock is
 * dropped before the callbacks run, since release functions may sleep.
 * Returns the number of regular devres entries released.
 */
static int release_nodes(struct device *dev, struct list_head *first,
                         struct list_head *end, unsigned long flags)
        __releases(&dev->devres_lock)
{
        LIST_HEAD(todo);
        int cnt;
        struct devres *dr, *tmp;

        cnt = remove_nodes(dev, first, end, &todo);

        /* entries are now private to us; callbacks run unlocked */
        spin_unlock_irqrestore(&dev->devres_lock, flags);

        /* Release.  Note that both devres and devres_group are
         * handled as devres in the following loop.  This is safe.
         */
        list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
                /* reverse order: release newest resources first */
                devres_log(dev, &dr->node, "REL");
                dr->node.release(dev, dr->data);
                kfree(dr);
        }

        return cnt;
}
-
/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev.  This function is
 * called on driver detach.
 *
 * RETURNS:
 * Number of released regular resources, -ENODEV if @dev looks
 * uninitialized.
 */
int devres_release_all(struct device *dev)
{
        unsigned long flags;

        /* Looks like an uninitialized device structure */
        if (WARN_ON(dev->devres_head.next == NULL))
                return -ENODEV;
        spin_lock_irqsave(&dev->devres_lock, flags);
        /* release_nodes() drops devres_lock before running callbacks */
        return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
                             flags);
}
-
-/**
- * devres_open_group - Open a new devres group
- * @dev: Device to open devres group for
- * @id: Separator ID
- * @gfp: Allocation flags
- *
- * Open a new devres group for @dev with @id. For @id, using a
- * pointer to an object which won't be used for another group is
- * recommended. If @id is NULL, address-wise unique ID is created.
- *
- * RETURNS:
- * ID of the new group, NULL on failure.
- */
-void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
-{
- struct devres_group *grp;
- unsigned long flags;
-
- grp = kmalloc(sizeof(*grp), gfp);
- if (unlikely(!grp))
- return NULL;
-
- grp->node[0].release = &group_open_release;
- grp->node[1].release = &group_close_release;
- INIT_LIST_HEAD(&grp->node[0].entry);
- INIT_LIST_HEAD(&grp->node[1].entry);
- set_node_dbginfo(&grp->node[0], "grp<", 0);
- set_node_dbginfo(&grp->node[1], "grp>", 0);
- grp->id = grp;
- if (id)
- grp->id = id;
-
- spin_lock_irqsave(&dev->devres_lock, flags);
- add_dr(dev, &grp->node[0]);
- spin_unlock_irqrestore(&dev->devres_lock, flags);
- return grp->id;
-}
-EXPORT_SYMBOL_GPL(devres_open_group);
-
-/* Find devres group with ID @id. If @id is NULL, look for the latest. */
-static struct devres_group * find_group(struct device *dev, void *id)
-{
- struct devres_node *node;
-
- list_for_each_entry_reverse(node, &dev->devres_head, entry) {
- struct devres_group *grp;
-
- if (node->release != &group_open_release)
- continue;
-
- grp = container_of(node, struct devres_group, node[0]);
-
- if (id) {
- if (grp->id == id)
- return grp;
- } else if (list_empty(&grp->node[1].entry))
- return grp;
- }
-
- return NULL;
-}
-
-/**
- * devres_close_group - Close a devres group
- * @dev: Device to close devres group for
- * @id: ID of target group, can be NULL
- *
- * Close the group identified by @id. If @id is NULL, the latest open
- * group is selected.
- */
-void devres_close_group(struct device *dev, void *id)
-{
- struct devres_group *grp;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->devres_lock, flags);
-
- grp = find_group(dev, id);
- if (grp)
- add_dr(dev, &grp->node[1]);
- else
- WARN_ON(1);
-
- spin_unlock_irqrestore(&dev->devres_lock, flags);
-}
-EXPORT_SYMBOL_GPL(devres_close_group);
-
/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id.  If @id is NULL, the latest
 * open group is selected.  Note that removing a group doesn't affect
 * any other resources.
 */
void devres_remove_group(struct device *dev, void *id)
{
        struct devres_group *grp;
        unsigned long flags;

        spin_lock_irqsave(&dev->devres_lock, flags);

        grp = find_group(dev, id);
        if (grp) {
                /* drop both markers; resources inside the group stay put */
                list_del_init(&grp->node[0].entry);
                list_del_init(&grp->node[1].entry);
                devres_log(dev, &grp->node[0], "REM");
        } else
                WARN_ON(1);

        spin_unlock_irqrestore(&dev->devres_lock, flags);

        /* grp may be NULL here; kfree(NULL) is a no-op */
        kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);
-
/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id.  If @id is
 * NULL, the latest open group is selected.  The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
        struct devres_group *grp;
        unsigned long flags;
        int cnt = 0;

        spin_lock_irqsave(&dev->devres_lock, flags);

        grp = find_group(dev, id);
        if (grp) {
                struct list_head *first = &grp->node[0].entry;
                struct list_head *end = &dev->devres_head;

                /* closed group: stop just past the closing marker */
                if (!list_empty(&grp->node[1].entry))
                        end = grp->node[1].entry.next;

                /* release_nodes() drops devres_lock for us */
                cnt = release_nodes(dev, first, end, flags);
        } else {
                WARN_ON(1);
                spin_unlock_irqrestore(&dev->devres_lock, flags);
        }

        return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);
-
/*
 * Managed kzalloc/kfree
 */
/* Release callback for devm_kzalloc() entries: the devres core frees
 * the containing allocation itself, so nothing is left to do here. */
static void devm_kzalloc_release(struct device *dev, void *res)
{
        /* noop */
}

/* Match callback: an entry matches when its data area is exactly the
 * pointer the caller handed to devm_kfree(). */
static int devm_kzalloc_match(struct device *dev, void *res, void *data)
{
        return res == data;
}
-
-/**
- * devm_kzalloc - Resource-managed kzalloc
- * @dev: Device to allocate memory for
- * @size: Allocation size
- * @gfp: Allocation gfp flags
- *
- * Managed kzalloc. Memory allocated with this function is
- * automatically freed on driver detach. Like all other devres
- * resources, guaranteed alignment is unsigned long long.
- *
- * RETURNS:
- * Pointer to allocated memory on success, NULL on failure.
- */
-void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
-{
- struct devres *dr;
-
- /* use raw alloc_dr for kmalloc caller tracing */
- dr = alloc_dr(devm_kzalloc_release, size, gfp);
- if (unlikely(!dr))
- return NULL;
-
- set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
- devres_add(dev, dr->data);
- return dr->data;
-}
-EXPORT_SYMBOL_GPL(devm_kzalloc);
-
-/**
- * devm_kfree - Resource-managed kfree
- * @dev: Device this memory belongs to
- * @p: Memory to free
- *
- * Free memory allocated with devm_kzalloc().
- */
-void devm_kfree(struct device *dev, void *p)
-{
- int rc;
-
- rc = devres_destroy(dev, devm_kzalloc_release, devm_kzalloc_match, p);
- WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_kfree);
diff --git a/ANDROID_3.4.5/drivers/base/devtmpfs.c b/ANDROID_3.4.5/drivers/base/devtmpfs.c
deleted file mode 100644
index 8493536e..00000000
--- a/ANDROID_3.4.5/drivers/base/devtmpfs.c
+++ /dev/null
@@ -1,455 +0,0 @@
-/*
- * devtmpfs - kernel-maintained tmpfs-based /dev
- *
- * Copyright (C) 2009, Kay Sievers <kay.sievers@vrfy.org>
- *
- * During bootup, before any driver core device is registered,
- * devtmpfs, a tmpfs-based filesystem is created. Every driver-core
- * device which requests a device node, will add a node in this
- * filesystem.
- * By default, all devices are named after the the name of the
- * device, owned by root and have a default mode of 0600. Subsystems
- * can overwrite the default setting if needed.
- */
-
-#include <linux/kernel.h>
-#include <linux/syscalls.h>
-#include <linux/mount.h>
-#include <linux/device.h>
-#include <linux/genhd.h>
-#include <linux/namei.h>
-#include <linux/fs.h>
-#include <linux/shmem_fs.h>
-#include <linux/ramfs.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/kthread.h>
-
-static struct task_struct *thread;
-
-#if defined CONFIG_DEVTMPFS_MOUNT
-static int mount_dev = 1;
-#else
-static int mount_dev;
-#endif
-
-static DEFINE_SPINLOCK(req_lock);
-
-static struct req {
- struct req *next;
- struct completion done;
- int err;
- const char *name;
- umode_t mode; /* 0 => delete */
- struct device *dev;
-} *requests;
-
/* Parse the "devtmpfs.mount=" kernel command line option; any non-zero
 * value enables auto-mounting of devtmpfs on the rootfs /dev. */
static int __init mount_param(char *str)
{
        mount_dev = simple_strtoul(str, NULL, 0);
        return 1;       /* consume the option */
}
__setup("devtmpfs.mount=", mount_param);
-
/* Mount callback: back devtmpfs with tmpfs when available, otherwise
 * fall back to ramfs.  mount_single() gives one shared superblock. */
static struct dentry *dev_mount(struct file_system_type *fs_type, int flags,
                      const char *dev_name, void *data)
{
#ifdef CONFIG_TMPFS
        return mount_single(fs_type, flags, data, shmem_fill_super);
#else
        return mount_single(fs_type, flags, data, ramfs_fill_super);
#endif
}
-
-static struct file_system_type dev_fs_type = {
- .name = "devtmpfs",
- .mount = dev_mount,
- .kill_sb = kill_litter_super,
-};
-
#ifdef CONFIG_BLOCK
/* True when @dev belongs to the block class and thus needs S_IFBLK. */
static inline int is_blockdev(struct device *dev)
{
        return dev->class == &block_class;
}
#else
/* No block layer: every device node is a character device. */
static inline int is_blockdev(struct device *dev) { return 0; }
#endif
-
/*
 * Queue a "create device node" request for the devtmpfsd kernel thread
 * and wait for it to be serviced.  The request lives on our stack; the
 * completion guarantees the daemon is done with it before we return.
 * Returns 0 when the daemon is not running (nothing to do).
 */
int devtmpfs_create_node(struct device *dev)
{
        const char *tmp = NULL;
        struct req req;

        if (!thread)
                return 0;

        req.mode = 0;
        req.name = device_get_devnode(dev, &req.mode, &tmp);
        if (!req.name)
                return -ENOMEM;

        if (req.mode == 0)
                req.mode = 0600;        /* default: root-only access */
        if (is_blockdev(dev))
                req.mode |= S_IFBLK;
        else
                req.mode |= S_IFCHR;

        req.dev = dev;

        init_completion(&req.done);

        /* push onto the daemon's singly-linked request list */
        spin_lock(&req_lock);
        req.next = requests;
        requests = &req;
        spin_unlock(&req_lock);

        wake_up_process(thread);
        wait_for_completion(&req.done);

        /* device_get_devnode() may have allocated the name into @tmp */
        kfree(tmp);

        return req.err;
}
-
/*
 * Queue a "remove device node" request for devtmpfsd and wait for it.
 * A request mode of 0 encodes removal (see handle()).
 */
int devtmpfs_delete_node(struct device *dev)
{
        const char *tmp = NULL;
        struct req req;

        if (!thread)
                return 0;

        req.name = device_get_devnode(dev, NULL, &tmp);
        if (!req.name)
                return -ENOMEM;

        req.mode = 0;   /* 0 => delete request */
        req.dev = dev;

        init_completion(&req.done);

        spin_lock(&req_lock);
        req.next = requests;
        requests = &req;
        spin_unlock(&req_lock);

        wake_up_process(thread);
        wait_for_completion(&req.done);

        kfree(tmp);
        return req.err;
}
-
/*
 * Create one directory component inside devtmpfs.  kern_path_create()
 * returns with the parent's i_mutex held; we must unlock it and drop
 * the path reference on every exit.
 */
static int dev_mkdir(const char *name, umode_t mode)
{
        struct dentry *dentry;
        struct path path;
        int err;

        dentry = kern_path_create(AT_FDCWD, name, &path, 1);
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);

        err = vfs_mkdir(path.dentry->d_inode, dentry, mode);
        if (!err)
                /* mark as kernel-created inode */
                dentry->d_inode->i_private = &thread;
        dput(dentry);
        mutex_unlock(&path.dentry->d_inode->i_mutex);
        path_put(&path);
        return err;
}
-
-static int create_path(const char *nodepath)
-{
- char *path;
- char *s;
- int err = 0;
-
- /* parent directories do not exist, create them */
- path = kstrdup(nodepath, GFP_KERNEL);
- if (!path)
- return -ENOMEM;
-
- s = path;
- for (;;) {
- s = strchr(s, '/');
- if (!s)
- break;
- s[0] = '\0';
- err = dev_mkdir(path, 0755);
- if (err && err != -EEXIST)
- break;
- s[0] = '/';
- s++;
- }
- kfree(path);
- return err;
-}
-
/*
 * Create the device node @nodename with @mode and dev->devt.  If the
 * parent directories are missing they are created on demand and the
 * lookup retried.  Runs in the devtmpfsd thread, which is chroot'ed
 * into the devtmpfs instance.
 */
static int handle_create(const char *nodename, umode_t mode, struct device *dev)
{
        struct dentry *dentry;
        struct path path;
        int err;

        dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
        if (dentry == ERR_PTR(-ENOENT)) {
                /* parents missing: build them, then retry the lookup.
                 * create_path()'s result is ignored on purpose -- the
                 * retried kern_path_create() reports any real failure. */
                create_path(nodename);
                dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
        }
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);

        err = vfs_mknod(path.dentry->d_inode,
                        dentry, mode, dev->devt);
        if (!err) {
                struct iattr newattrs;

                /* fixup possibly umasked mode */
                newattrs.ia_mode = mode;
                newattrs.ia_valid = ATTR_MODE;
                mutex_lock(&dentry->d_inode->i_mutex);
                notify_change(dentry, &newattrs);
                mutex_unlock(&dentry->d_inode->i_mutex);

                /* mark as kernel-created inode */
                dentry->d_inode->i_private = &thread;
        }
        dput(dentry);

        /* kern_path_create() left the parent's i_mutex held */
        mutex_unlock(&path.dentry->d_inode->i_mutex);
        path_put(&path);
        return err;
}
-
/*
 * Remove directory @name, but only if devtmpfs created it (the inode's
 * i_private carries our &thread marker); foreign directories yield
 * -EPERM.  Used when pruning empty parents after a node removal.
 */
static int dev_rmdir(const char *name)
{
        struct nameidata nd;
        struct dentry *dentry;
        int err;

        err = kern_path_parent(name, &nd);
        if (err)
                return err;

        mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
        dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
        if (!IS_ERR(dentry)) {
                if (dentry->d_inode) {
                        if (dentry->d_inode->i_private == &thread)
                                err = vfs_rmdir(nd.path.dentry->d_inode,
                                                dentry);
                        else
                                err = -EPERM;   /* not ours to remove */
                } else {
                        err = -ENOENT;
                }
                dput(dentry);
        } else {
                err = PTR_ERR(dentry);
        }

        mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
        path_put(&nd.path);
        return err;
}
-
/*
 * Walk upwards from @nodepath, removing each now-empty parent directory
 * until dev_rmdir() fails (directory not empty, not ours, or gone).
 * The kstrdup'ed copy is mutated in place by truncating at each '/'.
 */
static int delete_path(const char *nodepath)
{
        const char *path;
        int err = 0;

        path = kstrdup(nodepath, GFP_KERNEL);
        if (!path)
                return -ENOMEM;

        for (;;) {
                char *base;

                /* chop off the last component and try to rmdir the rest */
                base = strrchr(path, '/');
                if (!base)
                        break;
                base[0] = '\0';
                err = dev_rmdir(path);
                if (err)
                        break;
        }

        kfree(path);
        return err;
}
-
-static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat)
-{
- /* did we create it */
- if (inode->i_private != &thread)
- return 0;
-
- /* does the dev_t match */
- if (is_blockdev(dev)) {
- if (!S_ISBLK(stat->mode))
- return 0;
- } else {
- if (!S_ISCHR(stat->mode))
- return 0;
- }
- if (stat->rdev != dev->devt)
- return 0;
-
- /* ours */
- return 1;
-}
-
/*
 * Remove the device node @nodename if (and only if) it is the node we
 * created for @dev, then prune any now-empty parent directories.
 */
static int handle_remove(const char *nodename, struct device *dev)
{
        struct nameidata nd;
        struct dentry *dentry;
        struct kstat stat;
        /* NOTE(review): initialized to 1, so delete_path() runs below even
         * when nothing was unlinked; looks like this should start at 0 so
         * parents are only pruned after an actual removal -- confirm. */
        int deleted = 1;
        int err;

        err = kern_path_parent(nodename, &nd);
        if (err)
                return err;

        mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
        dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
        if (!IS_ERR(dentry)) {
                if (dentry->d_inode) {
                        err = vfs_getattr(nd.path.mnt, dentry, &stat);
                        if (!err && dev_mynode(dev, dentry->d_inode, &stat)) {
                                struct iattr newattrs;
                                /*
                                 * before unlinking this node, reset permissions
                                 * of possible references like hardlinks
                                 */
                                newattrs.ia_uid = 0;
                                newattrs.ia_gid = 0;
                                newattrs.ia_mode = stat.mode & ~0777;
                                newattrs.ia_valid =
                                        ATTR_UID|ATTR_GID|ATTR_MODE;
                                mutex_lock(&dentry->d_inode->i_mutex);
                                notify_change(dentry, &newattrs);
                                mutex_unlock(&dentry->d_inode->i_mutex);
                                err = vfs_unlink(nd.path.dentry->d_inode,
                                                 dentry);
                                if (!err || err == -ENOENT)
                                        deleted = 1;
                        }
                } else {
                        err = -ENOENT;
                }
                dput(dentry);
        } else {
                err = PTR_ERR(dentry);
        }
        mutex_unlock(&nd.path.dentry->d_inode->i_mutex);

        path_put(&nd.path);
        /* prune empty kernel-created parent directories */
        if (deleted && strchr(nodename, '/'))
                delete_path(nodename);
        return err;
}
-
-/*
- * If configured, or requested by the commandline, devtmpfs will be
- * auto-mounted after the kernel mounted the root filesystem.
- */
-int devtmpfs_mount(const char *mntdir)
-{
- int err;
-
- if (!mount_dev)
- return 0;
-
- if (!thread)
- return 0;
-
- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
- if (err)
- printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
- else
- printk(KERN_INFO "devtmpfs: mounted\n");
- return err;
-}
-
-static DECLARE_COMPLETION(setup_done);
-
-static int handle(const char *name, umode_t mode, struct device *dev)
-{
- if (mode)
- return handle_create(name, mode, dev);
- else
- return handle_remove(name, dev);
-}
-
/*
 * The devtmpfs daemon: unshares into a private mount namespace, mounts
 * devtmpfs over its own '/', then loops forever servicing node
 * create/remove requests queued by devtmpfs_{create,delete}_node().
 * @p points at the caller's error slot, reported via @setup_done.
 */
static int devtmpfsd(void *p)
{
        char options[] = "mode=0755";
        int *err = p;
        /* private namespace so overmounting '/' is invisible elsewhere */
        *err = sys_unshare(CLONE_NEWNS);
        if (*err)
                goto out;
        *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
        if (*err)
                goto out;
        sys_chdir("/.."); /* will traverse into overmounted root */
        sys_chroot(".");
        complete(&setup_done);
        while (1) {
                spin_lock(&req_lock);
                while (requests) {
                        /* detach the whole pending list, service it unlocked */
                        struct req *req = requests;
                        requests = NULL;
                        spin_unlock(&req_lock);
                        while (req) {
                                struct req *next = req->next;
                                req->err = handle(req->name, req->mode, req->dev);
                                complete(&req->done);   /* wake the requester */
                                req = next;
                        }
                        spin_lock(&req_lock);
                }
                /* set state before unlocking so a concurrent enqueue's
                 * wake_up_process() cannot be lost before schedule() */
                __set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock(&req_lock);
                schedule();
        }
        return 0;
out:
        complete(&setup_done);  /* unblock devtmpfs_init() even on failure */
        return *err;
}
-
/*
 * Create devtmpfs instance, driver-core devices will add their device
 * nodes here.
 */
int __init devtmpfs_init(void)
{
        int err = register_filesystem(&dev_fs_type);
        if (err) {
                printk(KERN_ERR "devtmpfs: unable to register devtmpfs "
                       "type %i\n", err);
                return err;
        }

        /* the daemon writes its setup status into @err and signals
         * setup_done once its private mount is in place */
        thread = kthread_run(devtmpfsd, &err, "kdevtmpfs");
        if (!IS_ERR(thread)) {
                wait_for_completion(&setup_done);
        } else {
                err = PTR_ERR(thread);
                thread = NULL;  /* keeps devtmpfs_*_node() as no-ops */
        }

        if (err) {
                printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
                unregister_filesystem(&dev_fs_type);
                return err;
        }

        printk(KERN_INFO "devtmpfs: initialized\n");
        return 0;
}
diff --git a/ANDROID_3.4.5/drivers/base/dma-buf.c b/ANDROID_3.4.5/drivers/base/dma-buf.c
deleted file mode 100644
index 7cfb405b..00000000
--- a/ANDROID_3.4.5/drivers/base/dma-buf.c
+++ /dev/null
@@ -1,470 +0,0 @@
-/*
- * Framework for buffer objects that can be shared across devices/subsystems.
- *
- * Copyright(C) 2011 Linaro Limited. All rights reserved.
- * Author: Sumit Semwal <sumit.semwal@ti.com>
- *
- * Many thanks to linaro-mm-sig list, and specially
- * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
- * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
- * refining of this idea.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/dma-buf.h>
-#include <linux/anon_inodes.h>
-#include <linux/export.h>
-
-static inline int is_dma_buf_file(struct file *);
-
/* File release: invoked when the last reference to the anon file is
 * dropped; lets the exporter release its backing storage, then frees
 * the dma_buf itself. */
static int dma_buf_release(struct inode *inode, struct file *file)
{
        struct dma_buf *dmabuf;

        if (!is_dma_buf_file(file))
                return -EINVAL;

        dmabuf = file->private_data;

        /* ops->release is mandatory, checked at export time */
        dmabuf->ops->release(dmabuf);
        kfree(dmabuf);
        return 0;
}
-
-static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
-{
- struct dma_buf *dmabuf;
-
- if (!is_dma_buf_file(file))
- return -EINVAL;
-
- dmabuf = file->private_data;
-
- /* check for overflowing the buffer's size */
- if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
- dmabuf->size >> PAGE_SHIFT)
- return -EINVAL;
-
- return dmabuf->ops->mmap(dmabuf, vma);
-}
-
-static const struct file_operations dma_buf_fops = {
- .release = dma_buf_release,
- .mmap = dma_buf_mmap_internal,
-};
-
/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
        /* identity test: only dma-buf anon files use these fops */
        return file->f_op == &dma_buf_fops;
}
-
-/**
- * dma_buf_export - Creates a new dma_buf, and associates an anon file
- * with this buffer, so it can be exported.
- * Also connect the allocator specific data and ops to the buffer.
- *
- * @priv: [in] Attach private data of allocator to this buffer
- * @ops: [in] Attach allocator-defined dma buf ops to the new buffer.
- * @size: [in] Size of the buffer
- * @flags: [in] mode flags for the file.
- *
- * Returns, on success, a newly created dma_buf object, which wraps the
- * supplied private data and operations for dma_buf_ops. On either missing
- * ops, or error in allocating struct dma_buf, will return negative error.
- *
- */
-struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
- size_t size, int flags)
-{
- struct dma_buf *dmabuf;
- struct file *file;
-
- if (WARN_ON(!priv || !ops
- || !ops->map_dma_buf
- || !ops->unmap_dma_buf
- || !ops->release
- || !ops->kmap_atomic
- || !ops->kmap
- || !ops->mmap)) {
- return ERR_PTR(-EINVAL);
- }
-
- dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL);
- if (dmabuf == NULL)
- return ERR_PTR(-ENOMEM);
-
- dmabuf->priv = priv;
- dmabuf->ops = ops;
- dmabuf->size = size;
-
- file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
-
- dmabuf->file = file;
-
- mutex_init(&dmabuf->lock);
- INIT_LIST_HEAD(&dmabuf->attachments);
-
- return dmabuf;
-}
-EXPORT_SYMBOL_GPL(dma_buf_export);
-
-
-/**
- * dma_buf_fd - returns a file descriptor for the given dma_buf
- * @dmabuf: [in] pointer to dma_buf for which fd is required.
- * @flags: [in] flags to give to fd
- *
- * On success, returns an associated 'fd'. Else, returns error.
- */
-int dma_buf_fd(struct dma_buf *dmabuf, int flags)
-{
- int error, fd;
-
- if (!dmabuf || !dmabuf->file)
- return -EINVAL;
-
- error = get_unused_fd_flags(flags);
- if (error < 0)
- return error;
- fd = error;
-
- fd_install(fd, dmabuf->file);
-
- return fd;
-}
-EXPORT_SYMBOL_GPL(dma_buf_fd);
-
-/**
- * dma_buf_get - returns the dma_buf structure related to an fd
- * @fd: [in] fd associated with the dma_buf to be returned
- *
- * On success, returns the dma_buf structure associated with an fd; uses
- * file's refcounting done by fget to increase refcount. returns ERR_PTR
- * otherwise.
- */
-struct dma_buf *dma_buf_get(int fd)
-{
- struct file *file;
-
- file = fget(fd);
-
- if (!file)
- return ERR_PTR(-EBADF);
-
- if (!is_dma_buf_file(file)) {
- fput(file);
- return ERR_PTR(-EINVAL);
- }
-
- return file->private_data;
-}
-EXPORT_SYMBOL_GPL(dma_buf_get);
-
/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:     [in]    buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().  When the last
 * reference drops, dma_buf_release() tears the buffer down.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
        if (WARN_ON(!dmabuf || !dmabuf->file))
                return;

        fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);
-
/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:     [in]    buffer to attach device to.
 * @dev:        [in]    device to be attached.
 *
 * Returns struct dma_buf_attachment * for this attachment; may return negative
 * error codes.
 *
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
                                          struct device *dev)
{
        struct dma_buf_attachment *attach;
        int ret;

        if (WARN_ON(!dmabuf || !dev))
                return ERR_PTR(-EINVAL);

        attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
        if (attach == NULL)
                return ERR_PTR(-ENOMEM);

        attach->dev = dev;
        attach->dmabuf = dmabuf;

        /* dmabuf->lock serializes the attachments list and attach op */
        mutex_lock(&dmabuf->lock);

        /* the attach op is optional for exporters */
        if (dmabuf->ops->attach) {
                ret = dmabuf->ops->attach(dmabuf, dev, attach);
                if (ret)
                        goto err_attach;
        }
        list_add(&attach->node, &dmabuf->attachments);

        mutex_unlock(&dmabuf->lock);
        return attach;

err_attach:
        kfree(attach);
        mutex_unlock(&dmabuf->lock);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);
-
/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:     [in]    buffer to detach from.
 * @attach:     [in]    attachment to be detached; is free'd after this call.
 *
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
        if (WARN_ON(!dmabuf || !attach))
                return;

        mutex_lock(&dmabuf->lock);
        list_del(&attach->node);
        /* the detach op is optional for exporters */
        if (dmabuf->ops->detach)
                dmabuf->ops->detach(dmabuf, attach);

        mutex_unlock(&dmabuf->lock);
        kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);
-
-/**
- * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
- * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
- * dma_buf_ops.
- * @attach: [in] attachment whose scatterlist is to be returned
- * @direction: [in] direction of DMA transfer
- *
- * Returns sg_table containing the scatterlist to be returned; may return NULL
- * or ERR_PTR.
- *
- */
-struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
- enum dma_data_direction direction)
-{
- struct sg_table *sg_table = ERR_PTR(-EINVAL);
-
- might_sleep();
-
- if (WARN_ON(!attach || !attach->dmabuf))
- return ERR_PTR(-EINVAL);
-
- sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
-
- return sg_table;
-}
-EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
-
/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:     [in]    attachment to unmap buffer from
 * @sg_table:   [in]    scatterlist info of the buffer to unmap
 * @direction:  [in]    direction of DMA transfer
 *
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
                              struct sg_table *sg_table,
                              enum dma_data_direction direction)
{
        if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
                return;

        /* unmap_dma_buf is mandatory, verified at export time */
        attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
                                           direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
-
-
-/**
- * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
- * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
- * preparations. Coherency is only guaranteed in the specified range for the
- * specified access direction.
- * @dma_buf: [in] buffer to prepare cpu access for.
- * @start: [in] start of range for cpu access.
- * @len: [in] length of range for cpu access.
- * @direction: [in] length of range for cpu access.
- *
- * Can return negative error values, returns 0 on success.
- */
-int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
- enum dma_data_direction direction)
-{
- int ret = 0;
-
- if (WARN_ON(!dmabuf))
- return -EINVAL;
-
- if (dmabuf->ops->begin_cpu_access)
- ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
-
/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:     [in]    buffer to complete cpu access for.
 * @start:      [in]    start of range for cpu access.
 * @len:        [in]    length of range for cpu access.
 * @direction:  [in]    direction of the cpu access.
 *
 * This call must always succeed.
 */
void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
                            enum dma_data_direction direction)
{
        WARN_ON(!dmabuf);

        /* end_cpu_access is optional for exporters */
        if (dmabuf->ops->end_cpu_access)
                dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
-
/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
 * @dmabuf:     [in]    buffer to map page from.
 * @page_num:   [in]    page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
        WARN_ON(!dmabuf);

        /* kmap_atomic is a mandatory op, verified at export time */
        return dmabuf->ops->kmap_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
-
/**
 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
 * @dmabuf:     [in]    buffer to unmap page from.
 * @page_num:   [in]    page in PAGE_SIZE units to unmap.
 * @vaddr:      [in]    kernel space pointer obtained from dma_buf_kmap_atomic.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
                           void *vaddr)
{
        WARN_ON(!dmabuf);

        /* kunmap_atomic is optional, unlike its map counterpart */
        if (dmabuf->ops->kunmap_atomic)
                dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
-
/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
 * same restrictions as for kmap and friends apply.
 * @dmabuf:     [in]    buffer to map page from.
 * @page_num:   [in]    page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
        WARN_ON(!dmabuf);

        /* kmap is a mandatory op, verified at export time */
        return dmabuf->ops->kmap(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);
-
/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:     [in]    buffer to unmap page from.
 * @page_num:   [in]    page in PAGE_SIZE units to unmap.
 * @vaddr:      [in]    kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
                    void *vaddr)
{
        WARN_ON(!dmabuf);

        /* kunmap is optional, unlike its map counterpart */
        if (dmabuf->ops->kunmap)
                dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);
-
-
/**
 * dma_buf_mmap - Setup up a userspace mmap with the given vma
 * @dmabuf:     [in]    buffer that should back the vma
 * @vma:        [in]    vma for the mmap
 * @pgoff:      [in]    offset in pages where this mmap should start within the
 *                      dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporters mmap function to
 * set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
                 unsigned long pgoff)
{
        if (WARN_ON(!dmabuf || !vma))
                return -EINVAL;

        /* check for offset overflow (the addition must not wrap) */
        if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
                return -EOVERFLOW;

        /* check for overflowing the buffer's size */
        if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;

        /* readjust the vma: swap the caller's file for the dma-buf file */
        if (vma->vm_file)
                fput(vma->vm_file);

        vma->vm_file = dmabuf->file;
        get_file(vma->vm_file);

        vma->vm_pgoff = pgoff;

        return dmabuf->ops->mmap(dmabuf, vma);
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
diff --git a/ANDROID_3.4.5/drivers/base/dma-coherent.c b/ANDROID_3.4.5/drivers/base/dma-coherent.c
deleted file mode 100644
index bb0025c5..00000000
--- a/ANDROID_3.4.5/drivers/base/dma-coherent.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Coherent per-device memory handling.
- * Borrowed from i386
- */
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-
-struct dma_coherent_mem {
- void *virt_base;
- dma_addr_t device_base;
- int size;
- int flags;
- unsigned long *bitmap;
-};
-
-int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
- dma_addr_t device_addr, size_t size, int flags)
-{
- void __iomem *mem_base = NULL;
- int pages = size >> PAGE_SHIFT;
- int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
-
- if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
- goto out;
- if (!size)
- goto out;
- if (dev->dma_mem)
- goto out;
-
- /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
-
- mem_base = ioremap(bus_addr, size);
- if (!mem_base)
- goto out;
-
- dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
- if (!dev->dma_mem)
- goto out;
- dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
- if (!dev->dma_mem->bitmap)
- goto free1_out;
-
- dev->dma_mem->virt_base = mem_base;
- dev->dma_mem->device_base = device_addr;
- dev->dma_mem->size = pages;
- dev->dma_mem->flags = flags;
-
- if (flags & DMA_MEMORY_MAP)
- return DMA_MEMORY_MAP;
-
- return DMA_MEMORY_IO;
-
- free1_out:
- kfree(dev->dma_mem);
- out:
- if (mem_base)
- iounmap(mem_base);
- return 0;
-}
-EXPORT_SYMBOL(dma_declare_coherent_memory);
-
-void dma_release_declared_memory(struct device *dev)
-{
- struct dma_coherent_mem *mem = dev->dma_mem;
-
- if (!mem)
- return;
- dev->dma_mem = NULL;
- iounmap(mem->virt_base);
- kfree(mem->bitmap);
- kfree(mem);
-}
-EXPORT_SYMBOL(dma_release_declared_memory);
-
-void *dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size)
-{
- struct dma_coherent_mem *mem = dev->dma_mem;
- int pos, err;
-
- size += device_addr & ~PAGE_MASK;
-
- if (!mem)
- return ERR_PTR(-EINVAL);
-
- pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
- err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
- if (err != 0)
- return ERR_PTR(err);
- return mem->virt_base + (pos << PAGE_SHIFT);
-}
-EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-
-/**
- * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
- *
- * @dev: device from which we allocate memory
- * @size: size of requested memory area
- * @dma_handle: This will be filled with the correct dma handle
- * @ret: This pointer will be filled with the virtual address
- * to allocated area.
- *
- * This function should be only called from per-arch dma_alloc_coherent()
- * to support allocation from per-device coherent memory pools.
- *
- * Returns 0 if dma_alloc_coherent should continue with allocating from
- * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
- */
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
- dma_addr_t *dma_handle, void **ret)
-{
- struct dma_coherent_mem *mem;
- int order = get_order(size);
- int pageno;
-
- if (!dev)
- return 0;
- mem = dev->dma_mem;
- if (!mem)
- return 0;
-
- *ret = NULL;
-
- if (unlikely(size > (mem->size << PAGE_SHIFT)))
- goto err;
-
- pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
- if (unlikely(pageno < 0))
- goto err;
-
- /*
- * Memory was found in the per-device area.
- */
- *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
- *ret = mem->virt_base + (pageno << PAGE_SHIFT);
- memset(*ret, 0, size);
-
- return 1;
-
-err:
- /*
- * In the case where the allocation can not be satisfied from the
- * per-device area, try to fall back to generic memory if the
- * constraints allow it.
- */
- return mem->flags & DMA_MEMORY_EXCLUSIVE;
-}
-EXPORT_SYMBOL(dma_alloc_from_coherent);
-
-/**
- * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
- * @dev: device from which the memory was allocated
- * @order: the order of pages allocated
- * @vaddr: virtual address of allocated pages
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, releases that memory.
- *
- * Returns 1 if we correctly released the memory, or 0 if
- * dma_release_coherent() should proceed with releasing memory from
- * generic pools.
- */
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
-{
- struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-
- if (mem && vaddr >= mem->virt_base && vaddr <
- (mem->virt_base + (mem->size << PAGE_SHIFT))) {
- int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-
- bitmap_release_region(mem->bitmap, page, order);
- return 1;
- }
- return 0;
-}
-EXPORT_SYMBOL(dma_release_from_coherent);
diff --git a/ANDROID_3.4.5/drivers/base/dma-mapping.c b/ANDROID_3.4.5/drivers/base/dma-mapping.c
deleted file mode 100644
index 6f3676f1..00000000
--- a/ANDROID_3.4.5/drivers/base/dma-mapping.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * drivers/base/dma-mapping.c - arch-independent dma-mapping routines
- *
- * Copyright (c) 2006 SUSE Linux Products GmbH
- * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
- *
- * This file is released under the GPLv2.
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/export.h>
-#include <linux/gfp.h>
-
-/*
- * Managed DMA API
- */
-struct dma_devres {
- size_t size;
- void *vaddr;
- dma_addr_t dma_handle;
-};
-
-static void dmam_coherent_release(struct device *dev, void *res)
-{
- struct dma_devres *this = res;
-
- dma_free_coherent(dev, this->size, this->vaddr, this->dma_handle);
-}
-
-static void dmam_noncoherent_release(struct device *dev, void *res)
-{
- struct dma_devres *this = res;
-
- dma_free_noncoherent(dev, this->size, this->vaddr, this->dma_handle);
-}
-
-static int dmam_match(struct device *dev, void *res, void *match_data)
-{
- struct dma_devres *this = res, *match = match_data;
-
- if (this->vaddr == match->vaddr) {
- WARN_ON(this->size != match->size ||
- this->dma_handle != match->dma_handle);
- return 1;
- }
- return 0;
-}
-
-/**
- * dmam_alloc_coherent - Managed dma_alloc_coherent()
- * @dev: Device to allocate coherent memory for
- * @size: Size of allocation
- * @dma_handle: Out argument for allocated DMA handle
- * @gfp: Allocation flags
- *
- * Managed dma_alloc_coherent(). Memory allocated using this function
- * will be automatically released on driver detach.
- *
- * RETURNS:
- * Pointer to allocated memory on success, NULL on failure.
- */
-void * dmam_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
-{
- struct dma_devres *dr;
- void *vaddr;
-
- dr = devres_alloc(dmam_coherent_release, sizeof(*dr), gfp);
- if (!dr)
- return NULL;
-
- vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
- if (!vaddr) {
- devres_free(dr);
- return NULL;
- }
-
- dr->vaddr = vaddr;
- dr->dma_handle = *dma_handle;
- dr->size = size;
-
- devres_add(dev, dr);
-
- return vaddr;
-}
-EXPORT_SYMBOL(dmam_alloc_coherent);
-
-/**
- * dmam_free_coherent - Managed dma_free_coherent()
- * @dev: Device to free coherent memory for
- * @size: Size of allocation
- * @vaddr: Virtual address of the memory to free
- * @dma_handle: DMA handle of the memory to free
- *
- * Managed dma_free_coherent().
- */
-void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
-{
- struct dma_devres match_data = { size, vaddr, dma_handle };
-
- dma_free_coherent(dev, size, vaddr, dma_handle);
- WARN_ON(devres_destroy(dev, dmam_coherent_release, dmam_match,
- &match_data));
-}
-EXPORT_SYMBOL(dmam_free_coherent);
-
-/**
- * dmam_alloc_non_coherent - Managed dma_alloc_non_coherent()
- * @dev: Device to allocate non_coherent memory for
- * @size: Size of allocation
- * @dma_handle: Out argument for allocated DMA handle
- * @gfp: Allocation flags
- *
- * Managed dma_alloc_non_coherent(). Memory allocated using this
- * function will be automatically released on driver detach.
- *
- * RETURNS:
- * Pointer to allocated memory on success, NULL on failure.
- */
-void *dmam_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
-{
- struct dma_devres *dr;
- void *vaddr;
-
- dr = devres_alloc(dmam_noncoherent_release, sizeof(*dr), gfp);
- if (!dr)
- return NULL;
-
- vaddr = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
- if (!vaddr) {
- devres_free(dr);
- return NULL;
- }
-
- dr->vaddr = vaddr;
- dr->dma_handle = *dma_handle;
- dr->size = size;
-
- devres_add(dev, dr);
-
- return vaddr;
-}
-EXPORT_SYMBOL(dmam_alloc_noncoherent);
-
-/**
- * dmam_free_coherent - Managed dma_free_noncoherent()
- * @dev: Device to free noncoherent memory for
- * @size: Size of allocation
- * @vaddr: Virtual address of the memory to free
- * @dma_handle: DMA handle of the memory to free
- *
- * Managed dma_free_noncoherent().
- */
-void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
-{
- struct dma_devres match_data = { size, vaddr, dma_handle };
-
- dma_free_noncoherent(dev, size, vaddr, dma_handle);
- WARN_ON(!devres_destroy(dev, dmam_noncoherent_release, dmam_match,
- &match_data));
-}
-EXPORT_SYMBOL(dmam_free_noncoherent);
-
-#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-
-static void dmam_coherent_decl_release(struct device *dev, void *res)
-{
- dma_release_declared_memory(dev);
-}
-
-/**
- * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
- * @dev: Device to declare coherent memory for
- * @bus_addr: Bus address of coherent memory to be declared
- * @device_addr: Device address of coherent memory to be declared
- * @size: Size of coherent memory to be declared
- * @flags: Flags
- *
- * Managed dma_declare_coherent_memory().
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
- dma_addr_t device_addr, size_t size, int flags)
-{
- void *res;
- int rc;
-
- res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
- if (!res)
- return -ENOMEM;
-
- rc = dma_declare_coherent_memory(dev, bus_addr, device_addr, size,
- flags);
- if (rc == 0)
- devres_add(dev, res);
- else
- devres_free(res);
-
- return rc;
-}
-EXPORT_SYMBOL(dmam_declare_coherent_memory);
-
-/**
- * dmam_release_declared_memory - Managed dma_release_declared_memory().
- * @dev: Device to release declared coherent memory for
- *
- * Managed dmam_release_declared_memory().
- */
-void dmam_release_declared_memory(struct device *dev)
-{
- WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
-}
-EXPORT_SYMBOL(dmam_release_declared_memory);
-
-#endif
diff --git a/ANDROID_3.4.5/drivers/base/driver.c b/ANDROID_3.4.5/drivers/base/driver.c
deleted file mode 100644
index 3ec3896c..00000000
--- a/ANDROID_3.4.5/drivers/base/driver.c
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * driver.c - centralized device driver management
- *
- * Copyright (c) 2002-3 Patrick Mochel
- * Copyright (c) 2002-3 Open Source Development Labs
- * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
- * Copyright (c) 2007 Novell Inc.
- *
- * This file is released under the GPLv2
- *
- */
-
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include "base.h"
-
-static struct device *next_device(struct klist_iter *i)
-{
- struct klist_node *n = klist_next(i);
- struct device *dev = NULL;
- struct device_private *dev_prv;
-
- if (n) {
- dev_prv = to_device_private_driver(n);
- dev = dev_prv->device;
- }
- return dev;
-}
-
-/**
- * driver_for_each_device - Iterator for devices bound to a driver.
- * @drv: Driver we're iterating.
- * @start: Device to begin with
- * @data: Data to pass to the callback.
- * @fn: Function to call for each device.
- *
- * Iterate over the @drv's list of devices calling @fn for each one.
- */
-int driver_for_each_device(struct device_driver *drv, struct device *start,
- void *data, int (*fn)(struct device *, void *))
-{
- struct klist_iter i;
- struct device *dev;
- int error = 0;
-
- if (!drv)
- return -EINVAL;
-
- klist_iter_init_node(&drv->p->klist_devices, &i,
- start ? &start->p->knode_driver : NULL);
- while ((dev = next_device(&i)) && !error)
- error = fn(dev, data);
- klist_iter_exit(&i);
- return error;
-}
-EXPORT_SYMBOL_GPL(driver_for_each_device);
-
-/**
- * driver_find_device - device iterator for locating a particular device.
- * @drv: The device's driver
- * @start: Device to begin with
- * @data: Data to pass to match function
- * @match: Callback function to check device
- *
- * This is similar to the driver_for_each_device() function above, but
- * it returns a reference to a device that is 'found' for later use, as
- * determined by the @match callback.
- *
- * The callback should return 0 if the device doesn't match and non-zero
- * if it does. If the callback returns non-zero, this function will
- * return to the caller and not iterate over any more devices.
- */
-struct device *driver_find_device(struct device_driver *drv,
- struct device *start, void *data,
- int (*match)(struct device *dev, void *data))
-{
- struct klist_iter i;
- struct device *dev;
-
- if (!drv)
- return NULL;
-
- klist_iter_init_node(&drv->p->klist_devices, &i,
- (start ? &start->p->knode_driver : NULL));
- while ((dev = next_device(&i)))
- if (match(dev, data) && get_device(dev))
- break;
- klist_iter_exit(&i);
- return dev;
-}
-EXPORT_SYMBOL_GPL(driver_find_device);
-
-/**
- * driver_create_file - create sysfs file for driver.
- * @drv: driver.
- * @attr: driver attribute descriptor.
- */
-int driver_create_file(struct device_driver *drv,
- const struct driver_attribute *attr)
-{
- int error;
- if (drv)
- error = sysfs_create_file(&drv->p->kobj, &attr->attr);
- else
- error = -EINVAL;
- return error;
-}
-EXPORT_SYMBOL_GPL(driver_create_file);
-
-/**
- * driver_remove_file - remove sysfs file for driver.
- * @drv: driver.
- * @attr: driver attribute descriptor.
- */
-void driver_remove_file(struct device_driver *drv,
- const struct driver_attribute *attr)
-{
- if (drv)
- sysfs_remove_file(&drv->p->kobj, &attr->attr);
-}
-EXPORT_SYMBOL_GPL(driver_remove_file);
-
-static int driver_add_groups(struct device_driver *drv,
- const struct attribute_group **groups)
-{
- int error = 0;
- int i;
-
- if (groups) {
- for (i = 0; groups[i]; i++) {
- error = sysfs_create_group(&drv->p->kobj, groups[i]);
- if (error) {
- while (--i >= 0)
- sysfs_remove_group(&drv->p->kobj,
- groups[i]);
- break;
- }
- }
- }
- return error;
-}
-
-static void driver_remove_groups(struct device_driver *drv,
- const struct attribute_group **groups)
-{
- int i;
-
- if (groups)
- for (i = 0; groups[i]; i++)
- sysfs_remove_group(&drv->p->kobj, groups[i]);
-}
-
-/**
- * driver_register - register driver with bus
- * @drv: driver to register
- *
- * We pass off most of the work to the bus_add_driver() call,
- * since most of the things we have to do deal with the bus
- * structures.
- */
-int driver_register(struct device_driver *drv)
-{
- int ret;
- struct device_driver *other;
-
- BUG_ON(!drv->bus->p);
-
- if ((drv->bus->probe && drv->probe) ||
- (drv->bus->remove && drv->remove) ||
- (drv->bus->shutdown && drv->shutdown))
- printk(KERN_WARNING "Driver '%s' needs updating - please use "
- "bus_type methods\n", drv->name);
-
- other = driver_find(drv->name, drv->bus);
- if (other) {
- printk(KERN_ERR "Error: Driver '%s' is already registered, "
- "aborting...\n", drv->name);
- return -EBUSY;
- }
-
- ret = bus_add_driver(drv);
- if (ret)
- return ret;
- ret = driver_add_groups(drv, drv->groups);
- if (ret)
- bus_remove_driver(drv);
- return ret;
-}
-EXPORT_SYMBOL_GPL(driver_register);
-
-/**
- * driver_unregister - remove driver from system.
- * @drv: driver.
- *
- * Again, we pass off most of the work to the bus-level call.
- */
-void driver_unregister(struct device_driver *drv)
-{
- if (!drv || !drv->p) {
- WARN(1, "Unexpected driver unregister!\n");
- return;
- }
- driver_remove_groups(drv, drv->groups);
- bus_remove_driver(drv);
-}
-EXPORT_SYMBOL_GPL(driver_unregister);
-
-/**
- * driver_find - locate driver on a bus by its name.
- * @name: name of the driver.
- * @bus: bus to scan for the driver.
- *
- * Call kset_find_obj() to iterate over list of drivers on
- * a bus to find driver by name. Return driver if found.
- *
- * This routine provides no locking to prevent the driver it returns
- * from being unregistered or unloaded while the caller is using it.
- * The caller is responsible for preventing this.
- */
-struct device_driver *driver_find(const char *name, struct bus_type *bus)
-{
- struct kobject *k = kset_find_obj(bus->p->drivers_kset, name);
- struct driver_private *priv;
-
- if (k) {
- /* Drop reference added by kset_find_obj() */
- kobject_put(k);
- priv = to_driver(k);
- return priv->driver;
- }
- return NULL;
-}
-EXPORT_SYMBOL_GPL(driver_find);
diff --git a/ANDROID_3.4.5/drivers/base/firmware.c b/ANDROID_3.4.5/drivers/base/firmware.c
deleted file mode 100644
index 11381555..00000000
--- a/ANDROID_3.4.5/drivers/base/firmware.c
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * firmware.c - firmware subsystem hoohaw.
- *
- * Copyright (c) 2002-3 Patrick Mochel
- * Copyright (c) 2002-3 Open Source Development Labs
- * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
- * Copyright (c) 2007 Novell Inc.
- *
- * This file is released under the GPLv2
- */
-#include <linux/kobject.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
-
-#include "base.h"
-
-struct kobject *firmware_kobj;
-EXPORT_SYMBOL_GPL(firmware_kobj);
-
-int __init firmware_init(void)
-{
- firmware_kobj = kobject_create_and_add("firmware", NULL);
- if (!firmware_kobj)
- return -ENOMEM;
- return 0;
-}
diff --git a/ANDROID_3.4.5/drivers/base/firmware_class.c b/ANDROID_3.4.5/drivers/base/firmware_class.c
deleted file mode 100644
index 5401814c..00000000
--- a/ANDROID_3.4.5/drivers/base/firmware_class.c
+++ /dev/null
@@ -1,730 +0,0 @@
-/*
- * firmware_class.c - Multi purpose firmware loading support
- *
- * Copyright (c) 2003 Manuel Estrada Sainz
- *
- * Please see Documentation/firmware_class/ for more information.
- *
- */
-
-#include <linux/capability.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/timer.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include <linux/bitops.h>
-#include <linux/mutex.h>
-#include <linux/workqueue.h>
-#include <linux/highmem.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-
-#define to_dev(obj) container_of(obj, struct device, kobj)
-
-MODULE_AUTHOR("Manuel Estrada Sainz");
-MODULE_DESCRIPTION("Multi purpose firmware loading support");
-MODULE_LICENSE("GPL");
-
-/* Builtin firmware support */
-
-#ifdef CONFIG_FW_LOADER
-
-extern struct builtin_fw __start_builtin_fw[];
-extern struct builtin_fw __end_builtin_fw[];
-
-static bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
-{
- struct builtin_fw *b_fw;
-
- for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
- if (strcmp(name, b_fw->name) == 0) {
- fw->size = b_fw->size;
- fw->data = b_fw->data;
- return true;
- }
- }
-
- return false;
-}
-
-static bool fw_is_builtin_firmware(const struct firmware *fw)
-{
- struct builtin_fw *b_fw;
-
- for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
- if (fw->data == b_fw->data)
- return true;
-
- return false;
-}
-
-#else /* Module case - no builtin firmware support */
-
-static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
-{
- return false;
-}
-
-static inline bool fw_is_builtin_firmware(const struct firmware *fw)
-{
- return false;
-}
-#endif
-
-enum {
- FW_STATUS_LOADING,
- FW_STATUS_DONE,
- FW_STATUS_ABORT,
-};
-
-static int loading_timeout = 60; /* In seconds */
-
-static inline long firmware_loading_timeout(void)
-{
- return loading_timeout > 0 ? loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT;
-}
-
-/* fw_lock could be moved to 'struct firmware_priv' but since it is just
- * guarding for corner cases a global lock should be OK */
-static DEFINE_MUTEX(fw_lock);
-
-struct firmware_priv {
- struct completion completion;
- struct firmware *fw;
- unsigned long status;
- struct page **pages;
- int nr_pages;
- int page_array_size;
- struct timer_list timeout;
- struct device dev;
- bool nowait;
- char fw_id[];
-};
-
-static struct firmware_priv *to_firmware_priv(struct device *dev)
-{
- return container_of(dev, struct firmware_priv, dev);
-}
-
-static void fw_load_abort(struct firmware_priv *fw_priv)
-{
- set_bit(FW_STATUS_ABORT, &fw_priv->status);
- wmb();
- complete(&fw_priv->completion);
-}
-
-static ssize_t firmware_timeout_show(struct class *class,
- struct class_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%d\n", loading_timeout);
-}
-
-/**
- * firmware_timeout_store - set number of seconds to wait for firmware
- * @class: device class pointer
- * @attr: device attribute pointer
- * @buf: buffer to scan for timeout value
- * @count: number of bytes in @buf
- *
- * Sets the number of seconds to wait for the firmware. Once
- * this expires an error will be returned to the driver and no
- * firmware will be provided.
- *
- * Note: zero means 'wait forever'.
- **/
-static ssize_t firmware_timeout_store(struct class *class,
- struct class_attribute *attr,
- const char *buf, size_t count)
-{
- loading_timeout = simple_strtol(buf, NULL, 10);
- if (loading_timeout < 0)
- loading_timeout = 0;
-
- return count;
-}
-
-static struct class_attribute firmware_class_attrs[] = {
- __ATTR(timeout, S_IWUSR | S_IRUGO,
- firmware_timeout_show, firmware_timeout_store),
- __ATTR_NULL
-};
-
-static void fw_dev_release(struct device *dev)
-{
- struct firmware_priv *fw_priv = to_firmware_priv(dev);
- int i;
-
- for (i = 0; i < fw_priv->nr_pages; i++)
- __free_page(fw_priv->pages[i]);
- kfree(fw_priv->pages);
- kfree(fw_priv);
-
- module_put(THIS_MODULE);
-}
-
-static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- struct firmware_priv *fw_priv = to_firmware_priv(dev);
-
- if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->fw_id))
- return -ENOMEM;
- if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
- return -ENOMEM;
- if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
- return -ENOMEM;
-
- return 0;
-}
-
-static struct class firmware_class = {
- .name = "firmware",
- .class_attrs = firmware_class_attrs,
- .dev_uevent = firmware_uevent,
- .dev_release = fw_dev_release,
-};
-
-static ssize_t firmware_loading_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct firmware_priv *fw_priv = to_firmware_priv(dev);
- int loading = test_bit(FW_STATUS_LOADING, &fw_priv->status);
-
- return sprintf(buf, "%d\n", loading);
-}
-
-static void firmware_free_data(const struct firmware *fw)
-{
- int i;
- vunmap(fw->data);
- if (fw->pages) {
- for (i = 0; i < PFN_UP(fw->size); i++)
- __free_page(fw->pages[i]);
- kfree(fw->pages);
- }
-}
-
-/* Some architectures don't have PAGE_KERNEL_RO */
-#ifndef PAGE_KERNEL_RO
-#define PAGE_KERNEL_RO PAGE_KERNEL
-#endif
-/**
- * firmware_loading_store - set value in the 'loading' control file
- * @dev: device pointer
- * @attr: device attribute pointer
- * @buf: buffer to scan for loading control value
- * @count: number of bytes in @buf
- *
- * The relevant values are:
- *
- * 1: Start a load, discarding any previous partial load.
- * 0: Conclude the load and hand the data to the driver code.
- * -1: Conclude the load with an error and discard any written data.
- **/
-static ssize_t firmware_loading_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct firmware_priv *fw_priv = to_firmware_priv(dev);
- int loading = simple_strtol(buf, NULL, 10);
- int i;
-
- mutex_lock(&fw_lock);
-
- if (!fw_priv->fw)
- goto out;
-
- switch (loading) {
- case 1:
- firmware_free_data(fw_priv->fw);
- memset(fw_priv->fw, 0, sizeof(struct firmware));
- /* If the pages are not owned by 'struct firmware' */
- for (i = 0; i < fw_priv->nr_pages; i++)
- __free_page(fw_priv->pages[i]);
- kfree(fw_priv->pages);
- fw_priv->pages = NULL;
- fw_priv->page_array_size = 0;
- fw_priv->nr_pages = 0;
- set_bit(FW_STATUS_LOADING, &fw_priv->status);
- break;
- case 0:
- if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
- vunmap(fw_priv->fw->data);
- fw_priv->fw->data = vmap(fw_priv->pages,
- fw_priv->nr_pages,
- 0, PAGE_KERNEL_RO);
- if (!fw_priv->fw->data) {
- dev_err(dev, "%s: vmap() failed\n", __func__);
- goto err;
- }
- /* Pages are now owned by 'struct firmware' */
- fw_priv->fw->pages = fw_priv->pages;
- fw_priv->pages = NULL;
-
- fw_priv->page_array_size = 0;
- fw_priv->nr_pages = 0;
- complete(&fw_priv->completion);
- clear_bit(FW_STATUS_LOADING, &fw_priv->status);
- break;
- }
- /* fallthrough */
- default:
- dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
- /* fallthrough */
- case -1:
- err:
- fw_load_abort(fw_priv);
- break;
- }
-out:
- mutex_unlock(&fw_lock);
- return count;
-}
-
-static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
-
-static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buffer, loff_t offset, size_t count)
-{
- struct device *dev = to_dev(kobj);
- struct firmware_priv *fw_priv = to_firmware_priv(dev);
- struct firmware *fw;
- ssize_t ret_count;
-
- mutex_lock(&fw_lock);
- fw = fw_priv->fw;
- if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) {
- ret_count = -ENODEV;
- goto out;
- }
- if (offset > fw->size) {
- ret_count = 0;
- goto out;
- }
- if (count > fw->size - offset)
- count = fw->size - offset;
-
- ret_count = count;
-
- while (count) {
- void *page_data;
- int page_nr = offset >> PAGE_SHIFT;
- int page_ofs = offset & (PAGE_SIZE-1);
- int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
-
- page_data = kmap(fw_priv->pages[page_nr]);
-
- memcpy(buffer, page_data + page_ofs, page_cnt);
-
- kunmap(fw_priv->pages[page_nr]);
- buffer += page_cnt;
- offset += page_cnt;
- count -= page_cnt;
- }
-out:
- mutex_unlock(&fw_lock);
- return ret_count;
-}
-
-static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
-{
- int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT;
-
- /* If the array of pages is too small, grow it... */
- if (fw_priv->page_array_size < pages_needed) {
- int new_array_size = max(pages_needed,
- fw_priv->page_array_size * 2);
- struct page **new_pages;
-
- new_pages = kmalloc(new_array_size * sizeof(void *),
- GFP_KERNEL);
- if (!new_pages) {
- fw_load_abort(fw_priv);
- return -ENOMEM;
- }
- memcpy(new_pages, fw_priv->pages,
- fw_priv->page_array_size * sizeof(void *));
- memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
- (new_array_size - fw_priv->page_array_size));
- kfree(fw_priv->pages);
- fw_priv->pages = new_pages;
- fw_priv->page_array_size = new_array_size;
- }
-
- while (fw_priv->nr_pages < pages_needed) {
- fw_priv->pages[fw_priv->nr_pages] =
- alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
-
- if (!fw_priv->pages[fw_priv->nr_pages]) {
- fw_load_abort(fw_priv);
- return -ENOMEM;
- }
- fw_priv->nr_pages++;
- }
- return 0;
-}
-
-/**
- * firmware_data_write - write method for firmware
- * @filp: open sysfs file
- * @kobj: kobject for the device
- * @bin_attr: bin_attr structure
- * @buffer: buffer being written
- * @offset: buffer offset for write in total data store area
- * @count: buffer size
- *
- * Data written to the 'data' attribute will be later handed to
- * the driver as a firmware image.
- **/
-static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buffer, loff_t offset, size_t count)
-{
- struct device *dev = to_dev(kobj);
- struct firmware_priv *fw_priv = to_firmware_priv(dev);
- struct firmware *fw;
- ssize_t retval;
-
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- mutex_lock(&fw_lock);
- fw = fw_priv->fw;
- if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) {
- retval = -ENODEV;
- goto out;
- }
- retval = fw_realloc_buffer(fw_priv, offset + count);
- if (retval)
- goto out;
-
- retval = count;
-
- while (count) {
- void *page_data;
- int page_nr = offset >> PAGE_SHIFT;
- int page_ofs = offset & (PAGE_SIZE - 1);
- int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
-
- page_data = kmap(fw_priv->pages[page_nr]);
-
- memcpy(page_data + page_ofs, buffer, page_cnt);
-
- kunmap(fw_priv->pages[page_nr]);
- buffer += page_cnt;
- offset += page_cnt;
- count -= page_cnt;
- }
-
- fw->size = max_t(size_t, offset, fw->size);
-out:
- mutex_unlock(&fw_lock);
- return retval;
-}
-
-static struct bin_attribute firmware_attr_data = {
- .attr = { .name = "data", .mode = 0644 },
- .size = 0,
- .read = firmware_data_read,
- .write = firmware_data_write,
-};
-
-static void firmware_class_timeout(u_long data)
-{
- struct firmware_priv *fw_priv = (struct firmware_priv *) data;
-
- fw_load_abort(fw_priv);
-}
-
-static struct firmware_priv *
-fw_create_instance(struct firmware *firmware, const char *fw_name,
- struct device *device, bool uevent, bool nowait)
-{
- struct firmware_priv *fw_priv;
- struct device *f_dev;
-
- fw_priv = kzalloc(sizeof(*fw_priv) + strlen(fw_name) + 1 , GFP_KERNEL);
- if (!fw_priv) {
- dev_err(device, "%s: kmalloc failed\n", __func__);
- return ERR_PTR(-ENOMEM);
- }
-
- fw_priv->fw = firmware;
- fw_priv->nowait = nowait;
- strcpy(fw_priv->fw_id, fw_name);
- init_completion(&fw_priv->completion);
- setup_timer(&fw_priv->timeout,
- firmware_class_timeout, (u_long) fw_priv);
-
- f_dev = &fw_priv->dev;
-
- device_initialize(f_dev);
- dev_set_name(f_dev, "%s", dev_name(device));
- f_dev->parent = device;
- f_dev->class = &firmware_class;
-
- return fw_priv;
-}
-
-static struct firmware_priv *
-_request_firmware_prepare(const struct firmware **firmware_p, const char *name,
- struct device *device, bool uevent, bool nowait)
-{
- struct firmware *firmware;
- struct firmware_priv *fw_priv;
-
- if (!firmware_p)
- return ERR_PTR(-EINVAL);
-
- *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
- if (!firmware) {
- dev_err(device, "%s: kmalloc(struct firmware) failed\n",
- __func__);
- return ERR_PTR(-ENOMEM);
- }
-
- if (fw_get_builtin_firmware(firmware, name)) {
- dev_dbg(device, "firmware: using built-in firmware %s\n", name);
- return NULL;
- }
-
- fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
- if (IS_ERR(fw_priv)) {
- release_firmware(firmware);
- *firmware_p = NULL;
- }
- return fw_priv;
-}
-
-static void _request_firmware_cleanup(const struct firmware **firmware_p)
-{
- release_firmware(*firmware_p);
- *firmware_p = NULL;
-}
-
-static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
- long timeout)
-{
- int retval = 0;
- struct device *f_dev = &fw_priv->dev;
-
- dev_set_uevent_suppress(f_dev, true);
-
- /* Need to pin this module until class device is destroyed */
- __module_get(THIS_MODULE);
-
- retval = device_add(f_dev);
- if (retval) {
- dev_err(f_dev, "%s: device_register failed\n", __func__);
- goto err_put_dev;
- }
-
- retval = device_create_bin_file(f_dev, &firmware_attr_data);
- if (retval) {
- dev_err(f_dev, "%s: sysfs_create_bin_file failed\n", __func__);
- goto err_del_dev;
- }
-
- retval = device_create_file(f_dev, &dev_attr_loading);
- if (retval) {
- dev_err(f_dev, "%s: device_create_file failed\n", __func__);
- goto err_del_bin_attr;
- }
-
- if (uevent) {
- dev_set_uevent_suppress(f_dev, false);
- dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_id);
- if (timeout != MAX_SCHEDULE_TIMEOUT)
- mod_timer(&fw_priv->timeout,
- round_jiffies_up(jiffies + timeout));
-
- kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
- }
-
- wait_for_completion(&fw_priv->completion);
-
- set_bit(FW_STATUS_DONE, &fw_priv->status);
- del_timer_sync(&fw_priv->timeout);
-
- mutex_lock(&fw_lock);
- if (!fw_priv->fw->size || test_bit(FW_STATUS_ABORT, &fw_priv->status))
- retval = -ENOENT;
- fw_priv->fw = NULL;
- mutex_unlock(&fw_lock);
-
- device_remove_file(f_dev, &dev_attr_loading);
-err_del_bin_attr:
- device_remove_bin_file(f_dev, &firmware_attr_data);
-err_del_dev:
- device_del(f_dev);
-err_put_dev:
- put_device(f_dev);
- return retval;
-}
-
-/**
- * request_firmware: - send firmware request and wait for it
- * @firmware_p: pointer to firmware image
- * @name: name of firmware file
- * @device: device for which firmware is being loaded
- *
- * @firmware_p will be used to return a firmware image by the name
- * of @name for device @device.
- *
- * Should be called from user context where sleeping is allowed.
- *
- * @name will be used as $FIRMWARE in the uevent environment and
- * should be distinctive enough not to be confused with any other
- * firmware image for this or any other device.
- **/
-int
-request_firmware(const struct firmware **firmware_p, const char *name,
- struct device *device)
-{
- struct firmware_priv *fw_priv;
- int ret;
-
- fw_priv = _request_firmware_prepare(firmware_p, name, device, true,
- false);
- if (IS_ERR_OR_NULL(fw_priv))
- return PTR_RET(fw_priv);
-
- ret = usermodehelper_read_trylock();
- if (WARN_ON(ret)) {
- dev_err(device, "firmware: %s will not be loaded\n", name);
- } else {
- ret = _request_firmware_load(fw_priv, true,
- firmware_loading_timeout());
- usermodehelper_read_unlock();
- }
- if (ret)
- _request_firmware_cleanup(firmware_p);
-
- return ret;
-}
-
-/**
- * release_firmware: - release the resource associated with a firmware image
- * @fw: firmware resource to release
- **/
-void release_firmware(const struct firmware *fw)
-{
- if (fw) {
- if (!fw_is_builtin_firmware(fw))
- firmware_free_data(fw);
- kfree(fw);
- }
-}
-
-/* Async support */
-struct firmware_work {
- struct work_struct work;
- struct module *module;
- const char *name;
- struct device *device;
- void *context;
- void (*cont)(const struct firmware *fw, void *context);
- bool uevent;
-};
-
-static void request_firmware_work_func(struct work_struct *work)
-{
- struct firmware_work *fw_work;
- const struct firmware *fw;
- struct firmware_priv *fw_priv;
- long timeout;
- int ret;
-
- fw_work = container_of(work, struct firmware_work, work);
- fw_priv = _request_firmware_prepare(&fw, fw_work->name, fw_work->device,
- fw_work->uevent, true);
- if (IS_ERR_OR_NULL(fw_priv)) {
- ret = PTR_RET(fw_priv);
- goto out;
- }
-
- timeout = usermodehelper_read_lock_wait(firmware_loading_timeout());
- if (timeout) {
- ret = _request_firmware_load(fw_priv, fw_work->uevent, timeout);
- usermodehelper_read_unlock();
- } else {
- dev_dbg(fw_work->device, "firmware: %s loading timed out\n",
- fw_work->name);
- ret = -EAGAIN;
- }
- if (ret)
- _request_firmware_cleanup(&fw);
-
- out:
- fw_work->cont(fw, fw_work->context);
-
- module_put(fw_work->module);
- kfree(fw_work);
-}
-
-/**
- * request_firmware_nowait - asynchronous version of request_firmware
- * @module: module requesting the firmware
- * @uevent: sends uevent to copy the firmware image if this flag
- * is non-zero else the firmware copy must be done manually.
- * @name: name of firmware file
- * @device: device for which firmware is being loaded
- * @gfp: allocation flags
- * @context: will be passed over to @cont, and
- * @fw may be %NULL if firmware request fails.
- * @cont: function will be called asynchronously when the firmware
- * request is over.
- *
- * Asynchronous variant of request_firmware() for user contexts where
- * it is not possible to sleep for long time. It can't be called
- * in atomic contexts.
- **/
-int
-request_firmware_nowait(
- struct module *module, bool uevent,
- const char *name, struct device *device, gfp_t gfp, void *context,
- void (*cont)(const struct firmware *fw, void *context))
-{
- struct firmware_work *fw_work;
-
- fw_work = kzalloc(sizeof (struct firmware_work), gfp);
- if (!fw_work)
- return -ENOMEM;
-
- fw_work->module = module;
- fw_work->name = name;
- fw_work->device = device;
- fw_work->context = context;
- fw_work->cont = cont;
- fw_work->uevent = uevent;
-
- if (!try_module_get(module)) {
- kfree(fw_work);
- return -EFAULT;
- }
-
- INIT_WORK(&fw_work->work, request_firmware_work_func);
- schedule_work(&fw_work->work);
- return 0;
-}
-
-static int __init firmware_class_init(void)
-{
- return class_register(&firmware_class);
-}
-
-static void __exit firmware_class_exit(void)
-{
- class_unregister(&firmware_class);
-}
-
-fs_initcall(firmware_class_init);
-module_exit(firmware_class_exit);
-
-EXPORT_SYMBOL(release_firmware);
-EXPORT_SYMBOL(request_firmware);
-EXPORT_SYMBOL(request_firmware_nowait);
diff --git a/ANDROID_3.4.5/drivers/base/hypervisor.c b/ANDROID_3.4.5/drivers/base/hypervisor.c
deleted file mode 100644
index 4f8b741f..00000000
--- a/ANDROID_3.4.5/drivers/base/hypervisor.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * hypervisor.c - /sys/hypervisor subsystem.
- *
- * Copyright (C) IBM Corp. 2006
- * Copyright (C) 2007 Greg Kroah-Hartman <gregkh@suse.de>
- * Copyright (C) 2007 Novell Inc.
- *
- * This file is released under the GPLv2
- */
-
-#include <linux/kobject.h>
-#include <linux/device.h>
-#include <linux/export.h>
-#include "base.h"
-
-struct kobject *hypervisor_kobj;
-EXPORT_SYMBOL_GPL(hypervisor_kobj);
-
-int __init hypervisor_init(void)
-{
- hypervisor_kobj = kobject_create_and_add("hypervisor", NULL);
- if (!hypervisor_kobj)
- return -ENOMEM;
- return 0;
-}
diff --git a/ANDROID_3.4.5/drivers/base/init.c b/ANDROID_3.4.5/drivers/base/init.c
deleted file mode 100644
index c16f0b80..00000000
--- a/ANDROID_3.4.5/drivers/base/init.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2002-3 Patrick Mochel
- * Copyright (c) 2002-3 Open Source Development Labs
- *
- * This file is released under the GPLv2
- */
-
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/memory.h>
-
-#include "base.h"
-
-/**
- * driver_init - initialize driver model.
- *
- * Call the driver model init functions to initialize their
- * subsystems. Called early from init/main.c.
- */
-void __init driver_init(void)
-{
- /* These are the core pieces */
- devtmpfs_init();
- devices_init();
- buses_init();
- classes_init();
- firmware_init();
- hypervisor_init();
-
- /* These are also core pieces, but must come after the
- * core core pieces.
- */
- platform_bus_init();
- cpu_dev_init();
- memory_dev_init();
-}
diff --git a/ANDROID_3.4.5/drivers/base/isa.c b/ANDROID_3.4.5/drivers/base/isa.c
deleted file mode 100644
index 91dba65d..00000000
--- a/ANDROID_3.4.5/drivers/base/isa.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * ISA bus.
- */
-
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/isa.h>
-
-static struct device isa_bus = {
- .init_name = "isa"
-};
-
-struct isa_dev {
- struct device dev;
- struct device *next;
- unsigned int id;
-};
-
-#define to_isa_dev(x) container_of((x), struct isa_dev, dev)
-
-static int isa_bus_match(struct device *dev, struct device_driver *driver)
-{
- struct isa_driver *isa_driver = to_isa_driver(driver);
-
- if (dev->platform_data == isa_driver) {
- if (!isa_driver->match ||
- isa_driver->match(dev, to_isa_dev(dev)->id))
- return 1;
- dev->platform_data = NULL;
- }
- return 0;
-}
-
-static int isa_bus_probe(struct device *dev)
-{
- struct isa_driver *isa_driver = dev->platform_data;
-
- if (isa_driver->probe)
- return isa_driver->probe(dev, to_isa_dev(dev)->id);
-
- return 0;
-}
-
-static int isa_bus_remove(struct device *dev)
-{
- struct isa_driver *isa_driver = dev->platform_data;
-
- if (isa_driver->remove)
- return isa_driver->remove(dev, to_isa_dev(dev)->id);
-
- return 0;
-}
-
-static void isa_bus_shutdown(struct device *dev)
-{
- struct isa_driver *isa_driver = dev->platform_data;
-
- if (isa_driver->shutdown)
- isa_driver->shutdown(dev, to_isa_dev(dev)->id);
-}
-
-static int isa_bus_suspend(struct device *dev, pm_message_t state)
-{
- struct isa_driver *isa_driver = dev->platform_data;
-
- if (isa_driver->suspend)
- return isa_driver->suspend(dev, to_isa_dev(dev)->id, state);
-
- return 0;
-}
-
-static int isa_bus_resume(struct device *dev)
-{
- struct isa_driver *isa_driver = dev->platform_data;
-
- if (isa_driver->resume)
- return isa_driver->resume(dev, to_isa_dev(dev)->id);
-
- return 0;
-}
-
-static struct bus_type isa_bus_type = {
- .name = "isa",
- .match = isa_bus_match,
- .probe = isa_bus_probe,
- .remove = isa_bus_remove,
- .shutdown = isa_bus_shutdown,
- .suspend = isa_bus_suspend,
- .resume = isa_bus_resume
-};
-
-static void isa_dev_release(struct device *dev)
-{
- kfree(to_isa_dev(dev));
-}
-
-void isa_unregister_driver(struct isa_driver *isa_driver)
-{
- struct device *dev = isa_driver->devices;
-
- while (dev) {
- struct device *tmp = to_isa_dev(dev)->next;
- device_unregister(dev);
- dev = tmp;
- }
- driver_unregister(&isa_driver->driver);
-}
-EXPORT_SYMBOL_GPL(isa_unregister_driver);
-
-int isa_register_driver(struct isa_driver *isa_driver, unsigned int ndev)
-{
- int error;
- unsigned int id;
-
- isa_driver->driver.bus = &isa_bus_type;
- isa_driver->devices = NULL;
-
- error = driver_register(&isa_driver->driver);
- if (error)
- return error;
-
- for (id = 0; id < ndev; id++) {
- struct isa_dev *isa_dev;
-
- isa_dev = kzalloc(sizeof *isa_dev, GFP_KERNEL);
- if (!isa_dev) {
- error = -ENOMEM;
- break;
- }
-
- isa_dev->dev.parent = &isa_bus;
- isa_dev->dev.bus = &isa_bus_type;
-
- dev_set_name(&isa_dev->dev, "%s.%u",
- isa_driver->driver.name, id);
- isa_dev->dev.platform_data = isa_driver;
- isa_dev->dev.release = isa_dev_release;
- isa_dev->id = id;
-
- isa_dev->dev.coherent_dma_mask = DMA_BIT_MASK(24);
- isa_dev->dev.dma_mask = &isa_dev->dev.coherent_dma_mask;
-
- error = device_register(&isa_dev->dev);
- if (error) {
- put_device(&isa_dev->dev);
- break;
- }
-
- if (isa_dev->dev.platform_data) {
- isa_dev->next = isa_driver->devices;
- isa_driver->devices = &isa_dev->dev;
- } else
- device_unregister(&isa_dev->dev);
- }
-
- if (!error && !isa_driver->devices)
- error = -ENODEV;
-
- if (error)
- isa_unregister_driver(isa_driver);
-
- return error;
-}
-EXPORT_SYMBOL_GPL(isa_register_driver);
-
-static int __init isa_bus_init(void)
-{
- int error;
-
- error = bus_register(&isa_bus_type);
- if (!error) {
- error = device_register(&isa_bus);
- if (error)
- bus_unregister(&isa_bus_type);
- }
- return error;
-}
-
-device_initcall(isa_bus_init);
diff --git a/ANDROID_3.4.5/drivers/base/map.c b/ANDROID_3.4.5/drivers/base/map.c
deleted file mode 100644
index e87017f3..00000000
--- a/ANDROID_3.4.5/drivers/base/map.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * linux/drivers/base/map.c
- *
- * (C) Copyright Al Viro 2002,2003
- * Released under GPL v2.
- *
- * NOTE: data structure needs to be changed. It works, but for large dev_t
- * it will be too slow. It is isolated, though, so these changes will be
- * local to that file.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/kdev_t.h>
-#include <linux/kobject.h>
-#include <linux/kobj_map.h>
-
-struct kobj_map {
- struct probe {
- struct probe *next;
- dev_t dev;
- unsigned long range;
- struct module *owner;
- kobj_probe_t *get;
- int (*lock)(dev_t, void *);
- void *data;
- } *probes[255];
- struct mutex *lock;
-};
-
-int kobj_map(struct kobj_map *domain, dev_t dev, unsigned long range,
- struct module *module, kobj_probe_t *probe,
- int (*lock)(dev_t, void *), void *data)
-{
- unsigned n = MAJOR(dev + range - 1) - MAJOR(dev) + 1;
- unsigned index = MAJOR(dev);
- unsigned i;
- struct probe *p;
-
- if (n > 255)
- n = 255;
-
- p = kmalloc(sizeof(struct probe) * n, GFP_KERNEL);
-
- if (p == NULL)
- return -ENOMEM;
-
- for (i = 0; i < n; i++, p++) {
- p->owner = module;
- p->get = probe;
- p->lock = lock;
- p->dev = dev;
- p->range = range;
- p->data = data;
- }
- mutex_lock(domain->lock);
- for (i = 0, p -= n; i < n; i++, p++, index++) {
- struct probe **s = &domain->probes[index % 255];
- while (*s && (*s)->range < range)
- s = &(*s)->next;
- p->next = *s;
- *s = p;
- }
- mutex_unlock(domain->lock);
- return 0;
-}
-
-void kobj_unmap(struct kobj_map *domain, dev_t dev, unsigned long range)
-{
- unsigned n = MAJOR(dev + range - 1) - MAJOR(dev) + 1;
- unsigned index = MAJOR(dev);
- unsigned i;
- struct probe *found = NULL;
-
- if (n > 255)
- n = 255;
-
- mutex_lock(domain->lock);
- for (i = 0; i < n; i++, index++) {
- struct probe **s;
- for (s = &domain->probes[index % 255]; *s; s = &(*s)->next) {
- struct probe *p = *s;
- if (p->dev == dev && p->range == range) {
- *s = p->next;
- if (!found)
- found = p;
- break;
- }
- }
- }
- mutex_unlock(domain->lock);
- kfree(found);
-}
-
-struct kobject *kobj_lookup(struct kobj_map *domain, dev_t dev, int *index)
-{
- struct kobject *kobj;
- struct probe *p;
- unsigned long best = ~0UL;
-
-retry:
- mutex_lock(domain->lock);
- for (p = domain->probes[MAJOR(dev) % 255]; p; p = p->next) {
- struct kobject *(*probe)(dev_t, int *, void *);
- struct module *owner;
- void *data;
-
- if (p->dev > dev || p->dev + p->range - 1 < dev)
- continue;
- if (p->range - 1 >= best)
- break;
- if (!try_module_get(p->owner))
- continue;
- owner = p->owner;
- data = p->data;
- probe = p->get;
- best = p->range - 1;
- *index = dev - p->dev;
- if (p->lock && p->lock(dev, data) < 0) {
- module_put(owner);
- continue;
- }
- mutex_unlock(domain->lock);
- kobj = probe(dev, index, data);
- /* Currently ->owner protects _only_ ->probe() itself. */
- module_put(owner);
- if (kobj)
- return kobj;
- goto retry;
- }
- mutex_unlock(domain->lock);
- return NULL;
-}
-
-struct kobj_map *kobj_map_init(kobj_probe_t *base_probe, struct mutex *lock)
-{
- struct kobj_map *p = kmalloc(sizeof(struct kobj_map), GFP_KERNEL);
- struct probe *base = kzalloc(sizeof(*base), GFP_KERNEL);
- int i;
-
- if ((p == NULL) || (base == NULL)) {
- kfree(p);
- kfree(base);
- return NULL;
- }
-
- base->dev = 1;
- base->range = ~0;
- base->get = base_probe;
- for (i = 0; i < 255; i++)
- p->probes[i] = base;
- p->lock = lock;
- return p;
-}
diff --git a/ANDROID_3.4.5/drivers/base/memory.c b/ANDROID_3.4.5/drivers/base/memory.c
deleted file mode 100644
index 7dda4f79..00000000
--- a/ANDROID_3.4.5/drivers/base/memory.c
+++ /dev/null
@@ -1,705 +0,0 @@
-/*
- * Memory subsystem support
- *
- * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
- * Dave Hansen <haveblue@us.ibm.com>
- *
- * This file provides the necessary infrastructure to represent
- * a SPARSEMEM-memory-model system's physical memory in /sysfs.
- * All arch-independent code that assumes MEMORY_HOTPLUG requires
- * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/topology.h>
-#include <linux/capability.h>
-#include <linux/device.h>
-#include <linux/memory.h>
-#include <linux/kobject.h>
-#include <linux/memory_hotplug.h>
-#include <linux/mm.h>
-#include <linux/mutex.h>
-#include <linux/stat.h>
-#include <linux/slab.h>
-
-#include <linux/atomic.h>
-#include <asm/uaccess.h>
-
-static DEFINE_MUTEX(mem_sysfs_mutex);
-
-#define MEMORY_CLASS_NAME "memory"
-
-static int sections_per_block;
-
-static inline int base_memory_block_id(int section_nr)
-{
- return section_nr / sections_per_block;
-}
-
-static struct bus_type memory_subsys = {
- .name = MEMORY_CLASS_NAME,
- .dev_name = MEMORY_CLASS_NAME,
-};
-
-static BLOCKING_NOTIFIER_HEAD(memory_chain);
-
-int register_memory_notifier(struct notifier_block *nb)
-{
- return blocking_notifier_chain_register(&memory_chain, nb);
-}
-EXPORT_SYMBOL(register_memory_notifier);
-
-void unregister_memory_notifier(struct notifier_block *nb)
-{
- blocking_notifier_chain_unregister(&memory_chain, nb);
-}
-EXPORT_SYMBOL(unregister_memory_notifier);
-
-static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);
-
-int register_memory_isolate_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_register(&memory_isolate_chain, nb);
-}
-EXPORT_SYMBOL(register_memory_isolate_notifier);
-
-void unregister_memory_isolate_notifier(struct notifier_block *nb)
-{
- atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
-}
-EXPORT_SYMBOL(unregister_memory_isolate_notifier);
-
-/*
- * register_memory - Setup a sysfs device for a memory block
- */
-static
-int register_memory(struct memory_block *memory)
-{
- int error;
-
- memory->dev.bus = &memory_subsys;
- memory->dev.id = memory->start_section_nr / sections_per_block;
-
- error = device_register(&memory->dev);
- return error;
-}
-
-static void
-unregister_memory(struct memory_block *memory)
-{
- BUG_ON(memory->dev.bus != &memory_subsys);
-
- /* drop the ref. we got in remove_memory_block() */
- kobject_put(&memory->dev.kobj);
- device_unregister(&memory->dev);
-}
-
-unsigned long __weak memory_block_size_bytes(void)
-{
- return MIN_MEMORY_BLOCK_SIZE;
-}
-
-static unsigned long get_memory_block_size(void)
-{
- unsigned long block_sz;
-
- block_sz = memory_block_size_bytes();
-
- /* Validate blk_sz is a power of 2 and not less than section size */
- if ((block_sz & (block_sz - 1)) || (block_sz < MIN_MEMORY_BLOCK_SIZE)) {
- WARN_ON(1);
- block_sz = MIN_MEMORY_BLOCK_SIZE;
- }
-
- return block_sz;
-}
-
-/*
- * use this as the physical section index that this memsection
- * uses.
- */
-
-static ssize_t show_mem_start_phys_index(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct memory_block *mem =
- container_of(dev, struct memory_block, dev);
- unsigned long phys_index;
-
- phys_index = mem->start_section_nr / sections_per_block;
- return sprintf(buf, "%08lx\n", phys_index);
-}
-
-static ssize_t show_mem_end_phys_index(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct memory_block *mem =
- container_of(dev, struct memory_block, dev);
- unsigned long phys_index;
-
- phys_index = mem->end_section_nr / sections_per_block;
- return sprintf(buf, "%08lx\n", phys_index);
-}
-
-/*
- * Show whether the section of memory is likely to be hot-removable
- */
-static ssize_t show_mem_removable(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long i, pfn;
- int ret = 1;
- struct memory_block *mem =
- container_of(dev, struct memory_block, dev);
-
- for (i = 0; i < sections_per_block; i++) {
- pfn = section_nr_to_pfn(mem->start_section_nr + i);
- ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
- }
-
- return sprintf(buf, "%d\n", ret);
-}
-
-/*
- * online, offline, going offline, etc.
- */
-static ssize_t show_mem_state(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct memory_block *mem =
- container_of(dev, struct memory_block, dev);
- ssize_t len = 0;
-
- /*
- * We can probably put these states in a nice little array
- * so that they're not open-coded
- */
- switch (mem->state) {
- case MEM_ONLINE:
- len = sprintf(buf, "online\n");
- break;
- case MEM_OFFLINE:
- len = sprintf(buf, "offline\n");
- break;
- case MEM_GOING_OFFLINE:
- len = sprintf(buf, "going-offline\n");
- break;
- default:
- len = sprintf(buf, "ERROR-UNKNOWN-%ld\n",
- mem->state);
- WARN_ON(1);
- break;
- }
-
- return len;
-}
-
-int memory_notify(unsigned long val, void *v)
-{
- return blocking_notifier_call_chain(&memory_chain, val, v);
-}
-
-int memory_isolate_notify(unsigned long val, void *v)
-{
- return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
-}
-
-/*
- * The probe routines leave the pages reserved, just as the bootmem code does.
- * Make sure they're still that way.
- */
-static bool pages_correctly_reserved(unsigned long start_pfn,
- unsigned long nr_pages)
-{
- int i, j;
- struct page *page;
- unsigned long pfn = start_pfn;
-
- /*
- * memmap between sections is not contiguous except with
- * SPARSEMEM_VMEMMAP. We lookup the page once per section
- * and assume memmap is contiguous within each section
- */
- for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
- if (WARN_ON_ONCE(!pfn_valid(pfn)))
- return false;
- page = pfn_to_page(pfn);
-
- for (j = 0; j < PAGES_PER_SECTION; j++) {
- if (PageReserved(page + j))
- continue;
-
- printk(KERN_WARNING "section number %ld page number %d "
- "not reserved, was it already online?\n",
- pfn_to_section_nr(pfn), j);
-
- return false;
- }
- }
-
- return true;
-}
-
-/*
- * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
- * OK to have direct references to sparsemem variables in here.
- */
-static int
-memory_block_action(unsigned long phys_index, unsigned long action)
-{
- unsigned long start_pfn, start_paddr;
- unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
- struct page *first_page;
- int ret;
-
- first_page = pfn_to_page(phys_index << PFN_SECTION_SHIFT);
-
- switch (action) {
- case MEM_ONLINE:
- start_pfn = page_to_pfn(first_page);
-
- if (!pages_correctly_reserved(start_pfn, nr_pages))
- return -EBUSY;
-
- ret = online_pages(start_pfn, nr_pages);
- break;
- case MEM_OFFLINE:
- start_paddr = page_to_pfn(first_page) << PAGE_SHIFT;
- ret = remove_memory(start_paddr,
- nr_pages << PAGE_SHIFT);
- break;
- default:
- WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
- "%ld\n", __func__, phys_index, action, action);
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static int memory_block_change_state(struct memory_block *mem,
- unsigned long to_state, unsigned long from_state_req)
-{
- int ret = 0;
-
- mutex_lock(&mem->state_mutex);
-
- if (mem->state != from_state_req) {
- ret = -EINVAL;
- goto out;
- }
-
- if (to_state == MEM_OFFLINE)
- mem->state = MEM_GOING_OFFLINE;
-
- ret = memory_block_action(mem->start_section_nr, to_state);
-
- if (ret) {
- mem->state = from_state_req;
- goto out;
- }
-
- mem->state = to_state;
- switch (mem->state) {
- case MEM_OFFLINE:
- kobject_uevent(&mem->dev.kobj, KOBJ_OFFLINE);
- break;
- case MEM_ONLINE:
- kobject_uevent(&mem->dev.kobj, KOBJ_ONLINE);
- break;
- default:
- break;
- }
-out:
- mutex_unlock(&mem->state_mutex);
- return ret;
-}
-
-static ssize_t
-store_mem_state(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct memory_block *mem;
- int ret = -EINVAL;
-
- mem = container_of(dev, struct memory_block, dev);
-
- if (!strncmp(buf, "online", min((int)count, 6)))
- ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
- else if(!strncmp(buf, "offline", min((int)count, 7)))
- ret = memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
-
- if (ret)
- return ret;
- return count;
-}
-
-/*
- * phys_device is a bad name for this. What I really want
- * is a way to differentiate between memory ranges that
- * are part of physical devices that constitute
- * a complete removable unit or fru.
- * i.e. do these ranges belong to the same physical device,
- * s.t. if I offline all of these sections I can then
- * remove the physical device?
- */
-static ssize_t show_phys_device(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct memory_block *mem =
- container_of(dev, struct memory_block, dev);
- return sprintf(buf, "%d\n", mem->phys_device);
-}
-
-static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
-static DEVICE_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL);
-static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);
-static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);
-static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL);
-
-#define mem_create_simple_file(mem, attr_name) \
- device_create_file(&mem->dev, &dev_attr_##attr_name)
-#define mem_remove_simple_file(mem, attr_name) \
- device_remove_file(&mem->dev, &dev_attr_##attr_name)
-
-/*
- * Block size attribute stuff
- */
-static ssize_t
-print_block_size(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%lx\n", get_memory_block_size());
-}
-
-static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL);
-
-static int block_size_init(void)
-{
- return device_create_file(memory_subsys.dev_root,
- &dev_attr_block_size_bytes);
-}
-
-/*
- * Some architectures will have custom drivers to do this, and
- * will not need to do it from userspace. The fake hot-add code
- * as well as ppc64 will do all of their discovery in userspace
- * and will require this interface.
- */
-#ifdef CONFIG_ARCH_MEMORY_PROBE
-static ssize_t
-memory_probe_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- u64 phys_addr;
- int nid;
- int i, ret;
- unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;
-
- phys_addr = simple_strtoull(buf, NULL, 0);
-
- if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
- return -EINVAL;
-
- for (i = 0; i < sections_per_block; i++) {
- nid = memory_add_physaddr_to_nid(phys_addr);
- ret = add_memory(nid, phys_addr,
- PAGES_PER_SECTION << PAGE_SHIFT);
- if (ret)
- goto out;
-
- phys_addr += MIN_MEMORY_BLOCK_SIZE;
- }
-
- ret = count;
-out:
- return ret;
-}
-static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
-
-static int memory_probe_init(void)
-{
- return device_create_file(memory_subsys.dev_root, &dev_attr_probe);
-}
-#else
-static inline int memory_probe_init(void)
-{
- return 0;
-}
-#endif
-
-#ifdef CONFIG_MEMORY_FAILURE
-/*
- * Support for offlining pages of memory
- */
-
-/* Soft offline a page */
-static ssize_t
-store_soft_offline_page(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int ret;
- u64 pfn;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (strict_strtoull(buf, 0, &pfn) < 0)
- return -EINVAL;
- pfn >>= PAGE_SHIFT;
- if (!pfn_valid(pfn))
- return -ENXIO;
- ret = soft_offline_page(pfn_to_page(pfn), 0);
- return ret == 0 ? count : ret;
-}
-
-/* Forcibly offline a page, including killing processes. */
-static ssize_t
-store_hard_offline_page(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int ret;
- u64 pfn;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (strict_strtoull(buf, 0, &pfn) < 0)
- return -EINVAL;
- pfn >>= PAGE_SHIFT;
- ret = memory_failure(pfn, 0, 0);
- return ret ? ret : count;
-}
-
-static DEVICE_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
-static DEVICE_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
-
-static __init int memory_fail_init(void)
-{
- int err;
-
- err = device_create_file(memory_subsys.dev_root,
- &dev_attr_soft_offline_page);
- if (!err)
- err = device_create_file(memory_subsys.dev_root,
- &dev_attr_hard_offline_page);
- return err;
-}
-#else
-static inline int memory_fail_init(void)
-{
- return 0;
-}
-#endif
-
-/*
- * Note that phys_device is optional. It is here to allow for
- * differentiation between which *physical* devices each
- * section belongs to...
- */
-int __weak arch_get_memory_phys_device(unsigned long start_pfn)
-{
- return 0;
-}
-
-/*
- * A reference for the returned object is held and the reference for the
- * hinted object is released.
- */
-struct memory_block *find_memory_block_hinted(struct mem_section *section,
- struct memory_block *hint)
-{
- int block_id = base_memory_block_id(__section_nr(section));
- struct device *hintdev = hint ? &hint->dev : NULL;
- struct device *dev;
-
- dev = subsys_find_device_by_id(&memory_subsys, block_id, hintdev);
- if (hint)
- put_device(&hint->dev);
- if (!dev)
- return NULL;
- return container_of(dev, struct memory_block, dev);
-}
-
-/*
- * For now, we have a linear search to go find the appropriate
- * memory_block corresponding to a particular phys_index. If
- * this gets to be a real problem, we can always use a radix
- * tree or something here.
- *
- * This could be made generic for all device subsystems.
- */
-struct memory_block *find_memory_block(struct mem_section *section)
-{
- return find_memory_block_hinted(section, NULL);
-}
-
-static int init_memory_block(struct memory_block **memory,
- struct mem_section *section, unsigned long state)
-{
- struct memory_block *mem;
- unsigned long start_pfn;
- int scn_nr;
- int ret = 0;
-
- mem = kzalloc(sizeof(*mem), GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
-
- scn_nr = __section_nr(section);
- mem->start_section_nr =
- base_memory_block_id(scn_nr) * sections_per_block;
- mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
- mem->state = state;
- mem->section_count++;
- mutex_init(&mem->state_mutex);
- start_pfn = section_nr_to_pfn(mem->start_section_nr);
- mem->phys_device = arch_get_memory_phys_device(start_pfn);
-
- ret = register_memory(mem);
- if (!ret)
- ret = mem_create_simple_file(mem, phys_index);
- if (!ret)
- ret = mem_create_simple_file(mem, end_phys_index);
- if (!ret)
- ret = mem_create_simple_file(mem, state);
- if (!ret)
- ret = mem_create_simple_file(mem, phys_device);
- if (!ret)
- ret = mem_create_simple_file(mem, removable);
-
- *memory = mem;
- return ret;
-}
-
-static int add_memory_section(int nid, struct mem_section *section,
- struct memory_block **mem_p,
- unsigned long state, enum mem_add_context context)
-{
- struct memory_block *mem = NULL;
- int scn_nr = __section_nr(section);
- int ret = 0;
-
- mutex_lock(&mem_sysfs_mutex);
-
- if (context == BOOT) {
- /* same memory block ? */
- if (mem_p && *mem_p)
- if (scn_nr >= (*mem_p)->start_section_nr &&
- scn_nr <= (*mem_p)->end_section_nr) {
- mem = *mem_p;
- kobject_get(&mem->dev.kobj);
- }
- } else
- mem = find_memory_block(section);
-
- if (mem) {
- mem->section_count++;
- kobject_put(&mem->dev.kobj);
- } else {
- ret = init_memory_block(&mem, section, state);
- /* store memory_block pointer for next loop */
- if (!ret && context == BOOT)
- if (mem_p)
- *mem_p = mem;
- }
-
- if (!ret) {
- if (context == HOTPLUG &&
- mem->section_count == sections_per_block)
- ret = register_mem_sect_under_node(mem, nid);
- }
-
- mutex_unlock(&mem_sysfs_mutex);
- return ret;
-}
-
-int remove_memory_block(unsigned long node_id, struct mem_section *section,
- int phys_device)
-{
- struct memory_block *mem;
-
- mutex_lock(&mem_sysfs_mutex);
- mem = find_memory_block(section);
- unregister_mem_sect_under_nodes(mem, __section_nr(section));
-
- mem->section_count--;
- if (mem->section_count == 0) {
- mem_remove_simple_file(mem, phys_index);
- mem_remove_simple_file(mem, end_phys_index);
- mem_remove_simple_file(mem, state);
- mem_remove_simple_file(mem, phys_device);
- mem_remove_simple_file(mem, removable);
- unregister_memory(mem);
- kfree(mem);
- } else
- kobject_put(&mem->dev.kobj);
-
- mutex_unlock(&mem_sysfs_mutex);
- return 0;
-}
-
-/*
- * need an interface for the VM to add new memory regions,
- * but without onlining it.
- */
-int register_new_memory(int nid, struct mem_section *section)
-{
- return add_memory_section(nid, section, NULL, MEM_OFFLINE, HOTPLUG);
-}
-
-int unregister_memory_section(struct mem_section *section)
-{
- if (!present_section(section))
- return -EINVAL;
-
- return remove_memory_block(0, section, 0);
-}
-
-/*
- * Initialize the sysfs support for memory devices...
- */
-int __init memory_dev_init(void)
-{
- unsigned int i;
- int ret;
- int err;
- unsigned long block_sz;
- struct memory_block *mem = NULL;
-
- ret = subsys_system_register(&memory_subsys, NULL);
- if (ret)
- goto out;
-
- block_sz = get_memory_block_size();
- sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
-
- /*
- * Create entries for memory sections that were found
- * during boot and have been initialized
- */
- for (i = 0; i < NR_MEM_SECTIONS; i++) {
- if (!present_section_nr(i))
- continue;
- /* don't need to reuse memory_block if only one per block */
- err = add_memory_section(0, __nr_to_section(i),
- (sections_per_block == 1) ? NULL : &mem,
- MEM_ONLINE,
- BOOT);
- if (!ret)
- ret = err;
- }
-
- err = memory_probe_init();
- if (!ret)
- ret = err;
- err = memory_fail_init();
- if (!ret)
- ret = err;
- err = block_size_init();
- if (!ret)
- ret = err;
-out:
- if (ret)
- printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
- return ret;
-}
diff --git a/ANDROID_3.4.5/drivers/base/module.c b/ANDROID_3.4.5/drivers/base/module.c
deleted file mode 100644
index db930d3e..00000000
--- a/ANDROID_3.4.5/drivers/base/module.c
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * module.c - module sysfs fun for drivers
- *
- * This file is released under the GPLv2
- *
- */
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include "base.h"
-
-static char *make_driver_name(struct device_driver *drv)
-{
- char *driver_name;
-
- driver_name = kasprintf(GFP_KERNEL, "%s:%s", drv->bus->name, drv->name);
- if (!driver_name)
- return NULL;
-
- return driver_name;
-}
-
-static void module_create_drivers_dir(struct module_kobject *mk)
-{
- if (!mk || mk->drivers_dir)
- return;
-
- mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
-}
-
-void module_add_driver(struct module *mod, struct device_driver *drv)
-{
- char *driver_name;
- int no_warn;
- struct module_kobject *mk = NULL;
-
- if (!drv)
- return;
-
- if (mod)
- mk = &mod->mkobj;
- else if (drv->mod_name) {
- struct kobject *mkobj;
-
- /* Lookup built-in module entry in /sys/modules */
- mkobj = kset_find_obj(module_kset, drv->mod_name);
- if (mkobj) {
- mk = container_of(mkobj, struct module_kobject, kobj);
- /* remember our module structure */
- drv->p->mkobj = mk;
- /* kset_find_obj took a reference */
- kobject_put(mkobj);
- }
- }
-
- if (!mk)
- return;
-
- /* Don't check return codes; these calls are idempotent */
- no_warn = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module");
- driver_name = make_driver_name(drv);
- if (driver_name) {
- module_create_drivers_dir(mk);
- no_warn = sysfs_create_link(mk->drivers_dir, &drv->p->kobj,
- driver_name);
- kfree(driver_name);
- }
-}
-
-void module_remove_driver(struct device_driver *drv)
-{
- struct module_kobject *mk = NULL;
- char *driver_name;
-
- if (!drv)
- return;
-
- sysfs_remove_link(&drv->p->kobj, "module");
-
- if (drv->owner)
- mk = &drv->owner->mkobj;
- else if (drv->p->mkobj)
- mk = drv->p->mkobj;
- if (mk && mk->drivers_dir) {
- driver_name = make_driver_name(drv);
- if (driver_name) {
- sysfs_remove_link(mk->drivers_dir, driver_name);
- kfree(driver_name);
- }
- }
-}
diff --git a/ANDROID_3.4.5/drivers/base/node.c b/ANDROID_3.4.5/drivers/base/node.c
deleted file mode 100644
index 90aa2a11..00000000
--- a/ANDROID_3.4.5/drivers/base/node.c
+++ /dev/null
@@ -1,668 +0,0 @@
-/*
- * Basic Node interface support
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/memory.h>
-#include <linux/vmstat.h>
-#include <linux/node.h>
-#include <linux/hugetlb.h>
-#include <linux/compaction.h>
-#include <linux/cpumask.h>
-#include <linux/topology.h>
-#include <linux/nodemask.h>
-#include <linux/cpu.h>
-#include <linux/device.h>
-#include <linux/swap.h>
-#include <linux/slab.h>
-
-static struct bus_type node_subsys = {
- .name = "node",
- .dev_name = "node",
-};
-
-
-static ssize_t node_read_cpumap(struct device *dev, int type, char *buf)
-{
- struct node *node_dev = to_node(dev);
- const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
- int len;
-
- /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
- BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
-
- len = type?
- cpulist_scnprintf(buf, PAGE_SIZE-2, mask) :
- cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
- buf[len++] = '\n';
- buf[len] = '\0';
- return len;
-}
-
-static inline ssize_t node_read_cpumask(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return node_read_cpumap(dev, 0, buf);
-}
-static inline ssize_t node_read_cpulist(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return node_read_cpumap(dev, 1, buf);
-}
-
-static DEVICE_ATTR(cpumap, S_IRUGO, node_read_cpumask, NULL);
-static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
-
-#define K(x) ((x) << (PAGE_SHIFT - 10))
-static ssize_t node_read_meminfo(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int n;
- int nid = dev->id;
- struct sysinfo i;
-
- si_meminfo_node(&i, nid);
- n = sprintf(buf,
- "Node %d MemTotal: %8lu kB\n"
- "Node %d MemFree: %8lu kB\n"
- "Node %d MemUsed: %8lu kB\n"
- "Node %d Active: %8lu kB\n"
- "Node %d Inactive: %8lu kB\n"
- "Node %d Active(anon): %8lu kB\n"
- "Node %d Inactive(anon): %8lu kB\n"
- "Node %d Active(file): %8lu kB\n"
- "Node %d Inactive(file): %8lu kB\n"
- "Node %d Unevictable: %8lu kB\n"
- "Node %d Mlocked: %8lu kB\n",
- nid, K(i.totalram),
- nid, K(i.freeram),
- nid, K(i.totalram - i.freeram),
- nid, K(node_page_state(nid, NR_ACTIVE_ANON) +
- node_page_state(nid, NR_ACTIVE_FILE)),
- nid, K(node_page_state(nid, NR_INACTIVE_ANON) +
- node_page_state(nid, NR_INACTIVE_FILE)),
- nid, K(node_page_state(nid, NR_ACTIVE_ANON)),
- nid, K(node_page_state(nid, NR_INACTIVE_ANON)),
- nid, K(node_page_state(nid, NR_ACTIVE_FILE)),
- nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
- nid, K(node_page_state(nid, NR_UNEVICTABLE)),
- nid, K(node_page_state(nid, NR_MLOCK)));
-
-#ifdef CONFIG_HIGHMEM
- n += sprintf(buf + n,
- "Node %d HighTotal: %8lu kB\n"
- "Node %d HighFree: %8lu kB\n"
- "Node %d LowTotal: %8lu kB\n"
- "Node %d LowFree: %8lu kB\n",
- nid, K(i.totalhigh),
- nid, K(i.freehigh),
- nid, K(i.totalram - i.totalhigh),
- nid, K(i.freeram - i.freehigh));
-#endif
- n += sprintf(buf + n,
- "Node %d Dirty: %8lu kB\n"
- "Node %d Writeback: %8lu kB\n"
- "Node %d FilePages: %8lu kB\n"
- "Node %d Mapped: %8lu kB\n"
- "Node %d AnonPages: %8lu kB\n"
- "Node %d Shmem: %8lu kB\n"
- "Node %d KernelStack: %8lu kB\n"
- "Node %d PageTables: %8lu kB\n"
- "Node %d NFS_Unstable: %8lu kB\n"
- "Node %d Bounce: %8lu kB\n"
- "Node %d WritebackTmp: %8lu kB\n"
- "Node %d Slab: %8lu kB\n"
- "Node %d SReclaimable: %8lu kB\n"
- "Node %d SUnreclaim: %8lu kB\n"
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- "Node %d AnonHugePages: %8lu kB\n"
-#endif
- ,
- nid, K(node_page_state(nid, NR_FILE_DIRTY)),
- nid, K(node_page_state(nid, NR_WRITEBACK)),
- nid, K(node_page_state(nid, NR_FILE_PAGES)),
- nid, K(node_page_state(nid, NR_FILE_MAPPED)),
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- nid, K(node_page_state(nid, NR_ANON_PAGES)
- + node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
- HPAGE_PMD_NR),
-#else
- nid, K(node_page_state(nid, NR_ANON_PAGES)),
-#endif
- nid, K(node_page_state(nid, NR_SHMEM)),
- nid, node_page_state(nid, NR_KERNEL_STACK) *
- THREAD_SIZE / 1024,
- nid, K(node_page_state(nid, NR_PAGETABLE)),
- nid, K(node_page_state(nid, NR_UNSTABLE_NFS)),
- nid, K(node_page_state(nid, NR_BOUNCE)),
- nid, K(node_page_state(nid, NR_WRITEBACK_TEMP)),
- nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
- node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
- nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
- , nid,
- K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
- HPAGE_PMD_NR));
-#else
- nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
-#endif
- n += hugetlb_report_node_meminfo(nid, buf + n);
- return n;
-}
-
-#undef K
-static DEVICE_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
-
-static ssize_t node_read_numastat(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf,
- "numa_hit %lu\n"
- "numa_miss %lu\n"
- "numa_foreign %lu\n"
- "interleave_hit %lu\n"
- "local_node %lu\n"
- "other_node %lu\n",
- node_page_state(dev->id, NUMA_HIT),
- node_page_state(dev->id, NUMA_MISS),
- node_page_state(dev->id, NUMA_FOREIGN),
- node_page_state(dev->id, NUMA_INTERLEAVE_HIT),
- node_page_state(dev->id, NUMA_LOCAL),
- node_page_state(dev->id, NUMA_OTHER));
-}
-static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
-
-static ssize_t node_read_vmstat(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int nid = dev->id;
- int i;
- int n = 0;
-
- for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
- n += sprintf(buf+n, "%s %lu\n", vmstat_text[i],
- node_page_state(nid, i));
-
- return n;
-}
-static DEVICE_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
-
-static ssize_t node_read_distance(struct device *dev,
- struct device_attribute *attr, char * buf)
-{
- int nid = dev->id;
- int len = 0;
- int i;
-
- /*
- * buf is currently PAGE_SIZE in length and each node needs 4 chars
- * at the most (distance + space or newline).
- */
- BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);
-
- for_each_online_node(i)
- len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i));
-
- len += sprintf(buf + len, "\n");
- return len;
-}
-static DEVICE_ATTR(distance, S_IRUGO, node_read_distance, NULL);
-
-#ifdef CONFIG_HUGETLBFS
-/*
- * hugetlbfs per node attributes registration interface:
- * When/if hugetlb[fs] subsystem initializes [sometime after this module],
- * it will register its per node attributes for all online nodes with
- * memory. It will also call register_hugetlbfs_with_node(), below, to
- * register its attribute registration functions with this node driver.
- * Once these hooks have been initialized, the node driver will call into
- * the hugetlb module to [un]register attributes for hot-plugged nodes.
- */
-static node_registration_func_t __hugetlb_register_node;
-static node_registration_func_t __hugetlb_unregister_node;
-
-static inline bool hugetlb_register_node(struct node *node)
-{
- if (__hugetlb_register_node &&
- node_state(node->dev.id, N_HIGH_MEMORY)) {
- __hugetlb_register_node(node);
- return true;
- }
- return false;
-}
-
-static inline void hugetlb_unregister_node(struct node *node)
-{
- if (__hugetlb_unregister_node)
- __hugetlb_unregister_node(node);
-}
-
-void register_hugetlbfs_with_node(node_registration_func_t doregister,
- node_registration_func_t unregister)
-{
- __hugetlb_register_node = doregister;
- __hugetlb_unregister_node = unregister;
-}
-#else
-static inline void hugetlb_register_node(struct node *node) {}
-
-static inline void hugetlb_unregister_node(struct node *node) {}
-#endif
-
-
-/*
- * register_node - Setup a sysfs device for a node.
- * @num - Node number to use when creating the device.
- *
- * Initialize and register the node device.
- */
-int register_node(struct node *node, int num, struct node *parent)
-{
- int error;
-
- node->dev.id = num;
- node->dev.bus = &node_subsys;
- error = device_register(&node->dev);
-
- if (!error){
- device_create_file(&node->dev, &dev_attr_cpumap);
- device_create_file(&node->dev, &dev_attr_cpulist);
- device_create_file(&node->dev, &dev_attr_meminfo);
- device_create_file(&node->dev, &dev_attr_numastat);
- device_create_file(&node->dev, &dev_attr_distance);
- device_create_file(&node->dev, &dev_attr_vmstat);
-
- scan_unevictable_register_node(node);
-
- hugetlb_register_node(node);
-
- compaction_register_node(node);
- }
- return error;
-}
-
-/**
- * unregister_node - unregister a node device
- * @node: node going away
- *
- * Unregisters a node device @node. All the devices on the node must be
- * unregistered before calling this function.
- */
-void unregister_node(struct node *node)
-{
- device_remove_file(&node->dev, &dev_attr_cpumap);
- device_remove_file(&node->dev, &dev_attr_cpulist);
- device_remove_file(&node->dev, &dev_attr_meminfo);
- device_remove_file(&node->dev, &dev_attr_numastat);
- device_remove_file(&node->dev, &dev_attr_distance);
- device_remove_file(&node->dev, &dev_attr_vmstat);
-
- scan_unevictable_unregister_node(node);
- hugetlb_unregister_node(node); /* no-op, if memoryless node */
-
- device_unregister(&node->dev);
-}
-
-struct node node_devices[MAX_NUMNODES];
-
-/*
- * register cpu under node
- */
-int register_cpu_under_node(unsigned int cpu, unsigned int nid)
-{
- int ret;
- struct device *obj;
-
- if (!node_online(nid))
- return 0;
-
- obj = get_cpu_device(cpu);
- if (!obj)
- return 0;
-
- ret = sysfs_create_link(&node_devices[nid].dev.kobj,
- &obj->kobj,
- kobject_name(&obj->kobj));
- if (ret)
- return ret;
-
- return sysfs_create_link(&obj->kobj,
- &node_devices[nid].dev.kobj,
- kobject_name(&node_devices[nid].dev.kobj));
-}
-
-int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
-{
- struct device *obj;
-
- if (!node_online(nid))
- return 0;
-
- obj = get_cpu_device(cpu);
- if (!obj)
- return 0;
-
- sysfs_remove_link(&node_devices[nid].dev.kobj,
- kobject_name(&obj->kobj));
- sysfs_remove_link(&obj->kobj,
- kobject_name(&node_devices[nid].dev.kobj));
-
- return 0;
-}
-
-#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
-#define page_initialized(page) (page->lru.next)
-
-static int get_nid_for_pfn(unsigned long pfn)
-{
- struct page *page;
-
- if (!pfn_valid_within(pfn))
- return -1;
- page = pfn_to_page(pfn);
- if (!page_initialized(page))
- return -1;
- return pfn_to_nid(pfn);
-}
-
-/* register memory section under specified node if it spans that node */
-int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
-{
- int ret;
- unsigned long pfn, sect_start_pfn, sect_end_pfn;
-
- if (!mem_blk)
- return -EFAULT;
- if (!node_online(nid))
- return 0;
-
- sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
- sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
- sect_end_pfn += PAGES_PER_SECTION - 1;
- for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
- int page_nid;
-
- page_nid = get_nid_for_pfn(pfn);
- if (page_nid < 0)
- continue;
- if (page_nid != nid)
- continue;
- ret = sysfs_create_link_nowarn(&node_devices[nid].dev.kobj,
- &mem_blk->dev.kobj,
- kobject_name(&mem_blk->dev.kobj));
- if (ret)
- return ret;
-
- return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
- &node_devices[nid].dev.kobj,
- kobject_name(&node_devices[nid].dev.kobj));
- }
- /* mem section does not span the specified node */
- return 0;
-}
-
-/* unregister memory section under all nodes that it spans */
-int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
- unsigned long phys_index)
-{
- NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
- unsigned long pfn, sect_start_pfn, sect_end_pfn;
-
- if (!mem_blk) {
- NODEMASK_FREE(unlinked_nodes);
- return -EFAULT;
- }
- if (!unlinked_nodes)
- return -ENOMEM;
- nodes_clear(*unlinked_nodes);
-
- sect_start_pfn = section_nr_to_pfn(phys_index);
- sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
- for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
- int nid;
-
- nid = get_nid_for_pfn(pfn);
- if (nid < 0)
- continue;
- if (!node_online(nid))
- continue;
- if (node_test_and_set(nid, *unlinked_nodes))
- continue;
- sysfs_remove_link(&node_devices[nid].dev.kobj,
- kobject_name(&mem_blk->dev.kobj));
- sysfs_remove_link(&mem_blk->dev.kobj,
- kobject_name(&node_devices[nid].dev.kobj));
- }
- NODEMASK_FREE(unlinked_nodes);
- return 0;
-}
-
-static int link_mem_sections(int nid)
-{
- unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn;
- unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_spanned_pages;
- unsigned long pfn;
- struct memory_block *mem_blk = NULL;
- int err = 0;
-
- for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
- unsigned long section_nr = pfn_to_section_nr(pfn);
- struct mem_section *mem_sect;
- int ret;
-
- if (!present_section_nr(section_nr))
- continue;
- mem_sect = __nr_to_section(section_nr);
-
- /* same memblock ? */
- if (mem_blk)
- if ((section_nr >= mem_blk->start_section_nr) &&
- (section_nr <= mem_blk->end_section_nr))
- continue;
-
- mem_blk = find_memory_block_hinted(mem_sect, mem_blk);
-
- ret = register_mem_sect_under_node(mem_blk, nid);
- if (!err)
- err = ret;
-
- /* discard ref obtained in find_memory_block() */
- }
-
- if (mem_blk)
- kobject_put(&mem_blk->dev.kobj);
- return err;
-}
-
-#ifdef CONFIG_HUGETLBFS
-/*
- * Handle per node hstate attribute [un]registration on transistions
- * to/from memoryless state.
- */
-static void node_hugetlb_work(struct work_struct *work)
-{
- struct node *node = container_of(work, struct node, node_work);
-
- /*
- * We only get here when a node transitions to/from memoryless state.
- * We can detect which transition occurred by examining whether the
- * node has memory now. hugetlb_register_node() already check this
- * so we try to register the attributes. If that fails, then the
- * node has transitioned to memoryless, try to unregister the
- * attributes.
- */
- if (!hugetlb_register_node(node))
- hugetlb_unregister_node(node);
-}
-
-static void init_node_hugetlb_work(int nid)
-{
- INIT_WORK(&node_devices[nid].node_work, node_hugetlb_work);
-}
-
-static int node_memory_callback(struct notifier_block *self,
- unsigned long action, void *arg)
-{
- struct memory_notify *mnb = arg;
- int nid = mnb->status_change_nid;
-
- switch (action) {
- case MEM_ONLINE:
- case MEM_OFFLINE:
- /*
- * offload per node hstate [un]registration to a work thread
- * when transitioning to/from memoryless state.
- */
- if (nid != NUMA_NO_NODE)
- schedule_work(&node_devices[nid].node_work);
- break;
-
- case MEM_GOING_ONLINE:
- case MEM_GOING_OFFLINE:
- case MEM_CANCEL_ONLINE:
- case MEM_CANCEL_OFFLINE:
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-#endif /* CONFIG_HUGETLBFS */
-#else /* !CONFIG_MEMORY_HOTPLUG_SPARSE */
-
-static int link_mem_sections(int nid) { return 0; }
-#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
-
-#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \
- !defined(CONFIG_HUGETLBFS)
-static inline int node_memory_callback(struct notifier_block *self,
- unsigned long action, void *arg)
-{
- return NOTIFY_OK;
-}
-
-static void init_node_hugetlb_work(int nid) { }
-
-#endif
-
-int register_one_node(int nid)
-{
- int error = 0;
- int cpu;
-
- if (node_online(nid)) {
- int p_node = parent_node(nid);
- struct node *parent = NULL;
-
- if (p_node != nid)
- parent = &node_devices[p_node];
-
- error = register_node(&node_devices[nid], nid, parent);
-
- /* link cpu under this node */
- for_each_present_cpu(cpu) {
- if (cpu_to_node(cpu) == nid)
- register_cpu_under_node(cpu, nid);
- }
-
- /* link memory sections under this node */
- error = link_mem_sections(nid);
-
- /* initialize work queue for memory hot plug */
- init_node_hugetlb_work(nid);
- }
-
- return error;
-
-}
-
-void unregister_one_node(int nid)
-{
- unregister_node(&node_devices[nid]);
-}
-
-/*
- * node states attributes
- */
-
-static ssize_t print_nodes_state(enum node_states state, char *buf)
-{
- int n;
-
- n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]);
- if (n > 0 && PAGE_SIZE > n + 1) {
- *(buf + n++) = '\n';
- *(buf + n++) = '\0';
- }
- return n;
-}
-
-struct node_attr {
- struct device_attribute attr;
- enum node_states state;
-};
-
-static ssize_t show_node_state(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct node_attr *na = container_of(attr, struct node_attr, attr);
- return print_nodes_state(na->state, buf);
-}
-
-#define _NODE_ATTR(name, state) \
- { __ATTR(name, 0444, show_node_state, NULL), state }
-
-static struct node_attr node_state_attr[] = {
- _NODE_ATTR(possible, N_POSSIBLE),
- _NODE_ATTR(online, N_ONLINE),
- _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
- _NODE_ATTR(has_cpu, N_CPU),
-#ifdef CONFIG_HIGHMEM
- _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
-#endif
-};
-
-static struct attribute *node_state_attrs[] = {
- &node_state_attr[0].attr.attr,
- &node_state_attr[1].attr.attr,
- &node_state_attr[2].attr.attr,
- &node_state_attr[3].attr.attr,
-#ifdef CONFIG_HIGHMEM
- &node_state_attr[4].attr.attr,
-#endif
- NULL
-};
-
-static struct attribute_group memory_root_attr_group = {
- .attrs = node_state_attrs,
-};
-
-static const struct attribute_group *cpu_root_attr_groups[] = {
- &memory_root_attr_group,
- NULL,
-};
-
-#define NODE_CALLBACK_PRI 2 /* lower than SLAB */
-static int __init register_node_type(void)
-{
- int ret;
-
- BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
- BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);
-
- ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
- if (!ret) {
- hotplug_memory_notifier(node_memory_callback,
- NODE_CALLBACK_PRI);
- }
-
- /*
- * Note: we're not going to unregister the node class if we fail
- * to register the node state class attribute files.
- */
- return ret;
-}
-postcore_initcall(register_node_type);
diff --git a/ANDROID_3.4.5/drivers/base/platform.c b/ANDROID_3.4.5/drivers/base/platform.c
deleted file mode 100644
index 42069c46..00000000
--- a/ANDROID_3.4.5/drivers/base/platform.c
+++ /dev/null
@@ -1,1167 +0,0 @@
-/*
- * platform.c - platform 'pseudo' bus for legacy devices
- *
- * Copyright (c) 2002-3 Patrick Mochel
- * Copyright (c) 2002-3 Open Source Development Labs
- *
- * This file is released under the GPLv2
- *
- * Please see Documentation/driver-model/platform.txt for more
- * information.
- */
-
-#include <linux/string.h>
-#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/bootmem.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/pm_runtime.h>
-
-#include "base.h"
-
-#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \
- driver))
-
-struct device platform_bus = {
- .init_name = "platform",
-};
-EXPORT_SYMBOL_GPL(platform_bus);
-
-/**
- * arch_setup_pdev_archdata - Allow manipulation of archdata before its used
- * @pdev: platform device
- *
- * This is called before platform_device_add() such that any pdev_archdata may
- * be setup before the platform_notifier is called. So if a user needs to
- * manipulate any relevant information in the pdev_archdata they can do:
- *
- * platform_devic_alloc()
- * ... manipulate ...
- * platform_device_add()
- *
- * And if they don't care they can just call platform_device_register() and
- * everything will just work out.
- */
-void __weak arch_setup_pdev_archdata(struct platform_device *pdev)
-{
-}
-
-/**
- * platform_get_resource - get a resource for a device
- * @dev: platform device
- * @type: resource type
- * @num: resource index
- */
-struct resource *platform_get_resource(struct platform_device *dev,
- unsigned int type, unsigned int num)
-{
- int i;
-
- for (i = 0; i < dev->num_resources; i++) {
- struct resource *r = &dev->resource[i];
-
- if (type == resource_type(r) && num-- == 0)
- return r;
- }
- return NULL;
-}
-EXPORT_SYMBOL_GPL(platform_get_resource);
-
-/**
- * platform_get_irq - get an IRQ for a device
- * @dev: platform device
- * @num: IRQ number index
- */
-int platform_get_irq(struct platform_device *dev, unsigned int num)
-{
- struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num);
-
- return r ? r->start : -ENXIO;
-}
-EXPORT_SYMBOL_GPL(platform_get_irq);
-
-/**
- * platform_get_resource_byname - get a resource for a device by name
- * @dev: platform device
- * @type: resource type
- * @name: resource name
- */
-struct resource *platform_get_resource_byname(struct platform_device *dev,
- unsigned int type,
- const char *name)
-{
- int i;
-
- for (i = 0; i < dev->num_resources; i++) {
- struct resource *r = &dev->resource[i];
-
- if (type == resource_type(r) && !strcmp(r->name, name))
- return r;
- }
- return NULL;
-}
-EXPORT_SYMBOL_GPL(platform_get_resource_byname);
-
-/**
- * platform_get_irq - get an IRQ for a device
- * @dev: platform device
- * @name: IRQ name
- */
-int platform_get_irq_byname(struct platform_device *dev, const char *name)
-{
- struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ,
- name);
-
- return r ? r->start : -ENXIO;
-}
-EXPORT_SYMBOL_GPL(platform_get_irq_byname);
-
-/**
- * platform_add_devices - add a numbers of platform devices
- * @devs: array of platform devices to add
- * @num: number of platform devices in array
- */
-int platform_add_devices(struct platform_device **devs, int num)
-{
- int i, ret = 0;
-
- for (i = 0; i < num; i++) {
- ret = platform_device_register(devs[i]);
- if (ret) {
- while (--i >= 0)
- platform_device_unregister(devs[i]);
- break;
- }
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(platform_add_devices);
-
-struct platform_object {
- struct platform_device pdev;
- char name[1];
-};
-
-/**
- * platform_device_put - destroy a platform device
- * @pdev: platform device to free
- *
- * Free all memory associated with a platform device. This function must
- * _only_ be externally called in error cases. All other usage is a bug.
- */
-void platform_device_put(struct platform_device *pdev)
-{
- if (pdev)
- put_device(&pdev->dev);
-}
-EXPORT_SYMBOL_GPL(platform_device_put);
-
-static void platform_device_release(struct device *dev)
-{
- struct platform_object *pa = container_of(dev, struct platform_object,
- pdev.dev);
-
- of_device_node_put(&pa->pdev.dev);
- kfree(pa->pdev.dev.platform_data);
- kfree(pa->pdev.mfd_cell);
- kfree(pa->pdev.resource);
- kfree(pa);
-}
-
-/**
- * platform_device_alloc - create a platform device
- * @name: base name of the device we're adding
- * @id: instance id
- *
- * Create a platform device object which can have other objects attached
- * to it, and which will have attached objects freed when it is released.
- */
-struct platform_device *platform_device_alloc(const char *name, int id)
-{
- struct platform_object *pa;
-
- pa = kzalloc(sizeof(struct platform_object) + strlen(name), GFP_KERNEL);
- if (pa) {
- strcpy(pa->name, name);
- pa->pdev.name = pa->name;
- pa->pdev.id = id;
- device_initialize(&pa->pdev.dev);
- pa->pdev.dev.release = platform_device_release;
- arch_setup_pdev_archdata(&pa->pdev);
- }
-
- return pa ? &pa->pdev : NULL;
-}
-EXPORT_SYMBOL_GPL(platform_device_alloc);
-
-/**
- * platform_device_add_resources - add resources to a platform device
- * @pdev: platform device allocated by platform_device_alloc to add resources to
- * @res: set of resources that needs to be allocated for the device
- * @num: number of resources
- *
- * Add a copy of the resources to the platform device. The memory
- * associated with the resources will be freed when the platform device is
- * released.
- */
-int platform_device_add_resources(struct platform_device *pdev,
- const struct resource *res, unsigned int num)
-{
- struct resource *r = NULL;
-
- if (res) {
- r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
- if (!r)
- return -ENOMEM;
- }
-
- kfree(pdev->resource);
- pdev->resource = r;
- pdev->num_resources = num;
- return 0;
-}
-EXPORT_SYMBOL_GPL(platform_device_add_resources);
-
-/**
- * platform_device_add_data - add platform-specific data to a platform device
- * @pdev: platform device allocated by platform_device_alloc to add resources to
- * @data: platform specific data for this platform device
- * @size: size of platform specific data
- *
- * Add a copy of platform specific data to the platform device's
- * platform_data pointer. The memory associated with the platform data
- * will be freed when the platform device is released.
- */
-int platform_device_add_data(struct platform_device *pdev, const void *data,
- size_t size)
-{
- void *d = NULL;
-
- if (data) {
- d = kmemdup(data, size, GFP_KERNEL);
- if (!d)
- return -ENOMEM;
- }
-
- kfree(pdev->dev.platform_data);
- pdev->dev.platform_data = d;
- return 0;
-}
-EXPORT_SYMBOL_GPL(platform_device_add_data);
-
-/**
- * platform_device_add - add a platform device to device hierarchy
- * @pdev: platform device we're adding
- *
- * This is part 2 of platform_device_register(), though may be called
- * separately _iff_ pdev was allocated by platform_device_alloc().
- */
-int platform_device_add(struct platform_device *pdev)
-{
- int i, ret = 0;
-
- if (!pdev)
- return -EINVAL;
-
- if (!pdev->dev.parent)
- pdev->dev.parent = &platform_bus;
-
- pdev->dev.bus = &platform_bus_type;
-
- if (pdev->id != -1)
- dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
- else
- dev_set_name(&pdev->dev, "%s", pdev->name);
-
- for (i = 0; i < pdev->num_resources; i++) {
- struct resource *p, *r = &pdev->resource[i];
-
- if (r->name == NULL)
- r->name = dev_name(&pdev->dev);
-
- p = r->parent;
- if (!p) {
- if (resource_type(r) == IORESOURCE_MEM)
- p = &iomem_resource;
- else if (resource_type(r) == IORESOURCE_IO)
- p = &ioport_resource;
- }
-
- if (p && insert_resource(p, r)) {
- printk(KERN_ERR
- "%s: failed to claim resource %d\n",
- dev_name(&pdev->dev), i);
- ret = -EBUSY;
- goto failed;
- }
- }
-
- pr_debug("Registering platform device '%s'. Parent at %s\n",
- dev_name(&pdev->dev), dev_name(pdev->dev.parent));
-
- ret = device_add(&pdev->dev);
- if (ret == 0)
- return ret;
-
- failed:
- while (--i >= 0) {
- struct resource *r = &pdev->resource[i];
- unsigned long type = resource_type(r);
-
- if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
- release_resource(r);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(platform_device_add);
-
-/**
- * platform_device_del - remove a platform-level device
- * @pdev: platform device we're removing
- *
- * Note that this function will also release all memory- and port-based
- * resources owned by the device (@dev->resource). This function must
- * _only_ be externally called in error cases. All other usage is a bug.
- */
-void platform_device_del(struct platform_device *pdev)
-{
- int i;
-
- if (pdev) {
- device_del(&pdev->dev);
-
- for (i = 0; i < pdev->num_resources; i++) {
- struct resource *r = &pdev->resource[i];
- unsigned long type = resource_type(r);
-
- if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
- release_resource(r);
- }
- }
-}
-EXPORT_SYMBOL_GPL(platform_device_del);
-
-/**
- * platform_device_register - add a platform-level device
- * @pdev: platform device we're adding
- */
-int platform_device_register(struct platform_device *pdev)
-{
- device_initialize(&pdev->dev);
- arch_setup_pdev_archdata(pdev);
- return platform_device_add(pdev);
-}
-EXPORT_SYMBOL_GPL(platform_device_register);
-
-/**
- * platform_device_unregister - unregister a platform-level device
- * @pdev: platform device we're unregistering
- *
- * Unregistration is done in 2 steps. First we release all resources
- * and remove it from the subsystem, then we drop reference count by
- * calling platform_device_put().
- */
-void platform_device_unregister(struct platform_device *pdev)
-{
- platform_device_del(pdev);
- platform_device_put(pdev);
-}
-EXPORT_SYMBOL_GPL(platform_device_unregister);
-
-/**
- * platform_device_register_full - add a platform-level device with
- * resources and platform-specific data
- *
- * @pdevinfo: data used to create device
- *
- * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
- */
-struct platform_device *platform_device_register_full(
- const struct platform_device_info *pdevinfo)
-{
- int ret = -ENOMEM;
- struct platform_device *pdev;
-
- pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
- if (!pdev)
- goto err_alloc;
-
- pdev->dev.parent = pdevinfo->parent;
-
- if (pdevinfo->dma_mask) {
- /*
- * This memory isn't freed when the device is put,
- * I don't have a nice idea for that though. Conceptually
- * dma_mask in struct device should not be a pointer.
- * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
- */
- pdev->dev.dma_mask =
- kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
- if (!pdev->dev.dma_mask)
- goto err;
-
- *pdev->dev.dma_mask = pdevinfo->dma_mask;
- pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
- }
-
- ret = platform_device_add_resources(pdev,
- pdevinfo->res, pdevinfo->num_res);
- if (ret)
- goto err;
-
- ret = platform_device_add_data(pdev,
- pdevinfo->data, pdevinfo->size_data);
- if (ret)
- goto err;
-
- ret = platform_device_add(pdev);
- if (ret) {
-err:
- kfree(pdev->dev.dma_mask);
-
-err_alloc:
- platform_device_put(pdev);
- return ERR_PTR(ret);
- }
-
- return pdev;
-}
-EXPORT_SYMBOL_GPL(platform_device_register_full);
-
-static int platform_drv_probe(struct device *_dev)
-{
- struct platform_driver *drv = to_platform_driver(_dev->driver);
- struct platform_device *dev = to_platform_device(_dev);
-
- return drv->probe(dev);
-}
-
-static int platform_drv_probe_fail(struct device *_dev)
-{
- return -ENXIO;
-}
-
-static int platform_drv_remove(struct device *_dev)
-{
- struct platform_driver *drv = to_platform_driver(_dev->driver);
- struct platform_device *dev = to_platform_device(_dev);
-
- return drv->remove(dev);
-}
-
-static void platform_drv_shutdown(struct device *_dev)
-{
- struct platform_driver *drv = to_platform_driver(_dev->driver);
- struct platform_device *dev = to_platform_device(_dev);
-
- drv->shutdown(dev);
-}
-
-/**
- * platform_driver_register - register a driver for platform-level devices
- * @drv: platform driver structure
- */
-int platform_driver_register(struct platform_driver *drv)
-{
- drv->driver.bus = &platform_bus_type;
- if (drv->probe)
- drv->driver.probe = platform_drv_probe;
- if (drv->remove)
- drv->driver.remove = platform_drv_remove;
- if (drv->shutdown)
- drv->driver.shutdown = platform_drv_shutdown;
-
- return driver_register(&drv->driver);
-}
-EXPORT_SYMBOL_GPL(platform_driver_register);
-
-/**
- * platform_driver_unregister - unregister a driver for platform-level devices
- * @drv: platform driver structure
- */
-void platform_driver_unregister(struct platform_driver *drv)
-{
- driver_unregister(&drv->driver);
-}
-EXPORT_SYMBOL_GPL(platform_driver_unregister);
-
-/**
- * platform_driver_probe - register driver for non-hotpluggable device
- * @drv: platform driver structure
- * @probe: the driver probe routine, probably from an __init section
- *
- * Use this instead of platform_driver_register() when you know the device
- * is not hotpluggable and has already been registered, and you want to
- * remove its run-once probe() infrastructure from memory after the driver
- * has bound to the device.
- *
- * One typical use for this would be with drivers for controllers integrated
- * into system-on-chip processors, where the controller devices have been
- * configured as part of board setup.
- *
- * Returns zero if the driver registered and bound to a device, else returns
- * a negative error code and with the driver not registered.
- */
-int __init_or_module platform_driver_probe(struct platform_driver *drv,
- int (*probe)(struct platform_device *))
-{
- int retval, code;
-
- /* make sure driver won't have bind/unbind attributes */
- drv->driver.suppress_bind_attrs = true;
-
- /* temporary section violation during probe() */
- drv->probe = probe;
- retval = code = platform_driver_register(drv);
-
- /*
- * Fixup that section violation, being paranoid about code scanning
- * the list of drivers in order to probe new devices. Check to see
- * if the probe was successful, and make sure any forced probes of
- * new devices fail.
- */
- spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
- drv->probe = NULL;
- if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
- retval = -ENODEV;
- drv->driver.probe = platform_drv_probe_fail;
- spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);
-
- if (code != retval)
- platform_driver_unregister(drv);
- return retval;
-}
-EXPORT_SYMBOL_GPL(platform_driver_probe);
-
-/**
- * platform_create_bundle - register driver and create corresponding device
- * @driver: platform driver structure
- * @probe: the driver probe routine, probably from an __init section
- * @res: set of resources that needs to be allocated for the device
- * @n_res: number of resources
- * @data: platform specific data for this platform device
- * @size: size of platform specific data
- *
- * Use this in legacy-style modules that probe hardware directly and
- * register a single platform device and corresponding platform driver.
- *
- * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
- */
-struct platform_device * __init_or_module platform_create_bundle(
- struct platform_driver *driver,
- int (*probe)(struct platform_device *),
- struct resource *res, unsigned int n_res,
- const void *data, size_t size)
-{
- struct platform_device *pdev;
- int error;
-
- pdev = platform_device_alloc(driver->driver.name, -1);
- if (!pdev) {
- error = -ENOMEM;
- goto err_out;
- }
-
- error = platform_device_add_resources(pdev, res, n_res);
- if (error)
- goto err_pdev_put;
-
- error = platform_device_add_data(pdev, data, size);
- if (error)
- goto err_pdev_put;
-
- error = platform_device_add(pdev);
- if (error)
- goto err_pdev_put;
-
- error = platform_driver_probe(driver, probe);
- if (error)
- goto err_pdev_del;
-
- return pdev;
-
-err_pdev_del:
- platform_device_del(pdev);
-err_pdev_put:
- platform_device_put(pdev);
-err_out:
- return ERR_PTR(error);
-}
-EXPORT_SYMBOL_GPL(platform_create_bundle);
-
-/* modalias support enables more hands-off userspace setup:
- * (a) environment variable lets new-style hotplug events work once system is
- * fully running: "modprobe $MODALIAS"
- * (b) sysfs attribute lets new-style coldplug recover from hotplug events
- * mishandled before system is fully running: "modprobe $(cat modalias)"
- */
-static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
- char *buf)
-{
- struct platform_device *pdev = to_platform_device(dev);
- int len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);
-
- return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
-}
-
-static struct device_attribute platform_dev_attrs[] = {
- __ATTR_RO(modalias),
- __ATTR_NULL,
-};
-
-static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- struct platform_device *pdev = to_platform_device(dev);
- int rc;
-
- /* Some devices have extra OF data and an OF-style MODALIAS */
- rc = of_device_uevent_modalias(dev,env);
- if (rc != -ENODEV)
- return rc;
-
- add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
- pdev->name);
- return 0;
-}
-
-static const struct platform_device_id *platform_match_id(
- const struct platform_device_id *id,
- struct platform_device *pdev)
-{
- while (id->name[0]) {
- if (strcmp(pdev->name, id->name) == 0) {
- pdev->id_entry = id;
- return id;
- }
- id++;
- }
- return NULL;
-}
-
-/**
- * platform_match - bind platform device to platform driver.
- * @dev: device.
- * @drv: driver.
- *
- * Platform device IDs are assumed to be encoded like this:
- * "<name><instance>", where <name> is a short description of the type of
- * device, like "pci" or "floppy", and <instance> is the enumerated
- * instance of the device, like '0' or '42'. Driver IDs are simply
- * "<name>". So, extract the <name> from the platform_device structure,
- * and compare it against the name of the driver. Return whether they match
- * or not.
- */
-static int platform_match(struct device *dev, struct device_driver *drv)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct platform_driver *pdrv = to_platform_driver(drv);
-
- /* Attempt an OF style match first */
- if (of_driver_match_device(dev, drv))
- return 1;
-
- /* Then try to match against the id table */
- if (pdrv->id_table)
- return platform_match_id(pdrv->id_table, pdev) != NULL;
-
- /* fall-back to driver name match */
- return (strcmp(pdev->name, drv->name) == 0);
-}
-
-#ifdef CONFIG_PM_SLEEP
-
-static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
-{
- struct platform_driver *pdrv = to_platform_driver(dev->driver);
- struct platform_device *pdev = to_platform_device(dev);
- int ret = 0;
-
- if (dev->driver && pdrv->suspend){
- printk(KERN_ERR"platform_legacy_suspend %s\n",pdrv->driver.name);
- ret = pdrv->suspend(pdev, mesg);
- }
-
- return ret;
-}
-
-static int platform_legacy_resume(struct device *dev)
-{
- struct platform_driver *pdrv = to_platform_driver(dev->driver);
- struct platform_device *pdev = to_platform_device(dev);
- int ret = 0;
-
- if (dev->driver && pdrv->resume){
- printk(KERN_ERR"platform_legacy_resume %s\n",pdrv->driver.name);
- ret = pdrv->resume(pdev);
- }
-
- return ret;
-}
-
-#endif /* CONFIG_PM_SLEEP */
-
-#ifdef CONFIG_SUSPEND
-
-int platform_pm_suspend(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->suspend){
- printk("platform_pm_suspend %s start\n",drv->name);
- ret = drv->pm->suspend(dev);
- printk("platform_pm_suspend %s end\n",drv->name);
- }
- } else {
- ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
- }
-
- return ret;
-}
-
-int platform_pm_resume(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->resume){
- printk("platform_pm_resume %s start\n",drv->name);
- ret = drv->pm->resume(dev);
- printk("platform_pm_resume %s end\n",drv->name);
- }
- } else {
- ret = platform_legacy_resume(dev);
- }
-
- return ret;
-}
-
-#endif /* CONFIG_SUSPEND */
-
-#ifdef CONFIG_HIBERNATE_CALLBACKS
-
-int platform_pm_freeze(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->freeze)
- ret = drv->pm->freeze(dev);
- } else {
- ret = platform_legacy_suspend(dev, PMSG_FREEZE);
- }
-
- return ret;
-}
-
-int platform_pm_thaw(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->thaw)
- ret = drv->pm->thaw(dev);
- } else {
- ret = platform_legacy_resume(dev);
- }
-
- return ret;
-}
-
-int platform_pm_poweroff(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->poweroff)
- ret = drv->pm->poweroff(dev);
- } else {
- ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
- }
-
- return ret;
-}
-
-int platform_pm_restore(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->restore)
- ret = drv->pm->restore(dev);
- } else {
- ret = platform_legacy_resume(dev);
- }
-
- return ret;
-}
-
-#endif /* CONFIG_HIBERNATE_CALLBACKS */
-
-static const struct dev_pm_ops platform_dev_pm_ops = {
- .runtime_suspend = pm_generic_runtime_suspend,
- .runtime_resume = pm_generic_runtime_resume,
- .runtime_idle = pm_generic_runtime_idle,
- USE_PLATFORM_PM_SLEEP_OPS
-};
-
-struct bus_type platform_bus_type = {
- .name = "platform",
- .dev_attrs = platform_dev_attrs,
- .match = platform_match,
- .uevent = platform_uevent,
- .pm = &platform_dev_pm_ops,
-};
-EXPORT_SYMBOL_GPL(platform_bus_type);
-
-int __init platform_bus_init(void)
-{
- int error;
-
- early_platform_cleanup();
-
- error = device_register(&platform_bus);
- if (error)
- return error;
- error = bus_register(&platform_bus_type);
- if (error)
- device_unregister(&platform_bus);
- return error;
-}
-
-#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
-u64 dma_get_required_mask(struct device *dev)
-{
- u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
- u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
- u64 mask;
-
- if (!high_totalram) {
- /* convert to mask just covering totalram */
- low_totalram = (1 << (fls(low_totalram) - 1));
- low_totalram += low_totalram - 1;
- mask = low_totalram;
- } else {
- high_totalram = (1 << (fls(high_totalram) - 1));
- high_totalram += high_totalram - 1;
- mask = (((u64)high_totalram) << 32) + 0xffffffff;
- }
- return mask;
-}
-EXPORT_SYMBOL_GPL(dma_get_required_mask);
-#endif
-
-static __initdata LIST_HEAD(early_platform_driver_list);
-static __initdata LIST_HEAD(early_platform_device_list);
-
-/**
- * early_platform_driver_register - register early platform driver
- * @epdrv: early_platform driver structure
- * @buf: string passed from early_param()
- *
- * Helper function for early_platform_init() / early_platform_init_buffer()
- */
-int __init early_platform_driver_register(struct early_platform_driver *epdrv,
- char *buf)
-{
- char *tmp;
- int n;
-
- /* Simply add the driver to the end of the global list.
- * Drivers will by default be put on the list in compiled-in order.
- */
- if (!epdrv->list.next) {
- INIT_LIST_HEAD(&epdrv->list);
- list_add_tail(&epdrv->list, &early_platform_driver_list);
- }
-
- /* If the user has specified device then make sure the driver
- * gets prioritized. The driver of the last device specified on
- * command line will be put first on the list.
- */
- n = strlen(epdrv->pdrv->driver.name);
- if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) {
- list_move(&epdrv->list, &early_platform_driver_list);
-
- /* Allow passing parameters after device name */
- if (buf[n] == '\0' || buf[n] == ',')
- epdrv->requested_id = -1;
- else {
- epdrv->requested_id = simple_strtoul(&buf[n + 1],
- &tmp, 10);
-
- if (buf[n] != '.' || (tmp == &buf[n + 1])) {
- epdrv->requested_id = EARLY_PLATFORM_ID_ERROR;
- n = 0;
- } else
- n += strcspn(&buf[n + 1], ",") + 1;
- }
-
- if (buf[n] == ',')
- n++;
-
- if (epdrv->bufsize) {
- memcpy(epdrv->buffer, &buf[n],
- min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1));
- epdrv->buffer[epdrv->bufsize - 1] = '\0';
- }
- }
-
- return 0;
-}
-
-/**
- * early_platform_add_devices - adds a number of early platform devices
- * @devs: array of early platform devices to add
- * @num: number of early platform devices in array
- *
- * Used by early architecture code to register early platform devices and
- * their platform data.
- */
-void __init early_platform_add_devices(struct platform_device **devs, int num)
-{
- struct device *dev;
- int i;
-
- /* simply add the devices to list */
- for (i = 0; i < num; i++) {
- dev = &devs[i]->dev;
-
- if (!dev->devres_head.next) {
- INIT_LIST_HEAD(&dev->devres_head);
- list_add_tail(&dev->devres_head,
- &early_platform_device_list);
- }
- }
-}
-
-/**
- * early_platform_driver_register_all - register early platform drivers
- * @class_str: string to identify early platform driver class
- *
- * Used by architecture code to register all early platform drivers
- * for a certain class. If omitted then only early platform drivers
- * with matching kernel command line class parameters will be registered.
- */
-void __init early_platform_driver_register_all(char *class_str)
-{
- /* The "class_str" parameter may or may not be present on the kernel
- * command line. If it is present then there may be more than one
- * matching parameter.
- *
- * Since we register our early platform drivers using early_param()
- * we need to make sure that they also get registered in the case
- * when the parameter is missing from the kernel command line.
- *
- * We use parse_early_options() to make sure the early_param() gets
- * called at least once. The early_param() may be called more than
- * once since the name of the preferred device may be specified on
- * the kernel command line. early_platform_driver_register() handles
- * this case for us.
- */
- parse_early_options(class_str);
-}
-
-/**
- * early_platform_match - find early platform device matching driver
- * @epdrv: early platform driver structure
- * @id: id to match against
- */
-static __init struct platform_device *
-early_platform_match(struct early_platform_driver *epdrv, int id)
-{
- struct platform_device *pd;
-
- list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
- if (platform_match(&pd->dev, &epdrv->pdrv->driver))
- if (pd->id == id)
- return pd;
-
- return NULL;
-}
-
-/**
- * early_platform_left - check if early platform driver has matching devices
- * @epdrv: early platform driver structure
- * @id: return true if id or above exists
- */
-static __init int early_platform_left(struct early_platform_driver *epdrv,
- int id)
-{
- struct platform_device *pd;
-
- list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
- if (platform_match(&pd->dev, &epdrv->pdrv->driver))
- if (pd->id >= id)
- return 1;
-
- return 0;
-}
-
-/**
- * early_platform_driver_probe_id - probe drivers matching class_str and id
- * @class_str: string to identify early platform driver class
- * @id: id to match against
- * @nr_probe: number of platform devices to successfully probe before exiting
- */
-static int __init early_platform_driver_probe_id(char *class_str,
- int id,
- int nr_probe)
-{
- struct early_platform_driver *epdrv;
- struct platform_device *match;
- int match_id;
- int n = 0;
- int left = 0;
-
- list_for_each_entry(epdrv, &early_platform_driver_list, list) {
- /* only use drivers matching our class_str */
- if (strcmp(class_str, epdrv->class_str))
- continue;
-
- if (id == -2) {
- match_id = epdrv->requested_id;
- left = 1;
-
- } else {
- match_id = id;
- left += early_platform_left(epdrv, id);
-
- /* skip requested id */
- switch (epdrv->requested_id) {
- case EARLY_PLATFORM_ID_ERROR:
- case EARLY_PLATFORM_ID_UNSET:
- break;
- default:
- if (epdrv->requested_id == id)
- match_id = EARLY_PLATFORM_ID_UNSET;
- }
- }
-
- switch (match_id) {
- case EARLY_PLATFORM_ID_ERROR:
- pr_warning("%s: unable to parse %s parameter\n",
- class_str, epdrv->pdrv->driver.name);
- /* fall-through */
- case EARLY_PLATFORM_ID_UNSET:
- match = NULL;
- break;
- default:
- match = early_platform_match(epdrv, match_id);
- }
-
- if (match) {
- /*
- * Set up a sensible init_name to enable
- * dev_name() and others to be used before the
- * rest of the driver core is initialized.
- */
- if (!match->dev.init_name && slab_is_available()) {
- if (match->id != -1)
- match->dev.init_name =
- kasprintf(GFP_KERNEL, "%s.%d",
- match->name,
- match->id);
- else
- match->dev.init_name =
- kasprintf(GFP_KERNEL, "%s",
- match->name);
-
- if (!match->dev.init_name)
- return -ENOMEM;
- }
-
- if (epdrv->pdrv->probe(match))
- pr_warning("%s: unable to probe %s early.\n",
- class_str, match->name);
- else
- n++;
- }
-
- if (n >= nr_probe)
- break;
- }
-
- if (left)
- return n;
- else
- return -ENODEV;
-}
-
-/**
- * early_platform_driver_probe - probe a class of registered drivers
- * @class_str: string to identify early platform driver class
- * @nr_probe: number of platform devices to successfully probe before exiting
- * @user_only: only probe user specified early platform devices
- *
- * Used by architecture code to probe registered early platform drivers
- * within a certain class. For probe to happen a registered early platform
- * device matching a registered early platform driver is needed.
- */
-int __init early_platform_driver_probe(char *class_str,
- int nr_probe,
- int user_only)
-{
- int k, n, i;
-
- n = 0;
- for (i = -2; n < nr_probe; i++) {
- k = early_platform_driver_probe_id(class_str, i, nr_probe - n);
-
- if (k < 0)
- break;
-
- n += k;
-
- if (user_only)
- break;
- }
-
- return n;
-}
-
-/**
- * early_platform_cleanup - clean up early platform code
- */
-void __init early_platform_cleanup(void)
-{
- struct platform_device *pd, *pd2;
-
- /* clean up the devres list used to chain devices */
- list_for_each_entry_safe(pd, pd2, &early_platform_device_list,
- dev.devres_head) {
- list_del(&pd->dev.devres_head);
- memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head));
- }
-}
-
diff --git a/ANDROID_3.4.5/drivers/base/power/Makefile b/ANDROID_3.4.5/drivers/base/power/Makefile
deleted file mode 100644
index 2e58ebb1..00000000
--- a/ANDROID_3.4.5/drivers/base/power/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o
-obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
-obj-$(CONFIG_PM_RUNTIME) += runtime.o
-obj-$(CONFIG_PM_TRACE_RTC) += trace.o
-obj-$(CONFIG_PM_OPP) += opp.o
-obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
-obj-$(CONFIG_HAVE_CLK) += clock_ops.o
-
-ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/ANDROID_3.4.5/drivers/base/power/clock_ops.c b/ANDROID_3.4.5/drivers/base/power/clock_ops.c
deleted file mode 100644
index 869d7ff2..00000000
--- a/ANDROID_3.4.5/drivers/base/power/clock_ops.c
+++ /dev/null
@@ -1,487 +0,0 @@
-/*
- * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks
- *
- * Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/pm.h>
-#include <linux/pm_clock.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-
-#ifdef CONFIG_PM
-
-enum pce_status {
- PCE_STATUS_NONE = 0,
- PCE_STATUS_ACQUIRED,
- PCE_STATUS_ENABLED,
- PCE_STATUS_ERROR,
-};
-
-struct pm_clock_entry {
- struct list_head node;
- char *con_id;
- struct clk *clk;
- enum pce_status status;
-};
-
-/**
- * pm_clk_acquire - Acquire a device clock.
- * @dev: Device whose clock is to be acquired.
- * @ce: PM clock entry corresponding to the clock.
- */
-static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
-{
- ce->clk = clk_get(dev, ce->con_id);
- if (IS_ERR(ce->clk)) {
- ce->status = PCE_STATUS_ERROR;
- } else {
- ce->status = PCE_STATUS_ACQUIRED;
- dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
- }
-}
-
-/**
- * pm_clk_add - Start using a device clock for power management.
- * @dev: Device whose clock is going to be used for power management.
- * @con_id: Connection ID of the clock.
- *
- * Add the clock represented by @con_id to the list of clocks used for
- * the power management of @dev.
- */
-int pm_clk_add(struct device *dev, const char *con_id)
-{
- struct pm_subsys_data *psd = dev_to_psd(dev);
- struct pm_clock_entry *ce;
-
- if (!psd)
- return -EINVAL;
-
- ce = kzalloc(sizeof(*ce), GFP_KERNEL);
- if (!ce) {
- dev_err(dev, "Not enough memory for clock entry.\n");
- return -ENOMEM;
- }
-
- if (con_id) {
- ce->con_id = kstrdup(con_id, GFP_KERNEL);
- if (!ce->con_id) {
- dev_err(dev,
- "Not enough memory for clock connection ID.\n");
- kfree(ce);
- return -ENOMEM;
- }
- }
-
- pm_clk_acquire(dev, ce);
-
- spin_lock_irq(&psd->lock);
- list_add_tail(&ce->node, &psd->clock_list);
- spin_unlock_irq(&psd->lock);
- return 0;
-}
-
-/**
- * __pm_clk_remove - Destroy PM clock entry.
- * @ce: PM clock entry to destroy.
- */
-static void __pm_clk_remove(struct pm_clock_entry *ce)
-{
- if (!ce)
- return;
-
- if (ce->status < PCE_STATUS_ERROR) {
- if (ce->status == PCE_STATUS_ENABLED)
- clk_disable(ce->clk);
-
- if (ce->status >= PCE_STATUS_ACQUIRED)
- clk_put(ce->clk);
- }
-
- kfree(ce->con_id);
- kfree(ce);
-}
-
-/**
- * pm_clk_remove - Stop using a device clock for power management.
- * @dev: Device whose clock should not be used for PM any more.
- * @con_id: Connection ID of the clock.
- *
- * Remove the clock represented by @con_id from the list of clocks used for
- * the power management of @dev.
- */
-void pm_clk_remove(struct device *dev, const char *con_id)
-{
- struct pm_subsys_data *psd = dev_to_psd(dev);
- struct pm_clock_entry *ce;
-
- if (!psd)
- return;
-
- spin_lock_irq(&psd->lock);
-
- list_for_each_entry(ce, &psd->clock_list, node) {
- if (!con_id && !ce->con_id)
- goto remove;
- else if (!con_id || !ce->con_id)
- continue;
- else if (!strcmp(con_id, ce->con_id))
- goto remove;
- }
-
- spin_unlock_irq(&psd->lock);
- return;
-
- remove:
- list_del(&ce->node);
- spin_unlock_irq(&psd->lock);
-
- __pm_clk_remove(ce);
-}
-
-/**
- * pm_clk_init - Initialize a device's list of power management clocks.
- * @dev: Device to initialize the list of PM clocks for.
- *
- * Initialize the lock and clock_list members of the device's pm_subsys_data
- * object.
- */
-void pm_clk_init(struct device *dev)
-{
- struct pm_subsys_data *psd = dev_to_psd(dev);
- if (psd)
- INIT_LIST_HEAD(&psd->clock_list);
-}
-
-/**
- * pm_clk_create - Create and initialize a device's list of PM clocks.
- * @dev: Device to create and initialize the list of PM clocks for.
- *
- * Allocate a struct pm_subsys_data object, initialize its lock and clock_list
- * members and make the @dev's power.subsys_data field point to it.
- */
-int pm_clk_create(struct device *dev)
-{
- int ret = dev_pm_get_subsys_data(dev);
- return ret < 0 ? ret : 0;
-}
-
-/**
- * pm_clk_destroy - Destroy a device's list of power management clocks.
- * @dev: Device to destroy the list of PM clocks for.
- *
- * Clear the @dev's power.subsys_data field, remove the list of clock entries
- * from the struct pm_subsys_data object pointed to by it before and free
- * that object.
- */
-void pm_clk_destroy(struct device *dev)
-{
- struct pm_subsys_data *psd = dev_to_psd(dev);
- struct pm_clock_entry *ce, *c;
- struct list_head list;
-
- if (!psd)
- return;
-
- INIT_LIST_HEAD(&list);
-
- spin_lock_irq(&psd->lock);
-
- list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
- list_move(&ce->node, &list);
-
- spin_unlock_irq(&psd->lock);
-
- dev_pm_put_subsys_data(dev);
-
- list_for_each_entry_safe_reverse(ce, c, &list, node) {
- list_del(&ce->node);
- __pm_clk_remove(ce);
- }
-}
-
-#endif /* CONFIG_PM */
-
-#ifdef CONFIG_PM_RUNTIME
-
-/**
- * pm_clk_suspend - Disable clocks in a device's PM clock list.
- * @dev: Device to disable the clocks for.
- */
-int pm_clk_suspend(struct device *dev)
-{
- struct pm_subsys_data *psd = dev_to_psd(dev);
- struct pm_clock_entry *ce;
- unsigned long flags;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- if (!psd)
- return 0;
-
- spin_lock_irqsave(&psd->lock, flags);
-
- list_for_each_entry_reverse(ce, &psd->clock_list, node) {
- if (ce->status < PCE_STATUS_ERROR) {
- if (ce->status == PCE_STATUS_ENABLED)
- clk_disable(ce->clk);
- ce->status = PCE_STATUS_ACQUIRED;
- }
- }
-
- spin_unlock_irqrestore(&psd->lock, flags);
-
- return 0;
-}
-
-/**
- * pm_clk_resume - Enable clocks in a device's PM clock list.
- * @dev: Device to enable the clocks for.
- */
-int pm_clk_resume(struct device *dev)
-{
- struct pm_subsys_data *psd = dev_to_psd(dev);
- struct pm_clock_entry *ce;
- unsigned long flags;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- if (!psd)
- return 0;
-
- spin_lock_irqsave(&psd->lock, flags);
-
- list_for_each_entry(ce, &psd->clock_list, node) {
- if (ce->status < PCE_STATUS_ERROR) {
- clk_enable(ce->clk);
- ce->status = PCE_STATUS_ENABLED;
- }
- }
-
- spin_unlock_irqrestore(&psd->lock, flags);
-
- return 0;
-}
-
-/**
- * pm_clk_notify - Notify routine for device addition and removal.
- * @nb: Notifier block object this function is a member of.
- * @action: Operation being carried out by the caller.
- * @data: Device the routine is being run for.
- *
- * For this function to work, @nb must be a member of an object of type
- * struct pm_clk_notifier_block containing all of the requisite data.
- * Specifically, the pm_domain member of that object is copied to the device's
- * pm_domain field and its con_ids member is used to populate the device's list
- * of PM clocks, depending on @action.
- *
- * If the device's pm_domain field is already populated with a value different
- * from the one stored in the struct pm_clk_notifier_block object, the function
- * does nothing.
- */
-static int pm_clk_notify(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct pm_clk_notifier_block *clknb;
- struct device *dev = data;
- char **con_id;
- int error;
-
- dev_dbg(dev, "%s() %ld\n", __func__, action);
-
- clknb = container_of(nb, struct pm_clk_notifier_block, nb);
-
- switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
- if (dev->pm_domain)
- break;
-
- error = pm_clk_create(dev);
- if (error)
- break;
-
- dev->pm_domain = clknb->pm_domain;
- if (clknb->con_ids[0]) {
- for (con_id = clknb->con_ids; *con_id; con_id++)
- pm_clk_add(dev, *con_id);
- } else {
- pm_clk_add(dev, NULL);
- }
-
- break;
- case BUS_NOTIFY_DEL_DEVICE:
- if (dev->pm_domain != clknb->pm_domain)
- break;
-
- dev->pm_domain = NULL;
- pm_clk_destroy(dev);
- break;
- }
-
- return 0;
-}
-
-#else /* !CONFIG_PM_RUNTIME */
-
-#ifdef CONFIG_PM
-
-/**
- * pm_clk_suspend - Disable clocks in a device's PM clock list.
- * @dev: Device to disable the clocks for.
- */
-int pm_clk_suspend(struct device *dev)
-{
- struct pm_subsys_data *psd = dev_to_psd(dev);
- struct pm_clock_entry *ce;
- unsigned long flags;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- /* If there is no driver, the clocks are already disabled. */
- if (!psd || !dev->driver)
- return 0;
-
- spin_lock_irqsave(&psd->lock, flags);
-
- list_for_each_entry_reverse(ce, &psd->clock_list, node)
- clk_disable(ce->clk);
-
- spin_unlock_irqrestore(&psd->lock, flags);
-
- return 0;
-}
-
-/**
- * pm_clk_resume - Enable clocks in a device's PM clock list.
- * @dev: Device to enable the clocks for.
- */
-int pm_clk_resume(struct device *dev)
-{
- struct pm_subsys_data *psd = dev_to_psd(dev);
- struct pm_clock_entry *ce;
- unsigned long flags;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- /* If there is no driver, the clocks should remain disabled. */
- if (!psd || !dev->driver)
- return 0;
-
- spin_lock_irqsave(&psd->lock, flags);
-
- list_for_each_entry(ce, &psd->clock_list, node)
- clk_enable(ce->clk);
-
- spin_unlock_irqrestore(&psd->lock, flags);
-
- return 0;
-}
-
-#endif /* CONFIG_PM */
-
-/**
- * enable_clock - Enable a device clock.
- * @dev: Device whose clock is to be enabled.
- * @con_id: Connection ID of the clock.
- */
-static void enable_clock(struct device *dev, const char *con_id)
-{
- struct clk *clk;
-
- clk = clk_get(dev, con_id);
- if (!IS_ERR(clk)) {
- clk_enable(clk);
- clk_put(clk);
- dev_info(dev, "Runtime PM disabled, clock forced on.\n");
- }
-}
-
-/**
- * disable_clock - Disable a device clock.
- * @dev: Device whose clock is to be disabled.
- * @con_id: Connection ID of the clock.
- */
-static void disable_clock(struct device *dev, const char *con_id)
-{
- struct clk *clk;
-
- clk = clk_get(dev, con_id);
- if (!IS_ERR(clk)) {
- clk_disable(clk);
- clk_put(clk);
- dev_info(dev, "Runtime PM disabled, clock forced off.\n");
- }
-}
-
-/**
- * pm_clk_notify - Notify routine for device addition and removal.
- * @nb: Notifier block object this function is a member of.
- * @action: Operation being carried out by the caller.
- * @data: Device the routine is being run for.
- *
- * For this function to work, @nb must be a member of an object of type
- * struct pm_clk_notifier_block containing all of the requisite data.
- * Specifically, the con_ids member of that object is used to enable or disable
- * the device's clocks, depending on @action.
- */
-static int pm_clk_notify(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct pm_clk_notifier_block *clknb;
- struct device *dev = data;
- char **con_id;
-
- dev_dbg(dev, "%s() %ld\n", __func__, action);
-
- clknb = container_of(nb, struct pm_clk_notifier_block, nb);
-
- switch (action) {
- case BUS_NOTIFY_BIND_DRIVER:
- if (clknb->con_ids[0]) {
- for (con_id = clknb->con_ids; *con_id; con_id++)
- enable_clock(dev, *con_id);
- } else {
- enable_clock(dev, NULL);
- }
- break;
- case BUS_NOTIFY_UNBOUND_DRIVER:
- if (clknb->con_ids[0]) {
- for (con_id = clknb->con_ids; *con_id; con_id++)
- disable_clock(dev, *con_id);
- } else {
- disable_clock(dev, NULL);
- }
- break;
- }
-
- return 0;
-}
-
-#endif /* !CONFIG_PM_RUNTIME */
-
-/**
- * pm_clk_add_notifier - Add bus type notifier for power management clocks.
- * @bus: Bus type to add the notifier to.
- * @clknb: Notifier to be added to the given bus type.
- *
- * The nb member of @clknb is not expected to be initialized and its
- * notifier_call member will be replaced with pm_clk_notify(). However,
- * the remaining members of @clknb should be populated prior to calling this
- * routine.
- */
-void pm_clk_add_notifier(struct bus_type *bus,
- struct pm_clk_notifier_block *clknb)
-{
- if (!bus || !clknb)
- return;
-
- clknb->nb.notifier_call = pm_clk_notify;
- bus_register_notifier(bus, &clknb->nb);
-}
diff --git a/ANDROID_3.4.5/drivers/base/power/common.c b/ANDROID_3.4.5/drivers/base/power/common.c
deleted file mode 100644
index a14085cc..00000000
--- a/ANDROID_3.4.5/drivers/base/power/common.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * drivers/base/power/common.c - Common device power management code.
- *
- * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/pm_clock.h>
-
-/**
- * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
- * @dev: Device to handle.
- *
- * If power.subsys_data is NULL, point it to a new object, otherwise increment
- * its reference counter. Return 1 if a new object has been created, otherwise
- * return 0 or error code.
- */
-int dev_pm_get_subsys_data(struct device *dev)
-{
- struct pm_subsys_data *psd;
- int ret = 0;
-
- psd = kzalloc(sizeof(*psd), GFP_KERNEL);
- if (!psd)
- return -ENOMEM;
-
- spin_lock_irq(&dev->power.lock);
-
- if (dev->power.subsys_data) {
- dev->power.subsys_data->refcount++;
- } else {
- spin_lock_init(&psd->lock);
- psd->refcount = 1;
- dev->power.subsys_data = psd;
- pm_clk_init(dev);
- psd = NULL;
- ret = 1;
- }
-
- spin_unlock_irq(&dev->power.lock);
-
- /* kfree() verifies that its argument is nonzero. */
- kfree(psd);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
-
-/**
- * dev_pm_put_subsys_data - Drop reference to power.subsys_data.
- * @dev: Device to handle.
- *
- * If the reference counter of power.subsys_data is zero after dropping the
- * reference, power.subsys_data is removed. Return 1 if that happens or 0
- * otherwise.
- */
-int dev_pm_put_subsys_data(struct device *dev)
-{
- struct pm_subsys_data *psd;
- int ret = 0;
-
- spin_lock_irq(&dev->power.lock);
-
- psd = dev_to_psd(dev);
- if (!psd) {
- ret = -EINVAL;
- goto out;
- }
-
- if (--psd->refcount == 0) {
- dev->power.subsys_data = NULL;
- kfree(psd);
- ret = 1;
- }
-
- out:
- spin_unlock_irq(&dev->power.lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
diff --git a/ANDROID_3.4.5/drivers/base/power/domain.c b/ANDROID_3.4.5/drivers/base/power/domain.c
deleted file mode 100644
index 83aa694a..00000000
--- a/ANDROID_3.4.5/drivers/base/power/domain.c
+++ /dev/null
@@ -1,1815 +0,0 @@
-/*
- * drivers/base/power/domain.c - Common code related to device power domains.
- *
- * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <linux/pm_runtime.h>
-#include <linux/pm_domain.h>
-#include <linux/pm_qos.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/suspend.h>
-#include <linux/export.h>
-
-#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
-({ \
- type (*__routine)(struct device *__d); \
- type __ret = (type)0; \
- \
- __routine = genpd->dev_ops.callback; \
- if (__routine) { \
- __ret = __routine(dev); \
- } else { \
- __routine = dev_gpd_data(dev)->ops.callback; \
- if (__routine) \
- __ret = __routine(dev); \
- } \
- __ret; \
-})
-
-#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \
-({ \
- ktime_t __start = ktime_get(); \
- type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \
- s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \
- struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \
- if (!__retval && __elapsed > __td->field) { \
- __td->field = __elapsed; \
- dev_warn(dev, name " latency exceeded, new value %lld ns\n", \
- __elapsed); \
- genpd->max_off_time_changed = true; \
- __td->constraint_changed = true; \
- } \
- __retval; \
-})
-
-static LIST_HEAD(gpd_list);
-static DEFINE_MUTEX(gpd_list_lock);
-
-#ifdef CONFIG_PM
-
-struct generic_pm_domain *dev_to_genpd(struct device *dev)
-{
- if (IS_ERR_OR_NULL(dev->pm_domain))
- return ERR_PTR(-EINVAL);
-
- return pd_to_genpd(dev->pm_domain);
-}
-
-static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
- stop_latency_ns, "stop");
-}
-
-static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
- start_latency_ns, "start");
-}
-
-static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
- save_state_latency_ns, "state save");
-}
-
-static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
- restore_state_latency_ns,
- "state restore");
-}
-
-static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
-{
- bool ret = false;
-
- if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
- ret = !!atomic_dec_and_test(&genpd->sd_count);
-
- return ret;
-}
-
-static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
-{
- atomic_inc(&genpd->sd_count);
- smp_mb__after_atomic_inc();
-}
-
-static void genpd_acquire_lock(struct generic_pm_domain *genpd)
-{
- DEFINE_WAIT(wait);
-
- mutex_lock(&genpd->lock);
- /*
- * Wait for the domain to transition into either the active,
- * or the power off state.
- */
- for (;;) {
- prepare_to_wait(&genpd->status_wait_queue, &wait,
- TASK_UNINTERRUPTIBLE);
- if (genpd->status == GPD_STATE_ACTIVE
- || genpd->status == GPD_STATE_POWER_OFF)
- break;
- mutex_unlock(&genpd->lock);
-
- schedule();
-
- mutex_lock(&genpd->lock);
- }
- finish_wait(&genpd->status_wait_queue, &wait);
-}
-
-static void genpd_release_lock(struct generic_pm_domain *genpd)
-{
- mutex_unlock(&genpd->lock);
-}
-
-static void genpd_set_active(struct generic_pm_domain *genpd)
-{
- if (genpd->resume_count == 0)
- genpd->status = GPD_STATE_ACTIVE;
-}
-
-/**
- * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
- * @genpd: PM domain to power up.
- *
- * Restore power to @genpd and all of its masters so that it is possible to
- * resume a device belonging to it.
- */
-int __pm_genpd_poweron(struct generic_pm_domain *genpd)
- __releases(&genpd->lock) __acquires(&genpd->lock)
-{
- struct gpd_link *link;
- DEFINE_WAIT(wait);
- int ret = 0;
-
- /* If the domain's master is being waited for, we have to wait too. */
- for (;;) {
- prepare_to_wait(&genpd->status_wait_queue, &wait,
- TASK_UNINTERRUPTIBLE);
- if (genpd->status != GPD_STATE_WAIT_MASTER)
- break;
- mutex_unlock(&genpd->lock);
-
- schedule();
-
- mutex_lock(&genpd->lock);
- }
- finish_wait(&genpd->status_wait_queue, &wait);
-
- if (genpd->status == GPD_STATE_ACTIVE
- || (genpd->prepared_count > 0 && genpd->suspend_power_off))
- return 0;
-
- if (genpd->status != GPD_STATE_POWER_OFF) {
- genpd_set_active(genpd);
- return 0;
- }
-
- /*
- * The list is guaranteed not to change while the loop below is being
- * executed, unless one of the masters' .power_on() callbacks fiddles
- * with it.
- */
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_inc(link->master);
- genpd->status = GPD_STATE_WAIT_MASTER;
-
- mutex_unlock(&genpd->lock);
-
- ret = pm_genpd_poweron(link->master);
-
- mutex_lock(&genpd->lock);
-
- /*
- * The "wait for parent" status is guaranteed not to change
- * while the master is powering on.
- */
- genpd->status = GPD_STATE_POWER_OFF;
- wake_up_all(&genpd->status_wait_queue);
- if (ret) {
- genpd_sd_counter_dec(link->master);
- goto err;
- }
- }
-
- if (genpd->power_on) {
- ktime_t time_start = ktime_get();
- s64 elapsed_ns;
-
- ret = genpd->power_on(genpd);
- if (ret)
- goto err;
-
- elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
- if (elapsed_ns > genpd->power_on_latency_ns) {
- genpd->power_on_latency_ns = elapsed_ns;
- genpd->max_off_time_changed = true;
- if (genpd->name)
- pr_warning("%s: Power-on latency exceeded, "
- "new value %lld ns\n", genpd->name,
- elapsed_ns);
- }
- }
-
- genpd_set_active(genpd);
-
- return 0;
-
- err:
- list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
- genpd_sd_counter_dec(link->master);
-
- return ret;
-}
-
-/**
- * pm_genpd_poweron - Restore power to a given PM domain and its masters.
- * @genpd: PM domain to power up.
- */
-int pm_genpd_poweron(struct generic_pm_domain *genpd)
-{
- int ret;
-
- mutex_lock(&genpd->lock);
- ret = __pm_genpd_poweron(genpd);
- mutex_unlock(&genpd->lock);
- return ret;
-}
-
-#endif /* CONFIG_PM */
-
-#ifdef CONFIG_PM_RUNTIME
-
-static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
- unsigned long val, void *ptr)
-{
- struct generic_pm_domain_data *gpd_data;
- struct device *dev;
-
- gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
-
- mutex_lock(&gpd_data->lock);
- dev = gpd_data->base.dev;
- if (!dev) {
- mutex_unlock(&gpd_data->lock);
- return NOTIFY_DONE;
- }
- mutex_unlock(&gpd_data->lock);
-
- for (;;) {
- struct generic_pm_domain *genpd;
- struct pm_domain_data *pdd;
-
- spin_lock_irq(&dev->power.lock);
-
- pdd = dev->power.subsys_data ?
- dev->power.subsys_data->domain_data : NULL;
- if (pdd) {
- to_gpd_data(pdd)->td.constraint_changed = true;
- genpd = dev_to_genpd(dev);
- } else {
- genpd = ERR_PTR(-ENODATA);
- }
-
- spin_unlock_irq(&dev->power.lock);
-
- if (!IS_ERR(genpd)) {
- mutex_lock(&genpd->lock);
- genpd->max_off_time_changed = true;
- mutex_unlock(&genpd->lock);
- }
-
- dev = dev->parent;
- if (!dev || dev->power.ignore_children)
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-/**
- * __pm_genpd_save_device - Save the pre-suspend state of a device.
- * @pdd: Domain data of the device to save the state of.
- * @genpd: PM domain the device belongs to.
- */
-static int __pm_genpd_save_device(struct pm_domain_data *pdd,
- struct generic_pm_domain *genpd)
- __releases(&genpd->lock) __acquires(&genpd->lock)
-{
- struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
- struct device *dev = pdd->dev;
- int ret = 0;
-
- if (gpd_data->need_restore)
- return 0;
-
- mutex_unlock(&genpd->lock);
-
- genpd_start_dev(genpd, dev);
- ret = genpd_save_dev(genpd, dev);
- genpd_stop_dev(genpd, dev);
-
- mutex_lock(&genpd->lock);
-
- if (!ret)
- gpd_data->need_restore = true;
-
- return ret;
-}
-
-/**
- * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
- * @pdd: Domain data of the device to restore the state of.
- * @genpd: PM domain the device belongs to.
- */
-static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
- struct generic_pm_domain *genpd)
- __releases(&genpd->lock) __acquires(&genpd->lock)
-{
- struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
- struct device *dev = pdd->dev;
-
- if (!gpd_data->need_restore)
- return;
-
- mutex_unlock(&genpd->lock);
-
- genpd_start_dev(genpd, dev);
- genpd_restore_dev(genpd, dev);
- genpd_stop_dev(genpd, dev);
-
- mutex_lock(&genpd->lock);
-
- gpd_data->need_restore = false;
-}
-
-/**
- * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
- * @genpd: PM domain to check.
- *
- * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
- * a "power off" operation, which means that a "power on" has occured in the
- * meantime, or if its resume_count field is different from zero, which means
- * that one of its devices has been resumed in the meantime.
- */
-static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
-{
- return genpd->status == GPD_STATE_WAIT_MASTER
- || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
-}
-
-/**
- * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
- * @genpd: PM domait to power off.
- *
- * Queue up the execution of pm_genpd_poweroff() unless it's already been done
- * before.
- */
-void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
-{
- if (!work_pending(&genpd->power_off_work))
- queue_work(pm_wq, &genpd->power_off_work);
-}
-
-/**
- * pm_genpd_poweroff - Remove power from a given PM domain.
- * @genpd: PM domain to power down.
- *
- * If all of the @genpd's devices have been suspended and all of its subdomains
- * have been powered down, run the runtime suspend callbacks provided by all of
- * the @genpd's devices' drivers and remove power from @genpd.
- */
-static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
- __releases(&genpd->lock) __acquires(&genpd->lock)
-{
- struct pm_domain_data *pdd;
- struct gpd_link *link;
- unsigned int not_suspended;
- int ret = 0;
-
- start:
- /*
- * Do not try to power off the domain in the following situations:
- * (1) The domain is already in the "power off" state.
- * (2) The domain is waiting for its master to power up.
- * (3) One of the domain's devices is being resumed right now.
- * (4) System suspend is in progress.
- */
- if (genpd->status == GPD_STATE_POWER_OFF
- || genpd->status == GPD_STATE_WAIT_MASTER
- || genpd->resume_count > 0 || genpd->prepared_count > 0)
- return 0;
-
- if (atomic_read(&genpd->sd_count) > 0)
- return -EBUSY;
-
- not_suspended = 0;
- list_for_each_entry(pdd, &genpd->dev_list, list_node)
- if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
- || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
- not_suspended++;
-
- if (not_suspended > genpd->in_progress)
- return -EBUSY;
-
- if (genpd->poweroff_task) {
- /*
- * Another instance of pm_genpd_poweroff() is executing
- * callbacks, so tell it to start over and return.
- */
- genpd->status = GPD_STATE_REPEAT;
- return 0;
- }
-
- if (genpd->gov && genpd->gov->power_down_ok) {
- if (!genpd->gov->power_down_ok(&genpd->domain))
- return -EAGAIN;
- }
-
- genpd->status = GPD_STATE_BUSY;
- genpd->poweroff_task = current;
-
- list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
- ret = atomic_read(&genpd->sd_count) == 0 ?
- __pm_genpd_save_device(pdd, genpd) : -EBUSY;
-
- if (genpd_abort_poweroff(genpd))
- goto out;
-
- if (ret) {
- genpd_set_active(genpd);
- goto out;
- }
-
- if (genpd->status == GPD_STATE_REPEAT) {
- genpd->poweroff_task = NULL;
- goto start;
- }
- }
-
- if (genpd->power_off) {
- ktime_t time_start;
- s64 elapsed_ns;
-
- if (atomic_read(&genpd->sd_count) > 0) {
- ret = -EBUSY;
- goto out;
- }
-
- time_start = ktime_get();
-
- /*
- * If sd_count > 0 at this point, one of the subdomains hasn't
- * managed to call pm_genpd_poweron() for the master yet after
- * incrementing it. In that case pm_genpd_poweron() will wait
- * for us to drop the lock, so we can call .power_off() and let
- * the pm_genpd_poweron() restore power for us (this shouldn't
- * happen very often).
- */
- ret = genpd->power_off(genpd);
- if (ret == -EBUSY) {
- genpd_set_active(genpd);
- goto out;
- }
-
- elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
- if (elapsed_ns > genpd->power_off_latency_ns) {
- genpd->power_off_latency_ns = elapsed_ns;
- genpd->max_off_time_changed = true;
- if (genpd->name)
- pr_warning("%s: Power-off latency exceeded, "
- "new value %lld ns\n", genpd->name,
- elapsed_ns);
- }
- }
-
- genpd->status = GPD_STATE_POWER_OFF;
-
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_dec(link->master);
- genpd_queue_power_off_work(link->master);
- }
-
- out:
- genpd->poweroff_task = NULL;
- wake_up_all(&genpd->status_wait_queue);
- return ret;
-}
-
-/**
- * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
- * @work: Work structure used for scheduling the execution of this function.
- */
-static void genpd_power_off_work_fn(struct work_struct *work)
-{
- struct generic_pm_domain *genpd;
-
- genpd = container_of(work, struct generic_pm_domain, power_off_work);
-
- genpd_acquire_lock(genpd);
- pm_genpd_poweroff(genpd);
- genpd_release_lock(genpd);
-}
-
-/**
- * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
- * @dev: Device to suspend.
- *
- * Carry out a runtime suspend of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a PM domain consisting of I/O devices.
- */
-static int pm_genpd_runtime_suspend(struct device *dev)
-{
- struct generic_pm_domain *genpd;
- bool (*stop_ok)(struct device *__dev);
- int ret;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- might_sleep_if(!genpd->dev_irq_safe);
-
- if (dev_gpd_data(dev)->always_on)
- return -EBUSY;
-
- stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
- if (stop_ok && !stop_ok(dev))
- return -EBUSY;
-
- ret = genpd_stop_dev(genpd, dev);
- if (ret)
- return ret;
-
- /*
- * If power.irq_safe is set, this routine will be run with interrupts
- * off, so it can't use mutexes.
- */
- if (dev->power.irq_safe)
- return 0;
-
- mutex_lock(&genpd->lock);
- genpd->in_progress++;
- pm_genpd_poweroff(genpd);
- genpd->in_progress--;
- mutex_unlock(&genpd->lock);
-
- return 0;
-}
-
-/**
- * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
- * @dev: Device to resume.
- *
- * Carry out a runtime resume of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a PM domain consisting of I/O devices.
- */
-static int pm_genpd_runtime_resume(struct device *dev)
-{
- struct generic_pm_domain *genpd;
- DEFINE_WAIT(wait);
- int ret;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- might_sleep_if(!genpd->dev_irq_safe);
-
- /* If power.irq_safe, the PM domain is never powered off. */
- if (dev->power.irq_safe)
- goto out;
-
- mutex_lock(&genpd->lock);
- ret = __pm_genpd_poweron(genpd);
- if (ret) {
- mutex_unlock(&genpd->lock);
- return ret;
- }
- genpd->status = GPD_STATE_BUSY;
- genpd->resume_count++;
- for (;;) {
- prepare_to_wait(&genpd->status_wait_queue, &wait,
- TASK_UNINTERRUPTIBLE);
- /*
- * If current is the powering off task, we have been called
- * reentrantly from one of the device callbacks, so we should
- * not wait.
- */
- if (!genpd->poweroff_task || genpd->poweroff_task == current)
- break;
- mutex_unlock(&genpd->lock);
-
- schedule();
-
- mutex_lock(&genpd->lock);
- }
- finish_wait(&genpd->status_wait_queue, &wait);
- __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
- genpd->resume_count--;
- genpd_set_active(genpd);
- wake_up_all(&genpd->status_wait_queue);
- mutex_unlock(&genpd->lock);
-
- out:
- genpd_start_dev(genpd, dev);
-
- return 0;
-}
-
-/**
- * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
- */
-void pm_genpd_poweroff_unused(void)
-{
- struct generic_pm_domain *genpd;
-
- mutex_lock(&gpd_list_lock);
-
- list_for_each_entry(genpd, &gpd_list, gpd_list_node)
- genpd_queue_power_off_work(genpd);
-
- mutex_unlock(&gpd_list_lock);
-}
-
-#else
-
-static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
- unsigned long val, void *ptr)
-{
- return NOTIFY_DONE;
-}
-
-static inline void genpd_power_off_work_fn(struct work_struct *work) {}
-
-#define pm_genpd_runtime_suspend NULL
-#define pm_genpd_runtime_resume NULL
-
-#endif /* CONFIG_PM_RUNTIME */
-
-#ifdef CONFIG_PM_SLEEP
-
-static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
- struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
-}
-
-static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
-}
-
-static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
-}
-
-static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
-}
-
-static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
-}
-
-static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
-}
-
-static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
-}
-
-static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
-}
-
-static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
-}
-
-/**
- * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
- * @genpd: PM domain to power off, if possible.
- *
- * Check if the given PM domain can be powered off (during system suspend or
- * hibernation) and do that if so. Also, in that case propagate to its masters.
- *
- * This function is only called in "noirq" stages of system power transitions,
- * so it need not acquire locks (all of the "noirq" callbacks are executed
- * sequentially, so it is guaranteed that it will never run twice in parallel).
- */
-static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
-{
- struct gpd_link *link;
-
- if (genpd->status == GPD_STATE_POWER_OFF)
- return;
-
- if (genpd->suspended_count != genpd->device_count
- || atomic_read(&genpd->sd_count) > 0)
- return;
-
- if (genpd->power_off)
- genpd->power_off(genpd);
-
- genpd->status = GPD_STATE_POWER_OFF;
-
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_dec(link->master);
- pm_genpd_sync_poweroff(link->master);
- }
-}
-
-/**
- * resume_needed - Check whether to resume a device before system suspend.
- * @dev: Device to check.
- * @genpd: PM domain the device belongs to.
- *
- * There are two cases in which a device that can wake up the system from sleep
- * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
- * to wake up the system and it has to remain active for this purpose while the
- * system is in the sleep state and (2) if the device is not enabled to wake up
- * the system from sleep states and it generally doesn't generate wakeup signals
- * by itself (those signals are generated on its behalf by other parts of the
- * system). In the latter case it may be necessary to reconfigure the device's
- * wakeup settings during system suspend, because it may have been set up to
- * signal remote wakeup from the system's working state as needed by runtime PM.
- * Return 'true' in either of the above cases.
- */
-static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
-{
- bool active_wakeup;
-
- if (!device_can_wakeup(dev))
- return false;
-
- active_wakeup = genpd_dev_active_wakeup(genpd, dev);
- return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
-}
-
-/**
- * pm_genpd_prepare - Start power transition of a device in a PM domain.
- * @dev: Device to start the transition of.
- *
- * Start a power transition of a device (during a system-wide power transition)
- * under the assumption that its pm_domain field points to the domain member of
- * an object of type struct generic_pm_domain representing a PM domain
- * consisting of I/O devices.
- */
-static int pm_genpd_prepare(struct device *dev)
-{
- struct generic_pm_domain *genpd;
- int ret;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- /*
- * If a wakeup request is pending for the device, it should be woken up
- * at this point and a system wakeup event should be reported if it's
- * set up to wake up the system from sleep states.
- */
- pm_runtime_get_noresume(dev);
- if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
- pm_wakeup_event(dev, 0);
-
- if (pm_wakeup_pending()) {
- pm_runtime_put_sync(dev);
- return -EBUSY;
- }
-
- if (resume_needed(dev, genpd))
- pm_runtime_resume(dev);
-
- genpd_acquire_lock(genpd);
-
- if (genpd->prepared_count++ == 0) {
- genpd->suspended_count = 0;
- genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
- }
-
- genpd_release_lock(genpd);
-
- if (genpd->suspend_power_off) {
- pm_runtime_put_noidle(dev);
- return 0;
- }
-
- /*
- * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
- * so pm_genpd_poweron() will return immediately, but if the device
- * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
- * to make it operational.
- */
- pm_runtime_resume(dev);
- __pm_runtime_disable(dev, false);
-
- ret = pm_generic_prepare(dev);
- if (ret) {
- mutex_lock(&genpd->lock);
-
- if (--genpd->prepared_count == 0)
- genpd->suspend_power_off = false;
-
- mutex_unlock(&genpd->lock);
- pm_runtime_enable(dev);
- }
-
- pm_runtime_put_sync(dev);
- return ret;
-}
-
-/**
- * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
- * @dev: Device to suspend.
- *
- * Suspend a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a PM domain consisting of I/O devices.
- */
-static int pm_genpd_suspend(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
-}
-
-/**
- * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
- * @dev: Device to suspend.
- *
- * Carry out a late suspend of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a PM domain consisting of I/O devices.
- */
-static int pm_genpd_suspend_late(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
-}
-
-/**
- * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
- * @dev: Device to suspend.
- *
- * Stop the device and remove power from the domain if all devices in it have
- * been stopped.
- */
-static int pm_genpd_suspend_noirq(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
- || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
- return 0;
-
- genpd_stop_dev(genpd, dev);
-
- /*
- * Since all of the "noirq" callbacks are executed sequentially, it is
- * guaranteed that this function will never run twice in parallel for
- * the same PM domain, so it is not necessary to use locking here.
- */
- genpd->suspended_count++;
- pm_genpd_sync_poweroff(genpd);
-
- return 0;
-}
-
-/**
- * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
- * @dev: Device to resume.
- *
- * Restore power to the device's PM domain, if necessary, and start the device.
- */
-static int pm_genpd_resume_noirq(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
- || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
- return 0;
-
- /*
- * Since all of the "noirq" callbacks are executed sequentially, it is
- * guaranteed that this function will never run twice in parallel for
- * the same PM domain, so it is not necessary to use locking here.
- */
- pm_genpd_poweron(genpd);
- genpd->suspended_count--;
-
- return genpd_start_dev(genpd, dev);
-}
-
-/**
- * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
- * @dev: Device to resume.
- *
- * Carry out an early resume of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
- */
-static int pm_genpd_resume_early(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
-}
-
-/**
- * pm_genpd_resume - Resume of device in an I/O PM domain.
- * @dev: Device to resume.
- *
- * Resume a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static int pm_genpd_resume(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
-}
-
-/**
- * pm_genpd_freeze - Freezing a device in an I/O PM domain.
- * @dev: Device to freeze.
- *
- * Freeze a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static int pm_genpd_freeze(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
-}
-
-/**
- * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
- * @dev: Device to freeze.
- *
- * Carry out a late freeze of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
- */
-static int pm_genpd_freeze_late(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
-}
-
-/**
- * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
- * @dev: Device to freeze.
- *
- * Carry out a late freeze of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
- */
-static int pm_genpd_freeze_noirq(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
- 0 : genpd_stop_dev(genpd, dev);
-}
-
-/**
- * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
- * @dev: Device to thaw.
- *
- * Start the device, unless power has been removed from the domain already
- * before the system transition.
- */
-static int pm_genpd_thaw_noirq(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
- 0 : genpd_start_dev(genpd, dev);
-}
-
-/**
- * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
- * @dev: Device to thaw.
- *
- * Carry out an early thaw of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
- */
-static int pm_genpd_thaw_early(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
-}
-
-/**
- * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
- * @dev: Device to thaw.
- *
- * Thaw a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static int pm_genpd_thaw(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
-}
-
-/**
- * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
- * @dev: Device to resume.
- *
- * Make sure the domain will be in the same power state as before the
- * hibernation the system is resuming from and start the device if necessary.
- */
-static int pm_genpd_restore_noirq(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- /*
- * Since all of the "noirq" callbacks are executed sequentially, it is
- * guaranteed that this function will never run twice in parallel for
- * the same PM domain, so it is not necessary to use locking here.
- *
- * At this point suspended_count == 0 means we are being run for the
- * first time for the given domain in the present cycle.
- */
- if (genpd->suspended_count++ == 0) {
- /*
- * The boot kernel might put the domain into arbitrary state,
- * so make it appear as powered off to pm_genpd_poweron(), so
- * that it tries to power it on in case it was really off.
- */
- genpd->status = GPD_STATE_POWER_OFF;
- if (genpd->suspend_power_off) {
- /*
- * If the domain was off before the hibernation, make
- * sure it will be off going forward.
- */
- if (genpd->power_off)
- genpd->power_off(genpd);
-
- return 0;
- }
- }
-
- if (genpd->suspend_power_off)
- return 0;
-
- pm_genpd_poweron(genpd);
-
- return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
-}
-
-/**
- * pm_genpd_complete - Complete power transition of a device in a power domain.
- * @dev: Device to complete the transition of.
- *
- * Complete a power transition of a device (during a system-wide power
- * transition) under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static void pm_genpd_complete(struct device *dev)
-{
- struct generic_pm_domain *genpd;
- bool run_complete;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return;
-
- mutex_lock(&genpd->lock);
-
- run_complete = !genpd->suspend_power_off;
- if (--genpd->prepared_count == 0)
- genpd->suspend_power_off = false;
-
- mutex_unlock(&genpd->lock);
-
- if (run_complete) {
- pm_generic_complete(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- pm_runtime_idle(dev);
- }
-}
-
-#else
-
-#define pm_genpd_prepare NULL
-#define pm_genpd_suspend NULL
-#define pm_genpd_suspend_late NULL
-#define pm_genpd_suspend_noirq NULL
-#define pm_genpd_resume_early NULL
-#define pm_genpd_resume_noirq NULL
-#define pm_genpd_resume NULL
-#define pm_genpd_freeze NULL
-#define pm_genpd_freeze_late NULL
-#define pm_genpd_freeze_noirq NULL
-#define pm_genpd_thaw_early NULL
-#define pm_genpd_thaw_noirq NULL
-#define pm_genpd_thaw NULL
-#define pm_genpd_restore_noirq NULL
-#define pm_genpd_complete NULL
-
-#endif /* CONFIG_PM_SLEEP */
-
-/**
- * __pm_genpd_add_device - Add a device to an I/O PM domain.
- * @genpd: PM domain to add the device to.
- * @dev: Device to be added.
- * @td: Set of PM QoS timing parameters to attach to the device.
- */
-int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
- struct gpd_timing_data *td)
-{
- struct generic_pm_domain_data *gpd_data;
- struct pm_domain_data *pdd;
- int ret = 0;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
- return -EINVAL;
-
- gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
- if (!gpd_data)
- return -ENOMEM;
-
- mutex_init(&gpd_data->lock);
- gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
- dev_pm_qos_add_notifier(dev, &gpd_data->nb);
-
- genpd_acquire_lock(genpd);
-
- if (genpd->prepared_count > 0) {
- ret = -EAGAIN;
- goto out;
- }
-
- list_for_each_entry(pdd, &genpd->dev_list, list_node)
- if (pdd->dev == dev) {
- ret = -EINVAL;
- goto out;
- }
-
- genpd->device_count++;
- genpd->max_off_time_changed = true;
-
- dev_pm_get_subsys_data(dev);
-
- mutex_lock(&gpd_data->lock);
- spin_lock_irq(&dev->power.lock);
- dev->pm_domain = &genpd->domain;
- dev->power.subsys_data->domain_data = &gpd_data->base;
- gpd_data->base.dev = dev;
- list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
- gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
- if (td)
- gpd_data->td = *td;
-
- gpd_data->td.constraint_changed = true;
- gpd_data->td.effective_constraint_ns = -1;
- spin_unlock_irq(&dev->power.lock);
- mutex_unlock(&gpd_data->lock);
-
- genpd_release_lock(genpd);
-
- return 0;
-
- out:
- genpd_release_lock(genpd);
-
- dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
- kfree(gpd_data);
- return ret;
-}
-
-/**
- * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
- * @genpd_node: Device tree node pointer representing a PM domain to which the
- * the device is added to.
- * @dev: Device to be added.
- * @td: Set of PM QoS timing parameters to attach to the device.
- */
-int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
- struct gpd_timing_data *td)
-{
- struct generic_pm_domain *genpd = NULL, *gpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
- return -EINVAL;
-
- mutex_lock(&gpd_list_lock);
- list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
- if (gpd->of_node == genpd_node) {
- genpd = gpd;
- break;
- }
- }
- mutex_unlock(&gpd_list_lock);
-
- if (!genpd)
- return -EINVAL;
-
- return __pm_genpd_add_device(genpd, dev, td);
-}
-
-/**
- * pm_genpd_remove_device - Remove a device from an I/O PM domain.
- * @genpd: PM domain to remove the device from.
- * @dev: Device to be removed.
- */
-int pm_genpd_remove_device(struct generic_pm_domain *genpd,
- struct device *dev)
-{
- struct generic_pm_domain_data *gpd_data;
- struct pm_domain_data *pdd;
- int ret = 0;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
- || IS_ERR_OR_NULL(dev->pm_domain)
- || pd_to_genpd(dev->pm_domain) != genpd)
- return -EINVAL;
-
- genpd_acquire_lock(genpd);
-
- if (genpd->prepared_count > 0) {
- ret = -EAGAIN;
- goto out;
- }
-
- genpd->device_count--;
- genpd->max_off_time_changed = true;
-
- spin_lock_irq(&dev->power.lock);
- dev->pm_domain = NULL;
- pdd = dev->power.subsys_data->domain_data;
- list_del_init(&pdd->list_node);
- dev->power.subsys_data->domain_data = NULL;
- spin_unlock_irq(&dev->power.lock);
-
- gpd_data = to_gpd_data(pdd);
- mutex_lock(&gpd_data->lock);
- pdd->dev = NULL;
- mutex_unlock(&gpd_data->lock);
-
- genpd_release_lock(genpd);
-
- dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
- kfree(gpd_data);
- dev_pm_put_subsys_data(dev);
- return 0;
-
- out:
- genpd_release_lock(genpd);
-
- return ret;
-}
-
-/**
- * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
- * @dev: Device to set/unset the flag for.
- * @val: The new value of the device's "always on" flag.
- */
-void pm_genpd_dev_always_on(struct device *dev, bool val)
-{
- struct pm_subsys_data *psd;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->power.lock, flags);
-
- psd = dev_to_psd(dev);
- if (psd && psd->domain_data)
- to_gpd_data(psd->domain_data)->always_on = val;
-
- spin_unlock_irqrestore(&dev->power.lock, flags);
-}
-EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
-
-/**
- * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
- * @dev: Device to set/unset the flag for.
- * @val: The new value of the device's "need restore" flag.
- */
-void pm_genpd_dev_need_restore(struct device *dev, bool val)
-{
- struct pm_subsys_data *psd;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->power.lock, flags);
-
- psd = dev_to_psd(dev);
- if (psd && psd->domain_data)
- to_gpd_data(psd->domain_data)->need_restore = val;
-
- spin_unlock_irqrestore(&dev->power.lock, flags);
-}
-EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
-
-/**
- * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
- * @genpd: Master PM domain to add the subdomain to.
- * @subdomain: Subdomain to be added.
- */
-int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
- struct generic_pm_domain *subdomain)
-{
- struct gpd_link *link;
- int ret = 0;
-
- if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
- return -EINVAL;
-
- start:
- genpd_acquire_lock(genpd);
- mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
-
- if (subdomain->status != GPD_STATE_POWER_OFF
- && subdomain->status != GPD_STATE_ACTIVE) {
- mutex_unlock(&subdomain->lock);
- genpd_release_lock(genpd);
- goto start;
- }
-
- if (genpd->status == GPD_STATE_POWER_OFF
- && subdomain->status != GPD_STATE_POWER_OFF) {
- ret = -EINVAL;
- goto out;
- }
-
- list_for_each_entry(link, &genpd->master_links, master_node) {
- if (link->slave == subdomain && link->master == genpd) {
- ret = -EINVAL;
- goto out;
- }
- }
-
- link = kzalloc(sizeof(*link), GFP_KERNEL);
- if (!link) {
- ret = -ENOMEM;
- goto out;
- }
- link->master = genpd;
- list_add_tail(&link->master_node, &genpd->master_links);
- link->slave = subdomain;
- list_add_tail(&link->slave_node, &subdomain->slave_links);
- if (subdomain->status != GPD_STATE_POWER_OFF)
- genpd_sd_counter_inc(genpd);
-
- out:
- mutex_unlock(&subdomain->lock);
- genpd_release_lock(genpd);
-
- return ret;
-}
-
-/**
- * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
- * @genpd: Master PM domain to remove the subdomain from.
- * @subdomain: Subdomain to be removed.
- */
-int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
- struct generic_pm_domain *subdomain)
-{
- struct gpd_link *link;
- int ret = -EINVAL;
-
- if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
- return -EINVAL;
-
- start:
- genpd_acquire_lock(genpd);
-
- list_for_each_entry(link, &genpd->master_links, master_node) {
- if (link->slave != subdomain)
- continue;
-
- mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
-
- if (subdomain->status != GPD_STATE_POWER_OFF
- && subdomain->status != GPD_STATE_ACTIVE) {
- mutex_unlock(&subdomain->lock);
- genpd_release_lock(genpd);
- goto start;
- }
-
- list_del(&link->master_node);
- list_del(&link->slave_node);
- kfree(link);
- if (subdomain->status != GPD_STATE_POWER_OFF)
- genpd_sd_counter_dec(genpd);
-
- mutex_unlock(&subdomain->lock);
-
- ret = 0;
- break;
- }
-
- genpd_release_lock(genpd);
-
- return ret;
-}
-
-/**
- * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
- * @dev: Device to add the callbacks to.
- * @ops: Set of callbacks to add.
- * @td: Timing data to add to the device along with the callbacks (optional).
- */
-int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
- struct gpd_timing_data *td)
-{
- struct pm_domain_data *pdd;
- int ret = 0;
-
- if (!(dev && dev->power.subsys_data && ops))
- return -EINVAL;
-
- pm_runtime_disable(dev);
- device_pm_lock();
-
- pdd = dev->power.subsys_data->domain_data;
- if (pdd) {
- struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
-
- gpd_data->ops = *ops;
- if (td)
- gpd_data->td = *td;
- } else {
- ret = -EINVAL;
- }
-
- device_pm_unlock();
- pm_runtime_enable(dev);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
-
-/**
- * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
- * @dev: Device to remove the callbacks from.
- * @clear_td: If set, clear the device's timing data too.
- */
-int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
-{
- struct pm_domain_data *pdd;
- int ret = 0;
-
- if (!(dev && dev->power.subsys_data))
- return -EINVAL;
-
- pm_runtime_disable(dev);
- device_pm_lock();
-
- pdd = dev->power.subsys_data->domain_data;
- if (pdd) {
- struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
-
- gpd_data->ops = (struct gpd_dev_ops){ 0 };
- if (clear_td)
- gpd_data->td = (struct gpd_timing_data){ 0 };
- } else {
- ret = -EINVAL;
- }
-
- device_pm_unlock();
- pm_runtime_enable(dev);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
-
-/* Default device callbacks for generic PM domains. */
-
-/**
- * pm_genpd_default_save_state - Default "save device state" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_save_state(struct device *dev)
-{
- int (*cb)(struct device *__dev);
- struct device_driver *drv = dev->driver;
-
- cb = dev_gpd_data(dev)->ops.save_state;
- if (cb)
- return cb(dev);
-
- if (drv && drv->pm && drv->pm->runtime_suspend)
- return drv->pm->runtime_suspend(dev);
-
- return 0;
-}
-
-/**
- * pm_genpd_default_restore_state - Default PM domians "restore device state".
- * @dev: Device to handle.
- */
-static int pm_genpd_default_restore_state(struct device *dev)
-{
- int (*cb)(struct device *__dev);
- struct device_driver *drv = dev->driver;
-
- cb = dev_gpd_data(dev)->ops.restore_state;
- if (cb)
- return cb(dev);
-
- if (drv && drv->pm && drv->pm->runtime_resume)
- return drv->pm->runtime_resume(dev);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-
-/**
- * pm_genpd_default_suspend - Default "device suspend" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_suspend(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
-
- return cb ? cb(dev) : pm_generic_suspend(dev);
-}
-
-/**
- * pm_genpd_default_suspend_late - Default "late device suspend" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_suspend_late(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
-
- return cb ? cb(dev) : pm_generic_suspend_late(dev);
-}
-
-/**
- * pm_genpd_default_resume_early - Default "early device resume" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_resume_early(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
-
- return cb ? cb(dev) : pm_generic_resume_early(dev);
-}
-
-/**
- * pm_genpd_default_resume - Default "device resume" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_resume(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
-
- return cb ? cb(dev) : pm_generic_resume(dev);
-}
-
-/**
- * pm_genpd_default_freeze - Default "device freeze" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_freeze(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
-
- return cb ? cb(dev) : pm_generic_freeze(dev);
-}
-
-/**
- * pm_genpd_default_freeze_late - Default "late device freeze" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_freeze_late(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
-
- return cb ? cb(dev) : pm_generic_freeze_late(dev);
-}
-
-/**
- * pm_genpd_default_thaw_early - Default "early device thaw" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_thaw_early(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
-
- return cb ? cb(dev) : pm_generic_thaw_early(dev);
-}
-
-/**
- * pm_genpd_default_thaw - Default "device thaw" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_thaw(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
-
- return cb ? cb(dev) : pm_generic_thaw(dev);
-}
-
-#else /* !CONFIG_PM_SLEEP */
-
-#define pm_genpd_default_suspend NULL
-#define pm_genpd_default_suspend_late NULL
-#define pm_genpd_default_resume_early NULL
-#define pm_genpd_default_resume NULL
-#define pm_genpd_default_freeze NULL
-#define pm_genpd_default_freeze_late NULL
-#define pm_genpd_default_thaw_early NULL
-#define pm_genpd_default_thaw NULL
-
-#endif /* !CONFIG_PM_SLEEP */
-
-/**
- * pm_genpd_init - Initialize a generic I/O PM domain object.
- * @genpd: PM domain object to initialize.
- * @gov: PM domain governor to associate with the domain (may be NULL).
- * @is_off: Initial value of the domain's power_is_off field.
- */
-void pm_genpd_init(struct generic_pm_domain *genpd,
- struct dev_power_governor *gov, bool is_off)
-{
- if (IS_ERR_OR_NULL(genpd))
- return;
-
- INIT_LIST_HEAD(&genpd->master_links);
- INIT_LIST_HEAD(&genpd->slave_links);
- INIT_LIST_HEAD(&genpd->dev_list);
- mutex_init(&genpd->lock);
- genpd->gov = gov;
- INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
- genpd->in_progress = 0;
- atomic_set(&genpd->sd_count, 0);
- genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
- init_waitqueue_head(&genpd->status_wait_queue);
- genpd->poweroff_task = NULL;
- genpd->resume_count = 0;
- genpd->device_count = 0;
- genpd->max_off_time_ns = -1;
- genpd->max_off_time_changed = true;
- genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
- genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
- genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
- genpd->domain.ops.prepare = pm_genpd_prepare;
- genpd->domain.ops.suspend = pm_genpd_suspend;
- genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
- genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
- genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
- genpd->domain.ops.resume_early = pm_genpd_resume_early;
- genpd->domain.ops.resume = pm_genpd_resume;
- genpd->domain.ops.freeze = pm_genpd_freeze;
- genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
- genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
- genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
- genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
- genpd->domain.ops.thaw = pm_genpd_thaw;
- genpd->domain.ops.poweroff = pm_genpd_suspend;
- genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
- genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
- genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
- genpd->domain.ops.restore_early = pm_genpd_resume_early;
- genpd->domain.ops.restore = pm_genpd_resume;
- genpd->domain.ops.complete = pm_genpd_complete;
- genpd->dev_ops.save_state = pm_genpd_default_save_state;
- genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
- genpd->dev_ops.suspend = pm_genpd_default_suspend;
- genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
- genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
- genpd->dev_ops.resume = pm_genpd_default_resume;
- genpd->dev_ops.freeze = pm_genpd_default_freeze;
- genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
- genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
- genpd->dev_ops.thaw = pm_genpd_default_thaw;
- mutex_lock(&gpd_list_lock);
- list_add(&genpd->gpd_list_node, &gpd_list);
- mutex_unlock(&gpd_list_lock);
-}
diff --git a/ANDROID_3.4.5/drivers/base/power/domain_governor.c b/ANDROID_3.4.5/drivers/base/power/domain_governor.c
deleted file mode 100644
index 28dee305..00000000
--- a/ANDROID_3.4.5/drivers/base/power/domain_governor.c
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * drivers/base/power/domain_governor.c - Governors for device PM domains.
- *
- * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/pm_domain.h>
-#include <linux/pm_qos.h>
-#include <linux/hrtimer.h>
-
-#ifdef CONFIG_PM_RUNTIME
-
-static int dev_update_qos_constraint(struct device *dev, void *data)
-{
- s64 *constraint_ns_p = data;
- s32 constraint_ns = -1;
-
- if (dev->power.subsys_data && dev->power.subsys_data->domain_data)
- constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
-
- if (constraint_ns < 0) {
- constraint_ns = dev_pm_qos_read_value(dev);
- constraint_ns *= NSEC_PER_USEC;
- }
- if (constraint_ns == 0)
- return 0;
-
- /*
- * constraint_ns cannot be negative here, because the device has been
- * suspended.
- */
- if (constraint_ns < *constraint_ns_p || *constraint_ns_p == 0)
- *constraint_ns_p = constraint_ns;
-
- return 0;
-}
-
-/**
- * default_stop_ok - Default PM domain governor routine for stopping devices.
- * @dev: Device to check.
- */
-bool default_stop_ok(struct device *dev)
-{
- struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
- unsigned long flags;
- s64 constraint_ns;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- spin_lock_irqsave(&dev->power.lock, flags);
-
- if (!td->constraint_changed) {
- bool ret = td->cached_stop_ok;
-
- spin_unlock_irqrestore(&dev->power.lock, flags);
- return ret;
- }
- td->constraint_changed = false;
- td->cached_stop_ok = false;
- td->effective_constraint_ns = -1;
- constraint_ns = __dev_pm_qos_read_value(dev);
-
- spin_unlock_irqrestore(&dev->power.lock, flags);
-
- if (constraint_ns < 0)
- return false;
-
- constraint_ns *= NSEC_PER_USEC;
- /*
- * We can walk the children without any additional locking, because
- * they all have been suspended at this point and their
- * effective_constraint_ns fields won't be modified in parallel with us.
- */
- if (!dev->power.ignore_children)
- device_for_each_child(dev, &constraint_ns,
- dev_update_qos_constraint);
-
- if (constraint_ns > 0) {
- constraint_ns -= td->start_latency_ns;
- if (constraint_ns == 0)
- return false;
- }
- td->effective_constraint_ns = constraint_ns;
- td->cached_stop_ok = constraint_ns > td->stop_latency_ns ||
- constraint_ns == 0;
- /*
- * The children have been suspended already, so we don't need to take
- * their stop latencies into account here.
- */
- return td->cached_stop_ok;
-}
-
-/**
- * default_power_down_ok - Default generic PM domain power off governor routine.
- * @pd: PM domain to check.
- *
- * This routine must be executed under the PM domain's lock.
- */
-static bool default_power_down_ok(struct dev_pm_domain *pd)
-{
- struct generic_pm_domain *genpd = pd_to_genpd(pd);
- struct gpd_link *link;
- struct pm_domain_data *pdd;
- s64 min_off_time_ns;
- s64 off_on_time_ns;
-
- if (genpd->max_off_time_changed) {
- struct gpd_link *link;
-
- /*
- * We have to invalidate the cached results for the masters, so
- * use the observation that default_power_down_ok() is not
- * going to be called for any master until this instance
- * returns.
- */
- list_for_each_entry(link, &genpd->slave_links, slave_node)
- link->master->max_off_time_changed = true;
-
- genpd->max_off_time_changed = false;
- genpd->cached_power_down_ok = false;
- genpd->max_off_time_ns = -1;
- } else {
- return genpd->cached_power_down_ok;
- }
-
- off_on_time_ns = genpd->power_off_latency_ns +
- genpd->power_on_latency_ns;
- /*
- * It doesn't make sense to remove power from the domain if saving
- * the state of all devices in it and the power off/power on operations
- * take too much time.
- *
- * All devices in this domain have been stopped already at this point.
- */
- list_for_each_entry(pdd, &genpd->dev_list, list_node) {
- if (pdd->dev->driver)
- off_on_time_ns +=
- to_gpd_data(pdd)->td.save_state_latency_ns;
- }
-
- min_off_time_ns = -1;
- /*
- * Check if subdomains can be off for enough time.
- *
- * All subdomains have been powered off already at this point.
- */
- list_for_each_entry(link, &genpd->master_links, master_node) {
- struct generic_pm_domain *sd = link->slave;
- s64 sd_max_off_ns = sd->max_off_time_ns;
-
- if (sd_max_off_ns < 0)
- continue;
-
- /*
- * Check if the subdomain is allowed to be off long enough for
- * the current domain to turn off and on (that's how much time
- * it will have to wait worst case).
- */
- if (sd_max_off_ns <= off_on_time_ns)
- return false;
-
- if (min_off_time_ns > sd_max_off_ns || min_off_time_ns < 0)
- min_off_time_ns = sd_max_off_ns;
- }
-
- /*
- * Check if the devices in the domain can be off enough time.
- */
- list_for_each_entry(pdd, &genpd->dev_list, list_node) {
- struct gpd_timing_data *td;
- s64 constraint_ns;
-
- if (!pdd->dev->driver)
- continue;
-
- /*
- * Check if the device is allowed to be off long enough for the
- * domain to turn off and on (that's how much time it will
- * have to wait worst case).
- */
- td = &to_gpd_data(pdd)->td;
- constraint_ns = td->effective_constraint_ns;
- /* default_stop_ok() need not be called before us. */
- if (constraint_ns < 0) {
- constraint_ns = dev_pm_qos_read_value(pdd->dev);
- constraint_ns *= NSEC_PER_USEC;
- }
- if (constraint_ns == 0)
- continue;
-
- /*
- * constraint_ns cannot be negative here, because the device has
- * been suspended.
- */
- constraint_ns -= td->restore_state_latency_ns;
- if (constraint_ns <= off_on_time_ns)
- return false;
-
- if (min_off_time_ns > constraint_ns || min_off_time_ns < 0)
- min_off_time_ns = constraint_ns;
- }
-
- genpd->cached_power_down_ok = true;
-
- /*
- * If the computed minimum device off time is negative, there are no
- * latency constraints, so the domain can spend arbitrary time in the
- * "off" state.
- */
- if (min_off_time_ns < 0)
- return true;
-
- /*
- * The difference between the computed minimum subdomain or device off
- * time and the time needed to turn the domain on is the maximum
- * theoretical time this domain can spend in the "off" state.
- */
- genpd->max_off_time_ns = min_off_time_ns - genpd->power_on_latency_ns;
- return true;
-}
-
-static bool always_on_power_down_ok(struct dev_pm_domain *domain)
-{
- return false;
-}
-
-#else /* !CONFIG_PM_RUNTIME */
-
-bool default_stop_ok(struct device *dev)
-{
- return false;
-}
-
-#define default_power_down_ok NULL
-#define always_on_power_down_ok NULL
-
-#endif /* !CONFIG_PM_RUNTIME */
-
-struct dev_power_governor simple_qos_governor = {
- .stop_ok = default_stop_ok,
- .power_down_ok = default_power_down_ok,
-};
-
-/**
- * pm_genpd_gov_always_on - A governor implementing an always-on policy
- */
-struct dev_power_governor pm_domain_always_on_gov = {
- .power_down_ok = always_on_power_down_ok,
- .stop_ok = default_stop_ok,
-};
diff --git a/ANDROID_3.4.5/drivers/base/power/generic_ops.c b/ANDROID_3.4.5/drivers/base/power/generic_ops.c
deleted file mode 100644
index d03d290f..00000000
--- a/ANDROID_3.4.5/drivers/base/power/generic_ops.c
+++ /dev/null
@@ -1,329 +0,0 @@
-/*
- * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
- *
- * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- *
- * This file is released under the GPLv2.
- */
-
-#include <linux/pm.h>
-#include <linux/pm_runtime.h>
-#include <linux/export.h>
-
-#ifdef CONFIG_PM_RUNTIME
-/**
- * pm_generic_runtime_idle - Generic runtime idle callback for subsystems.
- * @dev: Device to handle.
- *
- * If PM operations are defined for the @dev's driver and they include
- * ->runtime_idle(), execute it and return its error code, if nonzero.
- * Otherwise, execute pm_runtime_suspend() for the device and return 0.
- */
-int pm_generic_runtime_idle(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- if (pm && pm->runtime_idle) {
- int ret = pm->runtime_idle(dev);
- if (ret)
- return ret;
- }
-
- pm_runtime_suspend(dev);
- return 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_runtime_idle);
-
-/**
- * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
- * @dev: Device to suspend.
- *
- * If PM operations are defined for the @dev's driver and they include
- * ->runtime_suspend(), execute it and return its error code. Otherwise,
- * return 0.
- */
-int pm_generic_runtime_suspend(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int ret;
-
- ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
-
-/**
- * pm_generic_runtime_resume - Generic runtime resume callback for subsystems.
- * @dev: Device to resume.
- *
- * If PM operations are defined for the @dev's driver and they include
- * ->runtime_resume(), execute it and return its error code. Otherwise,
- * return 0.
- */
-int pm_generic_runtime_resume(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int ret;
-
- ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
-#endif /* CONFIG_PM_RUNTIME */
-
-#ifdef CONFIG_PM_SLEEP
-/**
- * pm_generic_prepare - Generic routine preparing a device for power transition.
- * @dev: Device to prepare.
- *
- * Prepare a device for a system-wide power transition.
- */
-int pm_generic_prepare(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (drv && drv->pm && drv->pm->prepare)
- ret = drv->pm->prepare(dev);
-
- return ret;
-}
-
-/**
- * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
- * @dev: Device to suspend.
- */
-int pm_generic_suspend_noirq(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
-
-/**
- * pm_generic_suspend_late - Generic suspend_late callback for subsystems.
- * @dev: Device to suspend.
- */
-int pm_generic_suspend_late(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
-
-/**
- * pm_generic_suspend - Generic suspend callback for subsystems.
- * @dev: Device to suspend.
- */
-int pm_generic_suspend(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->suspend ? pm->suspend(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_suspend);
-
-/**
- * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
- * @dev: Device to freeze.
- */
-int pm_generic_freeze_noirq(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
-
-/**
- * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
- * @dev: Device to freeze.
- */
-int pm_generic_freeze_late(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
-
-/**
- * pm_generic_freeze - Generic freeze callback for subsystems.
- * @dev: Device to freeze.
- */
-int pm_generic_freeze(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->freeze ? pm->freeze(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_freeze);
-
-/**
- * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
- * @dev: Device to handle.
- */
-int pm_generic_poweroff_noirq(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
-
-/**
- * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems.
- * @dev: Device to handle.
- */
-int pm_generic_poweroff_late(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
-
-/**
- * pm_generic_poweroff - Generic poweroff callback for subsystems.
- * @dev: Device to handle.
- */
-int pm_generic_poweroff(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->poweroff ? pm->poweroff(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_poweroff);
-
-/**
- * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
- * @dev: Device to thaw.
- */
-int pm_generic_thaw_noirq(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
-
-/**
- * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
- * @dev: Device to thaw.
- */
-int pm_generic_thaw_early(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
-
-/**
- * pm_generic_thaw - Generic thaw callback for subsystems.
- * @dev: Device to thaw.
- */
-int pm_generic_thaw(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->thaw ? pm->thaw(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_thaw);
-
-/**
- * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
- * @dev: Device to resume.
- */
-int pm_generic_resume_noirq(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
-
-/**
- * pm_generic_resume_early - Generic resume_early callback for subsystems.
- * @dev: Device to resume.
- */
-int pm_generic_resume_early(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->resume_early ? pm->resume_early(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_resume_early);
-
-/**
- * pm_generic_resume - Generic resume callback for subsystems.
- * @dev: Device to resume.
- */
-int pm_generic_resume(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->resume ? pm->resume(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_resume);
-
-/**
- * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
- * @dev: Device to restore.
- */
-int pm_generic_restore_noirq(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
-
-/**
- * pm_generic_restore_early - Generic restore_early callback for subsystems.
- * @dev: Device to resume.
- */
-int pm_generic_restore_early(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->restore_early ? pm->restore_early(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_restore_early);
-
-/**
- * pm_generic_restore - Generic restore callback for subsystems.
- * @dev: Device to restore.
- */
-int pm_generic_restore(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->restore ? pm->restore(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_restore);
-
-/**
- * pm_generic_complete - Generic routine competing a device power transition.
- * @dev: Device to handle.
- *
- * Complete a device power transition during a system-wide power transition.
- */
-void pm_generic_complete(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
-
- if (drv && drv->pm && drv->pm->complete)
- drv->pm->complete(dev);
-
- /*
- * Let runtime PM try to suspend devices that haven't been in use before
- * going into the system-wide sleep state we're resuming from.
- */
- pm_runtime_idle(dev);
-}
-#endif /* CONFIG_PM_SLEEP */
diff --git a/ANDROID_3.4.5/drivers/base/power/main.c b/ANDROID_3.4.5/drivers/base/power/main.c
deleted file mode 100644
index e153e03b..00000000
--- a/ANDROID_3.4.5/drivers/base/power/main.c
+++ /dev/null
@@ -1,1409 +0,0 @@
-/*
- * drivers/base/power/main.c - Where the driver meets power management.
- *
- * Copyright (c) 2003 Patrick Mochel
- * Copyright (c) 2003 Open Source Development Lab
- *
- * This file is released under the GPLv2
- *
- *
- * The driver model core calls device_pm_add() when a device is registered.
- * This will initialize the embedded device_pm_info object in the device
- * and add it to the list of power-controlled devices. sysfs entries for
- * controlling device power management will also be added.
- *
- * A separate list is used for keeping track of power info, because the power
- * domain dependencies may differ from the ancestral dependencies that the
- * subsystem list maintains.
- */
-
-#include <linux/device.h>
-#include <linux/kallsyms.h>
-#include <linux/export.h>
-#include <linux/mutex.h>
-#include <linux/pm.h>
-#include <linux/pm_runtime.h>
-#include <linux/resume-trace.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <linux/async.h>
-#include <linux/suspend.h>
-#include <linux/timer.h>
-
-#include "../base.h"
-#include "power.h"
-#include <mach/wmt_secure.h>
-
-typedef int (*pm_callback_t)(struct device *);
-
-/*
- * The entries in the dpm_list list are in a depth first order, simply
- * because children are guaranteed to be discovered after parents, and
- * are inserted at the back of the list on discovery.
- *
- * Since device_pm_add() may be called with a device lock held,
- * we must never try to acquire a device lock while holding
- * dpm_list_mutex.
- */
-
-LIST_HEAD(dpm_list);
-LIST_HEAD(dpm_prepared_list);
-LIST_HEAD(dpm_suspended_list);
-LIST_HEAD(dpm_late_early_list);
-LIST_HEAD(dpm_noirq_list);
-
-struct suspend_stats suspend_stats;
-static DEFINE_MUTEX(dpm_list_mtx);
-static pm_message_t pm_transition;
-
-struct dpm_watchdog {
- struct device *dev;
- struct task_struct *tsk;
- struct timer_list timer;
-};
-
-static int async_error;
-extern unsigned int cpu_trustzone_enabled;
-extern int console_printk[];
-#define console_loglevel (console_printk[0])
-
-/**
- * device_pm_init - Initialize the PM-related part of a device object.
- * @dev: Device object being initialized.
- */
-void device_pm_init(struct device *dev)
-{
- dev->power.is_prepared = false;
- dev->power.is_suspended = false;
- init_completion(&dev->power.completion);
- complete_all(&dev->power.completion);
- dev->power.wakeup = NULL;
- spin_lock_init(&dev->power.lock);
- pm_runtime_init(dev);
- INIT_LIST_HEAD(&dev->power.entry);
- dev->power.power_state = PMSG_INVALID;
-}
-
-/**
- * device_pm_lock - Lock the list of active devices used by the PM core.
- */
-void device_pm_lock(void)
-{
- mutex_lock(&dpm_list_mtx);
-}
-
-/**
- * device_pm_unlock - Unlock the list of active devices used by the PM core.
- */
-void device_pm_unlock(void)
-{
- mutex_unlock(&dpm_list_mtx);
-}
-
-/**
- * device_pm_add - Add a device to the PM core's list of active devices.
- * @dev: Device to add to the list.
- */
-void device_pm_add(struct device *dev)
-{
- pr_debug("PM: Adding info for %s:%s\n",
- dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
- mutex_lock(&dpm_list_mtx);
- if (dev->parent && dev->parent->power.is_prepared)
- dev_warn(dev, "parent %s should not be sleeping\n",
- dev_name(dev->parent));
- list_add_tail(&dev->power.entry, &dpm_list);
- dev_pm_qos_constraints_init(dev);
- mutex_unlock(&dpm_list_mtx);
-}
-
-/**
- * device_pm_remove - Remove a device from the PM core's list of active devices.
- * @dev: Device to be removed from the list.
- */
-void device_pm_remove(struct device *dev)
-{
- pr_debug("PM: Removing info for %s:%s\n",
- dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
- complete_all(&dev->power.completion);
- mutex_lock(&dpm_list_mtx);
- dev_pm_qos_constraints_destroy(dev);
- list_del_init(&dev->power.entry);
- mutex_unlock(&dpm_list_mtx);
- device_wakeup_disable(dev);
- pm_runtime_remove(dev);
-}
-
-/**
- * device_pm_move_before - Move device in the PM core's list of active devices.
- * @deva: Device to move in dpm_list.
- * @devb: Device @deva should come before.
- */
-void device_pm_move_before(struct device *deva, struct device *devb)
-{
- pr_debug("PM: Moving %s:%s before %s:%s\n",
- deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
- devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
- /* Delete deva from dpm_list and reinsert before devb. */
- list_move_tail(&deva->power.entry, &devb->power.entry);
-}
-
-/**
- * device_pm_move_after - Move device in the PM core's list of active devices.
- * @deva: Device to move in dpm_list.
- * @devb: Device @deva should come after.
- */
-void device_pm_move_after(struct device *deva, struct device *devb)
-{
- pr_debug("PM: Moving %s:%s after %s:%s\n",
- deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
- devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
- /* Delete deva from dpm_list and reinsert after devb. */
- list_move(&deva->power.entry, &devb->power.entry);
-}
-
-/**
- * device_pm_move_last - Move device to end of the PM core's list of devices.
- * @dev: Device to move in dpm_list.
- */
-void device_pm_move_last(struct device *dev)
-{
- pr_debug("PM: Moving %s:%s to end of list\n",
- dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
- list_move_tail(&dev->power.entry, &dpm_list);
-}
-
-static ktime_t initcall_debug_start(struct device *dev)
-{
- ktime_t calltime = ktime_set(0, 0);
-
- if (initcall_debug) {
- pr_info("calling %s+ @ %i, parent: %s\n",
- dev_name(dev), task_pid_nr(current),
- dev->parent ? dev_name(dev->parent) : "none");
- calltime = ktime_get();
- }
-
- return calltime;
-}
-
-static void initcall_debug_report(struct device *dev, ktime_t calltime,
- int error)
-{
- ktime_t delta, rettime;
-
- if (initcall_debug) {
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
- pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
- error, (unsigned long long)ktime_to_ns(delta) >> 10);
- }
-}
-
-/**
- * dpm_wait - Wait for a PM operation to complete.
- * @dev: Device to wait for.
- * @async: If unset, wait only if the device's power.async_suspend flag is set.
- */
-static void dpm_wait(struct device *dev, bool async)
-{
- if (!dev)
- return;
-
- if (async || (pm_async_enabled && dev->power.async_suspend))
- wait_for_completion(&dev->power.completion);
-}
-
-static int dpm_wait_fn(struct device *dev, void *async_ptr)
-{
- dpm_wait(dev, *((bool *)async_ptr));
- return 0;
-}
-
-static void dpm_wait_for_children(struct device *dev, bool async)
-{
- device_for_each_child(dev, &async, dpm_wait_fn);
-}
-
-/**
- * pm_op - Return the PM operation appropriate for given PM event.
- * @ops: PM operations to choose from.
- * @state: PM transition of the system being carried out.
- */
-static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
-{
- switch (state.event) {
-#ifdef CONFIG_SUSPEND
- case PM_EVENT_SUSPEND:
- return ops->suspend;
- case PM_EVENT_RESUME:
- return ops->resume;
-#endif /* CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATE_CALLBACKS
- case PM_EVENT_FREEZE:
- case PM_EVENT_QUIESCE:
- return ops->freeze;
- case PM_EVENT_HIBERNATE:
- return ops->poweroff;
- case PM_EVENT_THAW:
- case PM_EVENT_RECOVER:
- return ops->thaw;
- break;
- case PM_EVENT_RESTORE:
- return ops->restore;
-#endif /* CONFIG_HIBERNATE_CALLBACKS */
- }
-
- return NULL;
-}
-
-/**
- * pm_late_early_op - Return the PM operation appropriate for given PM event.
- * @ops: PM operations to choose from.
- * @state: PM transition of the system being carried out.
- *
- * Runtime PM is disabled for @dev while this function is being executed.
- */
-static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
- pm_message_t state)
-{
- switch (state.event) {
-#ifdef CONFIG_SUSPEND
- case PM_EVENT_SUSPEND:
- return ops->suspend_late;
- case PM_EVENT_RESUME:
- return ops->resume_early;
-#endif /* CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATE_CALLBACKS
- case PM_EVENT_FREEZE:
- case PM_EVENT_QUIESCE:
- return ops->freeze_late;
- case PM_EVENT_HIBERNATE:
- return ops->poweroff_late;
- case PM_EVENT_THAW:
- case PM_EVENT_RECOVER:
- return ops->thaw_early;
- case PM_EVENT_RESTORE:
- return ops->restore_early;
-#endif /* CONFIG_HIBERNATE_CALLBACKS */
- }
-
- return NULL;
-}
-
-/**
- * pm_noirq_op - Return the PM operation appropriate for given PM event.
- * @ops: PM operations to choose from.
- * @state: PM transition of the system being carried out.
- *
- * The driver of @dev will not receive interrupts while this function is being
- * executed.
- */
-static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
-{
- switch (state.event) {
-#ifdef CONFIG_SUSPEND
- case PM_EVENT_SUSPEND:
- return ops->suspend_noirq;
- case PM_EVENT_RESUME:
- return ops->resume_noirq;
-#endif /* CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATE_CALLBACKS
- case PM_EVENT_FREEZE:
- case PM_EVENT_QUIESCE:
- return ops->freeze_noirq;
- case PM_EVENT_HIBERNATE:
- return ops->poweroff_noirq;
- case PM_EVENT_THAW:
- case PM_EVENT_RECOVER:
- return ops->thaw_noirq;
- case PM_EVENT_RESTORE:
- return ops->restore_noirq;
-#endif /* CONFIG_HIBERNATE_CALLBACKS */
- }
-
- return NULL;
-}
-
-static char *pm_verb(int event)
-{
- switch (event) {
- case PM_EVENT_SUSPEND:
- return "suspend";
- case PM_EVENT_RESUME:
- return "resume";
- case PM_EVENT_FREEZE:
- return "freeze";
- case PM_EVENT_QUIESCE:
- return "quiesce";
- case PM_EVENT_HIBERNATE:
- return "hibernate";
- case PM_EVENT_THAW:
- return "thaw";
- case PM_EVENT_RESTORE:
- return "restore";
- case PM_EVENT_RECOVER:
- return "recover";
- default:
- return "(unknown PM event)";
- }
-}
-
-static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
-{
- dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
- ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
- ", may wakeup" : "");
-}
-
-static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
- int error)
-{
- printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
- dev_name(dev), pm_verb(state.event), info, error);
-}
-
-static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
-{
- ktime_t calltime;
- u64 usecs64;
- int usecs;
-
- calltime = ktime_get();
- usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
- do_div(usecs64, NSEC_PER_USEC);
- usecs = usecs64;
- if (usecs == 0)
- usecs = 1;
- pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
- info ?: "", info ? " " : "", pm_verb(state.event),
- usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
-}
-
-static int dpm_run_callback(pm_callback_t cb, struct device *dev,
- pm_message_t state, char *info)
-{
- ktime_t calltime;
- int error;
-
- if (!cb)
- return 0;
-
- calltime = initcall_debug_start(dev);
-
- pm_dev_dbg(dev, state, info);
- error = cb(dev);
- suspend_report_result(cb, error);
-
- initcall_debug_report(dev, calltime, error);
-
- return error;
-}
-
-/**
- * dpm_wd_handler - Driver suspend / resume watchdog handler.
- *
- * Called when a driver has timed out suspending or resuming.
- * There's not much we can do here to recover so BUG() out for
- * a crash-dump
- */
-static void dpm_wd_handler(unsigned long data)
-{
- struct dpm_watchdog *wd = (void *)data;
- struct device *dev = wd->dev;
- struct task_struct *tsk = wd->tsk;
-
- dev_emerg(dev, "**** DPM device timeout ****\n");
- show_stack(tsk, NULL);
-
- BUG();
-}
-
-/**
- * dpm_wd_set - Enable pm watchdog for given device.
- * @wd: Watchdog. Must be allocated on the stack.
- * @dev: Device to handle.
- */
-static void dpm_wd_set(struct dpm_watchdog *wd, struct device *dev)
-{
- struct timer_list *timer = &wd->timer;
-
- wd->dev = dev;
- wd->tsk = get_current();
-
- init_timer_on_stack(timer);
- timer->expires = jiffies + HZ * 12;
- timer->function = dpm_wd_handler;
- timer->data = (unsigned long)wd;
- add_timer(timer);
-}
-
-/**
- * dpm_wd_clear - Disable pm watchdog.
- * @wd: Watchdog to disable.
- */
-static void dpm_wd_clear(struct dpm_watchdog *wd)
-{
- struct timer_list *timer = &wd->timer;
-
- del_timer_sync(timer);
- destroy_timer_on_stack(timer);
-}
-
-/*------------------------- Resume routines -------------------------*/
-
-/**
- * device_resume_noirq - Execute an "early resume" callback for given device.
- * @dev: Device to handle.
- * @state: PM transition of the system being carried out.
- *
- * The driver of @dev will not receive interrupts while this function is being
- * executed.
- */
-static int device_resume_noirq(struct device *dev, pm_message_t state)
-{
- pm_callback_t callback = NULL;
- char *info = NULL;
- int error = 0;
-
- TRACE_DEVICE(dev);
- TRACE_RESUME(0);
-
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "noirq type ";
- callback = pm_noirq_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "noirq class ";
- callback = pm_noirq_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "noirq bus ";
- callback = pm_noirq_op(dev->bus->pm, state);
- }
-
- if (!callback && dev->driver && dev->driver->pm) {
- info = "noirq driver ";
- callback = pm_noirq_op(dev->driver->pm, state);
- }
-
- error = dpm_run_callback(callback, dev, state, info);
-
- TRACE_RESUME(error);
- return error;
-}
-
-/**
- * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
- * @state: PM transition of the system being carried out.
- *
- * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
- * enable device drivers to receive interrupts.
- */
-static void dpm_resume_noirq(pm_message_t state)
-{
- ktime_t starttime = ktime_get();
-
- mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_noirq_list)) {
- struct device *dev = to_device(dpm_noirq_list.next);
- int error;
-
- get_device(dev);
- list_move_tail(&dev->power.entry, &dpm_late_early_list);
- mutex_unlock(&dpm_list_mtx);
-
- error = device_resume_noirq(dev, state);
- if (error) {
- suspend_stats.failed_resume_noirq++;
- dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " noirq", error);
- }
-
- mutex_lock(&dpm_list_mtx);
- put_device(dev);
- }
- mutex_unlock(&dpm_list_mtx);
- dpm_show_time(starttime, state, "noirq");
- resume_device_irqs();
-}
-
-/**
- * device_resume_early - Execute an "early resume" callback for given device.
- * @dev: Device to handle.
- * @state: PM transition of the system being carried out.
- *
- * Runtime PM is disabled for @dev while this function is being executed.
- */
-static int device_resume_early(struct device *dev, pm_message_t state)
-{
- pm_callback_t callback = NULL;
- char *info = NULL;
- int error = 0;
-
- TRACE_DEVICE(dev);
- TRACE_RESUME(0);
-
- if (dev->pm_domain) {
- info = "early power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "early type ";
- callback = pm_late_early_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "early class ";
- callback = pm_late_early_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "early bus ";
- callback = pm_late_early_op(dev->bus->pm, state);
- }
-
- if (!callback && dev->driver && dev->driver->pm) {
- info = "early driver ";
- callback = pm_late_early_op(dev->driver->pm, state);
- }
-
- error = dpm_run_callback(callback, dev, state, info);
-
- TRACE_RESUME(error);
- return error;
-}
-
-/**
- * dpm_resume_early - Execute "early resume" callbacks for all devices.
- * @state: PM transition of the system being carried out.
- */
-static void dpm_resume_early(pm_message_t state)
-{
- ktime_t starttime = ktime_get();
-
- mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_late_early_list)) {
- struct device *dev = to_device(dpm_late_early_list.next);
- int error;
-
- get_device(dev);
- list_move_tail(&dev->power.entry, &dpm_suspended_list);
- mutex_unlock(&dpm_list_mtx);
-
- error = device_resume_early(dev, state);
- if (error) {
- suspend_stats.failed_resume_early++;
- dpm_save_failed_step(SUSPEND_RESUME_EARLY);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " early", error);
- }
-
- mutex_lock(&dpm_list_mtx);
- put_device(dev);
- }
- mutex_unlock(&dpm_list_mtx);
- dpm_show_time(starttime, state, "early");
-}
-
-/**
- * dpm_resume_start - Execute "noirq" and "early" device callbacks.
- * @state: PM transition of the system being carried out.
- */
-void dpm_resume_start(pm_message_t state)
-{
- dpm_resume_noirq(state);
- dpm_resume_early(state);
-}
-EXPORT_SYMBOL_GPL(dpm_resume_start);
-
-/**
- * device_resume - Execute "resume" callbacks for given device.
- * @dev: Device to handle.
- * @state: PM transition of the system being carried out.
- * @async: If true, the device is being resumed asynchronously.
- */
-static int device_resume(struct device *dev, pm_message_t state, bool async)
-{
- pm_callback_t callback = NULL;
- char *info = NULL;
- int error = 0;
- bool put = false;
- struct dpm_watchdog wd;
-
- TRACE_DEVICE(dev);
- TRACE_RESUME(0);
-
- dpm_wait(dev->parent, async);
- device_lock(dev);
-
- /*
- * This is a fib. But we'll allow new children to be added below
- * a resumed device, even if the device hasn't been completed yet.
- */
- dev->power.is_prepared = false;
- dpm_wd_set(&wd, dev);
-
- if (!dev->power.is_suspended)
- goto Unlock;
-
- pm_runtime_enable(dev);
- put = true;
-
- if (dev->pm_domain) {
- info = "power domain ";
- callback = pm_op(&dev->pm_domain->ops, state);
- goto Driver;
- }
-
- if (dev->type && dev->type->pm) {
- info = "type ";
- callback = pm_op(dev->type->pm, state);
- goto Driver;
- }
-
- if (dev->class) {
- if (dev->class->pm) {
- info = "class ";
- callback = pm_op(dev->class->pm, state);
- goto Driver;
- } else if (dev->class->resume) {
- info = "legacy class ";
- callback = dev->class->resume;
- goto End;
- }
- }
-
- if (dev->bus) {
- if (dev->bus->pm) {
- info = "bus ";
- callback = pm_op(dev->bus->pm, state);
- } else if (dev->bus->resume) {
- info = "legacy bus ";
- callback = dev->bus->resume;
- goto End;
- }
- }
-
- Driver:
- if (!callback && dev->driver && dev->driver->pm) {
- info = "driver ";
- callback = pm_op(dev->driver->pm, state);
- }
-
- End:
- error = dpm_run_callback(callback, dev, state, info);
- dev->power.is_suspended = false;
-
- Unlock:
- device_unlock(dev);
- dpm_wd_clear(&wd);
- complete_all(&dev->power.completion);
-
- TRACE_RESUME(error);
-
- if (put)
- pm_runtime_put_sync(dev);
-
- return error;
-}
-
-static void async_resume(void *data, async_cookie_t cookie)
-{
- struct device *dev = (struct device *)data;
- int error;
-
- error = device_resume(dev, pm_transition, true);
- if (error)
- pm_dev_err(dev, pm_transition, " async", error);
- put_device(dev);
-}
-
-static bool is_async(struct device *dev)
-{
- return dev->power.async_suspend && pm_async_enabled
- && !pm_trace_is_enabled();
-}
-
-/**
- * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
- * @state: PM transition of the system being carried out.
- *
- * Execute the appropriate "resume" callback for all devices whose status
- * indicates that they are suspended.
- */
-void dpm_resume(pm_message_t state)
-{
- struct device *dev;
- ktime_t starttime = ktime_get();
- int tmp;
-
- might_sleep();
-
- mutex_lock(&dpm_list_mtx);
- pm_transition = state;
- async_error = 0;
-
- list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
- INIT_COMPLETION(dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule(async_resume, dev);
- }
- }
-
- tmp = console_loglevel;
- console_loglevel = 7;
- while (!list_empty(&dpm_suspended_list)) {
- dev = to_device(dpm_suspended_list.next);
- get_device(dev);
- if (!is_async(dev)) {
- int error;
-
- mutex_unlock(&dpm_list_mtx);
-
- error = device_resume(dev, state, false);
- if (error) {
- suspend_stats.failed_resume++;
- dpm_save_failed_step(SUSPEND_RESUME);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, "", error);
- }
-
- mutex_lock(&dpm_list_mtx);
- }
- if (!list_empty(&dev->power.entry))
- list_move_tail(&dev->power.entry, &dpm_prepared_list);
- put_device(dev);
- }
- console_loglevel = tmp;
- mutex_unlock(&dpm_list_mtx);
- async_synchronize_full();
- dpm_show_time(starttime, state, NULL);
-}
-
-/**
- * device_complete - Complete a PM transition for given device.
- * @dev: Device to handle.
- * @state: PM transition of the system being carried out.
- */
-static void device_complete(struct device *dev, pm_message_t state)
-{
- void (*callback)(struct device *) = NULL;
- char *info = NULL;
-
- device_lock(dev);
-
- if (dev->pm_domain) {
- info = "completing power domain ";
- callback = dev->pm_domain->ops.complete;
- } else if (dev->type && dev->type->pm) {
- info = "completing type ";
- callback = dev->type->pm->complete;
- } else if (dev->class && dev->class->pm) {
- info = "completing class ";
- callback = dev->class->pm->complete;
- } else if (dev->bus && dev->bus->pm) {
- info = "completing bus ";
- callback = dev->bus->pm->complete;
- }
-
- if (!callback && dev->driver && dev->driver->pm) {
- info = "completing driver ";
- callback = dev->driver->pm->complete;
- }
-
- if (callback) {
- pm_dev_dbg(dev, state, info);
- callback(dev);
- }
-
- device_unlock(dev);
-}
-
-/**
- * dpm_complete - Complete a PM transition for all non-sysdev devices.
- * @state: PM transition of the system being carried out.
- *
- * Execute the ->complete() callbacks for all devices whose PM status is not
- * DPM_ON (this allows new devices to be registered).
- */
-void dpm_complete(pm_message_t state)
-{
- struct list_head list;
-
- might_sleep();
-
- INIT_LIST_HEAD(&list);
- mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_prepared_list)) {
- struct device *dev = to_device(dpm_prepared_list.prev);
-
- get_device(dev);
- dev->power.is_prepared = false;
- list_move(&dev->power.entry, &list);
- mutex_unlock(&dpm_list_mtx);
-
- device_complete(dev, state);
-
- mutex_lock(&dpm_list_mtx);
- put_device(dev);
- }
- list_splice(&list, &dpm_list);
- mutex_unlock(&dpm_list_mtx);
-}
-
-/**
- * dpm_resume_end - Execute "resume" callbacks and complete system transition.
- * @state: PM transition of the system being carried out.
- *
- * Execute "resume" callbacks for all devices and complete the PM transition of
- * the system.
- */
-void dpm_resume_end(pm_message_t state)
-{
- if (cpu_trustzone_enabled == 1) {
- wmt_smc(WMT_SMC_CMD_DEVICE_RESUME, 0x1001);
- }
-
- dpm_resume(state);
- dpm_complete(state);
-}
-EXPORT_SYMBOL_GPL(dpm_resume_end);
-
-
-/*------------------------- Suspend routines -------------------------*/
-
-/**
- * resume_event - Return a "resume" message for given "suspend" sleep state.
- * @sleep_state: PM message representing a sleep state.
- *
- * Return a PM message representing the resume event corresponding to given
- * sleep state.
- */
-static pm_message_t resume_event(pm_message_t sleep_state)
-{
- switch (sleep_state.event) {
- case PM_EVENT_SUSPEND:
- return PMSG_RESUME;
- case PM_EVENT_FREEZE:
- case PM_EVENT_QUIESCE:
- return PMSG_RECOVER;
- case PM_EVENT_HIBERNATE:
- return PMSG_RESTORE;
- }
- return PMSG_ON;
-}
-
-/**
- * device_suspend_noirq - Execute a "late suspend" callback for given device.
- * @dev: Device to handle.
- * @state: PM transition of the system being carried out.
- *
- * The driver of @dev will not receive interrupts while this function is being
- * executed.
- */
-static int device_suspend_noirq(struct device *dev, pm_message_t state)
-{
- pm_callback_t callback = NULL;
- char *info = NULL;
-
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "noirq type ";
- callback = pm_noirq_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "noirq class ";
- callback = pm_noirq_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "noirq bus ";
- callback = pm_noirq_op(dev->bus->pm, state);
- }
-
- if (!callback && dev->driver && dev->driver->pm) {
- info = "noirq driver ";
- callback = pm_noirq_op(dev->driver->pm, state);
- }
-
- return dpm_run_callback(callback, dev, state, info);
-}
-
-/**
- * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
- * @state: PM transition of the system being carried out.
- *
- * Prevent device drivers from receiving interrupts and call the "noirq" suspend
- * handlers for all non-sysdev devices.
- */
-static int dpm_suspend_noirq(pm_message_t state)
-{
- ktime_t starttime = ktime_get();
- int error = 0;
-
- suspend_device_irqs();
- mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_late_early_list)) {
- struct device *dev = to_device(dpm_late_early_list.prev);
-
- get_device(dev);
- mutex_unlock(&dpm_list_mtx);
-
- error = device_suspend_noirq(dev, state);
-
- mutex_lock(&dpm_list_mtx);
- if (error) {
- pm_dev_err(dev, state, " noirq", error);
- suspend_stats.failed_suspend_noirq++;
- dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
- dpm_save_failed_dev(dev_name(dev));
- put_device(dev);
- break;
- }
- if (!list_empty(&dev->power.entry))
- list_move(&dev->power.entry, &dpm_noirq_list);
- put_device(dev);
-
- if (pm_wakeup_pending()) {
- error = -EBUSY;
- break;
- }
- }
- mutex_unlock(&dpm_list_mtx);
- if (error)
- dpm_resume_noirq(resume_event(state));
- else
- dpm_show_time(starttime, state, "noirq");
- return error;
-}
-
-/**
- * device_suspend_late - Execute a "late suspend" callback for given device.
- * @dev: Device to handle.
- * @state: PM transition of the system being carried out.
- *
- * Runtime PM is disabled for @dev while this function is being executed.
- */
-static int device_suspend_late(struct device *dev, pm_message_t state)
-{
- pm_callback_t callback = NULL;
- char *info = NULL;
-
- if (dev->pm_domain) {
- info = "late power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "late type ";
- callback = pm_late_early_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "late class ";
- callback = pm_late_early_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "late bus ";
- callback = pm_late_early_op(dev->bus->pm, state);
- }
-
- if (!callback && dev->driver && dev->driver->pm) {
- info = "late driver ";
- callback = pm_late_early_op(dev->driver->pm, state);
- }
-
- return dpm_run_callback(callback, dev, state, info);
-}
-
-/**
- * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
- * @state: PM transition of the system being carried out.
- */
-static int dpm_suspend_late(pm_message_t state)
-{
- ktime_t starttime = ktime_get();
- int error = 0;
-
- mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_suspended_list)) {
- struct device *dev = to_device(dpm_suspended_list.prev);
-
- get_device(dev);
- mutex_unlock(&dpm_list_mtx);
-
- error = device_suspend_late(dev, state);
-
- mutex_lock(&dpm_list_mtx);
- if (error) {
- pm_dev_err(dev, state, " late", error);
- suspend_stats.failed_suspend_late++;
- dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
- dpm_save_failed_dev(dev_name(dev));
- put_device(dev);
- break;
- }
- if (!list_empty(&dev->power.entry))
- list_move(&dev->power.entry, &dpm_late_early_list);
- put_device(dev);
-
- if (pm_wakeup_pending()) {
- error = -EBUSY;
- break;
- }
- }
- mutex_unlock(&dpm_list_mtx);
- if (error)
- dpm_resume_early(resume_event(state));
- else
- dpm_show_time(starttime, state, "late");
-
- return error;
-}
-
-/**
- * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
- * @state: PM transition of the system being carried out.
- */
-int dpm_suspend_end(pm_message_t state)
-{
- int error = dpm_suspend_late(state);
- if (error)
- return error;
-
- error = dpm_suspend_noirq(state);
- if (error) {
- dpm_resume_early(state);
- return error;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(dpm_suspend_end);
-
-/**
- * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
- * @dev: Device to suspend.
- * @state: PM transition of the system being carried out.
- * @cb: Suspend callback to execute.
- */
-static int legacy_suspend(struct device *dev, pm_message_t state,
- int (*cb)(struct device *dev, pm_message_t state))
-{
- int error;
- ktime_t calltime;
-
- calltime = initcall_debug_start(dev);
-
- error = cb(dev, state);
- suspend_report_result(cb, error);
-
- initcall_debug_report(dev, calltime, error);
-
- return error;
-}
-
-/**
- * device_suspend - Execute "suspend" callbacks for given device.
- * @dev: Device to handle.
- * @state: PM transition of the system being carried out.
- * @async: If true, the device is being suspended asynchronously.
- */
-static int __device_suspend(struct device *dev, pm_message_t state, bool async)
-{
- pm_callback_t callback = NULL;
- char *info = NULL;
- int error = 0;
- struct dpm_watchdog wd;
-
- dpm_wait_for_children(dev, async);
-
- if (async_error)
- goto Complete;
-
- pm_runtime_get_noresume(dev);
- if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
- pm_wakeup_event(dev, 0);
-
- if (pm_wakeup_pending()) {
- pm_runtime_put_sync(dev);
- async_error = -EBUSY;
- goto Complete;
- }
-
- dpm_wd_set(&wd, dev);
-
- device_lock(dev);
-
- if (dev->pm_domain) {
- info = "power domain ";
- callback = pm_op(&dev->pm_domain->ops, state);
- goto Run;
- }
-
- if (dev->type && dev->type->pm) {
- info = "type ";
- callback = pm_op(dev->type->pm, state);
- goto Run;
- }
-
- if (dev->class) {
- if (dev->class->pm) {
- info = "class ";
- callback = pm_op(dev->class->pm, state);
- goto Run;
- } else if (dev->class->suspend) {
- pm_dev_dbg(dev, state, "legacy class ");
- error = legacy_suspend(dev, state, dev->class->suspend);
- goto End;
- }
- }
-
- if (dev->bus) {
- if (dev->bus->pm) {
- info = "bus ";
- callback = pm_op(dev->bus->pm, state);
- } else if (dev->bus->suspend) {
- pm_dev_dbg(dev, state, "legacy bus ");
- error = legacy_suspend(dev, state, dev->bus->suspend);
- goto End;
- }
- }
-
- Run:
- if (!callback && dev->driver && dev->driver->pm) {
- info = "driver ";
- callback = pm_op(dev->driver->pm, state);
- }
-
- error = dpm_run_callback(callback, dev, state, info);
-
- End:
- if (!error) {
- dev->power.is_suspended = true;
- if (dev->power.wakeup_path
- && dev->parent && !dev->parent->power.ignore_children)
- dev->parent->power.wakeup_path = true;
- }
-
- device_unlock(dev);
-
- dpm_wd_clear(&wd);
-
- Complete:
- complete_all(&dev->power.completion);
-
- if (error) {
- pm_runtime_put_sync(dev);
- async_error = error;
- } else if (dev->power.is_suspended) {
- __pm_runtime_disable(dev, false);
- }
-
- return error;
-}
-
-static void async_suspend(void *data, async_cookie_t cookie)
-{
- struct device *dev = (struct device *)data;
- int error;
-
- error = __device_suspend(dev, pm_transition, true);
- if (error) {
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, pm_transition, " async", error);
- }
-
- put_device(dev);
-}
-
-static int device_suspend(struct device *dev)
-{
- INIT_COMPLETION(dev->power.completion);
-
- if (pm_async_enabled && dev->power.async_suspend) {
- get_device(dev);
- async_schedule(async_suspend, dev);
- return 0;
- }
-
- return __device_suspend(dev, pm_transition, false);
-}
-
-/**
- * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
- * @state: PM transition of the system being carried out.
- */
-int dpm_suspend(pm_message_t state)
-{
- ktime_t starttime = ktime_get();
- int error = 0;
- int tmp;
-
- might_sleep();
-
- mutex_lock(&dpm_list_mtx);
- pm_transition = state;
- async_error = 0;
-
- tmp = console_loglevel;
- console_loglevel = 7;
- while (!list_empty(&dpm_prepared_list)) {
- struct device *dev = to_device(dpm_prepared_list.prev);
-
- get_device(dev);
- mutex_unlock(&dpm_list_mtx);
-
- error = device_suspend(dev);
-
- mutex_lock(&dpm_list_mtx);
- if (error) {
- pm_dev_err(dev, state, "", error);
- dpm_save_failed_dev(dev_name(dev));
- put_device(dev);
- break;
- }
- if (!list_empty(&dev->power.entry))
- list_move(&dev->power.entry, &dpm_suspended_list);
- put_device(dev);
- if (async_error)
- break;
- }
- console_loglevel = tmp;
- mutex_unlock(&dpm_list_mtx);
- async_synchronize_full();
- if (!error)
- error = async_error;
- if (error) {
- suspend_stats.failed_suspend++;
- dpm_save_failed_step(SUSPEND_SUSPEND);
- } else
- dpm_show_time(starttime, state, NULL);
- return error;
-}
-
-/**
- * device_prepare - Prepare a device for system power transition.
- * @dev: Device to handle.
- * @state: PM transition of the system being carried out.
- *
- * Execute the ->prepare() callback(s) for given device. No new children of the
- * device may be registered after this function has returned.
- */
-static int device_prepare(struct device *dev, pm_message_t state)
-{
- int (*callback)(struct device *) = NULL;
- char *info = NULL;
- int error = 0;
-
- device_lock(dev);
-
- dev->power.wakeup_path = device_may_wakeup(dev);
-
- if (dev->pm_domain) {
- info = "preparing power domain ";
- callback = dev->pm_domain->ops.prepare;
- } else if (dev->type && dev->type->pm) {
- info = "preparing type ";
- callback = dev->type->pm->prepare;
- } else if (dev->class && dev->class->pm) {
- info = "preparing class ";
- callback = dev->class->pm->prepare;
- } else if (dev->bus && dev->bus->pm) {
- info = "preparing bus ";
- callback = dev->bus->pm->prepare;
- }
-
- if (!callback && dev->driver && dev->driver->pm) {
- info = "preparing driver ";
- callback = dev->driver->pm->prepare;
- }
-
- if (callback) {
- error = callback(dev);
- suspend_report_result(callback, error);
- }
-
- device_unlock(dev);
-
- return error;
-}
-
-/**
- * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
- * @state: PM transition of the system being carried out.
- *
- * Execute the ->prepare() callback(s) for all devices.
- */
-int dpm_prepare(pm_message_t state)
-{
- int error = 0;
-
- might_sleep();
-
- mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_list)) {
- struct device *dev = to_device(dpm_list.next);
-
- get_device(dev);
- mutex_unlock(&dpm_list_mtx);
-
- error = device_prepare(dev, state);
-
- mutex_lock(&dpm_list_mtx);
- if (error) {
- if (error == -EAGAIN) {
- put_device(dev);
- error = 0;
- continue;
- }
- printk(KERN_INFO "PM: Device %s not prepared "
- "for power transition: code %d\n",
- dev_name(dev), error);
- put_device(dev);
- break;
- }
- dev->power.is_prepared = true;
- if (!list_empty(&dev->power.entry))
- list_move_tail(&dev->power.entry, &dpm_prepared_list);
- put_device(dev);
- }
- mutex_unlock(&dpm_list_mtx);
- return error;
-}
-
-/**
- * dpm_suspend_start - Prepare devices for PM transition and suspend them.
- * @state: PM transition of the system being carried out.
- *
- * Prepare all non-sysdev devices for system PM transition and execute "suspend"
- * callbacks for them.
- */
-int dpm_suspend_start(pm_message_t state)
-{
- int error;
-
- error = dpm_prepare(state);
- if (error) {
- suspend_stats.failed_prepare++;
- dpm_save_failed_step(SUSPEND_PREPARE);
- } else
- error = dpm_suspend(state);
- if (cpu_trustzone_enabled == 1) {
- wmt_smc(WMT_SMC_CMD_DEVICE_SUSPEND, 0x1001);
- }
- return error;
-}
-EXPORT_SYMBOL_GPL(dpm_suspend_start);
-
-void __suspend_report_result(const char *function, void *fn, int ret)
-{
- if (ret)
- printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
-}
-EXPORT_SYMBOL_GPL(__suspend_report_result);
-
-/**
- * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
- * @dev: Device to wait for.
- * @subordinate: Device that needs to wait for @dev.
- */
-int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
-{
- dpm_wait(dev, subordinate->power.async_suspend);
- return async_error;
-}
-EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
diff --git a/ANDROID_3.4.5/drivers/base/power/opp.c b/ANDROID_3.4.5/drivers/base/power/opp.c
deleted file mode 100644
index ac993eaf..00000000
--- a/ANDROID_3.4.5/drivers/base/power/opp.c
+++ /dev/null
@@ -1,676 +0,0 @@
-/*
- * Generic OPP Interface
- *
- * Copyright (C) 2009-2010 Texas Instruments Incorporated.
- * Nishanth Menon
- * Romit Dasgupta
- * Kevin Hilman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/cpufreq.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/rculist.h>
-#include <linux/rcupdate.h>
-#include <linux/opp.h>
-
-/*
- * Internal data structure organization with the OPP layer library is as
- * follows:
- * dev_opp_list (root)
- * |- device 1 (represents voltage domain 1)
- * | |- opp 1 (availability, freq, voltage)
- * | |- opp 2 ..
- * ... ...
- * | `- opp n ..
- * |- device 2 (represents the next voltage domain)
- * ...
- * `- device m (represents mth voltage domain)
- * device 1, 2.. are represented by dev_opp structure while each opp
- * is represented by the opp structure.
- */
-
-/**
- * struct opp - Generic OPP description structure
- * @node: opp list node. The nodes are maintained throughout the lifetime
- * of boot. It is expected only an optimal set of OPPs are
- * added to the library by the SoC framework.
- * RCU usage: opp list is traversed with RCU locks. node
- * modification is possible realtime, hence the modifications
- * are protected by the dev_opp_list_lock for integrity.
- * IMPORTANT: the opp nodes should be maintained in increasing
- * order.
- * @available: true/false - marks if this OPP as available or not
- * @rate: Frequency in hertz
- * @u_volt: Nominal voltage in microvolts corresponding to this OPP
- * @dev_opp: points back to the device_opp struct this opp belongs to
- *
- * This structure stores the OPP information for a given device.
- */
-struct opp {
- struct list_head node;
-
- bool available;
- unsigned long rate;
- unsigned long u_volt;
-
- struct device_opp *dev_opp;
-};
-
-/**
- * struct device_opp - Device opp structure
- * @node: list node - contains the devices with OPPs that
- * have been registered. Nodes once added are not modified in this
- * list.
- * RCU usage: nodes are not modified in the list of device_opp,
- * however addition is possible and is secured by dev_opp_list_lock
- * @dev: device pointer
- * @head: notifier head to notify the OPP availability changes.
- * @opp_list: list of opps
- *
- * This is an internal data structure maintaining the link to opps attached to
- * a device. This structure is not meant to be shared to users as it is
- * meant for book keeping and private to OPP library
- */
-struct device_opp {
- struct list_head node;
-
- struct device *dev;
- struct srcu_notifier_head head;
- struct list_head opp_list;
-};
-
-/*
- * The root of the list of all devices. All device_opp structures branch off
- * from here, with each device_opp containing the list of opp it supports in
- * various states of availability.
- */
-static LIST_HEAD(dev_opp_list);
-/* Lock to allow exclusive modification to the device and opp lists */
-static DEFINE_MUTEX(dev_opp_list_lock);
-
-/**
- * find_device_opp() - find device_opp struct using device pointer
- * @dev: device pointer used to lookup device OPPs
- *
- * Search list of device OPPs for one containing matching device. Does a RCU
- * reader operation to grab the pointer needed.
- *
- * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or
- * -EINVAL based on type of error.
- *
- * Locking: This function must be called under rcu_read_lock(). device_opp
- * is a RCU protected pointer. This means that device_opp is valid as long
- * as we are under RCU lock.
- */
-static struct device_opp *find_device_opp(struct device *dev)
-{
- struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
-
- if (unlikely(IS_ERR_OR_NULL(dev))) {
- pr_err("%s: Invalid parameters\n", __func__);
- return ERR_PTR(-EINVAL);
- }
-
- list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
- if (tmp_dev_opp->dev == dev) {
- dev_opp = tmp_dev_opp;
- break;
- }
- }
-
- return dev_opp;
-}
-
-/**
- * opp_get_voltage() - Gets the voltage corresponding to an available opp
- * @opp: opp for which voltage has to be returned for
- *
- * Return voltage in micro volt corresponding to the opp, else
- * return 0
- *
- * Locking: This function must be called under rcu_read_lock(). opp is a rcu
- * protected pointer. This means that opp which could have been fetched by
- * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
- * under RCU lock. The pointer returned by the opp_find_freq family must be
- * used in the same section as the usage of this function with the pointer
- * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
- * pointer.
- */
-unsigned long opp_get_voltage(struct opp *opp)
-{
- struct opp *tmp_opp;
- unsigned long v = 0;
-
- tmp_opp = rcu_dereference(opp);
- if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
- pr_err("%s: Invalid parameters\n", __func__);
- else
- v = tmp_opp->u_volt;
-
- return v;
-}
-
-/**
- * opp_get_freq() - Gets the frequency corresponding to an available opp
- * @opp: opp for which frequency has to be returned for
- *
- * Return frequency in hertz corresponding to the opp, else
- * return 0
- *
- * Locking: This function must be called under rcu_read_lock(). opp is a rcu
- * protected pointer. This means that opp which could have been fetched by
- * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
- * under RCU lock. The pointer returned by the opp_find_freq family must be
- * used in the same section as the usage of this function with the pointer
- * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
- * pointer.
- */
-unsigned long opp_get_freq(struct opp *opp)
-{
- struct opp *tmp_opp;
- unsigned long f = 0;
-
- tmp_opp = rcu_dereference(opp);
- if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
- pr_err("%s: Invalid parameters\n", __func__);
- else
- f = tmp_opp->rate;
-
- return f;
-}
-
-/**
- * opp_get_opp_count() - Get number of opps available in the opp list
- * @dev: device for which we do this operation
- *
- * This function returns the number of available opps if there are any,
- * else returns 0 if none or the corresponding error value.
- *
- * Locking: This function must be called under rcu_read_lock(). This function
- * internally references two RCU protected structures: device_opp and opp which
- * are safe as long as we are under a common RCU locked section.
- */
-int opp_get_opp_count(struct device *dev)
-{
- struct device_opp *dev_opp;
- struct opp *temp_opp;
- int count = 0;
-
- dev_opp = find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- int r = PTR_ERR(dev_opp);
- dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
- return r;
- }
-
- list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
- if (temp_opp->available)
- count++;
- }
-
- return count;
-}
-
-/**
- * opp_find_freq_exact() - search for an exact frequency
- * @dev: device for which we do this operation
- * @freq: frequency to search for
- * @available: true/false - match for available opp
- *
- * Searches for exact match in the opp list and returns pointer to the matching
- * opp if found, else returns ERR_PTR in case of error and should be handled
- * using IS_ERR.
- *
- * Note: available is a modifier for the search. if available=true, then the
- * match is for exact matching frequency and is available in the stored OPP
- * table. if false, the match is for exact frequency which is not available.
- *
- * This provides a mechanism to enable an opp which is not available currently
- * or the opposite as well.
- *
- * Locking: This function must be called under rcu_read_lock(). opp is a rcu
- * protected pointer. The reason for the same is that the opp pointer which is
- * returned will remain valid for use with opp_get_{voltage, freq} only while
- * under the locked area. The pointer returned must be used prior to unlocking
- * with rcu_read_unlock() to maintain the integrity of the pointer.
- */
-struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
- bool available)
-{
- struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
-
- dev_opp = find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- int r = PTR_ERR(dev_opp);
- dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
- return ERR_PTR(r);
- }
-
- list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
- if (temp_opp->available == available &&
- temp_opp->rate == freq) {
- opp = temp_opp;
- break;
- }
- }
-
- return opp;
-}
-
-/**
- * opp_find_freq_ceil() - Search for a rounded ceil freq
- * @dev: device for which we do this operation
- * @freq: Start frequency
- *
- * Search for the matching ceil *available* OPP from a starting freq
- * for a device.
- *
- * Returns matching *opp and refreshes *freq accordingly, else returns
- * ERR_PTR in case of error and should be handled using IS_ERR.
- *
- * Locking: This function must be called under rcu_read_lock(). opp is a rcu
- * protected pointer. The reason for the same is that the opp pointer which is
- * returned will remain valid for use with opp_get_{voltage, freq} only while
- * under the locked area. The pointer returned must be used prior to unlocking
- * with rcu_read_unlock() to maintain the integrity of the pointer.
- */
-struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
-{
- struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
-
- if (!dev || !freq) {
- dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
- return ERR_PTR(-EINVAL);
- }
-
- dev_opp = find_device_opp(dev);
- if (IS_ERR(dev_opp))
- return opp;
-
- list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
- if (temp_opp->available && temp_opp->rate >= *freq) {
- opp = temp_opp;
- *freq = opp->rate;
- break;
- }
- }
-
- return opp;
-}
-
-/**
- * opp_find_freq_floor() - Search for a rounded floor freq
- * @dev: device for which we do this operation
- * @freq: Start frequency
- *
- * Search for the matching floor *available* OPP from a starting freq
- * for a device.
- *
- * Returns matching *opp and refreshes *freq accordingly, else returns
- * ERR_PTR in case of error and should be handled using IS_ERR.
- *
- * Locking: This function must be called under rcu_read_lock(). opp is a rcu
- * protected pointer. The reason for the same is that the opp pointer which is
- * returned will remain valid for use with opp_get_{voltage, freq} only while
- * under the locked area. The pointer returned must be used prior to unlocking
- * with rcu_read_unlock() to maintain the integrity of the pointer.
- */
-struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
-{
- struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
-
- if (!dev || !freq) {
- dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
- return ERR_PTR(-EINVAL);
- }
-
- dev_opp = find_device_opp(dev);
- if (IS_ERR(dev_opp))
- return opp;
-
- list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
- if (temp_opp->available) {
- /* go to the next node, before choosing prev */
- if (temp_opp->rate > *freq)
- break;
- else
- opp = temp_opp;
- }
- }
- if (!IS_ERR(opp))
- *freq = opp->rate;
-
- return opp;
-}
-
-/**
- * opp_add() - Add an OPP table from a table definitions
- * @dev: device for which we do this operation
- * @freq: Frequency in Hz for this OPP
- * @u_volt: Voltage in uVolts for this OPP
- *
- * This function adds an opp definition to the opp list and returns status.
- * The opp is made available by default and it can be controlled using
- * opp_enable/disable functions.
- *
- * Locking: The internal device_opp and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
- */
-int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
-{
- struct device_opp *dev_opp = NULL;
- struct opp *opp, *new_opp;
- struct list_head *head;
-
- /* allocate new OPP node */
- new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
- if (!new_opp) {
- dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
- return -ENOMEM;
- }
-
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
-
- /* Check for existing list for 'dev' */
- dev_opp = find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- /*
- * Allocate a new device OPP table. In the infrequent case
- * where a new device is needed to be added, we pay this
- * penalty.
- */
- dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
- if (!dev_opp) {
- mutex_unlock(&dev_opp_list_lock);
- kfree(new_opp);
- dev_warn(dev,
- "%s: Unable to create device OPP structure\n",
- __func__);
- return -ENOMEM;
- }
-
- dev_opp->dev = dev;
- srcu_init_notifier_head(&dev_opp->head);
- INIT_LIST_HEAD(&dev_opp->opp_list);
-
- /* Secure the device list modification */
- list_add_rcu(&dev_opp->node, &dev_opp_list);
- }
-
- /* populate the opp table */
- new_opp->dev_opp = dev_opp;
- new_opp->rate = freq;
- new_opp->u_volt = u_volt;
- new_opp->available = true;
-
- /* Insert new OPP in order of increasing frequency */
- head = &dev_opp->opp_list;
- list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
- if (new_opp->rate < opp->rate)
- break;
- else
- head = &opp->node;
- }
-
- list_add_rcu(&new_opp->node, head);
- mutex_unlock(&dev_opp_list_lock);
-
- /*
- * Notify the changes in the availability of the operable
- * frequency/voltage list.
- */
- srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
- return 0;
-}
-
-/**
- * opp_set_availability() - helper to set the availability of an opp
- * @dev: device for which we do this operation
- * @freq: OPP frequency to modify availability
- * @availability_req: availability status requested for this opp
- *
- * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
- * share a common logic which is isolated here.
- *
- * Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modification was done OR modification was
- * successful.
- *
- * Locking: The internal device_opp and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks to
- * keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex locking or synchronize_rcu() blocking calls cannot be used.
- */
-static int opp_set_availability(struct device *dev, unsigned long freq,
- bool availability_req)
-{
- struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
- struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
- int r = 0;
-
- /* keep the node allocated */
- new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
- if (!new_opp) {
- dev_warn(dev, "%s: Unable to create OPP\n", __func__);
- return -ENOMEM;
- }
-
- mutex_lock(&dev_opp_list_lock);
-
- /* Find the device_opp */
- list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
- if (dev == tmp_dev_opp->dev) {
- dev_opp = tmp_dev_opp;
- break;
- }
- }
- if (IS_ERR(dev_opp)) {
- r = PTR_ERR(dev_opp);
- dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
- goto unlock;
- }
-
- /* Do we have the frequency? */
- list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
- if (tmp_opp->rate == freq) {
- opp = tmp_opp;
- break;
- }
- }
- if (IS_ERR(opp)) {
- r = PTR_ERR(opp);
- goto unlock;
- }
-
- /* Is update really needed? */
- if (opp->available == availability_req)
- goto unlock;
- /* copy the old data over */
- *new_opp = *opp;
-
- /* plug in new node */
- new_opp->available = availability_req;
-
- list_replace_rcu(&opp->node, &new_opp->node);
- mutex_unlock(&dev_opp_list_lock);
- synchronize_rcu();
-
- /* Notify the change of the OPP availability */
- if (availability_req)
- srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE,
- new_opp);
- else
- srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
- new_opp);
-
- /* clean up old opp */
- new_opp = opp;
- goto out;
-
-unlock:
- mutex_unlock(&dev_opp_list_lock);
-out:
- kfree(new_opp);
- return r;
-}
-
-/**
- * opp_enable() - Enable a specific OPP
- * @dev: device for which we do this operation
- * @freq: OPP frequency to enable
- *
- * Enables a provided opp. If the operation is valid, this returns 0, else the
- * corresponding error value. It is meant to be used by users to make an OPP
- * available again after being temporarily made unavailable with opp_disable.
- *
- * Locking: The internal device_opp and opp structures are RCU protected.
- * Hence this function indirectly uses RCU and mutex locks to keep the
- * integrity of the internal data structures. Callers should ensure that
- * this function is *NOT* called under RCU protection or in contexts where
- * mutex locking or synchronize_rcu() blocking calls cannot be used.
- */
-int opp_enable(struct device *dev, unsigned long freq)
-{
- return opp_set_availability(dev, freq, true);
-}
-
-/**
- * opp_disable() - Disable a specific OPP
- * @dev: device for which we do this operation
- * @freq: OPP frequency to disable
- *
- * Disables a provided opp. If the operation is valid, this returns
- * 0, else the corresponding error value. It is meant to be a temporary
- * control by users to make this OPP not available until the circumstances are
- * right to make it available again (with a call to opp_enable).
- *
- * Locking: The internal device_opp and opp structures are RCU protected.
- * Hence this function indirectly uses RCU and mutex locks to keep the
- * integrity of the internal data structures. Callers should ensure that
- * this function is *NOT* called under RCU protection or in contexts where
- * mutex locking or synchronize_rcu() blocking calls cannot be used.
- */
-int opp_disable(struct device *dev, unsigned long freq)
-{
- return opp_set_availability(dev, freq, false);
-}
-
-#ifdef CONFIG_CPU_FREQ
-/**
- * opp_init_cpufreq_table() - create a cpufreq table for a device
- * @dev: device for which we do this operation
- * @table: Cpufreq table returned back to caller
- *
- * Generate a cpufreq table for a provided device- this assumes that the
- * opp list is already initialized and ready for usage.
- *
- * This function allocates required memory for the cpufreq table. It is
- * expected that the caller does the required maintenance such as freeing
- * the table as required.
- *
- * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
- * if no memory available for the operation (table is not populated), returns 0
- * if successful and table is populated.
- *
- * WARNING: It is important for the callers to ensure refreshing their copy of
- * the table if any of the mentioned functions have been invoked in the interim.
- *
- * Locking: The internal device_opp and opp structures are RCU protected.
- * To simplify the logic, we pretend we are updater and hold relevant mutex here
- * Callers should ensure that this function is *NOT* called under RCU protection
- * or in contexts where mutex locking cannot be used.
- */
-int opp_init_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table)
-{
- struct device_opp *dev_opp;
- struct opp *opp;
- struct cpufreq_frequency_table *freq_table;
- int i = 0;
-
- /* Pretend as if I am an updater */
- mutex_lock(&dev_opp_list_lock);
-
- dev_opp = find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- int r = PTR_ERR(dev_opp);
- mutex_unlock(&dev_opp_list_lock);
- dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
- return r;
- }
-
- freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
- (opp_get_opp_count(dev) + 1), GFP_KERNEL);
- if (!freq_table) {
- mutex_unlock(&dev_opp_list_lock);
- dev_warn(dev, "%s: Unable to allocate frequency table\n",
- __func__);
- return -ENOMEM;
- }
-
- list_for_each_entry(opp, &dev_opp->opp_list, node) {
- if (opp->available) {
- freq_table[i].index = i;
- freq_table[i].frequency = opp->rate / 1000;
- i++;
- }
- }
- mutex_unlock(&dev_opp_list_lock);
-
- freq_table[i].index = i;
- freq_table[i].frequency = CPUFREQ_TABLE_END;
-
- *table = &freq_table[0];
-
- return 0;
-}
-
-/**
- * opp_free_cpufreq_table() - free the cpufreq table
- * @dev: device for which we do this operation
- * @table: table to free
- *
- * Free up the table allocated by opp_init_cpufreq_table
- */
-void opp_free_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table)
-{
- if (!table)
- return;
-
- kfree(*table);
- *table = NULL;
-}
-#endif /* CONFIG_CPU_FREQ */
-
-/**
- * opp_get_notifier() - find notifier_head of the device with opp
- * @dev: device pointer used to lookup device OPPs.
- */
-struct srcu_notifier_head *opp_get_notifier(struct device *dev)
-{
- struct device_opp *dev_opp = find_device_opp(dev);
-
- if (IS_ERR(dev_opp))
- return ERR_CAST(dev_opp); /* matching type */
-
- return &dev_opp->head;
-}
diff --git a/ANDROID_3.4.5/drivers/base/power/power.h b/ANDROID_3.4.5/drivers/base/power/power.h
deleted file mode 100644
index eeb4bff9..00000000
--- a/ANDROID_3.4.5/drivers/base/power/power.h
+++ /dev/null
@@ -1,87 +0,0 @@
-#include <linux/pm_qos.h>
-
-#ifdef CONFIG_PM_RUNTIME
-
-extern void pm_runtime_init(struct device *dev);
-extern void pm_runtime_remove(struct device *dev);
-
-#else /* !CONFIG_PM_RUNTIME */
-
-static inline void pm_runtime_init(struct device *dev) {}
-static inline void pm_runtime_remove(struct device *dev) {}
-
-#endif /* !CONFIG_PM_RUNTIME */
-
-#ifdef CONFIG_PM_SLEEP
-
-/* kernel/power/main.c */
-extern int pm_async_enabled;
-
-/* drivers/base/power/main.c */
-extern struct list_head dpm_list; /* The active device list */
-
-static inline struct device *to_device(struct list_head *entry)
-{
- return container_of(entry, struct device, power.entry);
-}
-
-extern void device_pm_init(struct device *dev);
-extern void device_pm_add(struct device *);
-extern void device_pm_remove(struct device *);
-extern void device_pm_move_before(struct device *, struct device *);
-extern void device_pm_move_after(struct device *, struct device *);
-extern void device_pm_move_last(struct device *);
-
-#else /* !CONFIG_PM_SLEEP */
-
-static inline void device_pm_init(struct device *dev)
-{
- spin_lock_init(&dev->power.lock);
- dev->power.power_state = PMSG_INVALID;
- pm_runtime_init(dev);
-}
-
-static inline void device_pm_add(struct device *dev)
-{
- dev_pm_qos_constraints_init(dev);
-}
-
-static inline void device_pm_remove(struct device *dev)
-{
- dev_pm_qos_constraints_destroy(dev);
- pm_runtime_remove(dev);
-}
-
-static inline void device_pm_move_before(struct device *deva,
- struct device *devb) {}
-static inline void device_pm_move_after(struct device *deva,
- struct device *devb) {}
-static inline void device_pm_move_last(struct device *dev) {}
-
-#endif /* !CONFIG_PM_SLEEP */
-
-#ifdef CONFIG_PM
-
-/*
- * sysfs.c
- */
-
-extern int dpm_sysfs_add(struct device *dev);
-extern void dpm_sysfs_remove(struct device *dev);
-extern void rpm_sysfs_remove(struct device *dev);
-extern int wakeup_sysfs_add(struct device *dev);
-extern void wakeup_sysfs_remove(struct device *dev);
-extern int pm_qos_sysfs_add(struct device *dev);
-extern void pm_qos_sysfs_remove(struct device *dev);
-
-#else /* CONFIG_PM */
-
-static inline int dpm_sysfs_add(struct device *dev) { return 0; }
-static inline void dpm_sysfs_remove(struct device *dev) {}
-static inline void rpm_sysfs_remove(struct device *dev) {}
-static inline int wakeup_sysfs_add(struct device *dev) { return 0; }
-static inline void wakeup_sysfs_remove(struct device *dev) {}
-static inline int pm_qos_sysfs_add(struct device *dev) { return 0; }
-static inline void pm_qos_sysfs_remove(struct device *dev) {}
-
-#endif
diff --git a/ANDROID_3.4.5/drivers/base/power/qos.c b/ANDROID_3.4.5/drivers/base/power/qos.c
deleted file mode 100644
index fd849a2c..00000000
--- a/ANDROID_3.4.5/drivers/base/power/qos.c
+++ /dev/null
@@ -1,513 +0,0 @@
-/*
- * Devices PM QoS constraints management
- *
- * Copyright (C) 2011 Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *
- * This module exposes the interface to kernel space for specifying
- * per-device PM QoS dependencies. It provides infrastructure for registration
- * of:
- *
- * Dependents on a QoS value : register requests
- * Watchers of QoS value : get notified when target QoS value changes
- *
- * This QoS design is best effort based. Dependents register their QoS needs.
- * Watchers register to keep track of the current QoS needs of the system.
- * Watchers can register different types of notification callbacks:
- * . a per-device notification callback using the dev_pm_qos_*_notifier API.
- * The notification chain data is stored in the per-device constraint
- * data struct.
- * . a system-wide notification callback using the dev_pm_qos_*_global_notifier
- * API. The notification chain data is stored in a static variable.
- *
- * Note about the per-device constraint data struct allocation:
- * . The per-device constraints data struct ptr is tored into the device
- * dev_pm_info.
- * . To minimize the data usage by the per-device constraints, the data struct
- * is only allocated at the first call to dev_pm_qos_add_request.
- * . The data is later free'd when the device is removed from the system.
- * . A global mutex protects the constraints users from the data being
- * allocated and free'd.
- */
-
-#include <linux/pm_qos.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/mutex.h>
-#include <linux/export.h>
-
-#include "power.h"
-
-static DEFINE_MUTEX(dev_pm_qos_mtx);
-
-static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
-
-/**
- * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
- * @dev: Device to get the PM QoS constraint value for.
- *
- * This routine must be called with dev->power.lock held.
- */
-s32 __dev_pm_qos_read_value(struct device *dev)
-{
- struct pm_qos_constraints *c = dev->power.constraints;
-
- return c ? pm_qos_read_value(c) : 0;
-}
-
-/**
- * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
- * @dev: Device to get the PM QoS constraint value for.
- */
-s32 dev_pm_qos_read_value(struct device *dev)
-{
- unsigned long flags;
- s32 ret;
-
- spin_lock_irqsave(&dev->power.lock, flags);
- ret = __dev_pm_qos_read_value(dev);
- spin_unlock_irqrestore(&dev->power.lock, flags);
-
- return ret;
-}
-
-/*
- * apply_constraint
- * @req: constraint request to apply
- * @action: action to perform add/update/remove, of type enum pm_qos_req_action
- * @value: defines the qos request
- *
- * Internal function to update the constraints list using the PM QoS core
- * code and if needed call the per-device and the global notification
- * callbacks
- */
-static int apply_constraint(struct dev_pm_qos_request *req,
- enum pm_qos_req_action action, int value)
-{
- int ret, curr_value;
-
- ret = pm_qos_update_target(req->dev->power.constraints,
- &req->node, action, value);
-
- if (ret) {
- /* Call the global callbacks if needed */
- curr_value = pm_qos_read_value(req->dev->power.constraints);
- blocking_notifier_call_chain(&dev_pm_notifiers,
- (unsigned long)curr_value,
- req);
- }
-
- return ret;
-}
-
-/*
- * dev_pm_qos_constraints_allocate
- * @dev: device to allocate data for
- *
- * Called at the first call to add_request, for constraint data allocation
- * Must be called with the dev_pm_qos_mtx mutex held
- */
-static int dev_pm_qos_constraints_allocate(struct device *dev)
-{
- struct pm_qos_constraints *c;
- struct blocking_notifier_head *n;
-
- c = kzalloc(sizeof(*c), GFP_KERNEL);
- if (!c)
- return -ENOMEM;
-
- n = kzalloc(sizeof(*n), GFP_KERNEL);
- if (!n) {
- kfree(c);
- return -ENOMEM;
- }
- BLOCKING_INIT_NOTIFIER_HEAD(n);
-
- plist_head_init(&c->list);
- c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
- c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
- c->type = PM_QOS_MIN;
- c->notifiers = n;
-
- spin_lock_irq(&dev->power.lock);
- dev->power.constraints = c;
- spin_unlock_irq(&dev->power.lock);
-
- return 0;
-}
-
-/**
- * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
- * @dev: target device
- *
- * Called from the device PM subsystem during device insertion under
- * device_pm_lock().
- */
-void dev_pm_qos_constraints_init(struct device *dev)
-{
- mutex_lock(&dev_pm_qos_mtx);
- dev->power.constraints = NULL;
- dev->power.power_state = PMSG_ON;
- mutex_unlock(&dev_pm_qos_mtx);
-}
-
-/**
- * dev_pm_qos_constraints_destroy
- * @dev: target device
- *
- * Called from the device PM subsystem on device removal under device_pm_lock().
- */
-void dev_pm_qos_constraints_destroy(struct device *dev)
-{
- struct dev_pm_qos_request *req, *tmp;
- struct pm_qos_constraints *c;
-
- /*
- * If the device's PM QoS resume latency limit has been exposed to user
- * space, it has to be hidden at this point.
- */
- dev_pm_qos_hide_latency_limit(dev);
-
- mutex_lock(&dev_pm_qos_mtx);
-
- dev->power.power_state = PMSG_INVALID;
- c = dev->power.constraints;
- if (!c)
- goto out;
-
- /* Flush the constraints list for the device */
- plist_for_each_entry_safe(req, tmp, &c->list, node) {
- /*
- * Update constraints list and call the notification
- * callbacks if needed
- */
- apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
- memset(req, 0, sizeof(*req));
- }
-
- spin_lock_irq(&dev->power.lock);
- dev->power.constraints = NULL;
- spin_unlock_irq(&dev->power.lock);
-
- kfree(c->notifiers);
- kfree(c);
-
- out:
- mutex_unlock(&dev_pm_qos_mtx);
-}
-
-/**
- * dev_pm_qos_add_request - inserts new qos request into the list
- * @dev: target device for the constraint
- * @req: pointer to a preallocated handle
- * @value: defines the qos request
- *
- * This function inserts a new entry in the device constraints list of
- * requested qos performance characteristics. It recomputes the aggregate
- * QoS expectations of parameters and initializes the dev_pm_qos_request
- * handle. Caller needs to save this handle for later use in updates and
- * removal.
- *
- * Returns 1 if the aggregated constraint value has changed,
- * 0 if the aggregated constraint value has not changed,
- * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
- * to allocate for data structures, -ENODEV if the device has just been removed
- * from the system.
- */
-int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
- s32 value)
-{
- int ret = 0;
-
- if (!dev || !req) /*guard against callers passing in null */
- return -EINVAL;
-
- if (WARN(dev_pm_qos_request_active(req),
- "%s() called for already added request\n", __func__))
- return -EINVAL;
-
- req->dev = dev;
-
- mutex_lock(&dev_pm_qos_mtx);
-
- if (!dev->power.constraints) {
- if (dev->power.power_state.event == PM_EVENT_INVALID) {
- /* The device has been removed from the system. */
- req->dev = NULL;
- ret = -ENODEV;
- goto out;
- } else {
- /*
- * Allocate the constraints data on the first call to
- * add_request, i.e. only if the data is not already
- * allocated and if the device has not been removed.
- */
- ret = dev_pm_qos_constraints_allocate(dev);
- }
- }
-
- if (!ret)
- ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
-
- out:
- mutex_unlock(&dev_pm_qos_mtx);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
-
-/**
- * dev_pm_qos_update_request - modifies an existing qos request
- * @req : handle to list element holding a dev_pm_qos request to use
- * @new_value: defines the qos request
- *
- * Updates an existing dev PM qos request along with updating the
- * target value.
- *
- * Attempts are made to make this code callable on hot code paths.
- *
- * Returns 1 if the aggregated constraint value has changed,
- * 0 if the aggregated constraint value has not changed,
- * -EINVAL in case of wrong parameters, -ENODEV if the device has been
- * removed from the system
- */
-int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
- s32 new_value)
-{
- int ret = 0;
-
- if (!req) /*guard against callers passing in null */
- return -EINVAL;
-
- if (WARN(!dev_pm_qos_request_active(req),
- "%s() called for unknown object\n", __func__))
- return -EINVAL;
-
- mutex_lock(&dev_pm_qos_mtx);
-
- if (req->dev->power.constraints) {
- if (new_value != req->node.prio)
- ret = apply_constraint(req, PM_QOS_UPDATE_REQ,
- new_value);
- } else {
- /* Return if the device has been removed */
- ret = -ENODEV;
- }
-
- mutex_unlock(&dev_pm_qos_mtx);
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
-
-/**
- * dev_pm_qos_remove_request - modifies an existing qos request
- * @req: handle to request list element
- *
- * Will remove pm qos request from the list of constraints and
- * recompute the current target value. Call this on slow code paths.
- *
- * Returns 1 if the aggregated constraint value has changed,
- * 0 if the aggregated constraint value has not changed,
- * -EINVAL in case of wrong parameters, -ENODEV if the device has been
- * removed from the system
- */
-int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
-{
- int ret = 0;
-
- if (!req) /*guard against callers passing in null */
- return -EINVAL;
-
- if (WARN(!dev_pm_qos_request_active(req),
- "%s() called for unknown object\n", __func__))
- return -EINVAL;
-
- mutex_lock(&dev_pm_qos_mtx);
-
- if (req->dev->power.constraints) {
- ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
- PM_QOS_DEFAULT_VALUE);
- memset(req, 0, sizeof(*req));
- } else {
- /* Return if the device has been removed */
- ret = -ENODEV;
- }
-
- mutex_unlock(&dev_pm_qos_mtx);
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
-
-/**
- * dev_pm_qos_add_notifier - sets notification entry for changes to target value
- * of per-device PM QoS constraints
- *
- * @dev: target device for the constraint
- * @notifier: notifier block managed by caller.
- *
- * Will register the notifier into a notification chain that gets called
- * upon changes to the target value for the device.
- *
- * If the device's constraints object doesn't exist when this routine is called,
- * it will be created (or error code will be returned if that fails).
- */
-int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
-{
- int ret = 0;
-
- mutex_lock(&dev_pm_qos_mtx);
-
- if (!dev->power.constraints)
- ret = dev->power.power_state.event != PM_EVENT_INVALID ?
- dev_pm_qos_constraints_allocate(dev) : -ENODEV;
-
- if (!ret)
- ret = blocking_notifier_chain_register(
- dev->power.constraints->notifiers, notifier);
-
- mutex_unlock(&dev_pm_qos_mtx);
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
-
-/**
- * dev_pm_qos_remove_notifier - deletes notification for changes to target value
- * of per-device PM QoS constraints
- *
- * @dev: target device for the constraint
- * @notifier: notifier block to be removed.
- *
- * Will remove the notifier from the notification chain that gets called
- * upon changes to the target value.
- */
-int dev_pm_qos_remove_notifier(struct device *dev,
- struct notifier_block *notifier)
-{
- int retval = 0;
-
- mutex_lock(&dev_pm_qos_mtx);
-
- /* Silently return if the constraints object is not present. */
- if (dev->power.constraints)
- retval = blocking_notifier_chain_unregister(
- dev->power.constraints->notifiers,
- notifier);
-
- mutex_unlock(&dev_pm_qos_mtx);
- return retval;
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
-
-/**
- * dev_pm_qos_add_global_notifier - sets notification entry for changes to
- * target value of the PM QoS constraints for any device
- *
- * @notifier: notifier block managed by caller.
- *
- * Will register the notifier into a notification chain that gets called
- * upon changes to the target value for any device.
- */
-int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
-{
- return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);
-
-/**
- * dev_pm_qos_remove_global_notifier - deletes notification for changes to
- * target value of PM QoS constraints for any device
- *
- * @notifier: notifier block to be removed.
- *
- * Will remove the notifier from the notification chain that gets called
- * upon changes to the target value for any device.
- */
-int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
-{
- return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
-
-/**
- * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
- * @dev: Device whose ancestor to add the request for.
- * @req: Pointer to the preallocated handle.
- * @value: Constraint latency value.
- */
-int dev_pm_qos_add_ancestor_request(struct device *dev,
- struct dev_pm_qos_request *req, s32 value)
-{
- struct device *ancestor = dev->parent;
- int error = -ENODEV;
-
- while (ancestor && !ancestor->power.ignore_children)
- ancestor = ancestor->parent;
-
- if (ancestor)
- error = dev_pm_qos_add_request(ancestor, req, value);
-
- if (error)
- req->dev = NULL;
-
- return error;
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
-
-#ifdef CONFIG_PM_RUNTIME
-static void __dev_pm_qos_drop_user_request(struct device *dev)
-{
- dev_pm_qos_remove_request(dev->power.pq_req);
- dev->power.pq_req = 0;
-}
-
-/**
- * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
- * @dev: Device whose PM QoS latency limit is to be exposed to user space.
- * @value: Initial value of the latency limit.
- */
-int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
-{
- struct dev_pm_qos_request *req;
- int ret;
-
- if (!device_is_registered(dev) || value < 0)
- return -EINVAL;
-
- if (dev->power.pq_req)
- return -EEXIST;
-
- req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req)
- return -ENOMEM;
-
- ret = dev_pm_qos_add_request(dev, req, value);
- if (ret < 0)
- return ret;
-
- dev->power.pq_req = req;
- ret = pm_qos_sysfs_add(dev);
- if (ret)
- __dev_pm_qos_drop_user_request(dev);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
-
-/**
- * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
- * @dev: Device whose PM QoS latency limit is to be hidden from user space.
- */
-void dev_pm_qos_hide_latency_limit(struct device *dev)
-{
- if (dev->power.pq_req) {
- pm_qos_sysfs_remove(dev);
- __dev_pm_qos_drop_user_request(dev);
- }
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
-#endif /* CONFIG_PM_RUNTIME */
diff --git a/ANDROID_3.4.5/drivers/base/power/runtime.c b/ANDROID_3.4.5/drivers/base/power/runtime.c
deleted file mode 100644
index 59894873..00000000
--- a/ANDROID_3.4.5/drivers/base/power/runtime.c
+++ /dev/null
@@ -1,1317 +0,0 @@
-/*
- * drivers/base/power/runtime.c - Helper functions for device runtime PM
- *
- * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
- *
- * This file is released under the GPLv2.
- */
-
-#include <linux/sched.h>
-#include <linux/export.h>
-#include <linux/pm_runtime.h>
-#include <trace/events/rpm.h>
-#include "power.h"
-
-static int rpm_resume(struct device *dev, int rpmflags);
-static int rpm_suspend(struct device *dev, int rpmflags);
-
-/**
- * update_pm_runtime_accounting - Update the time accounting of power states
- * @dev: Device to update the accounting for
- *
- * In order to be able to have time accounting of the various power states
- * (as used by programs such as PowerTOP to show the effectiveness of runtime
- * PM), we need to track the time spent in each state.
- * update_pm_runtime_accounting must be called each time before the
- * runtime_status field is updated, to account the time in the old state
- * correctly.
- */
-void update_pm_runtime_accounting(struct device *dev)
-{
- unsigned long now = jiffies;
- unsigned long delta;
-
- delta = now - dev->power.accounting_timestamp;
-
- dev->power.accounting_timestamp = now;
-
- if (dev->power.disable_depth > 0)
- return;
-
- if (dev->power.runtime_status == RPM_SUSPENDED)
- dev->power.suspended_jiffies += delta;
- else
- dev->power.active_jiffies += delta;
-}
-
-static void __update_runtime_status(struct device *dev, enum rpm_status status)
-{
- update_pm_runtime_accounting(dev);
- dev->power.runtime_status = status;
-}
-
-/**
- * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
- * @dev: Device to handle.
- */
-static void pm_runtime_deactivate_timer(struct device *dev)
-{
- if (dev->power.timer_expires > 0) {
- del_timer(&dev->power.suspend_timer);
- dev->power.timer_expires = 0;
- }
-}
-
-/**
- * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
- * @dev: Device to handle.
- */
-static void pm_runtime_cancel_pending(struct device *dev)
-{
- pm_runtime_deactivate_timer(dev);
- /*
- * In case there's a request pending, make sure its work function will
- * return without doing anything.
- */
- dev->power.request = RPM_REQ_NONE;
-}
-
-/*
- * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
- * @dev: Device to handle.
- *
- * Compute the autosuspend-delay expiration time based on the device's
- * power.last_busy time. If the delay has already expired or is disabled
- * (negative) or the power.use_autosuspend flag isn't set, return 0.
- * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
- *
- * This function may be called either with or without dev->power.lock held.
- * Either way it can be racy, since power.last_busy may be updated at any time.
- */
-unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
-{
- int autosuspend_delay;
- long elapsed;
- unsigned long last_busy;
- unsigned long expires = 0;
-
- if (!dev->power.use_autosuspend)
- goto out;
-
- autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
- if (autosuspend_delay < 0)
- goto out;
-
- last_busy = ACCESS_ONCE(dev->power.last_busy);
- elapsed = jiffies - last_busy;
- if (elapsed < 0)
- goto out; /* jiffies has wrapped around. */
-
- /*
- * If the autosuspend_delay is >= 1 second, align the timer by rounding
- * up to the nearest second.
- */
- expires = last_busy + msecs_to_jiffies(autosuspend_delay);
- if (autosuspend_delay >= 1000)
- expires = round_jiffies(expires);
- expires += !expires;
- if (elapsed >= expires - last_busy)
- expires = 0; /* Already expired. */
-
- out:
- return expires;
-}
-EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
-
-/**
- * rpm_check_suspend_allowed - Test whether a device may be suspended.
- * @dev: Device to test.
- */
-static int rpm_check_suspend_allowed(struct device *dev)
-{
- int retval = 0;
-
- if (dev->power.runtime_error)
- retval = -EINVAL;
- else if (dev->power.disable_depth > 0)
- retval = -EACCES;
- else if (atomic_read(&dev->power.usage_count) > 0)
- retval = -EAGAIN;
- else if (!pm_children_suspended(dev))
- retval = -EBUSY;
-
- /* Pending resume requests take precedence over suspends. */
- else if ((dev->power.deferred_resume
- && dev->power.runtime_status == RPM_SUSPENDING)
- || (dev->power.request_pending
- && dev->power.request == RPM_REQ_RESUME))
- retval = -EAGAIN;
- else if (dev->power.runtime_status == RPM_SUSPENDED)
- retval = 1;
-
- return retval;
-}
-
-/**
- * __rpm_callback - Run a given runtime PM callback for a given device.
- * @cb: Runtime PM callback to run.
- * @dev: Device to run the callback for.
- */
-static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
- __releases(&dev->power.lock) __acquires(&dev->power.lock)
-{
- int retval;
-
- if (dev->power.irq_safe)
- spin_unlock(&dev->power.lock);
- else
- spin_unlock_irq(&dev->power.lock);
-
- retval = cb(dev);
-
- if (dev->power.irq_safe)
- spin_lock(&dev->power.lock);
- else
- spin_lock_irq(&dev->power.lock);
-
- return retval;
-}
-
-/**
- * rpm_idle - Notify device bus type if the device can be suspended.
- * @dev: Device to notify the bus type about.
- * @rpmflags: Flag bits.
- *
- * Check if the device's runtime PM status allows it to be suspended. If
- * another idle notification has been started earlier, return immediately. If
- * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
- * run the ->runtime_idle() callback directly.
- *
- * This function must be called under dev->power.lock with interrupts disabled.
- */
-static int rpm_idle(struct device *dev, int rpmflags)
-{
- int (*callback)(struct device *);
- int retval;
-
- trace_rpm_idle(dev, rpmflags);
- retval = rpm_check_suspend_allowed(dev);
- if (retval < 0)
- ; /* Conditions are wrong. */
-
- /* Idle notifications are allowed only in the RPM_ACTIVE state. */
- else if (dev->power.runtime_status != RPM_ACTIVE)
- retval = -EAGAIN;
-
- /*
- * Any pending request other than an idle notification takes
- * precedence over us, except that the timer may be running.
- */
- else if (dev->power.request_pending &&
- dev->power.request > RPM_REQ_IDLE)
- retval = -EAGAIN;
-
- /* Act as though RPM_NOWAIT is always set. */
- else if (dev->power.idle_notification)
- retval = -EINPROGRESS;
- if (retval)
- goto out;
-
- /* Pending requests need to be canceled. */
- dev->power.request = RPM_REQ_NONE;
-
- if (dev->power.no_callbacks) {
- /* Assume ->runtime_idle() callback would have suspended. */
- retval = rpm_suspend(dev, rpmflags);
- goto out;
- }
-
- /* Carry out an asynchronous or a synchronous idle notification. */
- if (rpmflags & RPM_ASYNC) {
- dev->power.request = RPM_REQ_IDLE;
- if (!dev->power.request_pending) {
- dev->power.request_pending = true;
- queue_work(pm_wq, &dev->power.work);
- }
- goto out;
- }
-
- dev->power.idle_notification = true;
-
- if (dev->pm_domain)
- callback = dev->pm_domain->ops.runtime_idle;
- else if (dev->type && dev->type->pm)
- callback = dev->type->pm->runtime_idle;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_idle;
- else if (dev->bus && dev->bus->pm)
- callback = dev->bus->pm->runtime_idle;
- else
- callback = NULL;
-
- if (!callback && dev->driver && dev->driver->pm)
- callback = dev->driver->pm->runtime_idle;
-
- if (callback)
- __rpm_callback(callback, dev);
-
- dev->power.idle_notification = false;
- wake_up_all(&dev->power.wait_queue);
-
- out:
- trace_rpm_return_int(dev, _THIS_IP_, retval);
- return retval;
-}
-
-/**
- * rpm_callback - Run a given runtime PM callback for a given device.
- * @cb: Runtime PM callback to run.
- * @dev: Device to run the callback for.
- */
-static int rpm_callback(int (*cb)(struct device *), struct device *dev)
-{
- int retval;
-
- if (!cb)
- return -ENOSYS;
-
- retval = __rpm_callback(cb, dev);
-
- dev->power.runtime_error = retval;
- return retval != -EACCES ? retval : -EIO;
-}
-
-/**
- * rpm_suspend - Carry out runtime suspend of given device.
- * @dev: Device to suspend.
- * @rpmflags: Flag bits.
- *
- * Check if the device's runtime PM status allows it to be suspended.
- * Cancel a pending idle notification, autosuspend or suspend. If
- * another suspend has been started earlier, either return immediately
- * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
- * flags. If the RPM_ASYNC flag is set then queue a suspend request;
- * otherwise run the ->runtime_suspend() callback directly. When
- * ->runtime_suspend succeeded, if a deferred resume was requested while
- * the callback was running then carry it out, otherwise send an idle
- * notification for its parent (if the suspend succeeded and both
- * ignore_children of parent->power and irq_safe of dev->power are not set).
- * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
- * flag is set and the next autosuspend-delay expiration time is in the
- * future, schedule another autosuspend attempt.
- *
- * This function must be called under dev->power.lock with interrupts disabled.
- */
-static int rpm_suspend(struct device *dev, int rpmflags)
- __releases(&dev->power.lock) __acquires(&dev->power.lock)
-{
- int (*callback)(struct device *);
- struct device *parent = NULL;
- int retval;
-
- trace_rpm_suspend(dev, rpmflags);
-
- repeat:
- retval = rpm_check_suspend_allowed(dev);
-
- if (retval < 0)
- ; /* Conditions are wrong. */
-
- /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
- else if (dev->power.runtime_status == RPM_RESUMING &&
- !(rpmflags & RPM_ASYNC))
- retval = -EAGAIN;
- if (retval)
- goto out;
-
- /* If the autosuspend_delay time hasn't expired yet, reschedule. */
- if ((rpmflags & RPM_AUTO)
- && dev->power.runtime_status != RPM_SUSPENDING) {
- unsigned long expires = pm_runtime_autosuspend_expiration(dev);
-
- if (expires != 0) {
- /* Pending requests need to be canceled. */
- dev->power.request = RPM_REQ_NONE;
-
- /*
- * Optimization: If the timer is already running and is
- * set to expire at or before the autosuspend delay,
- * avoid the overhead of resetting it. Just let it
- * expire; pm_suspend_timer_fn() will take care of the
- * rest.
- */
- if (!(dev->power.timer_expires && time_before_eq(
- dev->power.timer_expires, expires))) {
- dev->power.timer_expires = expires;
- mod_timer(&dev->power.suspend_timer, expires);
- }
- dev->power.timer_autosuspends = 1;
- goto out;
- }
- }
-
- /* Other scheduled or pending requests need to be canceled. */
- pm_runtime_cancel_pending(dev);
-
- if (dev->power.runtime_status == RPM_SUSPENDING) {
- DEFINE_WAIT(wait);
-
- if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
- retval = -EINPROGRESS;
- goto out;
- }
-
- if (dev->power.irq_safe) {
- spin_unlock(&dev->power.lock);
-
- cpu_relax();
-
- spin_lock(&dev->power.lock);
- goto repeat;
- }
-
- /* Wait for the other suspend running in parallel with us. */
- for (;;) {
- prepare_to_wait(&dev->power.wait_queue, &wait,
- TASK_UNINTERRUPTIBLE);
- if (dev->power.runtime_status != RPM_SUSPENDING)
- break;
-
- spin_unlock_irq(&dev->power.lock);
-
- schedule();
-
- spin_lock_irq(&dev->power.lock);
- }
- finish_wait(&dev->power.wait_queue, &wait);
- goto repeat;
- }
-
- dev->power.deferred_resume = false;
- if (dev->power.no_callbacks)
- goto no_callback; /* Assume success. */
-
- /* Carry out an asynchronous or a synchronous suspend. */
- if (rpmflags & RPM_ASYNC) {
- dev->power.request = (rpmflags & RPM_AUTO) ?
- RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
- if (!dev->power.request_pending) {
- dev->power.request_pending = true;
- queue_work(pm_wq, &dev->power.work);
- }
- goto out;
- }
-
- if (__dev_pm_qos_read_value(dev) < 0) {
- /* Negative PM QoS constraint means "never suspend". */
- retval = -EPERM;
- goto out;
- }
-
- __update_runtime_status(dev, RPM_SUSPENDING);
-
- if (dev->pm_domain)
- callback = dev->pm_domain->ops.runtime_suspend;
- else if (dev->type && dev->type->pm)
- callback = dev->type->pm->runtime_suspend;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_suspend;
- else if (dev->bus && dev->bus->pm)
- callback = dev->bus->pm->runtime_suspend;
- else
- callback = NULL;
-
- if (!callback && dev->driver && dev->driver->pm)
- callback = dev->driver->pm->runtime_suspend;
-
- retval = rpm_callback(callback, dev);
- if (retval)
- goto fail;
-
- no_callback:
- __update_runtime_status(dev, RPM_SUSPENDED);
- pm_runtime_deactivate_timer(dev);
-
- if (dev->parent) {
- parent = dev->parent;
- atomic_add_unless(&parent->power.child_count, -1, 0);
- }
- wake_up_all(&dev->power.wait_queue);
-
- if (dev->power.deferred_resume) {
- rpm_resume(dev, 0);
- retval = -EAGAIN;
- goto out;
- }
-
- /* Maybe the parent is now able to suspend. */
- if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
- spin_unlock(&dev->power.lock);
-
- spin_lock(&parent->power.lock);
- rpm_idle(parent, RPM_ASYNC);
- spin_unlock(&parent->power.lock);
-
- spin_lock(&dev->power.lock);
- }
-
- out:
- trace_rpm_return_int(dev, _THIS_IP_, retval);
-
- return retval;
-
- fail:
- __update_runtime_status(dev, RPM_ACTIVE);
- dev->power.deferred_resume = false;
- wake_up_all(&dev->power.wait_queue);
-
- if (retval == -EAGAIN || retval == -EBUSY) {
- dev->power.runtime_error = 0;
-
- /*
- * If the callback routine failed an autosuspend, and
- * if the last_busy time has been updated so that there
- * is a new autosuspend expiration time, automatically
- * reschedule another autosuspend.
- */
- if ((rpmflags & RPM_AUTO) &&
- pm_runtime_autosuspend_expiration(dev) != 0)
- goto repeat;
- } else {
- pm_runtime_cancel_pending(dev);
- }
- goto out;
-}
-
-/**
- * rpm_resume - Carry out runtime resume of given device.
- * @dev: Device to resume.
- * @rpmflags: Flag bits.
- *
- * Check if the device's runtime PM status allows it to be resumed. Cancel
- * any scheduled or pending requests. If another resume has been started
- * earlier, either return immediately or wait for it to finish, depending on the
- * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
- * parallel with this function, either tell the other process to resume after
- * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC
- * flag is set then queue a resume request; otherwise run the
- * ->runtime_resume() callback directly. Queue an idle notification for the
- * device if the resume succeeded.
- *
- * This function must be called under dev->power.lock with interrupts disabled.
- */
-static int rpm_resume(struct device *dev, int rpmflags)
- __releases(&dev->power.lock) __acquires(&dev->power.lock)
-{
- int (*callback)(struct device *);
- struct device *parent = NULL;
- int retval = 0;
-
- trace_rpm_resume(dev, rpmflags);
-
- repeat:
- if (dev->power.runtime_error)
- retval = -EINVAL;
- else if (dev->power.disable_depth > 0)
- retval = -EACCES;
- if (retval)
- goto out;
-
- /*
- * Other scheduled or pending requests need to be canceled. Small
- * optimization: If an autosuspend timer is running, leave it running
- * rather than cancelling it now only to restart it again in the near
- * future.
- */
- dev->power.request = RPM_REQ_NONE;
- if (!dev->power.timer_autosuspends)
- pm_runtime_deactivate_timer(dev);
-
- if (dev->power.runtime_status == RPM_ACTIVE) {
- retval = 1;
- goto out;
- }
-
- if (dev->power.runtime_status == RPM_RESUMING
- || dev->power.runtime_status == RPM_SUSPENDING) {
- DEFINE_WAIT(wait);
-
- if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
- if (dev->power.runtime_status == RPM_SUSPENDING)
- dev->power.deferred_resume = true;
- else
- retval = -EINPROGRESS;
- goto out;
- }
-
- if (dev->power.irq_safe) {
- spin_unlock(&dev->power.lock);
-
- cpu_relax();
-
- spin_lock(&dev->power.lock);
- goto repeat;
- }
-
- /* Wait for the operation carried out in parallel with us. */
- for (;;) {
- prepare_to_wait(&dev->power.wait_queue, &wait,
- TASK_UNINTERRUPTIBLE);
- if (dev->power.runtime_status != RPM_RESUMING
- && dev->power.runtime_status != RPM_SUSPENDING)
- break;
-
- spin_unlock_irq(&dev->power.lock);
-
- schedule();
-
- spin_lock_irq(&dev->power.lock);
- }
- finish_wait(&dev->power.wait_queue, &wait);
- goto repeat;
- }
-
- /*
- * See if we can skip waking up the parent. This is safe only if
- * power.no_callbacks is set, because otherwise we don't know whether
- * the resume will actually succeed.
- */
- if (dev->power.no_callbacks && !parent && dev->parent) {
- spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
- if (dev->parent->power.disable_depth > 0
- || dev->parent->power.ignore_children
- || dev->parent->power.runtime_status == RPM_ACTIVE) {
- atomic_inc(&dev->parent->power.child_count);
- spin_unlock(&dev->parent->power.lock);
- goto no_callback; /* Assume success. */
- }
- spin_unlock(&dev->parent->power.lock);
- }
-
- /* Carry out an asynchronous or a synchronous resume. */
- if (rpmflags & RPM_ASYNC) {
- dev->power.request = RPM_REQ_RESUME;
- if (!dev->power.request_pending) {
- dev->power.request_pending = true;
- queue_work(pm_wq, &dev->power.work);
- }
- retval = 0;
- goto out;
- }
-
- if (!parent && dev->parent) {
- /*
- * Increment the parent's usage counter and resume it if
- * necessary. Not needed if dev is irq-safe; then the
- * parent is permanently resumed.
- */
- parent = dev->parent;
- if (dev->power.irq_safe)
- goto skip_parent;
- spin_unlock(&dev->power.lock);
-
- pm_runtime_get_noresume(parent);
-
- spin_lock(&parent->power.lock);
- /*
- * We can resume if the parent's runtime PM is disabled or it
- * is set to ignore children.
- */
- if (!parent->power.disable_depth
- && !parent->power.ignore_children) {
- rpm_resume(parent, 0);
- if (parent->power.runtime_status != RPM_ACTIVE)
- retval = -EBUSY;
- }
- spin_unlock(&parent->power.lock);
-
- spin_lock(&dev->power.lock);
- if (retval)
- goto out;
- goto repeat;
- }
- skip_parent:
-
- if (dev->power.no_callbacks)
- goto no_callback; /* Assume success. */
-
- __update_runtime_status(dev, RPM_RESUMING);
-
- if (dev->pm_domain)
- callback = dev->pm_domain->ops.runtime_resume;
- else if (dev->type && dev->type->pm)
- callback = dev->type->pm->runtime_resume;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_resume;
- else if (dev->bus && dev->bus->pm)
- callback = dev->bus->pm->runtime_resume;
- else
- callback = NULL;
-
- if (!callback && dev->driver && dev->driver->pm)
- callback = dev->driver->pm->runtime_resume;
-
- retval = rpm_callback(callback, dev);
- if (retval) {
- __update_runtime_status(dev, RPM_SUSPENDED);
- pm_runtime_cancel_pending(dev);
- } else {
- no_callback:
- __update_runtime_status(dev, RPM_ACTIVE);
- if (parent)
- atomic_inc(&parent->power.child_count);
- }
- wake_up_all(&dev->power.wait_queue);
-
- if (!retval)
- rpm_idle(dev, RPM_ASYNC);
-
- out:
- if (parent && !dev->power.irq_safe) {
- spin_unlock_irq(&dev->power.lock);
-
- pm_runtime_put(parent);
-
- spin_lock_irq(&dev->power.lock);
- }
-
- trace_rpm_return_int(dev, _THIS_IP_, retval);
-
- return retval;
-}
-
-/**
- * pm_runtime_work - Universal runtime PM work function.
- * @work: Work structure used for scheduling the execution of this function.
- *
- * Use @work to get the device object the work is to be done for, determine what
- * is to be done and execute the appropriate runtime PM function.
- */
-static void pm_runtime_work(struct work_struct *work)
-{
- struct device *dev = container_of(work, struct device, power.work);
- enum rpm_request req;
-
- spin_lock_irq(&dev->power.lock);
-
- if (!dev->power.request_pending)
- goto out;
-
- req = dev->power.request;
- dev->power.request = RPM_REQ_NONE;
- dev->power.request_pending = false;
-
- switch (req) {
- case RPM_REQ_NONE:
- break;
- case RPM_REQ_IDLE:
- rpm_idle(dev, RPM_NOWAIT);
- break;
- case RPM_REQ_SUSPEND:
- rpm_suspend(dev, RPM_NOWAIT);
- break;
- case RPM_REQ_AUTOSUSPEND:
- rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
- break;
- case RPM_REQ_RESUME:
- rpm_resume(dev, RPM_NOWAIT);
- break;
- }
-
- out:
- spin_unlock_irq(&dev->power.lock);
-}
-
-/**
- * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
- * @data: Device pointer passed by pm_schedule_suspend().
- *
- * Check if the time is right and queue a suspend request.
- */
-static void pm_suspend_timer_fn(unsigned long data)
-{
- struct device *dev = (struct device *)data;
- unsigned long flags;
- unsigned long expires;
-
- spin_lock_irqsave(&dev->power.lock, flags);
-
- expires = dev->power.timer_expires;
- /* If 'expire' is after 'jiffies' we've been called too early. */
- if (expires > 0 && !time_after(expires, jiffies)) {
- dev->power.timer_expires = 0;
- rpm_suspend(dev, dev->power.timer_autosuspends ?
- (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
- }
-
- spin_unlock_irqrestore(&dev->power.lock, flags);
-}
-
-/**
- * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
- * @dev: Device to suspend.
- * @delay: Time to wait before submitting a suspend request, in milliseconds.
- */
-int pm_schedule_suspend(struct device *dev, unsigned int delay)
-{
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&dev->power.lock, flags);
-
- if (!delay) {
- retval = rpm_suspend(dev, RPM_ASYNC);
- goto out;
- }
-
- retval = rpm_check_suspend_allowed(dev);
- if (retval)
- goto out;
-
- /* Other scheduled or pending requests need to be canceled. */
- pm_runtime_cancel_pending(dev);
-
- dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
- dev->power.timer_expires += !dev->power.timer_expires;
- dev->power.timer_autosuspends = 0;
- mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
-
- out:
- spin_unlock_irqrestore(&dev->power.lock, flags);
-
- return retval;
-}
-EXPORT_SYMBOL_GPL(pm_schedule_suspend);
-
-/**
- * __pm_runtime_idle - Entry point for runtime idle operations.
- * @dev: Device to send idle notification for.
- * @rpmflags: Flag bits.
- *
- * If the RPM_GET_PUT flag is set, decrement the device's usage count and
- * return immediately if it is larger than zero. Then carry out an idle
- * notification, either synchronous or asynchronous.
- *
- * This routine may be called in atomic context if the RPM_ASYNC flag is set,
- * or if pm_runtime_irq_safe() has been called.
- */
-int __pm_runtime_idle(struct device *dev, int rpmflags)
-{
- unsigned long flags;
- int retval;
-
- might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
- if (rpmflags & RPM_GET_PUT) {
- if (!atomic_dec_and_test(&dev->power.usage_count))
- return 0;
- }
-
- spin_lock_irqsave(&dev->power.lock, flags);
- retval = rpm_idle(dev, rpmflags);
- spin_unlock_irqrestore(&dev->power.lock, flags);
-
- return retval;
-}
-EXPORT_SYMBOL_GPL(__pm_runtime_idle);
-
-/**
- * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
- * @dev: Device to suspend.
- * @rpmflags: Flag bits.
- *
- * If the RPM_GET_PUT flag is set, decrement the device's usage count and
- * return immediately if it is larger than zero. Then carry out a suspend,
- * either synchronous or asynchronous.
- *
- * This routine may be called in atomic context if the RPM_ASYNC flag is set,
- * or if pm_runtime_irq_safe() has been called.
- */
-int __pm_runtime_suspend(struct device *dev, int rpmflags)
-{
- unsigned long flags;
- int retval;
-
- might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
- if (rpmflags & RPM_GET_PUT) {
- if (!atomic_dec_and_test(&dev->power.usage_count))
- return 0;
- }
-
- spin_lock_irqsave(&dev->power.lock, flags);
- retval = rpm_suspend(dev, rpmflags);
- spin_unlock_irqrestore(&dev->power.lock, flags);
-
- return retval;
-}
-EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
-
-/**
- * __pm_runtime_resume - Entry point for runtime resume operations.
- * @dev: Device to resume.
- * @rpmflags: Flag bits.
- *
- * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
- * carry out a resume, either synchronous or asynchronous.
- *
- * This routine may be called in atomic context if the RPM_ASYNC flag is set,
- * or if pm_runtime_irq_safe() has been called.
- */
-int __pm_runtime_resume(struct device *dev, int rpmflags)
-{
- unsigned long flags;
- int retval;
-
- might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
- if (rpmflags & RPM_GET_PUT)
- atomic_inc(&dev->power.usage_count);
-
- spin_lock_irqsave(&dev->power.lock, flags);
- retval = rpm_resume(dev, rpmflags);
- spin_unlock_irqrestore(&dev->power.lock, flags);
-
- return retval;
-}
-EXPORT_SYMBOL_GPL(__pm_runtime_resume);
-
/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	/* Only the two stable states may be set from outside. */
	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	/*
	 * Refuse to override the status while runtime PM is enabled and no
	 * callback error has been recorded - the PM core owns it then.
	 */
	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			/* This device no longer counts as an active child. */
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		/* Parent lock nests under the child's power.lock. */
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	/* Notify outside the lock - pm_request_idle() takes it itself. */
	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
-
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		/*
		 * Drop the lock while flushing the work item, because
		 * pm_runtime_work() acquires the same lock.
		 */
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			/* Re-check under the lock before actually sleeping. */
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}
-
-/**
- * pm_runtime_barrier - Flush pending requests and wait for completions.
- * @dev: Device to handle.
- *
- * Prevent the device from being suspended by incrementing its usage counter and
- * if there's a pending resume request for the device, wake the device up.
- * Next, make sure that all pending requests for the device have been flushed
- * from pm_wq and wait for all runtime PM operations involving the device in
- * progress to complete.
- *
- * Return value:
- * 1, if there was a resume request pending and the device had to be woken up,
- * 0, otherwise
- */
-int pm_runtime_barrier(struct device *dev)
-{
- int retval = 0;
-
- pm_runtime_get_noresume(dev);
- spin_lock_irq(&dev->power.lock);
-
- if (dev->power.request_pending
- && dev->power.request == RPM_REQ_RESUME) {
- rpm_resume(dev, 0);
- retval = 1;
- }
-
- __pm_runtime_barrier(dev);
-
- spin_unlock_irq(&dev->power.lock);
- pm_runtime_put_noidle(dev);
-
- return retval;
-}
-EXPORT_SYMBOL_GPL(pm_runtime_barrier);
-
/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	/* Already disabled - just deepen the nesting. */
	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	/* Flush requests and in-flight transitions on the 0 -> 1 edge. */
	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
-
-/**
- * pm_runtime_enable - Enable runtime PM of a device.
- * @dev: Device to handle.
- */
-void pm_runtime_enable(struct device *dev)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dev->power.lock, flags);
-
- if (dev->power.disable_depth > 0)
- dev->power.disable_depth--;
- else
- dev_warn(dev, "Unbalanced %s!\n", __func__);
-
- spin_unlock_irqrestore(&dev->power.lock, flags);
-}
-EXPORT_SYMBOL_GPL(pm_runtime_enable);
-
-/**
- * pm_runtime_forbid - Block runtime PM of a device.
- * @dev: Device to handle.
- *
- * Increase the device's usage count and clear its power.runtime_auto flag,
- * so that it cannot be suspended at run time until pm_runtime_allow() is called
- * for it.
- */
-void pm_runtime_forbid(struct device *dev)
-{
- spin_lock_irq(&dev->power.lock);
- if (!dev->power.runtime_auto)
- goto out;
-
- dev->power.runtime_auto = false;
- atomic_inc(&dev->power.usage_count);
- rpm_resume(dev, 0);
-
- out:
- spin_unlock_irq(&dev->power.lock);
-}
-EXPORT_SYMBOL_GPL(pm_runtime_forbid);
-
-/**
- * pm_runtime_allow - Unblock runtime PM of a device.
- * @dev: Device to handle.
- *
- * Decrease the device's usage count and set its power.runtime_auto flag.
- */
-void pm_runtime_allow(struct device *dev)
-{
- spin_lock_irq(&dev->power.lock);
- if (dev->power.runtime_auto)
- goto out;
-
- dev->power.runtime_auto = true;
- if (atomic_dec_and_test(&dev->power.usage_count))
- rpm_idle(dev, RPM_AUTO);
-
- out:
- spin_unlock_irq(&dev->power.lock);
-}
-EXPORT_SYMBOL_GPL(pm_runtime_allow);
-
-/**
- * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
- * @dev: Device to handle.
- *
- * Set the power.no_callbacks flag, which tells the PM core that this
- * device is power-managed through its parent and has no runtime PM
- * callbacks of its own. The runtime sysfs attributes will be removed.
- */
-void pm_runtime_no_callbacks(struct device *dev)
-{
- spin_lock_irq(&dev->power.lock);
- dev->power.no_callbacks = 1;
- spin_unlock_irq(&dev->power.lock);
- if (device_is_registered(dev))
- rpm_sysfs_remove(dev);
-}
-EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
-
-/**
- * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
- * @dev: Device to handle
- *
- * Set the power.irq_safe flag, which tells the PM core that the
- * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
- * always be invoked with the spinlock held and interrupts disabled. It also
- * causes the parent's usage counter to be permanently incremented, preventing
- * the parent from runtime suspending -- otherwise an irq-safe child might have
- * to wait for a non-irq-safe parent.
- */
-void pm_runtime_irq_safe(struct device *dev)
-{
- if (dev->parent)
- pm_runtime_get_sync(dev->parent);
- spin_lock_irq(&dev->power.lock);
- dev->power.irq_safe = 1;
- spin_unlock_irq(&dev->power.lock);
-}
-EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
-
-/**
- * update_autosuspend - Handle a change to a device's autosuspend settings.
- * @dev: Device to handle.
- * @old_delay: The former autosuspend_delay value.
- * @old_use: The former use_autosuspend value.
- *
- * Prevent runtime suspend if the new delay is negative and use_autosuspend is
- * set; otherwise allow it. Send an idle notification if suspends are allowed.
- *
- * This function must be called under dev->power.lock with interrupts disabled.
- */
-static void update_autosuspend(struct device *dev, int old_delay, int old_use)
-{
- int delay = dev->power.autosuspend_delay;
-
- /* Should runtime suspend be prevented now? */
- if (dev->power.use_autosuspend && delay < 0) {
-
- /* If it used to be allowed then prevent it. */
- if (!old_use || old_delay >= 0) {
- atomic_inc(&dev->power.usage_count);
- rpm_resume(dev, 0);
- }
- }
-
- /* Runtime suspend should be allowed now. */
- else {
-
- /* If it used to be prevented then allow it. */
- if (old_use && old_delay < 0)
- atomic_dec(&dev->power.usage_count);
-
- /* Maybe we can autosuspend now. */
- rpm_idle(dev, RPM_AUTO);
- }
-}
-
-/**
- * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
- * @dev: Device to handle.
- * @delay: Value of the new delay in milliseconds.
- *
- * Set the device's power.autosuspend_delay value. If it changes to negative
- * and the power.use_autosuspend flag is set, prevent runtime suspends. If it
- * changes the other way, allow runtime suspends.
- */
-void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
-{
- int old_delay, old_use;
-
- spin_lock_irq(&dev->power.lock);
- old_delay = dev->power.autosuspend_delay;
- old_use = dev->power.use_autosuspend;
- dev->power.autosuspend_delay = delay;
- update_autosuspend(dev, old_delay, old_use);
- spin_unlock_irq(&dev->power.lock);
-}
-EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
-
-/**
- * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
- * @dev: Device to handle.
- * @use: New value for use_autosuspend.
- *
- * Set the device's power.use_autosuspend flag, and allow or prevent runtime
- * suspends as needed.
- */
-void __pm_runtime_use_autosuspend(struct device *dev, bool use)
-{
- int old_delay, old_use;
-
- spin_lock_irq(&dev->power.lock);
- old_delay = dev->power.autosuspend_delay;
- old_use = dev->power.use_autosuspend;
- dev->power.use_autosuspend = use;
- update_autosuspend(dev, old_delay, old_use);
- spin_unlock_irq(&dev->power.lock);
-}
-EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
-
-/**
- * pm_runtime_init - Initialize runtime PM fields in given device object.
- * @dev: Device object to initialize.
- */
-void pm_runtime_init(struct device *dev)
-{
- dev->power.runtime_status = RPM_SUSPENDED;
- dev->power.idle_notification = false;
-
- dev->power.disable_depth = 1;
- atomic_set(&dev->power.usage_count, 0);
-
- dev->power.runtime_error = 0;
-
- atomic_set(&dev->power.child_count, 0);
- pm_suspend_ignore_children(dev, false);
- dev->power.runtime_auto = true;
-
- dev->power.request_pending = false;
- dev->power.request = RPM_REQ_NONE;
- dev->power.deferred_resume = false;
- dev->power.accounting_timestamp = jiffies;
- INIT_WORK(&dev->power.work, pm_runtime_work);
-
- dev->power.timer_expires = 0;
- setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
- (unsigned long)dev);
-
- init_waitqueue_head(&dev->power.wait_queue);
-}
-
-/**
- * pm_runtime_remove - Prepare for removing a device from device hierarchy.
- * @dev: Device object being removed from device hierarchy.
- */
-void pm_runtime_remove(struct device *dev)
-{
- __pm_runtime_disable(dev, false);
-
- /* Change the status back to 'suspended' to match the initial status. */
- if (dev->power.runtime_status == RPM_ACTIVE)
- pm_runtime_set_suspended(dev);
- if (dev->power.irq_safe && dev->parent)
- pm_runtime_put_sync(dev->parent);
-}
diff --git a/ANDROID_3.4.5/drivers/base/power/sysfs.c b/ANDROID_3.4.5/drivers/base/power/sysfs.c
deleted file mode 100644
index 48be2ad4..00000000
--- a/ANDROID_3.4.5/drivers/base/power/sysfs.c
+++ /dev/null
@@ -1,634 +0,0 @@
-/*
- * drivers/base/power/sysfs.c - sysfs entries for device PM
- */
-
-#include <linux/device.h>
-#include <linux/string.h>
-#include <linux/export.h>
-#include <linux/pm_qos.h>
-#include <linux/pm_runtime.h>
-#include <linux/atomic.h>
-#include <linux/jiffies.h>
-#include "power.h"
-
-/*
- * control - Report/change current runtime PM setting of the device
- *
- * Runtime power management of a device can be blocked with the help of
- * this attribute. All devices have one of the following two values for
- * the power/control file:
- *
- * + "auto\n" to allow the device to be power managed at run time;
- * + "on\n" to prevent the device from being power managed at run time;
- *
- * The default for all devices is "auto", which means that devices may be
- * subject to automatic power management, depending on their drivers.
- * Changing this attribute to "on" prevents the driver from power managing
- * the device at run time. Doing that while the device is suspended causes
- * it to be woken up.
- *
- * wakeup - Report/change current wakeup option for device
- *
- * Some devices support "wakeup" events, which are hardware signals
- * used to activate devices from suspended or low power states. Such
- * devices have one of three values for the sysfs power/wakeup file:
- *
- * + "enabled\n" to issue the events;
- * + "disabled\n" not to do so; or
- * + "\n" for temporary or permanent inability to issue wakeup.
- *
- * (For example, unconfigured USB devices can't issue wakeups.)
- *
- * Familiar examples of devices that can issue wakeup events include
- * keyboards and mice (both PS2 and USB styles), power buttons, modems,
- * "Wake-On-LAN" Ethernet links, GPIO lines, and more. Some events
- * will wake the entire system from a suspend state; others may just
- * wake up the device (if the system as a whole is already active).
- * Some wakeup events use normal IRQ lines; other use special out
- * of band signaling.
- *
- * It is the responsibility of device drivers to enable (or disable)
- * wakeup signaling as part of changing device power states, respecting
- * the policy choices provided through the driver model.
- *
- * Devices may not be able to generate wakeup events from all power
- * states. Also, the events may be ignored in some configurations;
- * for example, they might need help from other devices that aren't
- * active, or which may have wakeup disabled. Some drivers rely on
- * wakeup events internally (unless they are disabled), keeping
- * their hardware in low power modes whenever they're unused. This
- * saves runtime power, without requiring system-wide sleep states.
- *
- * async - Report/change current async suspend setting for the device
- *
- * Asynchronous suspend and resume of the device during system-wide power
- * state transitions can be enabled by writing "enabled" to this file.
- * Analogously, if "disabled" is written to this file, the device will be
- * suspended and resumed synchronously.
- *
- * All devices have one of the following two values for power/async:
- *
- * + "enabled\n" to permit the asynchronous suspend/resume of the device;
- * + "disabled\n" to forbid it;
- *
- * NOTE: It generally is unsafe to permit the asynchronous suspend/resume
- * of a device unless it is certain that all of the PM dependencies of the
- * device are known to the PM core. However, for some devices this
- * attribute is set to "enabled" by bus type code or device drivers and in
- * those cases it should be safe to leave the default value.
- *
- * autosuspend_delay_ms - Report/change a device's autosuspend_delay value
- *
- * Some drivers don't want to carry out a runtime suspend as soon as a
- * device becomes idle; they want it always to remain idle for some period
- * of time before suspending it. This period is the autosuspend_delay
- * value (expressed in milliseconds) and it can be controlled by the user.
- * If the value is negative then the device will never be runtime
- * suspended.
- *
- * NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay
- * value are used only if the driver calls pm_runtime_use_autosuspend().
- *
- * wakeup_count - Report the number of wakeup events related to the device
- */
-
-static const char enabled[] = "enabled";
-static const char disabled[] = "disabled";
-
-const char power_group_name[] = "power";
-EXPORT_SYMBOL_GPL(power_group_name);
-
-#ifdef CONFIG_PM_RUNTIME
-static const char ctrl_auto[] = "auto";
-static const char ctrl_on[] = "on";
-
-static ssize_t control_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%s\n",
- dev->power.runtime_auto ? ctrl_auto : ctrl_on);
-}
-
-static ssize_t control_store(struct device * dev, struct device_attribute *attr,
- const char * buf, size_t n)
-{
- char *cp;
- int len = n;
-
- cp = memchr(buf, '\n', n);
- if (cp)
- len = cp - buf;
- device_lock(dev);
- if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0)
- pm_runtime_allow(dev);
- else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0)
- pm_runtime_forbid(dev);
- else
- n = -EINVAL;
- device_unlock(dev);
- return n;
-}
-
-static DEVICE_ATTR(control, 0644, control_show, control_store);
-
-static ssize_t rtpm_active_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int ret;
- spin_lock_irq(&dev->power.lock);
- update_pm_runtime_accounting(dev);
- ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
- spin_unlock_irq(&dev->power.lock);
- return ret;
-}
-
-static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL);
-
-static ssize_t rtpm_suspended_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int ret;
- spin_lock_irq(&dev->power.lock);
- update_pm_runtime_accounting(dev);
- ret = sprintf(buf, "%i\n",
- jiffies_to_msecs(dev->power.suspended_jiffies));
- spin_unlock_irq(&dev->power.lock);
- return ret;
-}
-
-static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL);
-
-static ssize_t rtpm_status_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- const char *p;
-
- if (dev->power.runtime_error) {
- p = "error\n";
- } else if (dev->power.disable_depth) {
- p = "unsupported\n";
- } else {
- switch (dev->power.runtime_status) {
- case RPM_SUSPENDED:
- p = "suspended\n";
- break;
- case RPM_SUSPENDING:
- p = "suspending\n";
- break;
- case RPM_RESUMING:
- p = "resuming\n";
- break;
- case RPM_ACTIVE:
- p = "active\n";
- break;
- default:
- return -EIO;
- }
- }
- return sprintf(buf, p);
-}
-
-static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
-
-static ssize_t autosuspend_delay_ms_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- if (!dev->power.use_autosuspend)
- return -EIO;
- return sprintf(buf, "%d\n", dev->power.autosuspend_delay);
-}
-
-static ssize_t autosuspend_delay_ms_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t n)
-{
- long delay;
-
- if (!dev->power.use_autosuspend)
- return -EIO;
-
- if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay)
- return -EINVAL;
-
- device_lock(dev);
- pm_runtime_set_autosuspend_delay(dev, delay);
- device_unlock(dev);
- return n;
-}
-
-static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
- autosuspend_delay_ms_store);
-
-static ssize_t pm_qos_latency_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", dev->power.pq_req->node.prio);
-}
-
-static ssize_t pm_qos_latency_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t n)
-{
- s32 value;
- int ret;
-
- if (kstrtos32(buf, 0, &value))
- return -EINVAL;
-
- if (value < 0)
- return -EINVAL;
-
- ret = dev_pm_qos_update_request(dev->power.pq_req, value);
- return ret < 0 ? ret : n;
-}
-
-static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
- pm_qos_latency_show, pm_qos_latency_store);
-#endif /* CONFIG_PM_RUNTIME */
-
-#ifdef CONFIG_PM_SLEEP
-static ssize_t
-wake_show(struct device * dev, struct device_attribute *attr, char * buf)
-{
- return sprintf(buf, "%s\n", device_can_wakeup(dev)
- ? (device_may_wakeup(dev) ? enabled : disabled)
- : "");
-}
-
-static ssize_t
-wake_store(struct device * dev, struct device_attribute *attr,
- const char * buf, size_t n)
-{
- char *cp;
- int len = n;
-
- if (!device_can_wakeup(dev))
- return -EINVAL;
-
- cp = memchr(buf, '\n', n);
- if (cp)
- len = cp - buf;
- if (len == sizeof enabled - 1
- && strncmp(buf, enabled, sizeof enabled - 1) == 0)
- device_set_wakeup_enable(dev, 1);
- else if (len == sizeof disabled - 1
- && strncmp(buf, disabled, sizeof disabled - 1) == 0)
- device_set_wakeup_enable(dev, 0);
- else
- return -EINVAL;
- return n;
-}
-
-static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
-
-static ssize_t wakeup_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long count = 0;
- bool enabled = false;
-
- spin_lock_irq(&dev->power.lock);
- if (dev->power.wakeup) {
- count = dev->power.wakeup->event_count;
- enabled = true;
- }
- spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
-}
-
-static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL);
-
-static ssize_t wakeup_active_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long count = 0;
- bool enabled = false;
-
- spin_lock_irq(&dev->power.lock);
- if (dev->power.wakeup) {
- count = dev->power.wakeup->active_count;
- enabled = true;
- }
- spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
-}
-
-static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL);
-
-static ssize_t wakeup_abort_count_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long count = 0;
- bool enabled = false;
-
- spin_lock_irq(&dev->power.lock);
- if (dev->power.wakeup) {
- count = dev->power.wakeup->wakeup_count;
- enabled = true;
- }
- spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
-}
-
-static DEVICE_ATTR(wakeup_abort_count, 0444, wakeup_abort_count_show, NULL);
-
-static ssize_t wakeup_expire_count_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long count = 0;
- bool enabled = false;
-
- spin_lock_irq(&dev->power.lock);
- if (dev->power.wakeup) {
- count = dev->power.wakeup->expire_count;
- enabled = true;
- }
- spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
-}
-
-static DEVICE_ATTR(wakeup_expire_count, 0444, wakeup_expire_count_show, NULL);
-
-static ssize_t wakeup_active_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned int active = 0;
- bool enabled = false;
-
- spin_lock_irq(&dev->power.lock);
- if (dev->power.wakeup) {
- active = dev->power.wakeup->active;
- enabled = true;
- }
- spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n");
-}
-
-static DEVICE_ATTR(wakeup_active, 0444, wakeup_active_show, NULL);
-
-static ssize_t wakeup_total_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- s64 msec = 0;
- bool enabled = false;
-
- spin_lock_irq(&dev->power.lock);
- if (dev->power.wakeup) {
- msec = ktime_to_ms(dev->power.wakeup->total_time);
- enabled = true;
- }
- spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
-}
-
-static DEVICE_ATTR(wakeup_total_time_ms, 0444, wakeup_total_time_show, NULL);
-
-static ssize_t wakeup_max_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- s64 msec = 0;
- bool enabled = false;
-
- spin_lock_irq(&dev->power.lock);
- if (dev->power.wakeup) {
- msec = ktime_to_ms(dev->power.wakeup->max_time);
- enabled = true;
- }
- spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
-}
-
-static DEVICE_ATTR(wakeup_max_time_ms, 0444, wakeup_max_time_show, NULL);
-
-static ssize_t wakeup_last_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- s64 msec = 0;
- bool enabled = false;
-
- spin_lock_irq(&dev->power.lock);
- if (dev->power.wakeup) {
- msec = ktime_to_ms(dev->power.wakeup->last_time);
- enabled = true;
- }
- spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
-}
-
-static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL);
-
-#ifdef CONFIG_PM_AUTOSLEEP
-static ssize_t wakeup_prevent_sleep_time_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- s64 msec = 0;
- bool enabled = false;
-
- spin_lock_irq(&dev->power.lock);
- if (dev->power.wakeup) {
- msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time);
- enabled = true;
- }
- spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
-}
-
-static DEVICE_ATTR(wakeup_prevent_sleep_time_ms, 0444,
- wakeup_prevent_sleep_time_show, NULL);
-#endif /* CONFIG_PM_AUTOSLEEP */
-#endif /* CONFIG_PM_SLEEP */
-
-#ifdef CONFIG_PM_ADVANCED_DEBUG
-#ifdef CONFIG_PM_RUNTIME
-
-static ssize_t rtpm_usagecount_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
-}
-
-static ssize_t rtpm_children_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", dev->power.ignore_children ?
- 0 : atomic_read(&dev->power.child_count));
-}
-
-static ssize_t rtpm_enabled_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- if ((dev->power.disable_depth) && (dev->power.runtime_auto == false))
- return sprintf(buf, "disabled & forbidden\n");
- else if (dev->power.disable_depth)
- return sprintf(buf, "disabled\n");
- else if (dev->power.runtime_auto == false)
- return sprintf(buf, "forbidden\n");
- return sprintf(buf, "enabled\n");
-}
-
-static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
-static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
-static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
-
-#endif
-
-static ssize_t async_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%s\n",
- device_async_suspend_enabled(dev) ? enabled : disabled);
-}
-
-static ssize_t async_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t n)
-{
- char *cp;
- int len = n;
-
- cp = memchr(buf, '\n', n);
- if (cp)
- len = cp - buf;
- if (len == sizeof enabled - 1 && strncmp(buf, enabled, len) == 0)
- device_enable_async_suspend(dev);
- else if (len == sizeof disabled - 1 && strncmp(buf, disabled, len) == 0)
- device_disable_async_suspend(dev);
- else
- return -EINVAL;
- return n;
-}
-
-static DEVICE_ATTR(async, 0644, async_show, async_store);
-#endif /* CONFIG_PM_ADVANCED_DEBUG */
-
-static struct attribute *power_attrs[] = {
-#ifdef CONFIG_PM_ADVANCED_DEBUG
-#ifdef CONFIG_PM_SLEEP
- &dev_attr_async.attr,
-#endif
-#ifdef CONFIG_PM_RUNTIME
- &dev_attr_runtime_status.attr,
- &dev_attr_runtime_usage.attr,
- &dev_attr_runtime_active_kids.attr,
- &dev_attr_runtime_enabled.attr,
-#endif
-#endif /* CONFIG_PM_ADVANCED_DEBUG */
- NULL,
-};
-static struct attribute_group pm_attr_group = {
- .name = power_group_name,
- .attrs = power_attrs,
-};
-
-static struct attribute *wakeup_attrs[] = {
-#ifdef CONFIG_PM_SLEEP
- &dev_attr_wakeup.attr,
- &dev_attr_wakeup_count.attr,
- &dev_attr_wakeup_active_count.attr,
- &dev_attr_wakeup_abort_count.attr,
- &dev_attr_wakeup_expire_count.attr,
- &dev_attr_wakeup_active.attr,
- &dev_attr_wakeup_total_time_ms.attr,
- &dev_attr_wakeup_max_time_ms.attr,
- &dev_attr_wakeup_last_time_ms.attr,
-#ifdef CONFIG_PM_AUTOSLEEP
- &dev_attr_wakeup_prevent_sleep_time_ms.attr,
-#endif
-#endif
- NULL,
-};
-static struct attribute_group pm_wakeup_attr_group = {
- .name = power_group_name,
- .attrs = wakeup_attrs,
-};
-
-static struct attribute *runtime_attrs[] = {
-#ifdef CONFIG_PM_RUNTIME
-#ifndef CONFIG_PM_ADVANCED_DEBUG
- &dev_attr_runtime_status.attr,
-#endif
- &dev_attr_control.attr,
- &dev_attr_runtime_suspended_time.attr,
- &dev_attr_runtime_active_time.attr,
- &dev_attr_autosuspend_delay_ms.attr,
-#endif /* CONFIG_PM_RUNTIME */
- NULL,
-};
-static struct attribute_group pm_runtime_attr_group = {
- .name = power_group_name,
- .attrs = runtime_attrs,
-};
-
-static struct attribute *pm_qos_attrs[] = {
-#ifdef CONFIG_PM_RUNTIME
- &dev_attr_pm_qos_resume_latency_us.attr,
-#endif /* CONFIG_PM_RUNTIME */
- NULL,
-};
-static struct attribute_group pm_qos_attr_group = {
- .name = power_group_name,
- .attrs = pm_qos_attrs,
-};
-
-int dpm_sysfs_add(struct device *dev)
-{
- int rc;
-
- rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
- if (rc)
- return rc;
-
- if (pm_runtime_callbacks_present(dev)) {
- rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group);
- if (rc)
- goto err_out;
- }
-
- if (device_can_wakeup(dev)) {
- rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
- if (rc) {
- if (pm_runtime_callbacks_present(dev))
- sysfs_unmerge_group(&dev->kobj,
- &pm_runtime_attr_group);
- goto err_out;
- }
- }
- return 0;
-
- err_out:
- sysfs_remove_group(&dev->kobj, &pm_attr_group);
- return rc;
-}
-
-int wakeup_sysfs_add(struct device *dev)
-{
- return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
-}
-
-void wakeup_sysfs_remove(struct device *dev)
-{
- sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
-}
-
-int pm_qos_sysfs_add(struct device *dev)
-{
- return sysfs_merge_group(&dev->kobj, &pm_qos_attr_group);
-}
-
-void pm_qos_sysfs_remove(struct device *dev)
-{
- sysfs_unmerge_group(&dev->kobj, &pm_qos_attr_group);
-}
-
-void rpm_sysfs_remove(struct device *dev)
-{
- sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
-}
-
-void dpm_sysfs_remove(struct device *dev)
-{
- rpm_sysfs_remove(dev);
- sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
- sysfs_remove_group(&dev->kobj, &pm_attr_group);
-}
diff --git a/ANDROID_3.4.5/drivers/base/power/trace.c b/ANDROID_3.4.5/drivers/base/power/trace.c
deleted file mode 100644
index d94a1f51..00000000
--- a/ANDROID_3.4.5/drivers/base/power/trace.c
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * drivers/base/power/trace.c
- *
- * Copyright (C) 2006 Linus Torvalds
- *
- * Trace facility for suspend/resume problems, when none of the
- * devices may be working.
- */
-
-#include <linux/resume-trace.h>
-#include <linux/export.h>
-#include <linux/rtc.h>
-
-#include <asm/rtc.h>
-
-#include "power.h"
-
-/*
- * Horrid, horrid, horrid.
- *
- * It turns out that the _only_ piece of hardware that actually
- * keeps its value across a hard boot (and, more importantly, the
- * POST init sequence) is literally the realtime clock.
- *
- * Never mind that an RTC chip has 114 bytes (and often a whole
- * other bank of an additional 128 bytes) of nice SRAM that is
- * _designed_ to keep data - the POST will clear it. So we literally
- * can just use the few bytes of actual time data, which means that
- * we're really limited.
- *
- * It means, for example, that we can't use the seconds at all
- * (since the time between the hang and the boot might be more
- * than a minute), and we'd better not depend on the low bits of
- * the minutes either.
- *
- * There are the wday fields etc, but I wouldn't guarantee those
- * are dependable either. And if the date isn't valid, either the
- * hw or POST will do strange things.
- *
- * So we're left with:
- * - year: 0-99
- * - month: 0-11
- * - day-of-month: 1-28
- * - hour: 0-23
- * - min: (0-30)*2
- *
- * Giving us a total range of 0-16128000 (0xf61800), ie less
- * than 24 bits of actual data we can save across reboots.
- *
- * And if your box can't boot in less than three minutes,
- * you're screwed.
- *
- * Now, almost 24 bits of data is pitifully small, so we need
- * to be pretty dense if we want to use it for anything nice.
- * What we do is that instead of saving off nice readable info,
- * we save off _hashes_ of information that we can hopefully
- * regenerate after the reboot.
- *
- * In particular, this means that we might be unlucky, and hit
- * a case where we have a hash collision, and we end up not
- * being able to tell for certain exactly which case happened.
- * But that's hopefully unlikely.
- *
- * What we do is to take the bits we can fit, and split them
- * into three parts (16*997*1009 = 16095568), and use the values
- * for:
- * - 0-15: user-settable
- * - 0-996: file + line number
- * - 0-1008: device
- */
-#define USERHASH (16)
-#define FILEHASH (997)
-#define DEVHASH (1009)
-
-#define DEVSEED (7919)
-
-static unsigned int dev_hash_value;
-
-static int set_magic_time(unsigned int user, unsigned int file, unsigned int device)
-{
- unsigned int n = user + USERHASH*(file + FILEHASH*device);
-
- // June 7th, 2006
- static struct rtc_time time = {
- .tm_sec = 0,
- .tm_min = 0,
- .tm_hour = 0,
- .tm_mday = 7,
- .tm_mon = 5, // June - counting from zero
- .tm_year = 106,
- .tm_wday = 3,
- .tm_yday = 160,
- .tm_isdst = 1
- };
-
- time.tm_year = (n % 100);
- n /= 100;
- time.tm_mon = (n % 12);
- n /= 12;
- time.tm_mday = (n % 28) + 1;
- n /= 28;
- time.tm_hour = (n % 24);
- n /= 24;
- time.tm_min = (n % 20) * 3;
- n /= 20;
- set_rtc_time(&time);
- return n ? -1 : 0;
-}
-
-static unsigned int read_magic_time(void)
-{
- struct rtc_time time;
- unsigned int val;
-
- get_rtc_time(&time);
- pr_info("RTC time: %2d:%02d:%02d, date: %02d/%02d/%02d\n",
- time.tm_hour, time.tm_min, time.tm_sec,
- time.tm_mon + 1, time.tm_mday, time.tm_year % 100);
- val = time.tm_year; /* 100 years */
- if (val > 100)
- val -= 100;
- val += time.tm_mon * 100; /* 12 months */
- val += (time.tm_mday-1) * 100 * 12; /* 28 month-days */
- val += time.tm_hour * 100 * 12 * 28; /* 24 hours */
- val += (time.tm_min / 3) * 100 * 12 * 28 * 24; /* 20 3-minute intervals */
- return val;
-}
-
-/*
- * This is just the sdbm hash function with a user-supplied
- * seed and final size parameter.
- */
-static unsigned int hash_string(unsigned int seed, const char *data, unsigned int mod)
-{
- unsigned char c;
- while ((c = *data++) != 0) {
- seed = (seed << 16) + (seed << 6) - seed + c;
- }
- return seed % mod;
-}
-
-void set_trace_device(struct device *dev)
-{
- dev_hash_value = hash_string(DEVSEED, dev_name(dev), DEVHASH);
-}
-EXPORT_SYMBOL(set_trace_device);
-
-/*
- * We could just take the "tracedata" index into the .tracedata
- * section instead. Generating a hash of the data gives us a
- * chance to work across kernel versions, and perhaps more
- * importantly it also gives us valid/invalid check (ie we will
- * likely not give totally bogus reports - if the hash matches,
- * it's not any guarantee, but it's a high _likelihood_ that
- * the match is valid).
- */
-void generate_resume_trace(const void *tracedata, unsigned int user)
-{
- unsigned short lineno = *(unsigned short *)tracedata;
- const char *file = *(const char **)(tracedata + 2);
- unsigned int user_hash_value, file_hash_value;
-
- user_hash_value = user % USERHASH;
- file_hash_value = hash_string(lineno, file, FILEHASH);
- set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
-}
-EXPORT_SYMBOL(generate_resume_trace);
-
-extern char __tracedata_start, __tracedata_end;
-static int show_file_hash(unsigned int value)
-{
- int match;
- char *tracedata;
-
- match = 0;
- for (tracedata = &__tracedata_start ; tracedata < &__tracedata_end ;
- tracedata += 2 + sizeof(unsigned long)) {
- unsigned short lineno = *(unsigned short *)tracedata;
- const char *file = *(const char **)(tracedata + 2);
- unsigned int hash = hash_string(lineno, file, FILEHASH);
- if (hash != value)
- continue;
- pr_info(" hash matches %s:%u\n", file, lineno);
- match++;
- }
- return match;
-}
-
-static int show_dev_hash(unsigned int value)
-{
- int match = 0;
- struct list_head *entry;
-
- device_pm_lock();
- entry = dpm_list.prev;
- while (entry != &dpm_list) {
- struct device * dev = to_device(entry);
- unsigned int hash = hash_string(DEVSEED, dev_name(dev), DEVHASH);
- if (hash == value) {
- dev_info(dev, "hash matches\n");
- match++;
- }
- entry = entry->prev;
- }
- device_pm_unlock();
- return match;
-}
-
-static unsigned int hash_value_early_read;
-
-int show_trace_dev_match(char *buf, size_t size)
-{
- unsigned int value = hash_value_early_read / (USERHASH * FILEHASH);
- int ret = 0;
- struct list_head *entry;
-
- /*
- * It's possible that multiple devices will match the hash and we can't
- * tell which is the culprit, so it's best to output them all.
- */
- device_pm_lock();
- entry = dpm_list.prev;
- while (size && entry != &dpm_list) {
- struct device *dev = to_device(entry);
- unsigned int hash = hash_string(DEVSEED, dev_name(dev),
- DEVHASH);
- if (hash == value) {
- int len = snprintf(buf, size, "%s\n",
- dev_driver_string(dev));
- if (len > size)
- len = size;
- buf += len;
- ret += len;
- size -= len;
- }
- entry = entry->prev;
- }
- device_pm_unlock();
- return ret;
-}
-
-static int early_resume_init(void)
-{
- hash_value_early_read = read_magic_time();
- return 0;
-}
-
-static int late_resume_init(void)
-{
- unsigned int val = hash_value_early_read;
- unsigned int user, file, dev;
-
- user = val % USERHASH;
- val = val / USERHASH;
- file = val % FILEHASH;
- val = val / FILEHASH;
- dev = val /* % DEVHASH */;
-
- pr_info(" Magic number: %d:%d:%d\n", user, file, dev);
- show_file_hash(file);
- show_dev_hash(dev);
- return 0;
-}
-
-core_initcall(early_resume_init);
-late_initcall(late_resume_init);
diff --git a/ANDROID_3.4.5/drivers/base/power/wakeup.c b/ANDROID_3.4.5/drivers/base/power/wakeup.c
deleted file mode 100644
index ba06da42..00000000
--- a/ANDROID_3.4.5/drivers/base/power/wakeup.c
+++ /dev/null
@@ -1,997 +0,0 @@
-/*
- * drivers/base/power/wakeup.c - System wakeup events framework
- *
- * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- *
- * This file is released under the GPLv2.
- */
-
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/capability.h>
-#include <linux/export.h>
-#include <linux/suspend.h>
-#include <linux/seq_file.h>
-#include <linux/debugfs.h>
-#include <trace/events/power.h>
-
-#include "power.h"
-
-/*
- * If set, the suspend/hibernate code will abort transitions to a sleep state
- * if wakeup events are registered during or immediately before the transition.
- */
-bool events_check_enabled __read_mostly;
-
-/*
- * Combined counters of registered wakeup events and wakeup events in progress.
- * They need to be modified together atomically, so it's better to use one
- * atomic variable to hold them both.
- */
-static atomic_t combined_event_count = ATOMIC_INIT(0);
-
-#define IN_PROGRESS_BITS (sizeof(int) * 4)
-#define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
-
-static void split_counters(unsigned int *cnt, unsigned int *inpr)
-{
- unsigned int comb = atomic_read(&combined_event_count);
-
- *cnt = (comb >> IN_PROGRESS_BITS);
- *inpr = comb & MAX_IN_PROGRESS;
-}
-
-/* A preserved old value of the events counter. */
-static unsigned int saved_count;
-
-static DEFINE_SPINLOCK(events_lock);
-
-static void pm_wakeup_timer_fn(unsigned long data);
-
-static LIST_HEAD(wakeup_sources);
-
-static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
-
-/**
- * wakeup_source_prepare - Prepare a new wakeup source for initialization.
- * @ws: Wakeup source to prepare.
- * @name: Pointer to the name of the new wakeup source.
- *
- * Callers must ensure that the @name string won't be freed when @ws is still in
- * use.
- */
-void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
-{
- if (ws) {
- memset(ws, 0, sizeof(*ws));
- ws->name = name;
- }
-}
-EXPORT_SYMBOL_GPL(wakeup_source_prepare);
-
-/**
- * wakeup_source_create - Create a struct wakeup_source object.
- * @name: Name of the new wakeup source.
- */
-struct wakeup_source *wakeup_source_create(const char *name)
-{
- struct wakeup_source *ws;
-
- ws = kmalloc(sizeof(*ws), GFP_KERNEL);
- if (!ws)
- return NULL;
-
- wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL);
- return ws;
-}
-EXPORT_SYMBOL_GPL(wakeup_source_create);
-
-/**
- * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
- * @ws: Wakeup source to prepare for destruction.
- *
- * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
- * be run in parallel with this function for the same wakeup source object.
- */
-void wakeup_source_drop(struct wakeup_source *ws)
-{
- if (!ws)
- return;
-
- del_timer_sync(&ws->timer);
- __pm_relax(ws);
-}
-EXPORT_SYMBOL_GPL(wakeup_source_drop);
-
-/**
- * wakeup_source_destroy - Destroy a struct wakeup_source object.
- * @ws: Wakeup source to destroy.
- *
- * Use only for wakeup source objects created with wakeup_source_create().
- */
-void wakeup_source_destroy(struct wakeup_source *ws)
-{
- if (!ws)
- return;
-
- wakeup_source_drop(ws);
- kfree(ws->name);
- kfree(ws);
-}
-EXPORT_SYMBOL_GPL(wakeup_source_destroy);
-
-/**
- * wakeup_source_add - Add given object to the list of wakeup sources.
- * @ws: Wakeup source object to add to the list.
- */
-void wakeup_source_add(struct wakeup_source *ws)
-{
- unsigned long flags;
-
- if (WARN_ON(!ws))
- return;
-
- spin_lock_init(&ws->lock);
- setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
- ws->active = false;
- ws->last_time = ktime_get();
-
- spin_lock_irqsave(&events_lock, flags);
- list_add_rcu(&ws->entry, &wakeup_sources);
- spin_unlock_irqrestore(&events_lock, flags);
-}
-EXPORT_SYMBOL_GPL(wakeup_source_add);
-
-/**
- * wakeup_source_remove - Remove given object from the wakeup sources list.
- * @ws: Wakeup source object to remove from the list.
- */
-void wakeup_source_remove(struct wakeup_source *ws)
-{
- unsigned long flags;
-
- if (WARN_ON(!ws))
- return;
-
- spin_lock_irqsave(&events_lock, flags);
- list_del_rcu(&ws->entry);
- spin_unlock_irqrestore(&events_lock, flags);
- synchronize_rcu();
-}
-EXPORT_SYMBOL_GPL(wakeup_source_remove);
-
-/**
- * wakeup_source_register - Create wakeup source and add it to the list.
- * @name: Name of the wakeup source to register.
- */
-struct wakeup_source *wakeup_source_register(const char *name)
-{
- struct wakeup_source *ws;
-
- ws = wakeup_source_create(name);
- if (ws)
- wakeup_source_add(ws);
-
- return ws;
-}
-EXPORT_SYMBOL_GPL(wakeup_source_register);
-
-/**
- * wakeup_source_unregister - Remove wakeup source from the list and remove it.
- * @ws: Wakeup source object to unregister.
- */
-void wakeup_source_unregister(struct wakeup_source *ws)
-{
- if (ws) {
- wakeup_source_remove(ws);
- wakeup_source_destroy(ws);
- }
-}
-EXPORT_SYMBOL_GPL(wakeup_source_unregister);
-
-/**
- * device_wakeup_attach - Attach a wakeup source object to a device object.
- * @dev: Device to handle.
- * @ws: Wakeup source object to attach to @dev.
- *
- * This causes @dev to be treated as a wakeup device.
- */
-static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
-{
- spin_lock_irq(&dev->power.lock);
- if (dev->power.wakeup) {
- spin_unlock_irq(&dev->power.lock);
- return -EEXIST;
- }
- dev->power.wakeup = ws;
- spin_unlock_irq(&dev->power.lock);
- return 0;
-}
-
-/**
- * device_wakeup_enable - Enable given device to be a wakeup source.
- * @dev: Device to handle.
- *
- * Create a wakeup source object, register it and attach it to @dev.
- */
-int device_wakeup_enable(struct device *dev)
-{
- struct wakeup_source *ws;
- int ret;
-
- if (!dev || !dev->power.can_wakeup)
- return -EINVAL;
-
- ws = wakeup_source_register(dev_name(dev));
- if (!ws)
- return -ENOMEM;
-
- ret = device_wakeup_attach(dev, ws);
- if (ret)
- wakeup_source_unregister(ws);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(device_wakeup_enable);
-
-/**
- * device_wakeup_detach - Detach a device's wakeup source object from it.
- * @dev: Device to detach the wakeup source object from.
- *
- * After it returns, @dev will not be treated as a wakeup device any more.
- */
-static struct wakeup_source *device_wakeup_detach(struct device *dev)
-{
- struct wakeup_source *ws;
-
- spin_lock_irq(&dev->power.lock);
- ws = dev->power.wakeup;
- dev->power.wakeup = NULL;
- spin_unlock_irq(&dev->power.lock);
- return ws;
-}
-
-/**
- * device_wakeup_disable - Do not regard a device as a wakeup source any more.
- * @dev: Device to handle.
- *
- * Detach the @dev's wakeup source object from it, unregister this wakeup source
- * object and destroy it.
- */
-int device_wakeup_disable(struct device *dev)
-{
- struct wakeup_source *ws;
-
- if (!dev || !dev->power.can_wakeup)
- return -EINVAL;
-
- ws = device_wakeup_detach(dev);
- if (ws)
- wakeup_source_unregister(ws);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(device_wakeup_disable);
-
-/**
- * device_set_wakeup_capable - Set/reset device wakeup capability flag.
- * @dev: Device to handle.
- * @capable: Whether or not @dev is capable of waking up the system from sleep.
- *
- * If @capable is set, set the @dev's power.can_wakeup flag and add its
- * wakeup-related attributes to sysfs. Otherwise, unset the @dev's
- * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
- *
- * This function may sleep and it can't be called from any context where
- * sleeping is not allowed.
- */
-void device_set_wakeup_capable(struct device *dev, bool capable)
-{
- if (!!dev->power.can_wakeup == !!capable)
- return;
-
- if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
- if (capable) {
- if (wakeup_sysfs_add(dev))
- return;
- } else {
- wakeup_sysfs_remove(dev);
- }
- }
- dev->power.can_wakeup = capable;
-}
-EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
-
-/**
- * device_init_wakeup - Device wakeup initialization.
- * @dev: Device to handle.
- * @enable: Whether or not to enable @dev as a wakeup device.
- *
- * By default, most devices should leave wakeup disabled. The exceptions are
- * devices that everyone expects to be wakeup sources: keyboards, power buttons,
- * possibly network interfaces, etc. Also, devices that don't generate their
- * own wakeup requests but merely forward requests from one bus to another
- * (like PCI bridges) should have wakeup enabled by default.
- */
-int device_init_wakeup(struct device *dev, bool enable)
-{
- int ret = 0;
-
- if (enable) {
- device_set_wakeup_capable(dev, true);
- ret = device_wakeup_enable(dev);
- } else {
- device_set_wakeup_capable(dev, false);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(device_init_wakeup);
-
-/**
- * device_set_wakeup_enable - Enable or disable a device to wake up the system.
- * @dev: Device to handle.
- */
-int device_set_wakeup_enable(struct device *dev, bool enable)
-{
- if (!dev || !dev->power.can_wakeup)
- return -EINVAL;
-
- return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
-}
-EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
-
-/*
- * The functions below use the observation that each wakeup event starts a
- * period in which the system should not be suspended. The moment this period
- * will end depends on how the wakeup event is going to be processed after being
- * detected and all of the possible cases can be divided into two distinct
- * groups.
- *
- * First, a wakeup event may be detected by the same functional unit that will
- * carry out the entire processing of it and possibly will pass it to user space
- * for further processing. In that case the functional unit that has detected
- * the event may later "close" the "no suspend" period associated with it
- * directly as soon as it has been dealt with. The pair of pm_stay_awake() and
- * pm_relax(), balanced with each other, is supposed to be used in such
- * situations.
- *
- * Second, a wakeup event may be detected by one functional unit and processed
- * by another one. In that case the unit that has detected it cannot really
- * "close" the "no suspend" period associated with it, unless it knows in
- * advance what's going to happen to the event during processing. This
- * knowledge, however, may not be available to it, so it can simply specify time
- * to wait before the system can be suspended and pass it as the second
- * argument of pm_wakeup_event().
- *
- * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
- * "no suspend" period will be ended either by the pm_relax(), or by the timer
- * function executed when the timer expires, whichever comes first.
- */
-
-/**
- * wakup_source_activate - Mark given wakeup source as active.
- * @ws: Wakeup source to handle.
- *
- * Update the @ws' statistics and, if @ws has just been activated, notify the PM
- * core of the event by incrementing the counter of of wakeup events being
- * processed.
- */
-static void wakeup_source_activate(struct wakeup_source *ws)
-{
- unsigned int cec;
-
- ws->active = true;
- ws->active_count++;
- ws->last_time = ktime_get();
- if (ws->autosleep_enabled)
- ws->start_prevent_time = ws->last_time;
-
- /* Increment the counter of events in progress. */
- cec = atomic_inc_return(&combined_event_count);
-
- trace_wakeup_source_activate(ws->name, cec);
-}
-
-/**
- * wakeup_source_report_event - Report wakeup event using the given source.
- * @ws: Wakeup source to report the event for.
- */
-static void wakeup_source_report_event(struct wakeup_source *ws)
-{
- ws->event_count++;
- /* This is racy, but the counter is approximate anyway. */
- if (events_check_enabled)
- ws->wakeup_count++;
-
- if (!ws->active)
- wakeup_source_activate(ws);
-}
-
-/**
- * __pm_stay_awake - Notify the PM core of a wakeup event.
- * @ws: Wakeup source object associated with the source of the event.
- *
- * It is safe to call this function from interrupt context.
- */
-
-
-/*add by kevin ,for wakeup lock debug.default is uncompile
-usage:
- 1: #define WAKE_TRACE_ENABLE 1
- 2: add wake_trace_show(); in kernel/power/main.c/state_show()
- 3: cat /sys/power/state
-*/
-#define WAKE_TRACE_ENABLE 0
-
-#if WAKE_TRACE_ENABLE
-#define WAKE_TRACE_NUMBER 100
-typedef struct{
- char name[64];
- int val;
-}WAKE_TRACE;
-WAKE_TRACE wake_trace[WAKE_TRACE_NUMBER];
-int wake_trace_init=0;
-
-void wake_trace_lock(struct wakeup_source *ws){
- int i;
- if(wake_trace_init==0){
- wake_trace_init = 1;
- memset(wake_trace,0,sizeof(wake_trace));
- }
- for(i=0;i<WAKE_TRACE_NUMBER;i++){
- if((strlen(wake_trace[i].name)>0&&!strcmp(wake_trace[i].name,ws->name)))
- break;
- if(strlen(wake_trace[i].name)<=0)
- break;
- }
- if(i>=WAKE_TRACE_NUMBER)
- printk("%s %d %s error\n",__func__,__LINE__,ws->name);
- else{
- if(strlen(wake_trace[i].name)<=0){
- printk("%s %d insert %s\n",__func__,__LINE__,ws->name);
- strcpy(wake_trace[i].name,ws->name);
- }
- wake_trace[i].val++;
- }
-}
-void wake_trace_unlock(struct wakeup_source *ws){
- int i;
- for(i=0;i<WAKE_TRACE_NUMBER;i++){
- if((strlen(wake_trace[i].name)>0&&!strcmp(wake_trace[i].name,ws->name)))
- break;
- }
- if(i>=WAKE_TRACE_NUMBER)
- printk("%s %d %s error\n",__func__,__LINE__,ws->name);
- else{
- wake_trace[i].val--;
- }
-}
-
-int wake_trace_show(void)
-{
- int i;
- for(i=0;i<WAKE_TRACE_NUMBER;i++){
- if(strlen(wake_trace[i].name)>0)
- printk("%d %s %d\n",i,wake_trace[i].name,wake_trace[i].val);
- }
-}
-EXPORT_SYMBOL_GPL(wake_trace_show);
-#endif
-void __pm_stay_awake(struct wakeup_source *ws)
-{
- unsigned long flags;
-
- if (!ws)
- return;
-
- spin_lock_irqsave(&ws->lock, flags);
-
-#if WAKE_TRACE_ENABLE
- wake_trace_lock(ws);
-#endif
-
- wakeup_source_report_event(ws);
- del_timer(&ws->timer);
- ws->timer_expires = 0;
-
- spin_unlock_irqrestore(&ws->lock, flags);
-}
-EXPORT_SYMBOL_GPL(__pm_stay_awake);
-
-/**
- * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
- * @dev: Device the wakeup event is related to.
- *
- * Notify the PM core of a wakeup event (signaled by @dev) by calling
- * __pm_stay_awake for the @dev's wakeup source object.
- *
- * Call this function after detecting of a wakeup event if pm_relax() is going
- * to be called directly after processing the event (and possibly passing it to
- * user space for further processing).
- */
-void pm_stay_awake(struct device *dev)
-{
- unsigned long flags;
-
- if (!dev)
- return;
-
- spin_lock_irqsave(&dev->power.lock, flags);
- __pm_stay_awake(dev->power.wakeup);
- spin_unlock_irqrestore(&dev->power.lock, flags);
-}
-EXPORT_SYMBOL_GPL(pm_stay_awake);
-
-#ifdef CONFIG_PM_AUTOSLEEP
-static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
-{
- ktime_t delta = ktime_sub(now, ws->start_prevent_time);
- ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
-}
-#else
-static inline void update_prevent_sleep_time(struct wakeup_source *ws,
- ktime_t now) {}
-#endif
-
-/**
- * wakup_source_deactivate - Mark given wakeup source as inactive.
- * @ws: Wakeup source to handle.
- *
- * Update the @ws' statistics and notify the PM core that the wakeup source has
- * become inactive by decrementing the counter of wakeup events being processed
- * and incrementing the counter of registered wakeup events.
- */
-static void wakeup_source_deactivate(struct wakeup_source *ws)
-{
- unsigned int cnt, inpr, cec;
- ktime_t duration;
- ktime_t now;
-
- ws->relax_count++;
- /*
- * __pm_relax() may be called directly or from a timer function.
- * If it is called directly right after the timer function has been
- * started, but before the timer function calls __pm_relax(), it is
- * possible that __pm_stay_awake() will be called in the meantime and
- * will set ws->active. Then, ws->active may be cleared immediately
- * by the __pm_relax() called from the timer function, but in such a
- * case ws->relax_count will be different from ws->active_count.
- */
- if (ws->relax_count != ws->active_count) {
- ws->relax_count--;
- return;
- }
-
- ws->active = false;
-
- now = ktime_get();
- duration = ktime_sub(now, ws->last_time);
- ws->total_time = ktime_add(ws->total_time, duration);
- if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
- ws->max_time = duration;
-
- ws->last_time = now;
- del_timer(&ws->timer);
- ws->timer_expires = 0;
-
- if (ws->autosleep_enabled)
- update_prevent_sleep_time(ws, now);
-
- /*
- * Increment the counter of registered wakeup events and decrement the
- * couter of wakeup events in progress simultaneously.
- */
- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
- trace_wakeup_source_deactivate(ws->name, cec);
-
- split_counters(&cnt, &inpr);
- if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
- wake_up(&wakeup_count_wait_queue);
-}
-
-/**
- * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
- * @ws: Wakeup source object associated with the source of the event.
- *
- * Call this function for wakeup events whose processing started with calling
- * __pm_stay_awake().
- *
- * It is safe to call it from interrupt context.
- */
-void __pm_relax(struct wakeup_source *ws)
-{
- unsigned long flags;
-
- if (!ws)
- return;
-
- spin_lock_irqsave(&ws->lock, flags);
-#if WAKE_TRACE_ENABLE
- wake_trace_unlock(ws);
-#endif
- if (ws->active)
- wakeup_source_deactivate(ws);
- spin_unlock_irqrestore(&ws->lock, flags);
-}
-EXPORT_SYMBOL_GPL(__pm_relax);
-
-/**
- * pm_relax - Notify the PM core that processing of a wakeup event has ended.
- * @dev: Device that signaled the event.
- *
- * Execute __pm_relax() for the @dev's wakeup source object.
- */
-void pm_relax(struct device *dev)
-{
- unsigned long flags;
-
- if (!dev)
- return;
-
- spin_lock_irqsave(&dev->power.lock, flags);
- __pm_relax(dev->power.wakeup);
- spin_unlock_irqrestore(&dev->power.lock, flags);
-}
-EXPORT_SYMBOL_GPL(pm_relax);
-
-/**
- * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
- * @data: Address of the wakeup source object associated with the event source.
- *
- * Call wakeup_source_deactivate() for the wakeup source whose address is stored
- * in @data if it is currently active and its timer has not been canceled and
- * the expiration time of the timer is not in future.
- */
-static void pm_wakeup_timer_fn(unsigned long data)
-{
- struct wakeup_source *ws = (struct wakeup_source *)data;
- unsigned long flags;
-
- spin_lock_irqsave(&ws->lock, flags);
-
- if (ws->active && ws->timer_expires
- && time_after_eq(jiffies, ws->timer_expires)) {
- wakeup_source_deactivate(ws);
- ws->expire_count++;
- }
-
- spin_unlock_irqrestore(&ws->lock, flags);
-}
-
-/**
- * __pm_wakeup_event - Notify the PM core of a wakeup event.
- * @ws: Wakeup source object associated with the event source.
- * @msec: Anticipated event processing time (in milliseconds).
- *
- * Notify the PM core of a wakeup event whose source is @ws that will take
- * approximately @msec milliseconds to be processed by the kernel. If @ws is
- * not active, activate it. If @msec is nonzero, set up the @ws' timer to
- * execute pm_wakeup_timer_fn() in future.
- *
- * It is safe to call this function from interrupt context.
- */
-void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
-{
- unsigned long flags;
- unsigned long expires;
-
- if (!ws)
- return;
-
- spin_lock_irqsave(&ws->lock, flags);
-
-#if WAKE_TRACE_ENABLE
- wake_trace_lock(ws);
-#endif
-
- wakeup_source_report_event(ws);
-
- if (!msec) {
- wakeup_source_deactivate(ws);
- goto unlock;
- }
-
- expires = jiffies + msecs_to_jiffies(msec);
- if (!expires)
- expires = 1;
-
- if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
- mod_timer(&ws->timer, expires);
- ws->timer_expires = expires;
- }
-
- unlock:
- spin_unlock_irqrestore(&ws->lock, flags);
-}
-EXPORT_SYMBOL_GPL(__pm_wakeup_event);
-
-
-/**
- * pm_wakeup_event - Notify the PM core of a wakeup event.
- * @dev: Device the wakeup event is related to.
- * @msec: Anticipated event processing time (in milliseconds).
- *
- * Call __pm_wakeup_event() for the @dev's wakeup source object.
- */
-void pm_wakeup_event(struct device *dev, unsigned int msec)
-{
- unsigned long flags;
-
- if (!dev)
- return;
-
- spin_lock_irqsave(&dev->power.lock, flags);
- __pm_wakeup_event(dev->power.wakeup, msec);
- spin_unlock_irqrestore(&dev->power.lock, flags);
-}
-EXPORT_SYMBOL_GPL(pm_wakeup_event);
-
-extern int wmt_getsyspara(char *varname, unsigned char *varval, int *varlen);
-
-static int is_rda5991(void){
- int retval;
- unsigned char buf[80];
- int varlen = 80;
-
-
-
- memset(buf,0,sizeof(buf));
- varlen = 80;
- retval = wmt_getsyspara("wmt.init.rc", buf, &varlen);
- if (retval == 0) {
- if (!strcmp(buf, "init.rda5991.rc"))
- {
- printk("is rda5991\n");
- return 1;
- }
-
- }
- return 0;
-
-}
-
-static void print_active_wakeup_sources(void)
-{
- struct wakeup_source *ws;
- int active = 0;
- struct wakeup_source *last_activity_ws = NULL;
- printk("\n...in %s\n",__FUNCTION__);
- if(is_rda5991()){
- printk("skip!\n");
- return;
- }
- rcu_read_lock();
- list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
- //kevin add for null pointer
- if(!ws)
- break;
- if (ws->active) {
- printk("active wakeup source: %s\n", ws->name);
- active = 1;
- } else if (!active &&
- (!last_activity_ws ||
- ktime_to_ns(ws->last_time) >
- ktime_to_ns(last_activity_ws->last_time))) {
- last_activity_ws = ws;
- }
- }
- if (!active && last_activity_ws)
- printk("last active wakeup source: %s\n",
- last_activity_ws->name);
- rcu_read_unlock();
-}
-
-/**
- * pm_wakeup_pending - Check if power transition in progress should be aborted.
- *
- * Compare the current number of registered wakeup events with its preserved
- * value from the past and return true if new wakeup events have been registered
- * since the old value was stored. Also return true if the current number of
- * wakeup events being processed is different from zero.
- */
-bool pm_wakeup_pending(void)
-{
- unsigned long flags;
- bool ret = false;
-
- spin_lock_irqsave(&events_lock, flags);
- if (events_check_enabled) {
- unsigned int cnt, inpr;
-
- split_counters(&cnt, &inpr);
- ret = (cnt != saved_count || inpr > 0);
- events_check_enabled = !ret;
- }
- spin_unlock_irqrestore(&events_lock, flags);
-
- if (ret)
- print_active_wakeup_sources();
-
- return ret;
-}
-
-/**
- * pm_get_wakeup_count - Read the number of registered wakeup events.
- * @count: Address to store the value at.
- * @block: Whether or not to block.
- *
- * Store the number of registered wakeup events at the address in @count. If
- * @block is set, block until the current number of wakeup events being
- * processed is zero.
- *
- * Return 'false' if the current number of wakeup events being processed is
- * nonzero. Otherwise return 'true'.
- */
-bool pm_get_wakeup_count(unsigned int *count, bool block)
-{
- unsigned int cnt, inpr;
-
- if (block) {
- DEFINE_WAIT(wait);
-
- for (;;) {
- prepare_to_wait(&wakeup_count_wait_queue, &wait,
- TASK_INTERRUPTIBLE);
- split_counters(&cnt, &inpr);
- if (inpr == 0 || signal_pending(current))
- break;
-
- schedule();
- }
- finish_wait(&wakeup_count_wait_queue, &wait);
- }
-
- split_counters(&cnt, &inpr);
- *count = cnt;
- return !inpr;
-}
-
-/**
- * pm_save_wakeup_count - Save the current number of registered wakeup events.
- * @count: Value to compare with the current number of registered wakeup events.
- *
- * If @count is equal to the current number of registered wakeup events and the
- * current number of wakeup events being processed is zero, store @count as the
- * old number of registered wakeup events for pm_check_wakeup_events(), enable
- * wakeup events detection and return 'true'. Otherwise disable wakeup events
- * detection and return 'false'.
- */
-bool pm_save_wakeup_count(unsigned int count)
-{
- unsigned int cnt, inpr;
- unsigned long flags;
-
- events_check_enabled = false;
- spin_lock_irqsave(&events_lock, flags);
- split_counters(&cnt, &inpr);
- if (cnt == count && inpr == 0) {
- saved_count = count;
- events_check_enabled = true;
- }
- spin_unlock_irqrestore(&events_lock, flags);
- return events_check_enabled;
-}
-
-#ifdef CONFIG_PM_AUTOSLEEP
-/**
- * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
- * @enabled: Whether to set or to clear the autosleep_enabled flags.
- */
-void pm_wakep_autosleep_enabled(bool set)
-{
- struct wakeup_source *ws;
- ktime_t now = ktime_get();
-
- rcu_read_lock();
- list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
- spin_lock_irq(&ws->lock);
- if (ws->autosleep_enabled != set) {
- ws->autosleep_enabled = set;
- if (ws->active) {
- if (set)
- ws->start_prevent_time = now;
- else
- update_prevent_sleep_time(ws, now);
- }
- }
- spin_unlock_irq(&ws->lock);
- }
- rcu_read_unlock();
-}
-#endif /* CONFIG_PM_AUTOSLEEP */
-
-static struct dentry *wakeup_sources_stats_dentry;
-
-/**
- * print_wakeup_source_stats - Print wakeup source statistics information.
- * @m: seq_file to print the statistics into.
- * @ws: Wakeup source object to print the statistics for.
- */
-static int print_wakeup_source_stats(struct seq_file *m,
- struct wakeup_source *ws)
-{
- unsigned long flags;
- ktime_t total_time;
- ktime_t max_time;
- unsigned long active_count;
- ktime_t active_time;
- ktime_t prevent_sleep_time;
- int ret;
-
- spin_lock_irqsave(&ws->lock, flags);
-
- total_time = ws->total_time;
- max_time = ws->max_time;
- prevent_sleep_time = ws->prevent_sleep_time;
- active_count = ws->active_count;
- if (ws->active) {
- ktime_t now = ktime_get();
-
- active_time = ktime_sub(now, ws->last_time);
- total_time = ktime_add(total_time, active_time);
- if (active_time.tv64 > max_time.tv64)
- max_time = active_time;
-
- if (ws->autosleep_enabled)
- prevent_sleep_time = ktime_add(prevent_sleep_time,
- ktime_sub(now, ws->start_prevent_time));
- } else {
- active_time = ktime_set(0, 0);
- }
-
- ret = seq_printf(m, "%-20s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t"
- "%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
- ws->name, active_count, ws->event_count,
- ws->wakeup_count, ws->expire_count,
- ktime_to_ms(active_time), ktime_to_ms(total_time),
- ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
- ktime_to_ms(prevent_sleep_time));
-
- spin_unlock_irqrestore(&ws->lock, flags);
-
- return ret;
-}
-
-/**
- * wakeup_sources_stats_show - Print wakeup sources statistics information.
- * @m: seq_file to print the statistics into.
- */
-static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
-{
- struct wakeup_source *ws;
-
- seq_puts(m, "name\t\t\tactive_count\tevent_count\twakeup_count\t"
- "expire_count\tactive_since\ttotal_time\tmax_time\t"
- "last_change\tprevent_suspend_time\n");
-
- rcu_read_lock();
- list_for_each_entry_rcu(ws, &wakeup_sources, entry)
- print_wakeup_source_stats(m, ws);
- rcu_read_unlock();
-
- return 0;
-}
-
-static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, wakeup_sources_stats_show, NULL);
-}
-
-static const struct file_operations wakeup_sources_stats_fops = {
- .owner = THIS_MODULE,
- .open = wakeup_sources_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int __init wakeup_sources_debugfs_init(void)
-{
- wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
- S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
- return 0;
-}
-
-postcore_initcall(wakeup_sources_debugfs_init);
diff --git a/ANDROID_3.4.5/drivers/base/regmap/Kconfig b/ANDROID_3.4.5/drivers/base/regmap/Kconfig
deleted file mode 100644
index 0f6c7fb4..00000000
--- a/ANDROID_3.4.5/drivers/base/regmap/Kconfig
+++ /dev/null
@@ -1,18 +0,0 @@
-# Generic register map support. There are no user servicable options here,
-# this is an API intended to be used by other kernel subsystems. These
-# subsystems should select the appropriate symbols.
-
-config REGMAP
- default y if (REGMAP_I2C || REGMAP_SPI)
- select LZO_COMPRESS
- select LZO_DECOMPRESS
- bool
-
-config REGMAP_I2C
- tristate
-
-config REGMAP_SPI
- tristate
-
-config REGMAP_IRQ
- bool
diff --git a/ANDROID_3.4.5/drivers/base/regmap/Makefile b/ANDROID_3.4.5/drivers/base/regmap/Makefile
deleted file mode 100644
index defd5796..00000000
--- a/ANDROID_3.4.5/drivers/base/regmap/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-obj-$(CONFIG_REGMAP) += regmap.o regcache.o
-obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o
-obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
-obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
-obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
-obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
diff --git a/ANDROID_3.4.5/drivers/base/regmap/internal.h b/ANDROID_3.4.5/drivers/base/regmap/internal.h
deleted file mode 100644
index fcafc5b2..00000000
--- a/ANDROID_3.4.5/drivers/base/regmap/internal.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Register map access API internal header
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _REGMAP_INTERNAL_H
-#define _REGMAP_INTERNAL_H
-
-#include <linux/regmap.h>
-#include <linux/fs.h>
-
-struct regmap;
-struct regcache_ops;
-
-struct regmap_format {
- size_t buf_size;
- size_t reg_bytes;
- size_t pad_bytes;
- size_t val_bytes;
- void (*format_write)(struct regmap *map,
- unsigned int reg, unsigned int val);
- void (*format_reg)(void *buf, unsigned int reg);
- void (*format_val)(void *buf, unsigned int val);
- unsigned int (*parse_val)(void *buf);
-};
-
-struct regmap {
- struct mutex lock;
-
- struct device *dev; /* Device we do I/O on */
- void *work_buf; /* Scratch buffer used to format I/O */
- struct regmap_format format; /* Buffer format */
- const struct regmap_bus *bus;
-
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs;
-#endif
-
- unsigned int max_register;
- bool (*writeable_reg)(struct device *dev, unsigned int reg);
- bool (*readable_reg)(struct device *dev, unsigned int reg);
- bool (*volatile_reg)(struct device *dev, unsigned int reg);
- bool (*precious_reg)(struct device *dev, unsigned int reg);
-
- u8 read_flag_mask;
- u8 write_flag_mask;
-
- /* regcache specific members */
- const struct regcache_ops *cache_ops;
- enum regcache_type cache_type;
-
- /* number of bytes in reg_defaults_raw */
- unsigned int cache_size_raw;
- /* number of bytes per word in reg_defaults_raw */
- unsigned int cache_word_size;
- /* number of entries in reg_defaults */
- unsigned int num_reg_defaults;
- /* number of entries in reg_defaults_raw */
- unsigned int num_reg_defaults_raw;
-
- /* if set, only the cache is modified not the HW */
- u32 cache_only;
- /* if set, only the HW is modified not the cache */
- u32 cache_bypass;
- /* if set, remember to free reg_defaults_raw */
- bool cache_free;
-
- struct reg_default *reg_defaults;
- const void *reg_defaults_raw;
- void *cache;
- u32 cache_dirty;
-
- struct reg_default *patch;
- int patch_regs;
-};
-
-struct regcache_ops {
- const char *name;
- enum regcache_type type;
- int (*init)(struct regmap *map);
- int (*exit)(struct regmap *map);
- int (*read)(struct regmap *map, unsigned int reg, unsigned int *value);
- int (*write)(struct regmap *map, unsigned int reg, unsigned int value);
- int (*sync)(struct regmap *map, unsigned int min, unsigned int max);
-};
-
-bool regmap_writeable(struct regmap *map, unsigned int reg);
-bool regmap_readable(struct regmap *map, unsigned int reg);
-bool regmap_volatile(struct regmap *map, unsigned int reg);
-bool regmap_precious(struct regmap *map, unsigned int reg);
-
-int _regmap_write(struct regmap *map, unsigned int reg,
- unsigned int val);
-
-#ifdef CONFIG_DEBUG_FS
-extern void regmap_debugfs_initcall(void);
-extern void regmap_debugfs_init(struct regmap *map);
-extern void regmap_debugfs_exit(struct regmap *map);
-#else
-static inline void regmap_debugfs_initcall(void) { }
-static inline void regmap_debugfs_init(struct regmap *map) { }
-static inline void regmap_debugfs_exit(struct regmap *map) { }
-#endif
-
-/* regcache core declarations */
-int regcache_init(struct regmap *map, const struct regmap_config *config);
-void regcache_exit(struct regmap *map);
-int regcache_read(struct regmap *map,
- unsigned int reg, unsigned int *value);
-int regcache_write(struct regmap *map,
- unsigned int reg, unsigned int value);
-int regcache_sync(struct regmap *map);
-
-unsigned int regcache_get_val(const void *base, unsigned int idx,
- unsigned int word_size);
-bool regcache_set_val(void *base, unsigned int idx,
- unsigned int val, unsigned int word_size);
-int regcache_lookup_reg(struct regmap *map, unsigned int reg);
-
-extern struct regcache_ops regcache_rbtree_ops;
-extern struct regcache_ops regcache_lzo_ops;
-
-#endif
diff --git a/ANDROID_3.4.5/drivers/base/regmap/regcache-lzo.c b/ANDROID_3.4.5/drivers/base/regmap/regcache-lzo.c
deleted file mode 100644
index 483b06d4..00000000
--- a/ANDROID_3.4.5/drivers/base/regmap/regcache-lzo.c
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
- * Register cache access API - LZO caching support
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/lzo.h>
-
-#include "internal.h"
-
-static int regcache_lzo_exit(struct regmap *map);
-
-struct regcache_lzo_ctx {
- void *wmem;
- void *dst;
- const void *src;
- size_t src_len;
- size_t dst_len;
- size_t decompressed_size;
- unsigned long *sync_bmp;
- int sync_bmp_nbits;
-};
-
-#define LZO_BLOCK_NUM 8
-static int regcache_lzo_block_count(struct regmap *map)
-{
- return LZO_BLOCK_NUM;
-}
-
-static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx)
-{
- lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
- if (!lzo_ctx->wmem)
- return -ENOMEM;
- return 0;
-}
-
-static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx)
-{
- size_t compress_size;
- int ret;
-
- ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
- lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
- if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
- return -EINVAL;
- lzo_ctx->dst_len = compress_size;
- return 0;
-}
-
-static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx)
-{
- size_t dst_len;
- int ret;
-
- dst_len = lzo_ctx->dst_len;
- ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
- lzo_ctx->dst, &dst_len);
- if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
- return -EINVAL;
- return 0;
-}
-
-static int regcache_lzo_compress_cache_block(struct regmap *map,
- struct regcache_lzo_ctx *lzo_ctx)
-{
- int ret;
-
- lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
- lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
- if (!lzo_ctx->dst) {
- lzo_ctx->dst_len = 0;
- return -ENOMEM;
- }
-
- ret = regcache_lzo_compress(lzo_ctx);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-static int regcache_lzo_decompress_cache_block(struct regmap *map,
- struct regcache_lzo_ctx *lzo_ctx)
-{
- int ret;
-
- lzo_ctx->dst_len = lzo_ctx->decompressed_size;
- lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
- if (!lzo_ctx->dst) {
- lzo_ctx->dst_len = 0;
- return -ENOMEM;
- }
-
- ret = regcache_lzo_decompress(lzo_ctx);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-static inline int regcache_lzo_get_blkindex(struct regmap *map,
- unsigned int reg)
-{
- return (reg * map->cache_word_size) /
- DIV_ROUND_UP(map->cache_size_raw,
- regcache_lzo_block_count(map));
-}
-
-static inline int regcache_lzo_get_blkpos(struct regmap *map,
- unsigned int reg)
-{
- return reg % (DIV_ROUND_UP(map->cache_size_raw,
- regcache_lzo_block_count(map)) /
- map->cache_word_size);
-}
-
-static inline int regcache_lzo_get_blksize(struct regmap *map)
-{
- return DIV_ROUND_UP(map->cache_size_raw,
- regcache_lzo_block_count(map));
-}
-
-static int regcache_lzo_init(struct regmap *map)
-{
- struct regcache_lzo_ctx **lzo_blocks;
- size_t bmp_size;
- int ret, i, blksize, blkcount;
- const char *p, *end;
- unsigned long *sync_bmp;
-
- ret = 0;
-
- blkcount = regcache_lzo_block_count(map);
- map->cache = kzalloc(blkcount * sizeof *lzo_blocks,
- GFP_KERNEL);
- if (!map->cache)
- return -ENOMEM;
- lzo_blocks = map->cache;
-
- /*
- * allocate a bitmap to be used when syncing the cache with
- * the hardware. Each time a register is modified, the corresponding
- * bit is set in the bitmap, so we know that we have to sync
- * that register.
- */
- bmp_size = map->num_reg_defaults_raw;
- sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long),
- GFP_KERNEL);
- if (!sync_bmp) {
- ret = -ENOMEM;
- goto err;
- }
- bitmap_zero(sync_bmp, bmp_size);
-
- /* allocate the lzo blocks and initialize them */
- for (i = 0; i < blkcount; i++) {
- lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
- GFP_KERNEL);
- if (!lzo_blocks[i]) {
- kfree(sync_bmp);
- ret = -ENOMEM;
- goto err;
- }
- lzo_blocks[i]->sync_bmp = sync_bmp;
- lzo_blocks[i]->sync_bmp_nbits = bmp_size;
- /* alloc the working space for the compressed block */
- ret = regcache_lzo_prepare(lzo_blocks[i]);
- if (ret < 0)
- goto err;
- }
-
- blksize = regcache_lzo_get_blksize(map);
- p = map->reg_defaults_raw;
- end = map->reg_defaults_raw + map->cache_size_raw;
- /* compress the register map and fill the lzo blocks */
- for (i = 0; i < blkcount; i++, p += blksize) {
- lzo_blocks[i]->src = p;
- if (p + blksize > end)
- lzo_blocks[i]->src_len = end - p;
- else
- lzo_blocks[i]->src_len = blksize;
- ret = regcache_lzo_compress_cache_block(map,
- lzo_blocks[i]);
- if (ret < 0)
- goto err;
- lzo_blocks[i]->decompressed_size =
- lzo_blocks[i]->src_len;
- }
-
- return 0;
-err:
- regcache_lzo_exit(map);
- return ret;
-}
-
-static int regcache_lzo_exit(struct regmap *map)
-{
- struct regcache_lzo_ctx **lzo_blocks;
- int i, blkcount;
-
- lzo_blocks = map->cache;
- if (!lzo_blocks)
- return 0;
-
- blkcount = regcache_lzo_block_count(map);
- /*
- * the pointer to the bitmap used for syncing the cache
- * is shared amongst all lzo_blocks. Ensure it is freed
- * only once.
- */
- if (lzo_blocks[0])
- kfree(lzo_blocks[0]->sync_bmp);
- for (i = 0; i < blkcount; i++) {
- if (lzo_blocks[i]) {
- kfree(lzo_blocks[i]->wmem);
- kfree(lzo_blocks[i]->dst);
- }
- /* each lzo_block is a pointer returned by kmalloc or NULL */
- kfree(lzo_blocks[i]);
- }
- kfree(lzo_blocks);
- map->cache = NULL;
- return 0;
-}
-
-static int regcache_lzo_read(struct regmap *map,
- unsigned int reg, unsigned int *value)
-{
- struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
- int ret, blkindex, blkpos;
- size_t blksize, tmp_dst_len;
- void *tmp_dst;
-
- /* index of the compressed lzo block */
- blkindex = regcache_lzo_get_blkindex(map, reg);
- /* register index within the decompressed block */
- blkpos = regcache_lzo_get_blkpos(map, reg);
- /* size of the compressed block */
- blksize = regcache_lzo_get_blksize(map);
- lzo_blocks = map->cache;
- lzo_block = lzo_blocks[blkindex];
-
- /* save the pointer and length of the compressed block */
- tmp_dst = lzo_block->dst;
- tmp_dst_len = lzo_block->dst_len;
-
- /* prepare the source to be the compressed block */
- lzo_block->src = lzo_block->dst;
- lzo_block->src_len = lzo_block->dst_len;
-
- /* decompress the block */
- ret = regcache_lzo_decompress_cache_block(map, lzo_block);
- if (ret >= 0)
- /* fetch the value from the cache */
- *value = regcache_get_val(lzo_block->dst, blkpos,
- map->cache_word_size);
-
- kfree(lzo_block->dst);
- /* restore the pointer and length of the compressed block */
- lzo_block->dst = tmp_dst;
- lzo_block->dst_len = tmp_dst_len;
-
- return ret;
-}
-
-static int regcache_lzo_write(struct regmap *map,
- unsigned int reg, unsigned int value)
-{
- struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
- int ret, blkindex, blkpos;
- size_t blksize, tmp_dst_len;
- void *tmp_dst;
-
- /* index of the compressed lzo block */
- blkindex = regcache_lzo_get_blkindex(map, reg);
- /* register index within the decompressed block */
- blkpos = regcache_lzo_get_blkpos(map, reg);
- /* size of the compressed block */
- blksize = regcache_lzo_get_blksize(map);
- lzo_blocks = map->cache;
- lzo_block = lzo_blocks[blkindex];
-
- /* save the pointer and length of the compressed block */
- tmp_dst = lzo_block->dst;
- tmp_dst_len = lzo_block->dst_len;
-
- /* prepare the source to be the compressed block */
- lzo_block->src = lzo_block->dst;
- lzo_block->src_len = lzo_block->dst_len;
-
- /* decompress the block */
- ret = regcache_lzo_decompress_cache_block(map, lzo_block);
- if (ret < 0) {
- kfree(lzo_block->dst);
- goto out;
- }
-
- /* write the new value to the cache */
- if (regcache_set_val(lzo_block->dst, blkpos, value,
- map->cache_word_size)) {
- kfree(lzo_block->dst);
- goto out;
- }
-
- /* prepare the source to be the decompressed block */
- lzo_block->src = lzo_block->dst;
- lzo_block->src_len = lzo_block->dst_len;
-
- /* compress the block */
- ret = regcache_lzo_compress_cache_block(map, lzo_block);
- if (ret < 0) {
- kfree(lzo_block->dst);
- kfree(lzo_block->src);
- goto out;
- }
-
- /* set the bit so we know we have to sync this register */
- set_bit(reg, lzo_block->sync_bmp);
- kfree(tmp_dst);
- kfree(lzo_block->src);
- return 0;
-out:
- lzo_block->dst = tmp_dst;
- lzo_block->dst_len = tmp_dst_len;
- return ret;
-}
-
-static int regcache_lzo_sync(struct regmap *map, unsigned int min,
- unsigned int max)
-{
- struct regcache_lzo_ctx **lzo_blocks;
- unsigned int val;
- int i;
- int ret;
-
- lzo_blocks = map->cache;
- i = min;
- for_each_set_bit_from(i, lzo_blocks[0]->sync_bmp,
- lzo_blocks[0]->sync_bmp_nbits) {
- if (i > max)
- continue;
-
- ret = regcache_read(map, i, &val);
- if (ret)
- return ret;
-
- /* Is this the hardware default? If so skip. */
- ret = regcache_lookup_reg(map, i);
- if (ret > 0 && val == map->reg_defaults[ret].def)
- continue;
-
- map->cache_bypass = 1;
- ret = _regmap_write(map, i, val);
- map->cache_bypass = 0;
- if (ret)
- return ret;
- dev_dbg(map->dev, "Synced register %#x, value %#x\n",
- i, val);
- }
-
- return 0;
-}
-
-struct regcache_ops regcache_lzo_ops = {
- .type = REGCACHE_COMPRESSED,
- .name = "lzo",
- .init = regcache_lzo_init,
- .exit = regcache_lzo_exit,
- .read = regcache_lzo_read,
- .write = regcache_lzo_write,
- .sync = regcache_lzo_sync
-};
diff --git a/ANDROID_3.4.5/drivers/base/regmap/regcache-rbtree.c b/ANDROID_3.4.5/drivers/base/regmap/regcache-rbtree.c
deleted file mode 100644
index 92b779ee..00000000
--- a/ANDROID_3.4.5/drivers/base/regmap/regcache-rbtree.c
+++ /dev/null
@@ -1,430 +0,0 @@
-/*
- * Register cache access API - rbtree caching support
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/debugfs.h>
-#include <linux/rbtree.h>
-#include <linux/seq_file.h>
-
-#include "internal.h"
-
-static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
- unsigned int value);
-static int regcache_rbtree_exit(struct regmap *map);
-
-struct regcache_rbtree_node {
- /* the actual rbtree node holding this block */
- struct rb_node node;
- /* base register handled by this block */
- unsigned int base_reg;
- /* block of adjacent registers */
- void *block;
- /* number of registers available in the block */
- unsigned int blklen;
-} __attribute__ ((packed));
-
-struct regcache_rbtree_ctx {
- struct rb_root root;
- struct regcache_rbtree_node *cached_rbnode;
-};
-
-static inline void regcache_rbtree_get_base_top_reg(
- struct regcache_rbtree_node *rbnode,
- unsigned int *base, unsigned int *top)
-{
- *base = rbnode->base_reg;
- *top = rbnode->base_reg + rbnode->blklen - 1;
-}
-
-static unsigned int regcache_rbtree_get_register(
- struct regcache_rbtree_node *rbnode, unsigned int idx,
- unsigned int word_size)
-{
- return regcache_get_val(rbnode->block, idx, word_size);
-}
-
-static void regcache_rbtree_set_register(struct regcache_rbtree_node *rbnode,
- unsigned int idx, unsigned int val,
- unsigned int word_size)
-{
- regcache_set_val(rbnode->block, idx, val, word_size);
-}
-
-static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
- unsigned int reg)
-{
- struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
- struct rb_node *node;
- struct regcache_rbtree_node *rbnode;
- unsigned int base_reg, top_reg;
-
- rbnode = rbtree_ctx->cached_rbnode;
- if (rbnode) {
- regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
- if (reg >= base_reg && reg <= top_reg)
- return rbnode;
- }
-
- node = rbtree_ctx->root.rb_node;
- while (node) {
- rbnode = container_of(node, struct regcache_rbtree_node, node);
- regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
- if (reg >= base_reg && reg <= top_reg) {
- rbtree_ctx->cached_rbnode = rbnode;
- return rbnode;
- } else if (reg > top_reg) {
- node = node->rb_right;
- } else if (reg < base_reg) {
- node = node->rb_left;
- }
- }
-
- return NULL;
-}
-
-static int regcache_rbtree_insert(struct rb_root *root,
- struct regcache_rbtree_node *rbnode)
-{
- struct rb_node **new, *parent;
- struct regcache_rbtree_node *rbnode_tmp;
- unsigned int base_reg_tmp, top_reg_tmp;
- unsigned int base_reg;
-
- parent = NULL;
- new = &root->rb_node;
- while (*new) {
- rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
- node);
- /* base and top registers of the current rbnode */
- regcache_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp,
- &top_reg_tmp);
- /* base register of the rbnode to be added */
- base_reg = rbnode->base_reg;
- parent = *new;
- /* if this register has already been inserted, just return */
- if (base_reg >= base_reg_tmp &&
- base_reg <= top_reg_tmp)
- return 0;
- else if (base_reg > top_reg_tmp)
- new = &((*new)->rb_right);
- else if (base_reg < base_reg_tmp)
- new = &((*new)->rb_left);
- }
-
- /* insert the node into the rbtree */
- rb_link_node(&rbnode->node, parent, new);
- rb_insert_color(&rbnode->node, root);
-
- return 1;
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int rbtree_show(struct seq_file *s, void *ignored)
-{
- struct regmap *map = s->private;
- struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
- struct regcache_rbtree_node *n;
- struct rb_node *node;
- unsigned int base, top;
- int nodes = 0;
- int registers = 0;
- int average;
-
- mutex_lock(&map->lock);
-
- for (node = rb_first(&rbtree_ctx->root); node != NULL;
- node = rb_next(node)) {
- n = container_of(node, struct regcache_rbtree_node, node);
-
- regcache_rbtree_get_base_top_reg(n, &base, &top);
- seq_printf(s, "%x-%x (%d)\n", base, top, top - base + 1);
-
- nodes++;
- registers += top - base + 1;
- }
-
- if (nodes)
- average = registers / nodes;
- else
- average = 0;
-
- seq_printf(s, "%d nodes, %d registers, average %d registers\n",
- nodes, registers, average);
-
- mutex_unlock(&map->lock);
-
- return 0;
-}
-
-static int rbtree_open(struct inode *inode, struct file *file)
-{
- return single_open(file, rbtree_show, inode->i_private);
-}
-
-static const struct file_operations rbtree_fops = {
- .open = rbtree_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static void rbtree_debugfs_init(struct regmap *map)
-{
- debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
-}
-#else
-static void rbtree_debugfs_init(struct regmap *map)
-{
-}
-#endif
-
-static int regcache_rbtree_init(struct regmap *map)
-{
- struct regcache_rbtree_ctx *rbtree_ctx;
- int i;
- int ret;
-
- map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
- if (!map->cache)
- return -ENOMEM;
-
- rbtree_ctx = map->cache;
- rbtree_ctx->root = RB_ROOT;
- rbtree_ctx->cached_rbnode = NULL;
-
- for (i = 0; i < map->num_reg_defaults; i++) {
- ret = regcache_rbtree_write(map,
- map->reg_defaults[i].reg,
- map->reg_defaults[i].def);
- if (ret)
- goto err;
- }
-
- rbtree_debugfs_init(map);
-
- return 0;
-
-err:
- regcache_rbtree_exit(map);
- return ret;
-}
-
-static int regcache_rbtree_exit(struct regmap *map)
-{
- struct rb_node *next;
- struct regcache_rbtree_ctx *rbtree_ctx;
- struct regcache_rbtree_node *rbtree_node;
-
- /* if we've already been called then just return */
- rbtree_ctx = map->cache;
- if (!rbtree_ctx)
- return 0;
-
- /* free up the rbtree */
- next = rb_first(&rbtree_ctx->root);
- while (next) {
- rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
- next = rb_next(&rbtree_node->node);
- rb_erase(&rbtree_node->node, &rbtree_ctx->root);
- kfree(rbtree_node->block);
- kfree(rbtree_node);
- }
-
- /* release the resources */
- kfree(map->cache);
- map->cache = NULL;
-
- return 0;
-}
-
-static int regcache_rbtree_read(struct regmap *map,
- unsigned int reg, unsigned int *value)
-{
- struct regcache_rbtree_node *rbnode;
- unsigned int reg_tmp;
-
- rbnode = regcache_rbtree_lookup(map, reg);
- if (rbnode) {
- reg_tmp = reg - rbnode->base_reg;
- *value = regcache_rbtree_get_register(rbnode, reg_tmp,
- map->cache_word_size);
- } else {
- return -ENOENT;
- }
-
- return 0;
-}
-
-
-static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode,
- unsigned int pos, unsigned int reg,
- unsigned int value, unsigned int word_size)
-{
- u8 *blk;
-
- blk = krealloc(rbnode->block,
- (rbnode->blklen + 1) * word_size, GFP_KERNEL);
- if (!blk)
- return -ENOMEM;
-
- /* insert the register value in the correct place in the rbnode block */
- memmove(blk + (pos + 1) * word_size,
- blk + pos * word_size,
- (rbnode->blklen - pos) * word_size);
-
- /* update the rbnode block, its size and the base register */
- rbnode->block = blk;
- rbnode->blklen++;
- if (!pos)
- rbnode->base_reg = reg;
-
- regcache_rbtree_set_register(rbnode, pos, value, word_size);
- return 0;
-}
-
-static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
- unsigned int value)
-{
- struct regcache_rbtree_ctx *rbtree_ctx;
- struct regcache_rbtree_node *rbnode, *rbnode_tmp;
- struct rb_node *node;
- unsigned int val;
- unsigned int reg_tmp;
- unsigned int pos;
- int i;
- int ret;
-
- rbtree_ctx = map->cache;
- /* if we can't locate it in the cached rbnode we'll have
- * to traverse the rbtree looking for it.
- */
- rbnode = regcache_rbtree_lookup(map, reg);
- if (rbnode) {
- reg_tmp = reg - rbnode->base_reg;
- val = regcache_rbtree_get_register(rbnode, reg_tmp,
- map->cache_word_size);
- if (val == value)
- return 0;
- regcache_rbtree_set_register(rbnode, reg_tmp, value,
- map->cache_word_size);
- } else {
- /* look for an adjacent register to the one we are about to add */
- for (node = rb_first(&rbtree_ctx->root); node;
- node = rb_next(node)) {
- rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, node);
- for (i = 0; i < rbnode_tmp->blklen; i++) {
- reg_tmp = rbnode_tmp->base_reg + i;
- if (abs(reg_tmp - reg) != 1)
- continue;
- /* decide where in the block to place our register */
- if (reg_tmp + 1 == reg)
- pos = i + 1;
- else
- pos = i;
- ret = regcache_rbtree_insert_to_block(rbnode_tmp, pos,
- reg, value,
- map->cache_word_size);
- if (ret)
- return ret;
- rbtree_ctx->cached_rbnode = rbnode_tmp;
- return 0;
- }
- }
- /* we did not manage to find a place to insert it in an existing
- * block so create a new rbnode with a single register in its block.
- * This block will get populated further if any other adjacent
- * registers get modified in the future.
- */
- rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
- if (!rbnode)
- return -ENOMEM;
- rbnode->blklen = 1;
- rbnode->base_reg = reg;
- rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
- GFP_KERNEL);
- if (!rbnode->block) {
- kfree(rbnode);
- return -ENOMEM;
- }
- regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size);
- regcache_rbtree_insert(&rbtree_ctx->root, rbnode);
- rbtree_ctx->cached_rbnode = rbnode;
- }
-
- return 0;
-}
-
-static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
- unsigned int max)
-{
- struct regcache_rbtree_ctx *rbtree_ctx;
- struct rb_node *node;
- struct regcache_rbtree_node *rbnode;
- unsigned int regtmp;
- unsigned int val;
- int ret;
- int i, base, end;
-
- rbtree_ctx = map->cache;
- for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
- rbnode = rb_entry(node, struct regcache_rbtree_node, node);
-
- if (rbnode->base_reg < min)
- continue;
- if (rbnode->base_reg > max)
- break;
- if (rbnode->base_reg + rbnode->blklen < min)
- continue;
-
- if (min > rbnode->base_reg)
- base = min - rbnode->base_reg;
- else
- base = 0;
-
- if (max < rbnode->base_reg + rbnode->blklen)
- end = rbnode->base_reg + rbnode->blklen - max;
- else
- end = rbnode->blklen;
-
- for (i = base; i < end; i++) {
- regtmp = rbnode->base_reg + i;
- val = regcache_rbtree_get_register(rbnode, i,
- map->cache_word_size);
-
- /* Is this the hardware default? If so skip. */
- ret = regcache_lookup_reg(map, regtmp);
- if (ret >= 0 && val == map->reg_defaults[ret].def)
- continue;
-
- map->cache_bypass = 1;
- ret = _regmap_write(map, regtmp, val);
- map->cache_bypass = 0;
- if (ret)
- return ret;
- dev_dbg(map->dev, "Synced register %#x, value %#x\n",
- regtmp, val);
- }
- }
-
- return 0;
-}
-
-struct regcache_ops regcache_rbtree_ops = {
- .type = REGCACHE_RBTREE,
- .name = "rbtree",
- .init = regcache_rbtree_init,
- .exit = regcache_rbtree_exit,
- .read = regcache_rbtree_read,
- .write = regcache_rbtree_write,
- .sync = regcache_rbtree_sync
-};
diff --git a/ANDROID_3.4.5/drivers/base/regmap/regcache.c b/ANDROID_3.4.5/drivers/base/regmap/regcache.c
deleted file mode 100644
index 74b69095..00000000
--- a/ANDROID_3.4.5/drivers/base/regmap/regcache.c
+++ /dev/null
@@ -1,491 +0,0 @@
-/*
- * Register cache access API
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/device.h>
-#include <trace/events/regmap.h>
-#include <linux/bsearch.h>
-#include <linux/sort.h>
-
-#include "internal.h"
-
-static const struct regcache_ops *cache_types[] = {
- &regcache_rbtree_ops,
- &regcache_lzo_ops,
-};
-
-static int regcache_hw_init(struct regmap *map)
-{
- int i, j;
- int ret;
- int count;
- unsigned int val;
- void *tmp_buf;
-
- if (!map->num_reg_defaults_raw)
- return -EINVAL;
-
- if (!map->reg_defaults_raw) {
- u32 cache_bypass = map->cache_bypass;
- dev_warn(map->dev, "No cache defaults, reading back from HW\n");
-
- /* Bypass the cache access till data read from HW*/
- map->cache_bypass = 1;
- tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
- if (!tmp_buf)
- return -EINVAL;
- ret = regmap_bulk_read(map, 0, tmp_buf,
- map->num_reg_defaults_raw);
- map->cache_bypass = cache_bypass;
- if (ret < 0) {
- kfree(tmp_buf);
- return ret;
- }
- map->reg_defaults_raw = tmp_buf;
- map->cache_free = 1;
- }
-
- /* calculate the size of reg_defaults */
- for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
- val = regcache_get_val(map->reg_defaults_raw,
- i, map->cache_word_size);
- if (regmap_volatile(map, i))
- continue;
- count++;
- }
-
- map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
- GFP_KERNEL);
- if (!map->reg_defaults) {
- ret = -ENOMEM;
- goto err_free;
- }
-
- /* fill the reg_defaults */
- map->num_reg_defaults = count;
- for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
- val = regcache_get_val(map->reg_defaults_raw,
- i, map->cache_word_size);
- if (regmap_volatile(map, i))
- continue;
- map->reg_defaults[j].reg = i;
- map->reg_defaults[j].def = val;
- j++;
- }
-
- return 0;
-
-err_free:
- if (map->cache_free)
- kfree(map->reg_defaults_raw);
-
- return ret;
-}
-
-int regcache_init(struct regmap *map, const struct regmap_config *config)
-{
- int ret;
- int i;
- void *tmp_buf;
-
- if (map->cache_type == REGCACHE_NONE) {
- map->cache_bypass = true;
- return 0;
- }
-
- for (i = 0; i < ARRAY_SIZE(cache_types); i++)
- if (cache_types[i]->type == map->cache_type)
- break;
-
- if (i == ARRAY_SIZE(cache_types)) {
- dev_err(map->dev, "Could not match compress type: %d\n",
- map->cache_type);
- return -EINVAL;
- }
-
- map->num_reg_defaults = config->num_reg_defaults;
- map->num_reg_defaults_raw = config->num_reg_defaults_raw;
- map->reg_defaults_raw = config->reg_defaults_raw;
- map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
- map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
-
- map->cache = NULL;
- map->cache_ops = cache_types[i];
-
- if (!map->cache_ops->read ||
- !map->cache_ops->write ||
- !map->cache_ops->name)
- return -EINVAL;
-
- /* We still need to ensure that the reg_defaults
- * won't vanish from under us. We'll need to make
- * a copy of it.
- */
- if (config->reg_defaults) {
- if (!map->num_reg_defaults)
- return -EINVAL;
- tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
- sizeof(struct reg_default), GFP_KERNEL);
- if (!tmp_buf)
- return -ENOMEM;
- map->reg_defaults = tmp_buf;
- } else if (map->num_reg_defaults_raw) {
- /* Some devices such as PMICs don't have cache defaults,
- * we cope with this by reading back the HW registers and
- * crafting the cache defaults by hand.
- */
- ret = regcache_hw_init(map);
- if (ret < 0)
- return ret;
- }
-
- if (!map->max_register)
- map->max_register = map->num_reg_defaults_raw;
-
- if (map->cache_ops->init) {
- dev_dbg(map->dev, "Initializing %s cache\n",
- map->cache_ops->name);
- ret = map->cache_ops->init(map);
- if (ret)
- goto err_free;
- }
- return 0;
-
-err_free:
- kfree(map->reg_defaults);
- if (map->cache_free)
- kfree(map->reg_defaults_raw);
-
- return ret;
-}
-
-void regcache_exit(struct regmap *map)
-{
- if (map->cache_type == REGCACHE_NONE)
- return;
-
- BUG_ON(!map->cache_ops);
-
- kfree(map->reg_defaults);
- if (map->cache_free)
- kfree(map->reg_defaults_raw);
-
- if (map->cache_ops->exit) {
- dev_dbg(map->dev, "Destroying %s cache\n",
- map->cache_ops->name);
- map->cache_ops->exit(map);
- }
-}
-
-/**
- * regcache_read: Fetch the value of a given register from the cache.
- *
- * @map: map to configure.
- * @reg: The register index.
- * @value: The value to be returned.
- *
- * Return a negative value on failure, 0 on success.
- */
-int regcache_read(struct regmap *map,
- unsigned int reg, unsigned int *value)
-{
- int ret;
-
- if (map->cache_type == REGCACHE_NONE)
- return -ENOSYS;
-
- BUG_ON(!map->cache_ops);
-
- if (!regmap_volatile(map, reg)) {
- ret = map->cache_ops->read(map, reg, value);
-
- if (ret == 0)
- trace_regmap_reg_read_cache(map->dev, reg, *value);
-
- return ret;
- }
-
- return -EINVAL;
-}
-
-/**
- * regcache_write: Set the value of a given register in the cache.
- *
- * @map: map to configure.
- * @reg: The register index.
- * @value: The new register value.
- *
- * Return a negative value on failure, 0 on success.
- */
-int regcache_write(struct regmap *map,
- unsigned int reg, unsigned int value)
-{
- if (map->cache_type == REGCACHE_NONE)
- return 0;
-
- BUG_ON(!map->cache_ops);
-
- if (!regmap_writeable(map, reg))
- return -EIO;
-
- if (!regmap_volatile(map, reg))
- return map->cache_ops->write(map, reg, value);
-
- return 0;
-}
-
-/**
- * regcache_sync: Sync the register cache with the hardware.
- *
- * @map: map to configure.
- *
- * Any registers that should not be synced should be marked as
- * volatile. In general drivers can choose not to use the provided
- * syncing functionality if they so require.
- *
- * Return a negative value on failure, 0 on success.
- */
-int regcache_sync(struct regmap *map)
-{
- int ret = 0;
- unsigned int i;
- const char *name;
- unsigned int bypass;
-
- BUG_ON(!map->cache_ops || !map->cache_ops->sync);
-
- mutex_lock(&map->lock);
- /* Remember the initial bypass state */
- bypass = map->cache_bypass;
- dev_dbg(map->dev, "Syncing %s cache\n",
- map->cache_ops->name);
- name = map->cache_ops->name;
- trace_regcache_sync(map->dev, name, "start");
-
- if (!map->cache_dirty)
- goto out;
-
- /* Apply any patch first */
- map->cache_bypass = 1;
- for (i = 0; i < map->patch_regs; i++) {
- ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
- if (ret != 0) {
- dev_err(map->dev, "Failed to write %x = %x: %d\n",
- map->patch[i].reg, map->patch[i].def, ret);
- goto out;
- }
- }
- map->cache_bypass = 0;
-
- ret = map->cache_ops->sync(map, 0, map->max_register);
-
- if (ret == 0)
- map->cache_dirty = false;
-
-out:
- trace_regcache_sync(map->dev, name, "stop");
- /* Restore the bypass state */
- map->cache_bypass = bypass;
- mutex_unlock(&map->lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(regcache_sync);
-
-/**
- * regcache_sync_region: Sync part of the register cache with the hardware.
- *
- * @map: map to sync.
- * @min: first register to sync
- * @max: last register to sync
- *
- * Write all non-default register values in the specified region to
- * the hardware.
- *
- * Return a negative value on failure, 0 on success.
- */
-int regcache_sync_region(struct regmap *map, unsigned int min,
- unsigned int max)
-{
- int ret = 0;
- const char *name;
- unsigned int bypass;
-
- BUG_ON(!map->cache_ops || !map->cache_ops->sync);
-
- mutex_lock(&map->lock);
-
- /* Remember the initial bypass state */
- bypass = map->cache_bypass;
-
- name = map->cache_ops->name;
- dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
-
- trace_regcache_sync(map->dev, name, "start region");
-
- if (!map->cache_dirty)
- goto out;
-
- ret = map->cache_ops->sync(map, min, max);
-
-out:
- trace_regcache_sync(map->dev, name, "stop region");
- /* Restore the bypass state */
- map->cache_bypass = bypass;
- mutex_unlock(&map->lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(regcache_sync_region);
-
-/**
- * regcache_cache_only: Put a register map into cache only mode
- *
- * @map: map to configure
- * @cache_only: flag if changes should be written to the hardware
- *
- * When a register map is marked as cache only writes to the register
- * map API will only update the register cache, they will not cause
- * any hardware changes. This is useful for allowing portions of
- * drivers to act as though the device were functioning as normal when
- * it is disabled for power saving reasons.
- */
-void regcache_cache_only(struct regmap *map, bool enable)
-{
- mutex_lock(&map->lock);
- WARN_ON(map->cache_bypass && enable);
- map->cache_only = enable;
- trace_regmap_cache_only(map->dev, enable);
- mutex_unlock(&map->lock);
-}
-EXPORT_SYMBOL_GPL(regcache_cache_only);
-
-/**
- * regcache_mark_dirty: Mark the register cache as dirty
- *
- * @map: map to mark
- *
- * Mark the register cache as dirty, for example due to the device
- * having been powered down for suspend. If the cache is not marked
- * as dirty then the cache sync will be suppressed.
- */
-void regcache_mark_dirty(struct regmap *map)
-{
- mutex_lock(&map->lock);
- map->cache_dirty = true;
- mutex_unlock(&map->lock);
-}
-EXPORT_SYMBOL_GPL(regcache_mark_dirty);
-
-/**
- * regcache_cache_bypass: Put a register map into cache bypass mode
- *
- * @map: map to configure
- * @cache_bypass: flag if changes should not be written to the hardware
- *
- * When a register map is marked with the cache bypass option, writes
- * to the register map API will only update the hardware and not the
- * the cache directly. This is useful when syncing the cache back to
- * the hardware.
- */
-void regcache_cache_bypass(struct regmap *map, bool enable)
-{
- mutex_lock(&map->lock);
- WARN_ON(map->cache_only && enable);
- map->cache_bypass = enable;
- trace_regmap_cache_bypass(map->dev, enable);
- mutex_unlock(&map->lock);
-}
-EXPORT_SYMBOL_GPL(regcache_cache_bypass);
-
-bool regcache_set_val(void *base, unsigned int idx,
- unsigned int val, unsigned int word_size)
-{
- switch (word_size) {
- case 1: {
- u8 *cache = base;
- if (cache[idx] == val)
- return true;
- cache[idx] = val;
- break;
- }
- case 2: {
- u16 *cache = base;
- if (cache[idx] == val)
- return true;
- cache[idx] = val;
- break;
- }
- case 4: {
- u32 *cache = base;
- if (cache[idx] == val)
- return true;
- cache[idx] = val;
- break;
- }
- default:
- BUG();
- }
- return false;
-}
-
-unsigned int regcache_get_val(const void *base, unsigned int idx,
- unsigned int word_size)
-{
- if (!base)
- return -EINVAL;
-
- switch (word_size) {
- case 1: {
- const u8 *cache = base;
- return cache[idx];
- }
- case 2: {
- const u16 *cache = base;
- return cache[idx];
- }
- case 4: {
- const u32 *cache = base;
- return cache[idx];
- }
- default:
- BUG();
- }
- /* unreachable */
- return -1;
-}
-
-static int regcache_default_cmp(const void *a, const void *b)
-{
- const struct reg_default *_a = a;
- const struct reg_default *_b = b;
-
- return _a->reg - _b->reg;
-}
-
-int regcache_lookup_reg(struct regmap *map, unsigned int reg)
-{
- struct reg_default key;
- struct reg_default *r;
-
- key.reg = reg;
- key.def = 0;
-
- r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
- sizeof(struct reg_default), regcache_default_cmp);
-
- if (r)
- return r - map->reg_defaults;
- else
- return -ENOENT;
-}
diff --git a/ANDROID_3.4.5/drivers/base/regmap/regmap-debugfs.c b/ANDROID_3.4.5/drivers/base/regmap/regmap-debugfs.c
deleted file mode 100644
index 8ee03493..00000000
--- a/ANDROID_3.4.5/drivers/base/regmap/regmap-debugfs.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * Register map access API - debugfs
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/device.h>
-
-#include "internal.h"
-
-static struct dentry *regmap_debugfs_root;
-
-/* Calculate the length of a fixed format */
-static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
-{
- snprintf(buf, buf_size, "%x", max_val);
- return strlen(buf);
-}
-
-static ssize_t regmap_name_read_file(struct file *file,
- char __user *user_buf, size_t count,
- loff_t *ppos)
-{
- struct regmap *map = file->private_data;
- int ret;
- char *buf;
-
- buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = snprintf(buf, PAGE_SIZE, "%s\n", map->dev->driver->name);
- if (ret < 0) {
- kfree(buf);
- return ret;
- }
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
- kfree(buf);
- return ret;
-}
-
-static const struct file_operations regmap_name_fops = {
- .open = simple_open,
- .read = regmap_name_read_file,
- .llseek = default_llseek,
-};
-
-static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- int reg_len, val_len, tot_len;
- size_t buf_pos = 0;
- loff_t p = 0;
- ssize_t ret;
- int i;
- struct regmap *map = file->private_data;
- char *buf;
- unsigned int val;
-
- if (*ppos < 0 || !count)
- return -EINVAL;
-
- buf = kmalloc(count, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- /* Calculate the length of a fixed format */
- reg_len = regmap_calc_reg_len(map->max_register, buf, count);
- val_len = 2 * map->format.val_bytes;
- tot_len = reg_len + val_len + 3; /* : \n */
-
- for (i = 0; i < map->max_register + 1; i++) {
- if (!regmap_readable(map, i))
- continue;
-
- if (regmap_precious(map, i))
- continue;
-
- /* If we're in the region the user is trying to read */
- if (p >= *ppos) {
- /* ...but not beyond it */
- if (buf_pos >= count - 1 - tot_len)
- break;
-
- /* Format the register */
- snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
- reg_len, i);
- buf_pos += reg_len + 2;
-
- /* Format the value, write all X if we can't read */
- ret = regmap_read(map, i, &val);
- if (ret == 0)
- snprintf(buf + buf_pos, count - buf_pos,
- "%.*x", val_len, val);
- else
- memset(buf + buf_pos, 'X', val_len);
- buf_pos += 2 * map->format.val_bytes;
-
- buf[buf_pos++] = '\n';
- }
- p += tot_len;
- }
-
- ret = buf_pos;
-
- if (copy_to_user(user_buf, buf, buf_pos)) {
- ret = -EFAULT;
- goto out;
- }
-
- *ppos += buf_pos;
-
-out:
- kfree(buf);
- return ret;
-}
-
-#define REGMAP_ALLOW_WRITE_DEBUGFS
-#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
-/*
- * This can be dangerous especially when we have clients such as
- * PMICs, therefore don't provide any real compile time configuration option
- * for this feature, people who want to use this will need to modify
- * the source code directly.
- */
-static ssize_t regmap_map_write_file(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- char buf[32];
- size_t buf_size;
- char *start = buf;
- unsigned long reg, value;
- struct regmap *map = file->private_data;
-
- buf_size = min(count, (sizeof(buf)-1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- while (*start == ' ')
- start++;
- reg = simple_strtoul(start, &start, 16);
- while (*start == ' ')
- start++;
- if (strict_strtoul(start, 16, &value))
- return -EINVAL;
-
- /* Userspace has been fiddling around behind the kernel's back */
- add_taint(TAINT_USER);
-
- regmap_write(map, reg, value);
- return buf_size;
-}
-#else
-#define regmap_map_write_file NULL
-#endif
-
-static const struct file_operations regmap_map_fops = {
- .open = simple_open,
- .read = regmap_map_read_file,
- .write = regmap_map_write_file,
- .llseek = default_llseek,
-};
-
-static ssize_t regmap_access_read_file(struct file *file,
- char __user *user_buf, size_t count,
- loff_t *ppos)
-{
- int reg_len, tot_len;
- size_t buf_pos = 0;
- loff_t p = 0;
- ssize_t ret;
- int i;
- struct regmap *map = file->private_data;
- char *buf;
-
- if (*ppos < 0 || !count)
- return -EINVAL;
-
- buf = kmalloc(count, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- /* Calculate the length of a fixed format */
- reg_len = regmap_calc_reg_len(map->max_register, buf, count);
- tot_len = reg_len + 10; /* ': R W V P\n' */
-
- for (i = 0; i < map->max_register + 1; i++) {
- /* Ignore registers which are neither readable nor writable */
- if (!regmap_readable(map, i) && !regmap_writeable(map, i))
- continue;
-
- /* If we're in the region the user is trying to read */
- if (p >= *ppos) {
- /* ...but not beyond it */
- if (buf_pos >= count - 1 - tot_len)
- break;
-
- /* Format the register */
- snprintf(buf + buf_pos, count - buf_pos,
- "%.*x: %c %c %c %c\n",
- reg_len, i,
- regmap_readable(map, i) ? 'y' : 'n',
- regmap_writeable(map, i) ? 'y' : 'n',
- regmap_volatile(map, i) ? 'y' : 'n',
- regmap_precious(map, i) ? 'y' : 'n');
-
- buf_pos += tot_len;
- }
- p += tot_len;
- }
-
- ret = buf_pos;
-
- if (copy_to_user(user_buf, buf, buf_pos)) {
- ret = -EFAULT;
- goto out;
- }
-
- *ppos += buf_pos;
-
-out:
- kfree(buf);
- return ret;
-}
-
-static const struct file_operations regmap_access_fops = {
- .open = simple_open,
- .read = regmap_access_read_file,
- .llseek = default_llseek,
-};
-
-void regmap_debugfs_init(struct regmap *map)
-{
- map->debugfs = debugfs_create_dir(dev_name(map->dev),
- regmap_debugfs_root);
- if (!map->debugfs) {
- dev_warn(map->dev, "Failed to create debugfs directory\n");
- return;
- }
-
- debugfs_create_file("name", 0400, map->debugfs,
- map, &regmap_name_fops);
-
- if (map->max_register) {
- debugfs_create_file("registers", 0400, map->debugfs,
- map, &regmap_map_fops);
- debugfs_create_file("access", 0400, map->debugfs,
- map, &regmap_access_fops);
- }
-
- if (map->cache_type) {
- debugfs_create_bool("cache_only", 0400, map->debugfs,
- &map->cache_only);
- debugfs_create_bool("cache_dirty", 0400, map->debugfs,
- &map->cache_dirty);
- debugfs_create_bool("cache_bypass", 0400, map->debugfs,
- &map->cache_bypass);
- }
-}
-
-void regmap_debugfs_exit(struct regmap *map)
-{
- debugfs_remove_recursive(map->debugfs);
-}
-
-void regmap_debugfs_initcall(void)
-{
- regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
- if (!regmap_debugfs_root) {
- pr_warn("regmap: Failed to create debugfs root\n");
- return;
- }
-}
diff --git a/ANDROID_3.4.5/drivers/base/regmap/regmap-i2c.c b/ANDROID_3.4.5/drivers/base/regmap/regmap-i2c.c
deleted file mode 100644
index 9a3a8c56..00000000
--- a/ANDROID_3.4.5/drivers/base/regmap/regmap-i2c.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Register map access API - I2C support
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/regmap.h>
-#include <linux/i2c.h>
-#include <linux/module.h>
-#include <linux/init.h>
-
-static int regmap_i2c_write(struct device *dev, const void *data, size_t count)
-{
- struct i2c_client *i2c = to_i2c_client(dev);
- int ret;
-
- ret = i2c_master_send(i2c, data, count);
- if (ret == count)
- return 0;
- else if (ret < 0)
- return ret;
- else
- return -EIO;
-}
-
-static int regmap_i2c_gather_write(struct device *dev,
- const void *reg, size_t reg_size,
- const void *val, size_t val_size)
-{
- struct i2c_client *i2c = to_i2c_client(dev);
- struct i2c_msg xfer[2];
- int ret;
-
- /* If the I2C controller can't do a gather tell the core, it
- * will substitute in a linear write for us.
- */
- if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_PROTOCOL_MANGLING))
- return -ENOTSUPP;
-
- xfer[0].addr = i2c->addr;
- xfer[0].flags = 0;
- xfer[0].len = reg_size;
- xfer[0].buf = (void *)reg;
-
- xfer[1].addr = i2c->addr;
- xfer[1].flags = I2C_M_NOSTART;
- xfer[1].len = val_size;
- xfer[1].buf = (void *)val;
-
- ret = i2c_transfer(i2c->adapter, xfer, 2);
- if (ret == 2)
- return 0;
- if (ret < 0)
- return ret;
- else
- return -EIO;
-}
-
-static int regmap_i2c_read(struct device *dev,
- const void *reg, size_t reg_size,
- void *val, size_t val_size)
-{
- struct i2c_client *i2c = to_i2c_client(dev);
- struct i2c_msg xfer[2];
- int ret;
-
- xfer[0].addr = i2c->addr;
- xfer[0].flags = 0;
- xfer[0].len = reg_size;
- xfer[0].buf = (void *)reg;
-
- xfer[1].addr = i2c->addr;
- xfer[1].flags = I2C_M_RD;
- xfer[1].len = val_size;
- xfer[1].buf = val;
-
- ret = i2c_transfer(i2c->adapter, xfer, 2);
- if (ret == 2)
- return 0;
- else if (ret < 0)
- return ret;
- else
- return -EIO;
-}
-
-static struct regmap_bus regmap_i2c = {
- .write = regmap_i2c_write,
- .gather_write = regmap_i2c_gather_write,
- .read = regmap_i2c_read,
-};
-
-/**
- * regmap_init_i2c(): Initialise register map
- *
- * @i2c: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_i2c(struct i2c_client *i2c,
- const struct regmap_config *config)
-{
- return regmap_init(&i2c->dev, &regmap_i2c, config);
-}
-EXPORT_SYMBOL_GPL(regmap_init_i2c);
-
-/**
- * devm_regmap_init_i2c(): Initialise managed register map
- *
- * @i2c: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
- const struct regmap_config *config)
-{
- return devm_regmap_init(&i2c->dev, &regmap_i2c, config);
-}
-EXPORT_SYMBOL_GPL(devm_regmap_init_i2c);
-
-MODULE_LICENSE("GPL");
diff --git a/ANDROID_3.4.5/drivers/base/regmap/regmap-irq.c b/ANDROID_3.4.5/drivers/base/regmap/regmap-irq.c
deleted file mode 100644
index 1befaa7a..00000000
--- a/ANDROID_3.4.5/drivers/base/regmap/regmap-irq.c
+++ /dev/null
@@ -1,303 +0,0 @@
-/*
- * regmap based irq_chip
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/export.h>
-#include <linux/device.h>
-#include <linux/regmap.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-
-#include "internal.h"
-
-struct regmap_irq_chip_data {
- struct mutex lock;
-
- struct regmap *map;
- struct regmap_irq_chip *chip;
-
- int irq_base;
-
- void *status_reg_buf;
- unsigned int *status_buf;
- unsigned int *mask_buf;
- unsigned int *mask_buf_def;
-};
-
-static inline const
-struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
- int irq)
-{
- return &data->chip->irqs[irq - data->irq_base];
-}
-
-static void regmap_irq_lock(struct irq_data *data)
-{
- struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
-
- mutex_lock(&d->lock);
-}
-
-static void regmap_irq_sync_unlock(struct irq_data *data)
-{
- struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
- int i, ret;
-
- /*
- * If there's been a change in the mask write it back to the
- * hardware. We rely on the use of the regmap core cache to
- * suppress pointless writes.
- */
- for (i = 0; i < d->chip->num_regs; i++) {
- ret = regmap_update_bits(d->map, d->chip->mask_base + i,
- d->mask_buf_def[i], d->mask_buf[i]);
- if (ret != 0)
- dev_err(d->map->dev, "Failed to sync masks in %x\n",
- d->chip->mask_base + i);
- }
-
- mutex_unlock(&d->lock);
-}
-
-static void regmap_irq_enable(struct irq_data *data)
-{
- struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
- const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);
-
- d->mask_buf[irq_data->reg_offset] &= ~irq_data->mask;
-}
-
-static void regmap_irq_disable(struct irq_data *data)
-{
- struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
- const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);
-
- d->mask_buf[irq_data->reg_offset] |= irq_data->mask;
-}
-
-static struct irq_chip regmap_irq_chip = {
- .name = "regmap",
- .irq_bus_lock = regmap_irq_lock,
- .irq_bus_sync_unlock = regmap_irq_sync_unlock,
- .irq_disable = regmap_irq_disable,
- .irq_enable = regmap_irq_enable,
-};
-
-static irqreturn_t regmap_irq_thread(int irq, void *d)
-{
- struct regmap_irq_chip_data *data = d;
- struct regmap_irq_chip *chip = data->chip;
- struct regmap *map = data->map;
- int ret, i;
- u8 *buf8 = data->status_reg_buf;
- u16 *buf16 = data->status_reg_buf;
- u32 *buf32 = data->status_reg_buf;
- bool handled = false;
-
- ret = regmap_bulk_read(map, chip->status_base, data->status_reg_buf,
- chip->num_regs);
- if (ret != 0) {
- dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
- return IRQ_NONE;
- }
-
- /*
- * Ignore masked IRQs and ack if we need to; we ack early so
- * there is no race between handling and acknowleding the
- * interrupt. We assume that typically few of the interrupts
- * will fire simultaneously so don't worry about overhead from
- * doing a write per register.
- */
- for (i = 0; i < data->chip->num_regs; i++) {
- switch (map->format.val_bytes) {
- case 1:
- data->status_buf[i] = buf8[i];
- break;
- case 2:
- data->status_buf[i] = buf16[i];
- break;
- case 4:
- data->status_buf[i] = buf32[i];
- break;
- default:
- BUG();
- return IRQ_NONE;
- }
-
- data->status_buf[i] &= ~data->mask_buf[i];
-
- if (data->status_buf[i] && chip->ack_base) {
- ret = regmap_write(map, chip->ack_base + i,
- data->status_buf[i]);
- if (ret != 0)
- dev_err(map->dev, "Failed to ack 0x%x: %d\n",
- chip->ack_base + i, ret);
- }
- }
-
- for (i = 0; i < chip->num_irqs; i++) {
- if (data->status_buf[chip->irqs[i].reg_offset] &
- chip->irqs[i].mask) {
- handle_nested_irq(data->irq_base + i);
- handled = true;
- }
- }
-
- if (handled)
- return IRQ_HANDLED;
- else
- return IRQ_NONE;
-}
-
-/**
- * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
- *
- * map: The regmap for the device.
- * irq: The IRQ the device uses to signal interrupts
- * irq_flags: The IRQF_ flags to use for the primary interrupt.
- * chip: Configuration for the interrupt controller.
- * data: Runtime data structure for the controller, allocated on success
- *
- * Returns 0 on success or an errno on failure.
- *
- * In order for this to be efficient the chip really should use a
- * register cache. The chip driver is responsible for restoring the
- * register values used by the IRQ controller over suspend and resume.
- */
-int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
- int irq_base, struct regmap_irq_chip *chip,
- struct regmap_irq_chip_data **data)
-{
- struct regmap_irq_chip_data *d;
- int cur_irq, i;
- int ret = -ENOMEM;
-
- irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
- if (irq_base < 0) {
- dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
- irq_base);
- return irq_base;
- }
-
- d = kzalloc(sizeof(*d), GFP_KERNEL);
- if (!d)
- return -ENOMEM;
-
- d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
- GFP_KERNEL);
- if (!d->status_buf)
- goto err_alloc;
-
- d->status_reg_buf = kzalloc(map->format.val_bytes * chip->num_regs,
- GFP_KERNEL);
- if (!d->status_reg_buf)
- goto err_alloc;
-
- d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
- GFP_KERNEL);
- if (!d->mask_buf)
- goto err_alloc;
-
- d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
- GFP_KERNEL);
- if (!d->mask_buf_def)
- goto err_alloc;
-
- d->map = map;
- d->chip = chip;
- d->irq_base = irq_base;
- mutex_init(&d->lock);
-
- for (i = 0; i < chip->num_irqs; i++)
- d->mask_buf_def[chip->irqs[i].reg_offset]
- |= chip->irqs[i].mask;
-
- /* Mask all the interrupts by default */
- for (i = 0; i < chip->num_regs; i++) {
- d->mask_buf[i] = d->mask_buf_def[i];
- ret = regmap_write(map, chip->mask_base + i, d->mask_buf[i]);
- if (ret != 0) {
- dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
- chip->mask_base + i, ret);
- goto err_alloc;
- }
- }
-
- /* Register them with genirq */
- for (cur_irq = irq_base;
- cur_irq < chip->num_irqs + irq_base;
- cur_irq++) {
- irq_set_chip_data(cur_irq, d);
- irq_set_chip_and_handler(cur_irq, &regmap_irq_chip,
- handle_edge_irq);
- irq_set_nested_thread(cur_irq, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(cur_irq, IRQF_VALID);
-#else
- irq_set_noprobe(cur_irq);
-#endif
- }
-
- ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
- chip->name, d);
- if (ret != 0) {
- dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
- goto err_alloc;
- }
-
- return 0;
-
-err_alloc:
- kfree(d->mask_buf_def);
- kfree(d->mask_buf);
- kfree(d->status_reg_buf);
- kfree(d->status_buf);
- kfree(d);
- return ret;
-}
-EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
-
-/**
- * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
- *
- * @irq: Primary IRQ for the device
- * @d: regmap_irq_chip_data allocated by regmap_add_irq_chip()
- */
-void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
-{
- if (!d)
- return;
-
- free_irq(irq, d);
- kfree(d->mask_buf_def);
- kfree(d->mask_buf);
- kfree(d->status_reg_buf);
- kfree(d->status_buf);
- kfree(d);
-}
-EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
-
-/**
- * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
- *
- * Useful for drivers to request their own IRQs.
- *
- * @data: regmap_irq controller to operate on.
- */
-int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
-{
- return data->irq_base;
-}
-EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
diff --git a/ANDROID_3.4.5/drivers/base/regmap/regmap-spi.c b/ANDROID_3.4.5/drivers/base/regmap/regmap-spi.c
deleted file mode 100644
index 7c0c35a3..00000000
--- a/ANDROID_3.4.5/drivers/base/regmap/regmap-spi.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Register map access API - SPI support
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/regmap.h>
-#include <linux/spi/spi.h>
-#include <linux/init.h>
-#include <linux/module.h>
-
-static int regmap_spi_write(struct device *dev, const void *data, size_t count)
-{
- struct spi_device *spi = to_spi_device(dev);
-
- return spi_write(spi, data, count);
-}
-
-static int regmap_spi_gather_write(struct device *dev,
- const void *reg, size_t reg_len,
- const void *val, size_t val_len)
-{
- struct spi_device *spi = to_spi_device(dev);
- struct spi_message m;
- struct spi_transfer t[2] = { { .tx_buf = reg, .len = reg_len, },
- { .tx_buf = val, .len = val_len, }, };
-
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
- return spi_sync(spi, &m);
-}
-
-static int regmap_spi_read(struct device *dev,
- const void *reg, size_t reg_size,
- void *val, size_t val_size)
-{
- struct spi_device *spi = to_spi_device(dev);
-
- return spi_write_then_read(spi, reg, reg_size, val, val_size);
-}
-
-static struct regmap_bus regmap_spi = {
- .write = regmap_spi_write,
- .gather_write = regmap_spi_gather_write,
- .read = regmap_spi_read,
- .read_flag_mask = 0x80,
-};
-
-/**
- * regmap_init_spi(): Initialise register map
- *
- * @spi: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_spi(struct spi_device *spi,
- const struct regmap_config *config)
-{
- return regmap_init(&spi->dev, &regmap_spi, config);
-}
-EXPORT_SYMBOL_GPL(regmap_init_spi);
-
-/**
- * devm_regmap_init_spi(): Initialise register map
- *
- * @spi: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. The map will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_spi(struct spi_device *spi,
- const struct regmap_config *config)
-{
- return devm_regmap_init(&spi->dev, &regmap_spi, config);
-}
-EXPORT_SYMBOL_GPL(devm_regmap_init_spi);
-
-MODULE_LICENSE("GPL");
diff --git a/ANDROID_3.4.5/drivers/base/regmap/regmap.c b/ANDROID_3.4.5/drivers/base/regmap/regmap.c
deleted file mode 100644
index bb80853f..00000000
--- a/ANDROID_3.4.5/drivers/base/regmap/regmap.c
+++ /dev/null
@@ -1,936 +0,0 @@
-/*
- * Register map access API
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/mutex.h>
-#include <linux/err.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/regmap.h>
-
-#include "internal.h"
-
-bool regmap_writeable(struct regmap *map, unsigned int reg)
-{
- if (map->max_register && reg > map->max_register)
- return false;
-
- if (map->writeable_reg)
- return map->writeable_reg(map->dev, reg);
-
- return true;
-}
-
-bool regmap_readable(struct regmap *map, unsigned int reg)
-{
- if (map->max_register && reg > map->max_register)
- return false;
-
- if (map->format.format_write)
- return false;
-
- if (map->readable_reg)
- return map->readable_reg(map->dev, reg);
-
- return true;
-}
-
-bool regmap_volatile(struct regmap *map, unsigned int reg)
-{
- if (!regmap_readable(map, reg))
- return false;
-
- if (map->volatile_reg)
- return map->volatile_reg(map->dev, reg);
-
- return true;
-}
-
-bool regmap_precious(struct regmap *map, unsigned int reg)
-{
- if (!regmap_readable(map, reg))
- return false;
-
- if (map->precious_reg)
- return map->precious_reg(map->dev, reg);
-
- return false;
-}
-
-static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
- unsigned int num)
-{
- unsigned int i;
-
- for (i = 0; i < num; i++)
- if (!regmap_volatile(map, reg + i))
- return false;
-
- return true;
-}
-
-static void regmap_format_2_6_write(struct regmap *map,
- unsigned int reg, unsigned int val)
-{
- u8 *out = map->work_buf;
-
- *out = (reg << 6) | val;
-}
-
-static void regmap_format_4_12_write(struct regmap *map,
- unsigned int reg, unsigned int val)
-{
- __be16 *out = map->work_buf;
- *out = cpu_to_be16((reg << 12) | val);
-}
-
-static void regmap_format_7_9_write(struct regmap *map,
- unsigned int reg, unsigned int val)
-{
- __be16 *out = map->work_buf;
- *out = cpu_to_be16((reg << 9) | val);
-}
-
-static void regmap_format_10_14_write(struct regmap *map,
- unsigned int reg, unsigned int val)
-{
- u8 *out = map->work_buf;
-
- out[2] = val;
- out[1] = (val >> 8) | (reg << 6);
- out[0] = reg >> 2;
-}
-
-static void regmap_format_8(void *buf, unsigned int val)
-{
- u8 *b = buf;
-
- b[0] = val;
-}
-
-static void regmap_format_16(void *buf, unsigned int val)
-{
- __be16 *b = buf;
-
- b[0] = cpu_to_be16(val);
-}
-
-static void regmap_format_32(void *buf, unsigned int val)
-{
- __be32 *b = buf;
-
- b[0] = cpu_to_be32(val);
-}
-
-static unsigned int regmap_parse_8(void *buf)
-{
- u8 *b = buf;
-
- return b[0];
-}
-
-static unsigned int regmap_parse_16(void *buf)
-{
- __be16 *b = buf;
-
- b[0] = be16_to_cpu(b[0]);
-
- return b[0];
-}
-
-static unsigned int regmap_parse_32(void *buf)
-{
- __be32 *b = buf;
-
- b[0] = be32_to_cpu(b[0]);
-
- return b[0];
-}
-
-/**
- * regmap_init(): Initialise register map
- *
- * @dev: Device that will be interacted with
- * @bus: Bus-specific callbacks to use with device
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap. This function should generally not be called
- * directly, it should be called by bus-specific init functions.
- */
-struct regmap *regmap_init(struct device *dev,
- const struct regmap_bus *bus,
- const struct regmap_config *config)
-{
- struct regmap *map;
- int ret = -EINVAL;
-
- if (!bus || !config)
- goto err;
-
- map = kzalloc(sizeof(*map), GFP_KERNEL);
- if (map == NULL) {
- ret = -ENOMEM;
- goto err;
- }
-
- mutex_init(&map->lock);
- map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
- map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
- map->format.pad_bytes = config->pad_bits / 8;
- map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
- map->format.buf_size += map->format.pad_bytes;
- map->dev = dev;
- map->bus = bus;
- map->max_register = config->max_register;
- map->writeable_reg = config->writeable_reg;
- map->readable_reg = config->readable_reg;
- map->volatile_reg = config->volatile_reg;
- map->precious_reg = config->precious_reg;
- map->cache_type = config->cache_type;
-
- if (config->read_flag_mask || config->write_flag_mask) {
- map->read_flag_mask = config->read_flag_mask;
- map->write_flag_mask = config->write_flag_mask;
- } else {
- map->read_flag_mask = bus->read_flag_mask;
- }
-
- switch (config->reg_bits) {
- case 2:
- switch (config->val_bits) {
- case 6:
- map->format.format_write = regmap_format_2_6_write;
- break;
- default:
- goto err_map;
- }
- break;
-
- case 4:
- switch (config->val_bits) {
- case 12:
- map->format.format_write = regmap_format_4_12_write;
- break;
- default:
- goto err_map;
- }
- break;
-
- case 7:
- switch (config->val_bits) {
- case 9:
- map->format.format_write = regmap_format_7_9_write;
- break;
- default:
- goto err_map;
- }
- break;
-
- case 10:
- switch (config->val_bits) {
- case 14:
- map->format.format_write = regmap_format_10_14_write;
- break;
- default:
- goto err_map;
- }
- break;
-
- case 8:
- map->format.format_reg = regmap_format_8;
- break;
-
- case 16:
- map->format.format_reg = regmap_format_16;
- break;
-
- case 32:
- map->format.format_reg = regmap_format_32;
- break;
-
- default:
- goto err_map;
- }
-
- switch (config->val_bits) {
- case 8:
- map->format.format_val = regmap_format_8;
- map->format.parse_val = regmap_parse_8;
- break;
- case 16:
- map->format.format_val = regmap_format_16;
- map->format.parse_val = regmap_parse_16;
- break;
- case 32:
- map->format.format_val = regmap_format_32;
- map->format.parse_val = regmap_parse_32;
- break;
- }
-
- if (!map->format.format_write &&
- !(map->format.format_reg && map->format.format_val))
- goto err_map;
-
- map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
- if (map->work_buf == NULL) {
- ret = -ENOMEM;
- goto err_map;
- }
-
- regmap_debugfs_init(map);
-
- ret = regcache_init(map, config);
- if (ret < 0)
- goto err_free_workbuf;
-
- return map;
-
-err_free_workbuf:
- kfree(map->work_buf);
-err_map:
- kfree(map);
-err:
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(regmap_init);
-
-static void devm_regmap_release(struct device *dev, void *res)
-{
- regmap_exit(*(struct regmap **)res);
-}
-
-/**
- * devm_regmap_init(): Initialise managed register map
- *
- * @dev: Device that will be interacted with
- * @bus: Bus-specific callbacks to use with device
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. This function should generally not be called
- * directly, it should be called by bus-specific init functions. The
- * map will be automatically freed by the device management code.
- */
-struct regmap *devm_regmap_init(struct device *dev,
- const struct regmap_bus *bus,
- const struct regmap_config *config)
-{
- struct regmap **ptr, *regmap;
-
- ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
-
- regmap = regmap_init(dev, bus, config);
- if (!IS_ERR(regmap)) {
- *ptr = regmap;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
-
- return regmap;
-}
-EXPORT_SYMBOL_GPL(devm_regmap_init);
-
-/**
- * regmap_reinit_cache(): Reinitialise the current register cache
- *
- * @map: Register map to operate on.
- * @config: New configuration. Only the cache data will be used.
- *
- * Discard any existing register cache for the map and initialize a
- * new cache. This can be used to restore the cache to defaults or to
- * update the cache configuration to reflect runtime discovery of the
- * hardware.
- */
-int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
-{
- int ret;
-
- mutex_lock(&map->lock);
-
- regcache_exit(map);
- regmap_debugfs_exit(map);
-
- map->max_register = config->max_register;
- map->writeable_reg = config->writeable_reg;
- map->readable_reg = config->readable_reg;
- map->volatile_reg = config->volatile_reg;
- map->precious_reg = config->precious_reg;
- map->cache_type = config->cache_type;
-
- regmap_debugfs_init(map);
-
- map->cache_bypass = false;
- map->cache_only = false;
-
- ret = regcache_init(map, config);
-
- mutex_unlock(&map->lock);
-
- return ret;
-}
-
-/**
- * regmap_exit(): Free a previously allocated register map
- */
-void regmap_exit(struct regmap *map)
-{
- regcache_exit(map);
- regmap_debugfs_exit(map);
- kfree(map->work_buf);
- kfree(map);
-}
-EXPORT_SYMBOL_GPL(regmap_exit);
-
-static int _regmap_raw_write(struct regmap *map, unsigned int reg,
- const void *val, size_t val_len)
-{
- u8 *u8 = map->work_buf;
- void *buf;
- int ret = -ENOTSUPP;
- size_t len;
- int i;
-
- /* Check for unwritable registers before we start */
- if (map->writeable_reg)
- for (i = 0; i < val_len / map->format.val_bytes; i++)
- if (!map->writeable_reg(map->dev, reg + i))
- return -EINVAL;
-
- if (!map->cache_bypass && map->format.parse_val) {
- unsigned int ival;
- int val_bytes = map->format.val_bytes;
- for (i = 0; i < val_len / val_bytes; i++) {
- memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
- ival = map->format.parse_val(map->work_buf);
- ret = regcache_write(map, reg + i, ival);
- if (ret) {
- dev_err(map->dev,
- "Error in caching of register: %u ret: %d\n",
- reg + i, ret);
- return ret;
- }
- }
- if (map->cache_only) {
- map->cache_dirty = true;
- return 0;
- }
- }
-
- map->format.format_reg(map->work_buf, reg);
-
- u8[0] |= map->write_flag_mask;
-
- trace_regmap_hw_write_start(map->dev, reg,
- val_len / map->format.val_bytes);
-
- /* If we're doing a single register write we can probably just
- * send the work_buf directly, otherwise try to do a gather
- * write.
- */
- if (val == (map->work_buf + map->format.pad_bytes +
- map->format.reg_bytes))
- ret = map->bus->write(map->dev, map->work_buf,
- map->format.reg_bytes +
- map->format.pad_bytes +
- val_len);
- else if (map->bus->gather_write)
- ret = map->bus->gather_write(map->dev, map->work_buf,
- map->format.reg_bytes +
- map->format.pad_bytes,
- val, val_len);
-
- /* If that didn't work fall back on linearising by hand. */
- if (ret == -ENOTSUPP) {
- len = map->format.reg_bytes + map->format.pad_bytes + val_len;
- buf = kzalloc(len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- memcpy(buf, map->work_buf, map->format.reg_bytes);
- memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
- val, val_len);
- ret = map->bus->write(map->dev, buf, len);
-
- kfree(buf);
- }
-
- trace_regmap_hw_write_done(map->dev, reg,
- val_len / map->format.val_bytes);
-
- return ret;
-}
-
-int _regmap_write(struct regmap *map, unsigned int reg,
- unsigned int val)
-{
- int ret;
- BUG_ON(!map->format.format_write && !map->format.format_val);
-
- if (!map->cache_bypass && map->format.format_write) {
- ret = regcache_write(map, reg, val);
- if (ret != 0)
- return ret;
- if (map->cache_only) {
- map->cache_dirty = true;
- return 0;
- }
- }
-
- trace_regmap_reg_write(map->dev, reg, val);
-
- if (map->format.format_write) {
- map->format.format_write(map, reg, val);
-
- trace_regmap_hw_write_start(map->dev, reg, 1);
-
- ret = map->bus->write(map->dev, map->work_buf,
- map->format.buf_size);
-
- trace_regmap_hw_write_done(map->dev, reg, 1);
-
- return ret;
- } else {
- map->format.format_val(map->work_buf + map->format.reg_bytes
- + map->format.pad_bytes, val);
- return _regmap_raw_write(map, reg,
- map->work_buf +
- map->format.reg_bytes +
- map->format.pad_bytes,
- map->format.val_bytes);
- }
-}
-
-/**
- * regmap_write(): Write a value to a single register
- *
- * @map: Register map to write to
- * @reg: Register to write to
- * @val: Value to be written
- *
- * A value of zero will be returned on success, a negative errno will
- * be returned in error cases.
- */
-int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
-{
- int ret;
-
- mutex_lock(&map->lock);
-
- ret = _regmap_write(map, reg, val);
-
- mutex_unlock(&map->lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(regmap_write);
-
-/**
- * regmap_raw_write(): Write raw values to one or more registers
- *
- * @map: Register map to write to
- * @reg: Initial register to write to
- * @val: Block of data to be written, laid out for direct transmission to the
- * device
- * @val_len: Length of data pointed to by val.
- *
- * This function is intended to be used for things like firmware
- * download where a large block of data needs to be transferred to the
- * device. No formatting will be done on the data provided.
- *
- * A value of zero will be returned on success, a negative errno will
- * be returned in error cases.
- */
-int regmap_raw_write(struct regmap *map, unsigned int reg,
- const void *val, size_t val_len)
-{
- int ret;
-
- mutex_lock(&map->lock);
-
- ret = _regmap_raw_write(map, reg, val, val_len);
-
- mutex_unlock(&map->lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(regmap_raw_write);
-
-/*
- * regmap_bulk_write(): Write multiple registers to the device
- *
- * @map: Register map to write to
- * @reg: First register to be write from
- * @val: Block of data to be written, in native register size for device
- * @val_count: Number of registers to write
- *
- * This function is intended to be used for writing a large block of
- * data to be device either in single transfer or multiple transfer.
- *
- * A value of zero will be returned on success, a negative errno will
- * be returned in error cases.
- */
-int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
- size_t val_count)
-{
- int ret = 0, i;
- size_t val_bytes = map->format.val_bytes;
- void *wval;
-
- if (!map->format.parse_val)
- return -EINVAL;
-
- mutex_lock(&map->lock);
-
- /* No formatting is require if val_byte is 1 */
- if (val_bytes == 1) {
- wval = (void *)val;
- } else {
- wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
- if (!wval) {
- ret = -ENOMEM;
- dev_err(map->dev, "Error in memory allocation\n");
- goto out;
- }
- for (i = 0; i < val_count * val_bytes; i += val_bytes)
- map->format.parse_val(wval + i);
- }
- ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
-
- if (val_bytes != 1)
- kfree(wval);
-
-out:
- mutex_unlock(&map->lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(regmap_bulk_write);
-
-static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
- unsigned int val_len)
-{
- u8 *u8 = map->work_buf;
- int ret;
-
- map->format.format_reg(map->work_buf, reg);
-
- /*
- * Some buses or devices flag reads by setting the high bits in the
- * register addresss; since it's always the high bits for all
- * current formats we can do this here rather than in
- * formatting. This may break if we get interesting formats.
- */
- u8[0] |= map->read_flag_mask;
-
- trace_regmap_hw_read_start(map->dev, reg,
- val_len / map->format.val_bytes);
-
- ret = map->bus->read(map->dev, map->work_buf,
- map->format.reg_bytes + map->format.pad_bytes,
- val, val_len);
-
- trace_regmap_hw_read_done(map->dev, reg,
- val_len / map->format.val_bytes);
-
- return ret;
-}
-
-static int _regmap_read(struct regmap *map, unsigned int reg,
- unsigned int *val)
-{
- int ret;
-
- if (!map->cache_bypass) {
- ret = regcache_read(map, reg, val);
- if (ret == 0)
- return 0;
- }
-
- if (!map->format.parse_val)
- return -EINVAL;
-
- if (map->cache_only)
- return -EBUSY;
-
- ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
- if (ret == 0) {
- *val = map->format.parse_val(map->work_buf);
- trace_regmap_reg_read(map->dev, reg, *val);
- }
-
- return ret;
-}
-
-/**
- * regmap_read(): Read a value from a single register
- *
- * @map: Register map to write to
- * @reg: Register to be read from
- * @val: Pointer to store read value
- *
- * A value of zero will be returned on success, a negative errno will
- * be returned in error cases.
- */
-int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
-{
- int ret;
-
- mutex_lock(&map->lock);
-
- ret = _regmap_read(map, reg, val);
-
- mutex_unlock(&map->lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(regmap_read);
-
-/**
- * regmap_raw_read(): Read raw data from the device
- *
- * @map: Register map to write to
- * @reg: First register to be read from
- * @val: Pointer to store read value
- * @val_len: Size of data to read
- *
- * A value of zero will be returned on success, a negative errno will
- * be returned in error cases.
- */
-int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
- size_t val_len)
-{
- size_t val_bytes = map->format.val_bytes;
- size_t val_count = val_len / val_bytes;
- unsigned int v;
- int ret, i;
-
- mutex_lock(&map->lock);
-
- if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
- map->cache_type == REGCACHE_NONE) {
- /* Physical block read if there's no cache involved */
- ret = _regmap_raw_read(map, reg, val, val_len);
-
- } else {
- /* Otherwise go word by word for the cache; should be low
- * cost as we expect to hit the cache.
- */
- for (i = 0; i < val_count; i++) {
- ret = _regmap_read(map, reg + i, &v);
- if (ret != 0)
- goto out;
-
- map->format.format_val(val + (i * val_bytes), v);
- }
- }
-
- out:
- mutex_unlock(&map->lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(regmap_raw_read);
-
-/**
- * regmap_bulk_read(): Read multiple registers from the device
- *
- * @map: Register map to write to
- * @reg: First register to be read from
- * @val: Pointer to store read value, in native register size for device
- * @val_count: Number of registers to read
- *
- * A value of zero will be returned on success, a negative errno will
- * be returned in error cases.
- */
-int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
- size_t val_count)
-{
- int ret, i;
- size_t val_bytes = map->format.val_bytes;
- bool vol = regmap_volatile_range(map, reg, val_count);
-
- if (!map->format.parse_val)
- return -EINVAL;
-
- if (vol || map->cache_type == REGCACHE_NONE) {
- ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
- if (ret != 0)
- return ret;
-
- for (i = 0; i < val_count * val_bytes; i += val_bytes)
- map->format.parse_val(val + i);
- } else {
- for (i = 0; i < val_count; i++) {
- unsigned int ival;
- ret = regmap_read(map, reg + i, &ival);
- if (ret != 0)
- return ret;
- memcpy(val + (i * val_bytes), &ival, val_bytes);
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(regmap_bulk_read);
-
-static int _regmap_update_bits(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val,
- bool *change)
-{
- int ret;
- unsigned int tmp, orig;
-
- mutex_lock(&map->lock);
-
- ret = _regmap_read(map, reg, &orig);
- if (ret != 0)
- goto out;
-
- tmp = orig & ~mask;
- tmp |= val & mask;
-
- if (tmp != orig) {
- ret = _regmap_write(map, reg, tmp);
- *change = true;
- } else {
- *change = false;
- }
-
-out:
- mutex_unlock(&map->lock);
-
- return ret;
-}
-
-/**
- * regmap_update_bits: Perform a read/modify/write cycle on the register map
- *
- * @map: Register map to update
- * @reg: Register to update
- * @mask: Bitmask to change
- * @val: New value for bitmask
- *
- * Returns zero for success, a negative number on error.
- */
-int regmap_update_bits(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val)
-{
- bool change;
- return _regmap_update_bits(map, reg, mask, val, &change);
-}
-EXPORT_SYMBOL_GPL(regmap_update_bits);
-
-/**
- * regmap_update_bits_check: Perform a read/modify/write cycle on the
- * register map and report if updated
- *
- * @map: Register map to update
- * @reg: Register to update
- * @mask: Bitmask to change
- * @val: New value for bitmask
- * @change: Boolean indicating if a write was done
- *
- * Returns zero for success, a negative number on error.
- */
-int regmap_update_bits_check(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val,
- bool *change)
-{
- return _regmap_update_bits(map, reg, mask, val, change);
-}
-EXPORT_SYMBOL_GPL(regmap_update_bits_check);
-
-/**
- * regmap_register_patch: Register and apply register updates to be applied
- * on device initialistion
- *
- * @map: Register map to apply updates to.
- * @regs: Values to update.
- * @num_regs: Number of entries in regs.
- *
- * Register a set of register updates to be applied to the device
- * whenever the device registers are synchronised with the cache and
- * apply them immediately. Typically this is used to apply
- * corrections to be applied to the device defaults on startup, such
- * as the updates some vendors provide to undocumented registers.
- */
-int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
- int num_regs)
-{
- int i, ret;
- bool bypass;
-
- /* If needed the implementation can be extended to support this */
- if (map->patch)
- return -EBUSY;
-
- mutex_lock(&map->lock);
-
- bypass = map->cache_bypass;
-
- map->cache_bypass = true;
-
- /* Write out first; it's useful to apply even if we fail later. */
- for (i = 0; i < num_regs; i++) {
- ret = _regmap_write(map, regs[i].reg, regs[i].def);
- if (ret != 0) {
- dev_err(map->dev, "Failed to write %x = %x: %d\n",
- regs[i].reg, regs[i].def, ret);
- goto out;
- }
- }
-
- map->patch = kcalloc(num_regs, sizeof(struct reg_default), GFP_KERNEL);
- if (map->patch != NULL) {
- memcpy(map->patch, regs,
- num_regs * sizeof(struct reg_default));
- map->patch_regs = num_regs;
- } else {
- ret = -ENOMEM;
- }
-
-out:
- map->cache_bypass = bypass;
-
- mutex_unlock(&map->lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(regmap_register_patch);
-
-/*
- * regmap_get_val_bytes(): Report the size of a register value
- *
- * Report the size of a register value, mainly intended to for use by
- * generic infrastructure built on top of regmap.
- */
-int regmap_get_val_bytes(struct regmap *map)
-{
- if (map->format.format_write)
- return -EINVAL;
-
- return map->format.val_bytes;
-}
-EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
-
-static int __init regmap_initcall(void)
-{
- regmap_debugfs_initcall();
-
- return 0;
-}
-postcore_initcall(regmap_initcall);
diff --git a/ANDROID_3.4.5/drivers/base/soc.c b/ANDROID_3.4.5/drivers/base/soc.c
deleted file mode 100644
index ba29b2e7..00000000
--- a/ANDROID_3.4.5/drivers/base/soc.c
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2011
- *
- * Author: Lee Jones <lee.jones@linaro.org> for ST-Ericsson.
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#include <linux/sysfs.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/stat.h>
-#include <linux/slab.h>
-#include <linux/idr.h>
-#include <linux/spinlock.h>
-#include <linux/sys_soc.h>
-#include <linux/err.h>
-
-static DEFINE_IDA(soc_ida);
-static DEFINE_SPINLOCK(soc_lock);
-
-static ssize_t soc_info_get(struct device *dev,
- struct device_attribute *attr,
- char *buf);
-
-struct soc_device {
- struct device dev;
- struct soc_device_attribute *attr;
- int soc_dev_num;
-};
-
-static struct bus_type soc_bus_type = {
- .name = "soc",
-};
-
-static DEVICE_ATTR(machine, S_IRUGO, soc_info_get, NULL);
-static DEVICE_ATTR(family, S_IRUGO, soc_info_get, NULL);
-static DEVICE_ATTR(soc_id, S_IRUGO, soc_info_get, NULL);
-static DEVICE_ATTR(revision, S_IRUGO, soc_info_get, NULL);
-
-struct device *soc_device_to_device(struct soc_device *soc_dev)
-{
- return &soc_dev->dev;
-}
-
-static mode_t soc_attribute_mode(struct kobject *kobj,
- struct attribute *attr,
- int index)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
-
- if ((attr == &dev_attr_machine.attr)
- && (soc_dev->attr->machine != NULL))
- return attr->mode;
- if ((attr == &dev_attr_family.attr)
- && (soc_dev->attr->family != NULL))
- return attr->mode;
- if ((attr == &dev_attr_revision.attr)
- && (soc_dev->attr->revision != NULL))
- return attr->mode;
- if ((attr == &dev_attr_soc_id.attr)
- && (soc_dev->attr->soc_id != NULL))
- return attr->mode;
-
- /* Unknown or unfilled attribute. */
- return 0;
-}
-
-static ssize_t soc_info_get(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
-
- if (attr == &dev_attr_machine)
- return sprintf(buf, "%s\n", soc_dev->attr->machine);
- if (attr == &dev_attr_family)
- return sprintf(buf, "%s\n", soc_dev->attr->family);
- if (attr == &dev_attr_revision)
- return sprintf(buf, "%s\n", soc_dev->attr->revision);
- if (attr == &dev_attr_soc_id)
- return sprintf(buf, "%s\n", soc_dev->attr->soc_id);
-
- return -EINVAL;
-
-}
-
-static struct attribute *soc_attr[] = {
- &dev_attr_machine.attr,
- &dev_attr_family.attr,
- &dev_attr_soc_id.attr,
- &dev_attr_revision.attr,
- NULL,
-};
-
-static const struct attribute_group soc_attr_group = {
- .attrs = soc_attr,
- .is_visible = soc_attribute_mode,
-};
-
-static const struct attribute_group *soc_attr_groups[] = {
- &soc_attr_group,
- NULL,
-};
-
-static void soc_release(struct device *dev)
-{
- struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
-
- kfree(soc_dev);
-}
-
-struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr)
-{
- struct soc_device *soc_dev;
- int ret;
-
- soc_dev = kzalloc(sizeof(*soc_dev), GFP_KERNEL);
- if (!soc_dev) {
- ret = -ENOMEM;
- goto out1;
- }
-
- /* Fetch a unique (reclaimable) SOC ID. */
- do {
- if (!ida_pre_get(&soc_ida, GFP_KERNEL)) {
- ret = -ENOMEM;
- goto out2;
- }
-
- spin_lock(&soc_lock);
- ret = ida_get_new(&soc_ida, &soc_dev->soc_dev_num);
- spin_unlock(&soc_lock);
-
- } while (ret == -EAGAIN);
-
- if (ret)
- goto out2;
-
- soc_dev->attr = soc_dev_attr;
- soc_dev->dev.bus = &soc_bus_type;
- soc_dev->dev.groups = soc_attr_groups;
- soc_dev->dev.release = soc_release;
-
- dev_set_name(&soc_dev->dev, "soc%d", soc_dev->soc_dev_num);
-
- ret = device_register(&soc_dev->dev);
- if (ret)
- goto out3;
-
- return soc_dev;
-
-out3:
- ida_remove(&soc_ida, soc_dev->soc_dev_num);
-out2:
- kfree(soc_dev);
-out1:
- return ERR_PTR(ret);
-}
-
-/* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
-void soc_device_unregister(struct soc_device *soc_dev)
-{
- ida_remove(&soc_ida, soc_dev->soc_dev_num);
-
- device_unregister(&soc_dev->dev);
-}
-
-static int __init soc_bus_register(void)
-{
- return bus_register(&soc_bus_type);
-}
-core_initcall(soc_bus_register);
-
-static void __exit soc_bus_unregister(void)
-{
- ida_destroy(&soc_ida);
-
- bus_unregister(&soc_bus_type);
-}
-module_exit(soc_bus_unregister);
diff --git a/ANDROID_3.4.5/drivers/base/sw_sync.c b/ANDROID_3.4.5/drivers/base/sw_sync.c
deleted file mode 100644
index b4d8529e..00000000
--- a/ANDROID_3.4.5/drivers/base/sw_sync.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * drivers/base/sw_sync.c
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/sw_sync.h>
-#include <linux/syscalls.h>
-#include <linux/uaccess.h>
-
-static int sw_sync_cmp(u32 a, u32 b)
-{
- if (a == b)
- return 0;
-
- return ((s32)a - (s32)b) < 0 ? -1 : 1;
-}
-
-struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value)
-{
- struct sw_sync_pt *pt;
-
- pt = (struct sw_sync_pt *)
- sync_pt_create(&obj->obj, sizeof(struct sw_sync_pt));
-
- pt->value = value;
-
- return (struct sync_pt *)pt;
-}
-EXPORT_SYMBOL(sw_sync_pt_create);
-
-static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt)
-{
- struct sw_sync_pt *pt = (struct sw_sync_pt *) sync_pt;
- struct sw_sync_timeline *obj =
- (struct sw_sync_timeline *)sync_pt->parent;
-
- return (struct sync_pt *) sw_sync_pt_create(obj, pt->value);
-}
-
-static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt)
-{
- struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
- struct sw_sync_timeline *obj =
- (struct sw_sync_timeline *)sync_pt->parent;
-
- return sw_sync_cmp(obj->value, pt->value) >= 0;
-}
-
-static int sw_sync_pt_compare(struct sync_pt *a, struct sync_pt *b)
-{
- struct sw_sync_pt *pt_a = (struct sw_sync_pt *)a;
- struct sw_sync_pt *pt_b = (struct sw_sync_pt *)b;
-
- return sw_sync_cmp(pt_a->value, pt_b->value);
-}
-
-static int sw_sync_fill_driver_data(struct sync_pt *sync_pt,
- void *data, int size)
-{
- struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
-
- if (size < sizeof(pt->value))
- return -ENOMEM;
-
- memcpy(data, &pt->value, sizeof(pt->value));
-
- return sizeof(pt->value);
-}
-
-static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline,
- char *str, int size)
-{
- struct sw_sync_timeline *timeline =
- (struct sw_sync_timeline *)sync_timeline;
- snprintf(str, size, "%d", timeline->value);
-}
-
-static void sw_sync_pt_value_str(struct sync_pt *sync_pt,
- char *str, int size)
-{
- struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
- snprintf(str, size, "%d", pt->value);
-}
-
-struct sync_timeline_ops sw_sync_timeline_ops = {
- .driver_name = "sw_sync",
- .dup = sw_sync_pt_dup,
- .has_signaled = sw_sync_pt_has_signaled,
- .compare = sw_sync_pt_compare,
- .fill_driver_data = sw_sync_fill_driver_data,
- .timeline_value_str = sw_sync_timeline_value_str,
- .pt_value_str = sw_sync_pt_value_str,
-};
-
-
-struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
-{
- struct sw_sync_timeline *obj = (struct sw_sync_timeline *)
- sync_timeline_create(&sw_sync_timeline_ops,
- sizeof(struct sw_sync_timeline),
- name);
-
- return obj;
-}
-EXPORT_SYMBOL(sw_sync_timeline_create);
-
-void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
-{
- obj->value += inc;
-
- sync_timeline_signal(&obj->obj);
-}
-EXPORT_SYMBOL(sw_sync_timeline_inc);
-
-#ifdef CONFIG_SW_SYNC_USER
-/* *WARNING*
- *
- * improper use of this can result in deadlocking kernel drivers from userspace.
- */
-
-/* opening sw_sync create a new sync obj */
-int sw_sync_open(struct inode *inode, struct file *file)
-{
- struct sw_sync_timeline *obj;
- char task_comm[TASK_COMM_LEN];
-
- get_task_comm(task_comm, current);
-
- obj = sw_sync_timeline_create(task_comm);
- if (obj == NULL)
- return -ENOMEM;
-
- file->private_data = obj;
-
- return 0;
-}
-
-int sw_sync_release(struct inode *inode, struct file *file)
-{
- struct sw_sync_timeline *obj = file->private_data;
- sync_timeline_destroy(&obj->obj);
- return 0;
-}
-
-long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj, unsigned long arg)
-{
- int fd = get_unused_fd();
- int err;
- struct sync_pt *pt;
- struct sync_fence *fence;
- struct sw_sync_create_fence_data data;
-
- if (fd < 0)
- return fd;
-
- if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
- err = -EFAULT;
- goto err;
- }
-
- pt = sw_sync_pt_create(obj, data.value);
- if (pt == NULL) {
- err = -ENOMEM;
- goto err;
- }
-
- data.name[sizeof(data.name) - 1] = '\0';
- fence = sync_fence_create(data.name, pt);
- if (fence == NULL) {
- sync_pt_free(pt);
- err = -ENOMEM;
- goto err;
- }
-
- data.fence = fd;
- if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- sync_fence_put(fence);
- err = -EFAULT;
- goto err;
- }
-
- sync_fence_install(fence, fd);
-
- return 0;
-
-err:
- put_unused_fd(fd);
- return err;
-}
-
-long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg)
-{
- u32 value;
-
- if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
- return -EFAULT;
-
- sw_sync_timeline_inc(obj, value);
-
- return 0;
-}
-
-long sw_sync_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct sw_sync_timeline *obj = file->private_data;
-
- switch (cmd) {
- case SW_SYNC_IOC_CREATE_FENCE:
- return sw_sync_ioctl_create_fence(obj, arg);
-
- case SW_SYNC_IOC_INC:
- return sw_sync_ioctl_inc(obj, arg);
-
- default:
- return -ENOTTY;
- }
-}
-
-static const struct file_operations sw_sync_fops = {
- .owner = THIS_MODULE,
- .open = sw_sync_open,
- .release = sw_sync_release,
- .unlocked_ioctl = sw_sync_ioctl,
-};
-
-static struct miscdevice sw_sync_dev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "sw_sync",
- .fops = &sw_sync_fops,
-};
-
-int __init sw_sync_device_init(void)
-{
- return misc_register(&sw_sync_dev);
-}
-
-void __exit sw_sync_device_remove(void)
-{
- misc_deregister(&sw_sync_dev);
-}
-
-module_init(sw_sync_device_init);
-module_exit(sw_sync_device_remove);
-
-#endif /* CONFIG_SW_SYNC_USER */
diff --git a/ANDROID_3.4.5/drivers/base/sync.c b/ANDROID_3.4.5/drivers/base/sync.c
deleted file mode 100644
index ac9a1af0..00000000
--- a/ANDROID_3.4.5/drivers/base/sync.c
+++ /dev/null
@@ -1,1046 +0,0 @@
-/*
- * drivers/base/sync.c
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/debugfs.h>
-#include <linux/export.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/sync.h>
-#include <linux/uaccess.h>
-
-#include <linux/anon_inodes.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/sync.h>
-
-/* Remove fence from sync_fence_list before release it. -- WonderMedia */
-#define FIX_SYNC_FENCE_LIST
-
-/* Limit number of pts per fence for speed. -- WonderMedia */
-#define SYNC_PTS_MAX 256
-
-#ifdef SYNC_PTS_MAX
-#include <linux/moduleparam.h>
-int sync_pts_max = SYNC_PTS_MAX;
-module_param(sync_pts_max, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
-MODULE_PARM_DESC(sync_pts_max, "Max pts per sync fence");
-#endif
-
-static void sync_fence_signal_pt(struct sync_pt *pt);
-static int _sync_pt_has_signaled(struct sync_pt *pt);
-static void sync_fence_free(struct kref *kref);
-static void sync_dump(void);
-
-static LIST_HEAD(sync_timeline_list_head);
-static DEFINE_SPINLOCK(sync_timeline_list_lock);
-
-static LIST_HEAD(sync_fence_list_head);
-static DEFINE_SPINLOCK(sync_fence_list_lock);
-
-struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
- int size, const char *name)
-{
- struct sync_timeline *obj;
- unsigned long flags;
-
- if (size < sizeof(struct sync_timeline))
- return NULL;
-
- obj = kzalloc(size, GFP_KERNEL);
- if (obj == NULL)
- return NULL;
-
- kref_init(&obj->kref);
- obj->ops = ops;
- strlcpy(obj->name, name, sizeof(obj->name));
-
- INIT_LIST_HEAD(&obj->child_list_head);
- spin_lock_init(&obj->child_list_lock);
-
- INIT_LIST_HEAD(&obj->active_list_head);
- spin_lock_init(&obj->active_list_lock);
-
- spin_lock_irqsave(&sync_timeline_list_lock, flags);
- list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
- spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
-
- return obj;
-}
-EXPORT_SYMBOL(sync_timeline_create);
-
-static void sync_timeline_free(struct kref *kref)
-{
- struct sync_timeline *obj =
- container_of(kref, struct sync_timeline, kref);
- unsigned long flags;
-
- if (obj->ops->release_obj)
- obj->ops->release_obj(obj);
-
- spin_lock_irqsave(&sync_timeline_list_lock, flags);
- list_del(&obj->sync_timeline_list);
- spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
-
- kfree(obj);
-}
-
-void sync_timeline_destroy(struct sync_timeline *obj)
-{
- obj->destroyed = true;
-
- /*
- * If this is not the last reference, signal any children
- * that their parent is going away.
- */
-
- if (!kref_put(&obj->kref, sync_timeline_free))
- sync_timeline_signal(obj);
-}
-EXPORT_SYMBOL(sync_timeline_destroy);
-
-static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
-{
- unsigned long flags;
-
- pt->parent = obj;
-
- spin_lock_irqsave(&obj->child_list_lock, flags);
- list_add_tail(&pt->child_list, &obj->child_list_head);
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-
-static void sync_timeline_remove_pt(struct sync_pt *pt)
-{
- struct sync_timeline *obj = pt->parent;
- unsigned long flags;
-
- spin_lock_irqsave(&obj->active_list_lock, flags);
- if (!list_empty(&pt->active_list))
- list_del_init(&pt->active_list);
- spin_unlock_irqrestore(&obj->active_list_lock, flags);
-
- spin_lock_irqsave(&obj->child_list_lock, flags);
- if (!list_empty(&pt->child_list)) {
- list_del_init(&pt->child_list);
- }
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-
-void sync_timeline_signal(struct sync_timeline *obj)
-{
- unsigned long flags;
- LIST_HEAD(signaled_pts);
- struct list_head *pos, *n;
-
- trace_sync_timeline(obj);
-
- spin_lock_irqsave(&obj->active_list_lock, flags);
-
- list_for_each_safe(pos, n, &obj->active_list_head) {
- struct sync_pt *pt =
- container_of(pos, struct sync_pt, active_list);
-
- if (_sync_pt_has_signaled(pt)) {
- list_del_init(pos);
- list_add(&pt->signaled_list, &signaled_pts);
- kref_get(&pt->fence->kref);
- }
- }
-
- spin_unlock_irqrestore(&obj->active_list_lock, flags);
-
- list_for_each_safe(pos, n, &signaled_pts) {
- struct sync_pt *pt =
- container_of(pos, struct sync_pt, signaled_list);
-
- list_del_init(pos);
- sync_fence_signal_pt(pt);
- kref_put(&pt->fence->kref, sync_fence_free);
- }
-}
-EXPORT_SYMBOL(sync_timeline_signal);
-
-struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
-{
- struct sync_pt *pt;
-
- if (size < sizeof(struct sync_pt))
- return NULL;
-
- pt = kzalloc(size, GFP_KERNEL);
- if (pt == NULL)
- return NULL;
-
- INIT_LIST_HEAD(&pt->active_list);
- kref_get(&parent->kref);
- sync_timeline_add_pt(parent, pt);
-
- return pt;
-}
-EXPORT_SYMBOL(sync_pt_create);
-
-void sync_pt_free(struct sync_pt *pt)
-{
- if (pt->parent->ops->free_pt)
- pt->parent->ops->free_pt(pt);
-
- sync_timeline_remove_pt(pt);
-
- kref_put(&pt->parent->kref, sync_timeline_free);
-
- kfree(pt);
-}
-EXPORT_SYMBOL(sync_pt_free);
-
-/* call with pt->parent->active_list_lock held */
-static int _sync_pt_has_signaled(struct sync_pt *pt)
-{
- int old_status = pt->status;
-
- if (!pt->status)
- pt->status = pt->parent->ops->has_signaled(pt);
-
- if (!pt->status && pt->parent->destroyed)
- pt->status = -ENOENT;
-
- if (pt->status != old_status)
- pt->timestamp = ktime_get();
-
- return pt->status;
-}
-
-static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
-{
- return pt->parent->ops->dup(pt);
-}
-
-/* Adds a sync pt to the active queue. Called when added to a fence */
-static void sync_pt_activate(struct sync_pt *pt)
-{
- struct sync_timeline *obj = pt->parent;
- unsigned long flags;
- int err;
-
- spin_lock_irqsave(&obj->active_list_lock, flags);
-
- err = _sync_pt_has_signaled(pt);
- if (err != 0)
- goto out;
-
- list_add_tail(&pt->active_list, &obj->active_list_head);
-
-out:
- spin_unlock_irqrestore(&obj->active_list_lock, flags);
-}
-
-static int sync_fence_release(struct inode *inode, struct file *file);
-static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
-static long sync_fence_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg);
-
-
-static const struct file_operations sync_fence_fops = {
- .release = sync_fence_release,
- .poll = sync_fence_poll,
- .unlocked_ioctl = sync_fence_ioctl,
-};
-
-static struct sync_fence *sync_fence_alloc(const char *name)
-{
- struct sync_fence *fence;
- unsigned long flags;
-
- fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
- if (fence == NULL)
- return NULL;
-
- fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
- fence, 0);
- if (fence->file == NULL)
- goto err;
-
- kref_init(&fence->kref);
- strlcpy(fence->name, name, sizeof(fence->name));
-
- INIT_LIST_HEAD(&fence->pt_list_head);
- INIT_LIST_HEAD(&fence->waiter_list_head);
- spin_lock_init(&fence->waiter_list_lock);
-
- init_waitqueue_head(&fence->wq);
-
- spin_lock_irqsave(&sync_fence_list_lock, flags);
- list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
- spin_unlock_irqrestore(&sync_fence_list_lock, flags);
-
- return fence;
-
-err:
- kfree(fence);
- return NULL;
-}
-
-/* TODO: implement a create which takes more that one sync_pt */
-struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
-{
- struct sync_fence *fence;
-
- if (pt->fence)
- return NULL;
-
- fence = sync_fence_alloc(name);
- if (fence == NULL)
- return NULL;
-
- pt->fence = fence;
- list_add(&pt->pt_list, &fence->pt_list_head);
- sync_pt_activate(pt);
-
- /*
- * signal the fence in case pt was activated before
- * sync_pt_activate(pt) was called
- */
- sync_fence_signal_pt(pt);
-
- return fence;
-}
-EXPORT_SYMBOL(sync_fence_create);
-
-static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
-{
- struct list_head *pos;
-#ifdef SYNC_PTS_MAX
- int num_pts = 0;
-#endif
-
- list_for_each(pos, &src->pt_list_head) {
- struct sync_pt *orig_pt =
- container_of(pos, struct sync_pt, pt_list);
- struct sync_pt *new_pt = sync_pt_dup(orig_pt);
-
- if (new_pt == NULL)
- return -ENOMEM;
-
- new_pt->fence = dst;
- list_add(&new_pt->pt_list, &dst->pt_list_head);
-#ifdef SYNC_PTS_MAX
- num_pts++;
-#endif
- }
-#ifdef SYNC_PTS_MAX
- if (sync_pts_max && num_pts >= sync_pts_max) {
- printk(KERN_ERR "too many pts per sync fence! %d\n", num_pts);
- return -ENOMEM;
- }
-#endif
-
- return 0;
-}
-
-static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
-{
- struct list_head *src_pos, *dst_pos, *n;
-
- list_for_each(src_pos, &src->pt_list_head) {
- struct sync_pt *src_pt =
- container_of(src_pos, struct sync_pt, pt_list);
- bool collapsed = false;
-
- list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
- struct sync_pt *dst_pt =
- container_of(dst_pos, struct sync_pt, pt_list);
- /* collapse two sync_pts on the same timeline
- * to a single sync_pt that will signal at
- * the later of the two
- */
- if (dst_pt->parent == src_pt->parent) {
- if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) {
- struct sync_pt *new_pt =
- sync_pt_dup(src_pt);
- if (new_pt == NULL)
- return -ENOMEM;
-
- new_pt->fence = dst;
- list_replace(&dst_pt->pt_list,
- &new_pt->pt_list);
- sync_pt_free(dst_pt);
- }
- collapsed = true;
- break;
- }
- }
-
- if (!collapsed) {
- struct sync_pt *new_pt = sync_pt_dup(src_pt);
-
- if (new_pt == NULL)
- return -ENOMEM;
-
- new_pt->fence = dst;
- list_add(&new_pt->pt_list, &dst->pt_list_head);
- }
- }
-
- return 0;
-}
-
-static void sync_fence_detach_pts(struct sync_fence *fence)
-{
- struct list_head *pos, *n;
-
- list_for_each_safe(pos, n, &fence->pt_list_head) {
- struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
- sync_timeline_remove_pt(pt);
- }
-}
-
-static void sync_fence_free_pts(struct sync_fence *fence)
-{
- struct list_head *pos, *n;
-
- list_for_each_safe(pos, n, &fence->pt_list_head) {
- struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
- sync_pt_free(pt);
- }
-}
-
-struct sync_fence *sync_fence_fdget(int fd)
-{
- struct file *file = fget(fd);
-
- if (file == NULL)
- return NULL;
-
- if (file->f_op != &sync_fence_fops)
- goto err;
-
- return file->private_data;
-
-err:
- fput(file);
- return NULL;
-}
-EXPORT_SYMBOL(sync_fence_fdget);
-
-void sync_fence_put(struct sync_fence *fence)
-{
- fput(fence->file);
-}
-EXPORT_SYMBOL(sync_fence_put);
-
-void sync_fence_install(struct sync_fence *fence, int fd)
-{
- fd_install(fd, fence->file);
-}
-EXPORT_SYMBOL(sync_fence_install);
-
-static int sync_fence_get_status(struct sync_fence *fence)
-{
- struct list_head *pos;
- int status = 1;
-
- list_for_each(pos, &fence->pt_list_head) {
- struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
- int pt_status = pt->status;
-
- if (pt_status < 0) {
- status = pt_status;
- break;
- } else if (status == 1) {
- status = pt_status;
- }
- }
-
- return status;
-}
-
-struct sync_fence *sync_fence_merge(const char *name,
- struct sync_fence *a, struct sync_fence *b)
-{
- struct sync_fence *fence;
- struct list_head *pos;
- int err;
-#ifdef FIX_SYNC_FENCE_LIST
- unsigned long flags;
-#endif
-
- fence = sync_fence_alloc(name);
- if (fence == NULL)
- return NULL;
-
- err = sync_fence_copy_pts(fence, a);
- if (err < 0)
- goto err;
-
- err = sync_fence_merge_pts(fence, b);
- if (err < 0)
- goto err;
-
- list_for_each(pos, &fence->pt_list_head) {
- struct sync_pt *pt =
- container_of(pos, struct sync_pt, pt_list);
- sync_pt_activate(pt);
- }
-
- /*
- * signal the fence in case one of it's pts were activated before
- * they were activated
- */
- sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
- struct sync_pt,
- pt_list));
-
- return fence;
-err:
-#ifdef FIX_SYNC_FENCE_LIST
- spin_lock_irqsave(&sync_fence_list_lock, flags);
- list_del(&fence->sync_fence_list);
- spin_unlock_irqrestore(&sync_fence_list_lock, flags);
-#endif
- sync_fence_free_pts(fence);
- kfree(fence);
- return NULL;
-}
-EXPORT_SYMBOL(sync_fence_merge);
-
-static void sync_fence_signal_pt(struct sync_pt *pt)
-{
- LIST_HEAD(signaled_waiters);
- struct sync_fence *fence = pt->fence;
- struct list_head *pos;
- struct list_head *n;
- unsigned long flags;
- int status;
-
- status = sync_fence_get_status(fence);
-
- spin_lock_irqsave(&fence->waiter_list_lock, flags);
- /*
- * this should protect against two threads racing on the signaled
- * false -> true transition
- */
- if (status && !fence->status) {
- list_for_each_safe(pos, n, &fence->waiter_list_head)
- list_move(pos, &signaled_waiters);
-
- fence->status = status;
- } else {
- status = 0;
- }
- spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
-
- if (status) {
- list_for_each_safe(pos, n, &signaled_waiters) {
- struct sync_fence_waiter *waiter =
- container_of(pos, struct sync_fence_waiter,
- waiter_list);
-
- list_del(pos);
- waiter->callback(fence, waiter);
- }
- wake_up(&fence->wq);
- }
-}
-
-int sync_fence_wait_async(struct sync_fence *fence,
- struct sync_fence_waiter *waiter)
-{
- unsigned long flags;
- int err = 0;
-
- spin_lock_irqsave(&fence->waiter_list_lock, flags);
-
- if (fence->status) {
- err = fence->status;
- goto out;
- }
-
- list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
-out:
- spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
-
- return err;
-}
-EXPORT_SYMBOL(sync_fence_wait_async);
-
-int sync_fence_cancel_async(struct sync_fence *fence,
- struct sync_fence_waiter *waiter)
-{
- struct list_head *pos;
- struct list_head *n;
- unsigned long flags;
- int ret = -ENOENT;
-
- spin_lock_irqsave(&fence->waiter_list_lock, flags);
- /*
- * Make sure waiter is still in waiter_list because it is possible for
- * the waiter to be removed from the list while the callback is still
- * pending.
- */
- list_for_each_safe(pos, n, &fence->waiter_list_head) {
- struct sync_fence_waiter *list_waiter =
- container_of(pos, struct sync_fence_waiter,
- waiter_list);
- if (list_waiter == waiter) {
- list_del(pos);
- ret = 0;
- break;
- }
- }
- spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(sync_fence_cancel_async);
-
-static bool sync_fence_check(struct sync_fence *fence)
-{
- /*
- * Make sure that reads to fence->status are ordered with the
- * wait queue event triggering
- */
- smp_rmb();
- return fence->status != 0;
-}
-
-int sync_fence_wait(struct sync_fence *fence, long timeout)
-{
- int err = 0;
- struct sync_pt *pt;
-
- trace_sync_wait(fence, 1);
- list_for_each_entry(pt, &fence->pt_list_head, pt_list)
- trace_sync_pt(pt);
-
- if (timeout > 0) {
- timeout = msecs_to_jiffies(timeout);
- err = wait_event_interruptible_timeout(fence->wq,
- sync_fence_check(fence),
- timeout);
- } else if (timeout < 0) {
- err = wait_event_interruptible(fence->wq,
- sync_fence_check(fence));
- }
- trace_sync_wait(fence, 0);
-
- if (err < 0)
- return err;
-
- if (fence->status < 0) {
- pr_info("fence error %d on [%p]\n", fence->status, fence);
- sync_dump();
- return fence->status;
- }
-
- if (fence->status == 0) {
- pr_info("fence timeout on [%p] after %dms\n", fence,
- jiffies_to_msecs(timeout));
- sync_dump();
- return -ETIME;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(sync_fence_wait);
-
-static void sync_fence_free(struct kref *kref)
-{
- struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
-
- sync_fence_free_pts(fence);
-
- kfree(fence);
-}
-
-static int sync_fence_release(struct inode *inode, struct file *file)
-{
- struct sync_fence *fence = file->private_data;
- unsigned long flags;
-
- /*
- * We need to remove all ways to access this fence before droping
- * our ref.
- *
- * start with its membership in the global fence list
- */
- spin_lock_irqsave(&sync_fence_list_lock, flags);
- list_del(&fence->sync_fence_list);
- spin_unlock_irqrestore(&sync_fence_list_lock, flags);
-
- /*
- * remove its pts from their parents so that sync_timeline_signal()
- * can't reference the fence.
- */
- sync_fence_detach_pts(fence);
-
- kref_put(&fence->kref, sync_fence_free);
-
- return 0;
-}
-
-static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
-{
- struct sync_fence *fence = file->private_data;
-
- poll_wait(file, &fence->wq, wait);
-
- /*
- * Make sure that reads to fence->status are ordered with the
- * wait queue event triggering
- */
- smp_rmb();
-
- if (fence->status == 1)
- return POLLIN;
- else if (fence->status < 0)
- return POLLERR;
- else
- return 0;
-}
-
-static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
-{
- __s32 value;
-
- if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
- return -EFAULT;
-
- return sync_fence_wait(fence, value);
-}
-
-static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
-{
- int fd = get_unused_fd();
- int err;
- struct sync_fence *fence2, *fence3;
- struct sync_merge_data data;
-
- if (fd < 0)
- return fd;
-
- if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
- err = -EFAULT;
- goto err_put_fd;
- }
-
- fence2 = sync_fence_fdget(data.fd2);
- if (fence2 == NULL) {
- err = -ENOENT;
- goto err_put_fd;
- }
-
- data.name[sizeof(data.name) - 1] = '\0';
- fence3 = sync_fence_merge(data.name, fence, fence2);
- if (fence3 == NULL) {
- err = -ENOMEM;
- goto err_put_fence2;
- }
-
- data.fence = fd;
- if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- err = -EFAULT;
- goto err_put_fence3;
- }
-
- sync_fence_install(fence3, fd);
- sync_fence_put(fence2);
- return 0;
-
-err_put_fence3:
- sync_fence_put(fence3);
-
-err_put_fence2:
- sync_fence_put(fence2);
-
-err_put_fd:
- put_unused_fd(fd);
- return err;
-}
-
-static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
-{
- struct sync_pt_info *info = data;
- int ret;
-
- if (size < sizeof(struct sync_pt_info))
- return -ENOMEM;
-
- info->len = sizeof(struct sync_pt_info);
-
- if (pt->parent->ops->fill_driver_data) {
- ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
- size - sizeof(*info));
- if (ret < 0)
- return ret;
-
- info->len += ret;
- }
-
- strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
- strlcpy(info->driver_name, pt->parent->ops->driver_name,
- sizeof(info->driver_name));
- info->status = pt->status;
- info->timestamp_ns = ktime_to_ns(pt->timestamp);
-
- return info->len;
-}
-
-static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
- unsigned long arg)
-{
- struct sync_fence_info_data *data;
- struct list_head *pos;
- __u32 size;
- __u32 len = 0;
- int ret;
-
- if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
- return -EFAULT;
-
- if (size < sizeof(struct sync_fence_info_data))
- return -EINVAL;
-
- if (size > 4096)
- size = 4096;
-
- data = kzalloc(size, GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- strlcpy(data->name, fence->name, sizeof(data->name));
- data->status = fence->status;
- len = sizeof(struct sync_fence_info_data);
-
- list_for_each(pos, &fence->pt_list_head) {
- struct sync_pt *pt =
- container_of(pos, struct sync_pt, pt_list);
-
- ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
-
- if (ret < 0)
- goto out;
-
- len += ret;
- }
-
- data->len = len;
-
- if (copy_to_user((void __user *)arg, data, len))
- ret = -EFAULT;
- else
- ret = 0;
-
-out:
- kfree(data);
-
- return ret;
-}
-
-static long sync_fence_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct sync_fence *fence = file->private_data;
- switch (cmd) {
- case SYNC_IOC_WAIT:
- return sync_fence_ioctl_wait(fence, arg);
-
- case SYNC_IOC_MERGE:
- return sync_fence_ioctl_merge(fence, arg);
-
- case SYNC_IOC_FENCE_INFO:
- return sync_fence_ioctl_fence_info(fence, arg);
-
- default:
- return -ENOTTY;
- }
-}
-
-#ifdef CONFIG_DEBUG_FS
-static const char *sync_status_str(int status)
-{
- if (status > 0)
- return "signaled";
- else if (status == 0)
- return "active";
- else
- return "error";
-}
-
-static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
-{
- int status = pt->status;
- seq_printf(s, " %s%spt %s",
- fence ? pt->parent->name : "",
- fence ? "_" : "",
- sync_status_str(status));
- if (pt->status) {
- struct timeval tv = ktime_to_timeval(pt->timestamp);
- seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
- }
-
- if (pt->parent->ops->timeline_value_str &&
- pt->parent->ops->pt_value_str) {
- char value[64];
- pt->parent->ops->pt_value_str(pt, value, sizeof(value));
- seq_printf(s, ": %s", value);
- if (fence) {
- pt->parent->ops->timeline_value_str(pt->parent, value,
- sizeof(value));
- seq_printf(s, " / %s", value);
- }
- } else if (pt->parent->ops->print_pt) {
- seq_printf(s, ": ");
- pt->parent->ops->print_pt(s, pt);
- }
-
- seq_printf(s, "\n");
-}
-
-static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
-{
- struct list_head *pos;
- unsigned long flags;
-
- seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
-
- if (obj->ops->timeline_value_str) {
- char value[64];
- obj->ops->timeline_value_str(obj, value, sizeof(value));
- seq_printf(s, ": %s", value);
- } else if (obj->ops->print_obj) {
- seq_printf(s, ": ");
- obj->ops->print_obj(s, obj);
- }
-
- seq_printf(s, "\n");
-
- spin_lock_irqsave(&obj->child_list_lock, flags);
- list_for_each(pos, &obj->child_list_head) {
- struct sync_pt *pt =
- container_of(pos, struct sync_pt, child_list);
- sync_print_pt(s, pt, false);
- }
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-
-static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
-{
- struct list_head *pos;
- unsigned long flags;
-
- seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
- sync_status_str(fence->status));
-
- list_for_each(pos, &fence->pt_list_head) {
- struct sync_pt *pt =
- container_of(pos, struct sync_pt, pt_list);
- sync_print_pt(s, pt, true);
- }
-
- spin_lock_irqsave(&fence->waiter_list_lock, flags);
- list_for_each(pos, &fence->waiter_list_head) {
- struct sync_fence_waiter *waiter =
- container_of(pos, struct sync_fence_waiter,
- waiter_list);
-
- seq_printf(s, "waiter %pF\n", waiter->callback);
- }
- spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
-}
-
-static int sync_debugfs_show(struct seq_file *s, void *unused)
-{
- unsigned long flags;
- struct list_head *pos;
-
- seq_printf(s, "objs:\n--------------\n");
-
- spin_lock_irqsave(&sync_timeline_list_lock, flags);
- list_for_each(pos, &sync_timeline_list_head) {
- struct sync_timeline *obj =
- container_of(pos, struct sync_timeline,
- sync_timeline_list);
-
- sync_print_obj(s, obj);
- seq_printf(s, "\n");
- }
- spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
-
- seq_printf(s, "fences:\n--------------\n");
-
- spin_lock_irqsave(&sync_fence_list_lock, flags);
- list_for_each(pos, &sync_fence_list_head) {
- struct sync_fence *fence =
- container_of(pos, struct sync_fence, sync_fence_list);
-
- sync_print_fence(s, fence);
- seq_printf(s, "\n");
- }
- spin_unlock_irqrestore(&sync_fence_list_lock, flags);
- return 0;
-}
-
-static int sync_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sync_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations sync_debugfs_fops = {
- .open = sync_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static __init int sync_debugfs_init(void)
-{
- debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
- return 0;
-}
-late_initcall(sync_debugfs_init);
-
-#define DUMP_CHUNK 256
-static char sync_dump_buf[64 * 1024];
-void sync_dump(void)
-{
- struct seq_file s = {
- .buf = sync_dump_buf,
- .size = sizeof(sync_dump_buf) - 1,
- };
- int i;
-
- sync_debugfs_show(&s, NULL);
-
- for (i = 0; i < s.count; i += DUMP_CHUNK) {
- if ((s.count - i) > DUMP_CHUNK) {
- char c = s.buf[i + DUMP_CHUNK];
- s.buf[i + DUMP_CHUNK] = 0;
- pr_cont("%s", s.buf + i);
- s.buf[i + DUMP_CHUNK] = c;
- } else {
- s.buf[s.count] = 0;
- pr_cont("%s", s.buf + i);
- }
- }
-}
-#else
-static void sync_dump(void)
-{
-}
-#endif
diff --git a/ANDROID_3.4.5/drivers/base/syscore.c b/ANDROID_3.4.5/drivers/base/syscore.c
deleted file mode 100644
index e8d11b66..00000000
--- a/ANDROID_3.4.5/drivers/base/syscore.c
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * syscore.c - Execution of system core operations.
- *
- * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- *
- * This file is released under the GPLv2.
- */
-
-#include <linux/syscore_ops.h>
-#include <linux/mutex.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-static LIST_HEAD(syscore_ops_list);
-static DEFINE_MUTEX(syscore_ops_lock);
-
-/**
- * register_syscore_ops - Register a set of system core operations.
- * @ops: System core operations to register.
- */
-void register_syscore_ops(struct syscore_ops *ops)
-{
- mutex_lock(&syscore_ops_lock);
- list_add_tail(&ops->node, &syscore_ops_list);
- mutex_unlock(&syscore_ops_lock);
-}
-EXPORT_SYMBOL_GPL(register_syscore_ops);
-
-/**
- * unregister_syscore_ops - Unregister a set of system core operations.
- * @ops: System core operations to unregister.
- */
-void unregister_syscore_ops(struct syscore_ops *ops)
-{
- mutex_lock(&syscore_ops_lock);
- list_del(&ops->node);
- mutex_unlock(&syscore_ops_lock);
-}
-EXPORT_SYMBOL_GPL(unregister_syscore_ops);
-
-#ifdef CONFIG_PM_SLEEP
-/**
- * syscore_suspend - Execute all the registered system core suspend callbacks.
- *
- * This function is executed with one CPU on-line and disabled interrupts.
- */
-int syscore_suspend(void)
-{
- struct syscore_ops *ops;
- int ret = 0;
-
- pr_debug("Checking wakeup interrupts\n");
-
- /* Return error code if there are any wakeup interrupts pending. */
- ret = check_wakeup_irqs();
- if (ret)
- return ret;
-
- WARN_ONCE(!irqs_disabled(),
- "Interrupts enabled before system core suspend.\n");
-
- list_for_each_entry_reverse(ops, &syscore_ops_list, node)
- if (ops->suspend) {
- if (initcall_debug)
- pr_info("PM: Calling %pF\n", ops->suspend);
- ret = ops->suspend();
- if (ret)
- goto err_out;
- WARN_ONCE(!irqs_disabled(),
- "Interrupts enabled after %pF\n", ops->suspend);
- }
-
- return 0;
-
- err_out:
- pr_err("PM: System core suspend callback %pF failed.\n", ops->suspend);
-
- list_for_each_entry_continue(ops, &syscore_ops_list, node)
- if (ops->resume)
- ops->resume();
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(syscore_suspend);
-
-/**
- * syscore_resume - Execute all the registered system core resume callbacks.
- *
- * This function is executed with one CPU on-line and disabled interrupts.
- */
-void syscore_resume(void)
-{
- struct syscore_ops *ops;
-
- WARN_ONCE(!irqs_disabled(),
- "Interrupts enabled before system core resume.\n");
-
- list_for_each_entry(ops, &syscore_ops_list, node)
- if (ops->resume) {
- if (initcall_debug)
- pr_info("PM: Calling %pF\n", ops->resume);
- ops->resume();
- WARN_ONCE(!irqs_disabled(),
- "Interrupts enabled after %pF\n", ops->resume);
- }
-}
-EXPORT_SYMBOL_GPL(syscore_resume);
-#endif /* CONFIG_PM_SLEEP */
-
-/**
- * syscore_shutdown - Execute all the registered system core shutdown callbacks.
- */
-void syscore_shutdown(void)
-{
- struct syscore_ops *ops;
-
- mutex_lock(&syscore_ops_lock);
-
- list_for_each_entry_reverse(ops, &syscore_ops_list, node)
- if (ops->shutdown) {
- if (initcall_debug)
- pr_info("PM: Calling %pF\n", ops->shutdown);
- ops->shutdown();
- }
-
- mutex_unlock(&syscore_ops_lock);
-}
diff --git a/ANDROID_3.4.5/drivers/base/topology.c b/ANDROID_3.4.5/drivers/base/topology.c
deleted file mode 100644
index ae989c57..00000000
--- a/ANDROID_3.4.5/drivers/base/topology.c
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * driver/base/topology.c - Populate sysfs with cpu topology information
- *
- * Written by: Zhang Yanmin, Intel Corporation
- *
- * Copyright (C) 2006, Intel Corp.
- *
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/cpu.h>
-#include <linux/module.h>
-#include <linux/hardirq.h>
-#include <linux/topology.h>
-
-#define define_one_ro_named(_name, _func) \
- static DEVICE_ATTR(_name, 0444, _func, NULL)
-
-#define define_one_ro(_name) \
- static DEVICE_ATTR(_name, 0444, show_##_name, NULL)
-
-#define define_id_show_func(name) \
-static ssize_t show_##name(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- unsigned int cpu = dev->id; \
- return sprintf(buf, "%d\n", topology_##name(cpu)); \
-}
-
-#if defined(topology_thread_cpumask) || defined(topology_core_cpumask) || \
- defined(topology_book_cpumask)
-static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf)
-{
- ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
- int n = 0;
-
- if (len > 1) {
- n = type?
- cpulist_scnprintf(buf, len-2, mask) :
- cpumask_scnprintf(buf, len-2, mask);
- buf[n++] = '\n';
- buf[n] = '\0';
- }
- return n;
-}
-#endif
-
-#ifdef arch_provides_topology_pointers
-#define define_siblings_show_map(name) \
-static ssize_t show_##name(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- unsigned int cpu = dev->id; \
- return show_cpumap(0, topology_##name(cpu), buf); \
-}
-
-#define define_siblings_show_list(name) \
-static ssize_t show_##name##_list(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- unsigned int cpu = dev->id; \
- return show_cpumap(1, topology_##name(cpu), buf); \
-}
-
-#else
-#define define_siblings_show_map(name) \
-static ssize_t show_##name(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- return show_cpumap(0, topology_##name(dev->id), buf); \
-}
-
-#define define_siblings_show_list(name) \
-static ssize_t show_##name##_list(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- return show_cpumap(1, topology_##name(dev->id), buf); \
-}
-#endif
-
-#define define_siblings_show_func(name) \
- define_siblings_show_map(name); define_siblings_show_list(name)
-
-define_id_show_func(physical_package_id);
-define_one_ro(physical_package_id);
-
-define_id_show_func(core_id);
-define_one_ro(core_id);
-
-define_siblings_show_func(thread_cpumask);
-define_one_ro_named(thread_siblings, show_thread_cpumask);
-define_one_ro_named(thread_siblings_list, show_thread_cpumask_list);
-
-define_siblings_show_func(core_cpumask);
-define_one_ro_named(core_siblings, show_core_cpumask);
-define_one_ro_named(core_siblings_list, show_core_cpumask_list);
-
-#ifdef CONFIG_SCHED_BOOK
-define_id_show_func(book_id);
-define_one_ro(book_id);
-define_siblings_show_func(book_cpumask);
-define_one_ro_named(book_siblings, show_book_cpumask);
-define_one_ro_named(book_siblings_list, show_book_cpumask_list);
-#endif
-
-static struct attribute *default_attrs[] = {
- &dev_attr_physical_package_id.attr,
- &dev_attr_core_id.attr,
- &dev_attr_thread_siblings.attr,
- &dev_attr_thread_siblings_list.attr,
- &dev_attr_core_siblings.attr,
- &dev_attr_core_siblings_list.attr,
-#ifdef CONFIG_SCHED_BOOK
- &dev_attr_book_id.attr,
- &dev_attr_book_siblings.attr,
- &dev_attr_book_siblings_list.attr,
-#endif
- NULL
-};
-
-static struct attribute_group topology_attr_group = {
- .attrs = default_attrs,
- .name = "topology"
-};
-
-/* Add/Remove cpu_topology interface for CPU device */
-static int __cpuinit topology_add_dev(unsigned int cpu)
-{
- struct device *dev = get_cpu_device(cpu);
-
- return sysfs_create_group(&dev->kobj, &topology_attr_group);
-}
-
-static void __cpuinit topology_remove_dev(unsigned int cpu)
-{
- struct device *dev = get_cpu_device(cpu);
-
- sysfs_remove_group(&dev->kobj, &topology_attr_group);
-}
-
-static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- int rc = 0;
-
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- rc = topology_add_dev(cpu);
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- topology_remove_dev(cpu);
- break;
- }
- return notifier_from_errno(rc);
-}
-
-static int __cpuinit topology_sysfs_init(void)
-{
- int cpu;
- int rc;
-
- for_each_online_cpu(cpu) {
- rc = topology_add_dev(cpu);
- if (rc)
- return rc;
- }
- hotcpu_notifier(topology_cpu_callback, 0);
-
- return 0;
-}
-
-device_initcall(topology_sysfs_init);
diff --git a/ANDROID_3.4.5/drivers/base/transport_class.c b/ANDROID_3.4.5/drivers/base/transport_class.c
deleted file mode 100644
index f6c453c3..00000000
--- a/ANDROID_3.4.5/drivers/base/transport_class.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * transport_class.c - implementation of generic transport classes
- * using attribute_containers
- *
- * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
- *
- * This file is licensed under GPLv2
- *
- * The basic idea here is to allow any "device controller" (which
- * would most often be a Host Bus Adapter to use the services of one
- * or more tranport classes for performing transport specific
- * services. Transport specific services are things that the generic
- * command layer doesn't want to know about (speed settings, line
- * condidtioning, etc), but which the user might be interested in.
- * Thus, the HBA's use the routines exported by the transport classes
- * to perform these functions. The transport classes export certain
- * values to the user via sysfs using attribute containers.
- *
- * Note: because not every HBA will care about every transport
- * attribute, there's a many to one relationship that goes like this:
- *
- * transport class<-----attribute container<----class device
- *
- * Usually the attribute container is per-HBA, but the design doesn't
- * mandate that. Although most of the services will be specific to
- * the actual external storage connection used by the HBA, the generic
- * transport class is framed entirely in terms of generic devices to
- * allow it to be used by any physical HBA in the system.
- */
-#include <linux/export.h>
-#include <linux/attribute_container.h>
-#include <linux/transport_class.h>
-
-/**
- * transport_class_register - register an initial transport class
- *
- * @tclass: a pointer to the transport class structure to be initialised
- *
- * The transport class contains an embedded class which is used to
- * identify it. The caller should initialise this structure with
- * zeros and then generic class must have been initialised with the
- * actual transport class unique name. There's a macro
- * DECLARE_TRANSPORT_CLASS() to do this (declared classes still must
- * be registered).
- *
- * Returns 0 on success or error on failure.
- */
-int transport_class_register(struct transport_class *tclass)
-{
- return class_register(&tclass->class);
-}
-EXPORT_SYMBOL_GPL(transport_class_register);
-
-/**
- * transport_class_unregister - unregister a previously registered class
- *
- * @tclass: The transport class to unregister
- *
- * Must be called prior to deallocating the memory for the transport
- * class.
- */
-void transport_class_unregister(struct transport_class *tclass)
-{
- class_unregister(&tclass->class);
-}
-EXPORT_SYMBOL_GPL(transport_class_unregister);
-
-static int anon_transport_dummy_function(struct transport_container *tc,
- struct device *dev,
- struct device *cdev)
-{
- /* do nothing */
- return 0;
-}
-
-/**
- * anon_transport_class_register - register an anonymous class
- *
- * @atc: The anon transport class to register
- *
- * The anonymous transport class contains both a transport class and a
- * container. The idea of an anonymous class is that it never
- * actually has any device attributes associated with it (and thus
- * saves on container storage). So it can only be used for triggering
- * events. Use prezero and then use DECLARE_ANON_TRANSPORT_CLASS() to
- * initialise the anon transport class storage.
- */
-int anon_transport_class_register(struct anon_transport_class *atc)
-{
- int error;
- atc->container.class = &atc->tclass.class;
- attribute_container_set_no_classdevs(&atc->container);
- error = attribute_container_register(&atc->container);
- if (error)
- return error;
- atc->tclass.setup = anon_transport_dummy_function;
- atc->tclass.remove = anon_transport_dummy_function;
- return 0;
-}
-EXPORT_SYMBOL_GPL(anon_transport_class_register);
-
-/**
- * anon_transport_class_unregister - unregister an anon class
- *
- * @atc: Pointer to the anon transport class to unregister
- *
- * Must be called prior to deallocating the memory for the anon
- * transport class.
- */
-void anon_transport_class_unregister(struct anon_transport_class *atc)
-{
- if (unlikely(attribute_container_unregister(&atc->container)))
- BUG();
-}
-EXPORT_SYMBOL_GPL(anon_transport_class_unregister);
-
-static int transport_setup_classdev(struct attribute_container *cont,
- struct device *dev,
- struct device *classdev)
-{
- struct transport_class *tclass = class_to_transport_class(cont->class);
- struct transport_container *tcont = attribute_container_to_transport_container(cont);
-
- if (tclass->setup)
- tclass->setup(tcont, dev, classdev);
-
- return 0;
-}
-
-/**
- * transport_setup_device - declare a new dev for transport class association but don't make it visible yet.
- * @dev: the generic device representing the entity being added
- *
- * Usually, dev represents some component in the HBA system (either
- * the HBA itself or a device remote across the HBA bus). This
- * routine is simply a trigger point to see if any set of transport
- * classes wishes to associate with the added device. This allocates
- * storage for the class device and initialises it, but does not yet
- * add it to the system or add attributes to it (you do this with
- * transport_add_device). If you have no need for a separate setup
- * and add operations, use transport_register_device (see
- * transport_class.h).
- */
-
-void transport_setup_device(struct device *dev)
-{
- attribute_container_add_device(dev, transport_setup_classdev);
-}
-EXPORT_SYMBOL_GPL(transport_setup_device);
-
-static int transport_add_class_device(struct attribute_container *cont,
- struct device *dev,
- struct device *classdev)
-{
- int error = attribute_container_add_class_device(classdev);
- struct transport_container *tcont =
- attribute_container_to_transport_container(cont);
-
- if (!error && tcont->statistics)
- error = sysfs_create_group(&classdev->kobj, tcont->statistics);
-
- return error;
-}
-
-
-/**
- * transport_add_device - declare a new dev for transport class association
- *
- * @dev: the generic device representing the entity being added
- *
- * Usually, dev represents some component in the HBA system (either
- * the HBA itself or a device remote across the HBA bus). This
- * routine is simply a trigger point used to add the device to the
- * system and register attributes for it.
- */
-
-void transport_add_device(struct device *dev)
-{
- attribute_container_device_trigger(dev, transport_add_class_device);
-}
-EXPORT_SYMBOL_GPL(transport_add_device);
-
-static int transport_configure(struct attribute_container *cont,
- struct device *dev,
- struct device *cdev)
-{
- struct transport_class *tclass = class_to_transport_class(cont->class);
- struct transport_container *tcont = attribute_container_to_transport_container(cont);
-
- if (tclass->configure)
- tclass->configure(tcont, dev, cdev);
-
- return 0;
-}
-
-/**
- * transport_configure_device - configure an already set up device
- *
- * @dev: generic device representing device to be configured
- *
- * The idea of configure is simply to provide a point within the setup
- * process to allow the transport class to extract information from a
- * device after it has been setup. This is used in SCSI because we
- * have to have a setup device to begin using the HBA, but after we
- * send the initial inquiry, we use configure to extract the device
- * parameters. The device need not have been added to be configured.
- */
-void transport_configure_device(struct device *dev)
-{
- attribute_container_device_trigger(dev, transport_configure);
-}
-EXPORT_SYMBOL_GPL(transport_configure_device);
-
-static int transport_remove_classdev(struct attribute_container *cont,
- struct device *dev,
- struct device *classdev)
-{
- struct transport_container *tcont =
- attribute_container_to_transport_container(cont);
- struct transport_class *tclass = class_to_transport_class(cont->class);
-
- if (tclass->remove)
- tclass->remove(tcont, dev, classdev);
-
- if (tclass->remove != anon_transport_dummy_function) {
- if (tcont->statistics)
- sysfs_remove_group(&classdev->kobj, tcont->statistics);
- attribute_container_class_device_del(classdev);
- }
-
- return 0;
-}
-
-
-/**
- * transport_remove_device - remove the visibility of a device
- *
- * @dev: generic device to remove
- *
- * This call removes the visibility of the device (to the user from
- * sysfs), but does not destroy it. To eliminate a device entirely
- * you must also call transport_destroy_device. If you don't need to
- * do remove and destroy as separate operations, use
- * transport_unregister_device() (see transport_class.h) which will
- * perform both calls for you.
- */
-void transport_remove_device(struct device *dev)
-{
- attribute_container_device_trigger(dev, transport_remove_classdev);
-}
-EXPORT_SYMBOL_GPL(transport_remove_device);
-
-static void transport_destroy_classdev(struct attribute_container *cont,
- struct device *dev,
- struct device *classdev)
-{
- struct transport_class *tclass = class_to_transport_class(cont->class);
-
- if (tclass->remove != anon_transport_dummy_function)
- put_device(classdev);
-}
-
-
-/**
- * transport_destroy_device - destroy a removed device
- *
- * @dev: device to eliminate from the transport class.
- *
- * This call triggers the elimination of storage associated with the
- * transport classdev. Note: all it really does is relinquish a
- * reference to the classdev. The memory will not be freed until the
- * last reference goes to zero. Note also that the classdev retains a
- * reference count on dev, so dev too will remain for as long as the
- * transport class device remains around.
- */
-void transport_destroy_device(struct device *dev)
-{
- attribute_container_remove_device(dev, transport_destroy_classdev);
-}
-EXPORT_SYMBOL_GPL(transport_destroy_device);