author    Srikant Patnaik  2015-01-11 12:28:04 +0530
committer Srikant Patnaik  2015-01-11 12:28:04 +0530
commit    871480933a1c28f8a9fed4c4d34d06c439a7a422 (patch)
tree      8718f573808810c2a1e8cb8fb6ac469093ca2784 /block
parent    9d40ac5867b9aefe0722bc1f110b965ff294d30d (diff)
Moved, renamed, and deleted files
The original directory structure was scattered and unorganized. The changes reorganize it to match the standard kernel source layout.
Diffstat (limited to 'block')
-rw-r--r--  block/Kconfig  115
-rw-r--r--  block/Kconfig.iosched  72
-rw-r--r--  block/Makefile  20
-rw-r--r--  block/blk-cgroup.c  1689
-rw-r--r--  block/blk-cgroup.h  364
-rw-r--r--  block/blk-core.c  2908
-rw-r--r--  block/blk-exec.c  121
-rw-r--r--  block/blk-flush.c  455
-rw-r--r--  block/blk-integrity.c  448
-rw-r--r--  block/blk-ioc.c  459
-rw-r--r--  block/blk-iopoll.c  227
-rw-r--r--  block/blk-lib.c  178
-rw-r--r--  block/blk-map.c  331
-rw-r--r--  block/blk-merge.c  510
-rw-r--r--  block/blk-settings.c  822
-rw-r--r--  block/blk-softirq.c  186
-rw-r--r--  block/blk-sysfs.c  567
-rw-r--r--  block/blk-tag.c  399
-rw-r--r--  block/blk-throttle.c  1330
-rw-r--r--  block/blk-timeout.c  240
-rw-r--r--  block/blk.h  237
-rw-r--r--  block/bsg-lib.c  298
-rw-r--r--  block/bsg.c  1126
-rw-r--r--  block/cfq-iosched.c  3936
-rw-r--r--  block/cfq.h  115
-rw-r--r--  block/compat_ioctl.c  756
-rw-r--r--  block/deadline-iosched.c  464
-rw-r--r--  block/elevator.c  1051
-rw-r--r--  block/genhd.c  1824
-rw-r--r--  block/ioctl.c  351
-rw-r--r--  block/noop-iosched.c  111
-rw-r--r--  block/partition-generic.c  580
-rw-r--r--  block/partitions/Kconfig  251
-rw-r--r--  block/partitions/Makefile  20
-rw-r--r--  block/partitions/acorn.c  556
-rw-r--r--  block/partitions/acorn.h  14
-rw-r--r--  block/partitions/amiga.c  139
-rw-r--r--  block/partitions/amiga.h  6
-rw-r--r--  block/partitions/atari.c  149
-rw-r--r--  block/partitions/atari.h  34
-rw-r--r--  block/partitions/check.c  166
-rw-r--r--  block/partitions/check.h  52
-rw-r--r--  block/partitions/efi.c  675
-rw-r--r--  block/partitions/efi.h  134
-rw-r--r--  block/partitions/ibm.c  275
-rw-r--r--  block/partitions/ibm.h  1
-rw-r--r--  block/partitions/karma.c  57
-rw-r--r--  block/partitions/karma.h  8
-rw-r--r--  block/partitions/ldm.c  1567
-rw-r--r--  block/partitions/ldm.h  215
-rw-r--r--  block/partitions/mac.c  134
-rw-r--r--  block/partitions/mac.h  44
-rw-r--r--  block/partitions/msdos.c  552
-rw-r--r--  block/partitions/msdos.h  8
-rw-r--r--  block/partitions/osf.c  86
-rw-r--r--  block/partitions/osf.h  7
-rw-r--r--  block/partitions/sgi.c  82
-rw-r--r--  block/partitions/sgi.h  8
-rw-r--r--  block/partitions/sun.c  122
-rw-r--r--  block/partitions/sun.h  8
-rw-r--r--  block/partitions/sysv68.c  95
-rw-r--r--  block/partitions/sysv68.h  1
-rw-r--r--  block/partitions/ultrix.c  48
-rw-r--r--  block/partitions/ultrix.h  5
-rw-r--r--  block/scsi_ioctl.c  750
65 files changed, 28559 insertions, 0 deletions
diff --git a/block/Kconfig b/block/Kconfig
new file mode 100644
index 00000000..09acf1b3
--- /dev/null
+++ b/block/Kconfig
@@ -0,0 +1,115 @@
+#
+# Block layer core configuration
+#
+menuconfig BLOCK
+ bool "Enable the block layer" if EXPERT
+ default y
+ help
+ Provide block layer support for the kernel.
+
+ Disable this option to remove the block layer support from the
+ kernel. This may be useful for embedded devices.
+
+ If this option is disabled:
+
+ - block device files will become unusable
+ - some filesystems (such as ext3) will become unavailable.
+
+ Also, SCSI character devices and USB storage will be disabled since
+ they make use of various block layer definitions and facilities.
+
+ Say Y here unless you know you really don't want to mount disks and
+ suchlike.
+
+if BLOCK
+
+config LBDAF
+ bool "Support for large (2TB+) block devices and files"
+ depends on !64BIT
+ default y
+ help
+ Enable block devices or files of size 2TB and larger.
+
+ This option is required to support the full capacity of large
+ (2TB+) block devices, including RAID, disk, Network Block Device,
+ Logical Volume Manager (LVM) and loopback.
+
+ This option also enables support for single files larger than
+ 2TB.
+
+ The ext4 filesystem requires that this feature be enabled in
+ order to support filesystems that have the huge_file feature
+ enabled. Otherwise, it will refuse to mount in the read-write
+ mode any filesystems that use the huge_file feature, which is
+ enabled by default by mke2fs.ext4.
+
+ The GFS2 filesystem also requires this feature.
+
+ If unsure, say Y.
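
The 2TB figure follows directly from sector arithmetic: with 512-byte sectors, a 32-bit sector index can address at most 2^32 * 512 bytes, which is the ceiling LBDAF lifts by widening the sector type. A standalone illustration of that limit (plain userspace C, not code from this tree):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 2^32 sectors of 512 bytes each -- the ceiling without LBDAF */
	uint64_t max_bytes = (uint64_t)UINT32_MAX + 1;

	max_bytes *= 512;
	printf("32-bit sector index limit: %llu bytes (%llu TiB)\n",
	       (unsigned long long)max_bytes,
	       (unsigned long long)(max_bytes >> 40));
	return 0;
}

This prints 2199023255552 bytes, i.e. 2 TiB.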
+
+config BLK_DEV_BSG
+ bool "Block layer SG support v4"
+ default y
+ help
+ Saying Y here will enable generic SG (SCSI generic) v4 support
+ for any block device.
+
+ Unlike SG v3 (aka block/scsi_ioctl.c drivers/scsi/sg.c), SG v4
+ can handle complicated SCSI commands: tagged variable length cdbs
+ with bidirectional data transfers and generic request/response
+ protocols (e.g. Task Management Functions and SMP in Serial
+ Attached SCSI).
+
+ This option is required by recent UDEV versions to properly
+ access device serial numbers, etc.
+
+ If unsure, say Y.
+
+config BLK_DEV_BSGLIB
+ bool "Block layer SG support v4 helper lib"
+ default n
+ select BLK_DEV_BSG
+ help
+ Subsystems will normally enable this if needed. Users will not
+ normally need to manually enable this.
+
+ If unsure, say N.
+
+config BLK_DEV_INTEGRITY
+ bool "Block layer data integrity support"
+ ---help---
+ Some storage devices allow extra information to be
+ stored/retrieved to help protect the data. The block layer
+ data integrity option provides hooks which can be used by
+ filesystems to ensure better data integrity.
+
+ Say yes here if you have a storage device that provides the
+ T10/SCSI Data Integrity Field or the T13/ATA External Path
+ Protection. If in doubt, say N.
+
+config BLK_DEV_THROTTLING
+ bool "Block layer bio throttling support"
+ depends on BLK_CGROUP=y && EXPERIMENTAL
+ default n
+ ---help---
+ Block layer bio throttling support. It can be used to limit
+ the IO rate to a device. IO rate policies are per cgroup and
+ one needs to mount and use blkio cgroup controller for creating
+ cgroups and specifying per device IO rate policies.
+
+ See Documentation/cgroups/blkio-controller.txt for more information.
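
As a concrete illustration of the per-device rule format this controller accepts, here is a hedged userspace sketch; the cgroup mount point and group name are assumptions, while the blkio.throttle.* file names come from the cftype table added later in this patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumed mount point and group path -- adjust for your system. */
	const char *path =
		"/sys/fs/cgroup/blkio/mygroup/blkio.throttle.read_bps_device";
	const char *rule = "8:0 1048576\n";	/* limit device 8:0 to 1 MB/s reads */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, rule, strlen(rule)) < 0)
		perror("write");
	close(fd);
	return 0;
}

Writing a value of 0 for the same device removes the rule (see blkio_delete_rule_command() in blk-cgroup.c below).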
+
+menu "Partition Types"
+
+source "block/partitions/Kconfig"
+
+endmenu
+
+endif # BLOCK
+
+config BLOCK_COMPAT
+ bool
+ depends on BLOCK && COMPAT
+ default y
+
+source block/Kconfig.iosched
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
new file mode 100644
index 00000000..3199b76f
--- /dev/null
+++ b/block/Kconfig.iosched
@@ -0,0 +1,72 @@
+if BLOCK
+
+menu "IO Schedulers"
+
+config IOSCHED_NOOP
+ bool
+ default y
+ ---help---
+ The no-op I/O scheduler is a minimal scheduler that does basic merging
+ and sorting. Its main uses include non-disk based block devices like
+ memory devices, and specialised software or hardware environments
+ that do their own scheduling and require only minimal assistance from
+ the kernel.
+
+config IOSCHED_DEADLINE
+ tristate "Deadline I/O scheduler"
+ default y
+ ---help---
+ The deadline I/O scheduler is simple and compact. It will provide
+ CSCAN service with FIFO expiration of requests, switching to
+ a new point in the service tree and doing a batch of IO from there
+ in case of expiry.
+
+config IOSCHED_CFQ
+ tristate "CFQ I/O scheduler"
+ # If BLK_CGROUP is a module, CFQ has to be built as module.
+ depends on (BLK_CGROUP=m && m) || !BLK_CGROUP || BLK_CGROUP=y
+ default y
+ ---help---
+ The CFQ I/O scheduler tries to distribute bandwidth equally
+ among all processes in the system. It should provide a fair
+ and low latency working environment, suitable for both desktop
+ and server systems.
+
+ This is the default I/O scheduler.
+
+ Note: If BLK_CGROUP=m, then CFQ can be built only as module.
+
+config CFQ_GROUP_IOSCHED
+ bool "CFQ Group Scheduling support"
+ depends on IOSCHED_CFQ && BLK_CGROUP
+ default n
+ ---help---
+ Enable group IO scheduling in CFQ.
+
+choice
+ prompt "Default I/O scheduler"
+ default DEFAULT_CFQ
+ help
+ Select the I/O scheduler which will be used by default for all
+ block devices.
+
+ config DEFAULT_DEADLINE
+ bool "Deadline" if IOSCHED_DEADLINE=y
+
+ config DEFAULT_CFQ
+ bool "CFQ" if IOSCHED_CFQ=y
+
+ config DEFAULT_NOOP
+ bool "No-op"
+
+endchoice
+
+config DEFAULT_IOSCHED
+ string
+ default "deadline" if DEFAULT_DEADLINE
+ default "cfq" if DEFAULT_CFQ
+ default "noop" if DEFAULT_NOOP
+
+endmenu
+
+endif
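
CONFIG_DEFAULT_IOSCHED ends up as a plain string ("noop", "deadline" or "cfq") that the elevator core in block/elevator.c matches against the registered schedulers when a queue is set up. A rough, illustrative-only sketch of that kind of name lookup (not the actual elevator.c code):

#include <stdio.h>
#include <string.h>

/* Illustrative only: roughly how a default-scheduler name could be resolved
 * against the registered elevators; the real logic lives in block/elevator.c. */
static const char *registered[] = { "noop", "deadline", "cfq" };

static const char *pick_default(const char *want)
{
	size_t i;

	for (i = 0; i < sizeof(registered) / sizeof(registered[0]); i++)
		if (!strcmp(registered[i], want))
			return registered[i];
	return registered[0];	/* fall back to no-op */
}

int main(void)
{
	printf("default elevator: %s\n", pick_default("cfq"));
	return 0;
}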
diff --git a/block/Makefile b/block/Makefile
new file mode 100644
index 00000000..39b76ba6
--- /dev/null
+++ b/block/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the kernel block layer
+#
+
+obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
+ blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
+ blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
+ blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o \
+ partition-generic.o partitions/
+
+obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
+obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o
+obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o
+obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
+obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
+obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
+obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
+
+obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
+obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
new file mode 100644
index 00000000..ea84a23d
--- /dev/null
+++ b/block/blk-cgroup.c
@@ -0,0 +1,1689 @@
+/*
+ * Common Block IO controller cgroup interface
+ *
+ * Based on ideas and code from CFQ, CFS and BFQ:
+ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
+ *
+ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
+ * Paolo Valente <paolo.valente@unimore.it>
+ *
+ * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
+ * Nauman Rafique <nauman@google.com>
+ */
+#include <linux/ioprio.h>
+#include <linux/seq_file.h>
+#include <linux/kdev_t.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include "blk-cgroup.h"
+#include <linux/genhd.h>
+
+#define MAX_KEY_LEN 100
+
+static DEFINE_SPINLOCK(blkio_list_lock);
+static LIST_HEAD(blkio_list);
+
+struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
+EXPORT_SYMBOL_GPL(blkio_root_cgroup);
+
+static struct cgroup_subsys_state *blkiocg_create(struct cgroup *);
+static int blkiocg_can_attach(struct cgroup *, struct cgroup_taskset *);
+static void blkiocg_attach(struct cgroup *, struct cgroup_taskset *);
+static void blkiocg_destroy(struct cgroup *);
+static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
+
+/* for encoding cft->private value on file */
+#define BLKIOFILE_PRIVATE(x, val) (((x) << 16) | (val))
+/* What policy owns the file, proportional or throttle */
+#define BLKIOFILE_POLICY(val) (((val) >> 16) & 0xffff)
+#define BLKIOFILE_ATTR(val) ((val) & 0xffff)
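
The encoding above packs the owning policy into the high 16 bits of cft->private and the per-file attribute into the low 16; a minimal standalone round-trip check (userspace C mirroring the three macros):

#include <assert.h>
#include <stdio.h>

#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)

int main(void)
{
	/* e.g. policy 1 (throttle) and attribute 2 (read_iops_device) */
	unsigned int packed = BLKIOFILE_PRIVATE(1, 2);

	assert(BLKIOFILE_POLICY(packed) == 1);
	assert(BLKIOFILE_ATTR(packed) == 2);
	printf("private=0x%x policy=%u attr=%u\n",
	       packed, BLKIOFILE_POLICY(packed), BLKIOFILE_ATTR(packed));
	return 0;
}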
+
+struct cgroup_subsys blkio_subsys = {
+ .name = "blkio",
+ .create = blkiocg_create,
+ .can_attach = blkiocg_can_attach,
+ .attach = blkiocg_attach,
+ .destroy = blkiocg_destroy,
+ .populate = blkiocg_populate,
+#ifdef CONFIG_BLK_CGROUP
+ /* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
+ .subsys_id = blkio_subsys_id,
+#endif
+ .use_id = 1,
+ .module = THIS_MODULE,
+};
+EXPORT_SYMBOL_GPL(blkio_subsys);
+
+static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
+ struct blkio_policy_node *pn)
+{
+ list_add(&pn->node, &blkcg->policy_list);
+}
+
+static inline bool cftype_blkg_same_policy(struct cftype *cft,
+ struct blkio_group *blkg)
+{
+ enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
+
+ if (blkg->plid == plid)
+ return 1;
+
+ return 0;
+}
+
+/* Determines if policy node matches cgroup file being accessed */
+static inline bool pn_matches_cftype(struct cftype *cft,
+ struct blkio_policy_node *pn)
+{
+ enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
+ int fileid = BLKIOFILE_ATTR(cft->private);
+
+ return (plid == pn->plid && fileid == pn->fileid);
+}
+
+/* Must be called with blkcg->lock held */
+static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
+{
+ list_del(&pn->node);
+}
+
+/* Must be called with blkcg->lock held */
+static struct blkio_policy_node *
+blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
+ enum blkio_policy_id plid, int fileid)
+{
+ struct blkio_policy_node *pn;
+
+ list_for_each_entry(pn, &blkcg->policy_list, node) {
+ if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
+ return pn;
+ }
+
+ return NULL;
+}
+
+struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
+{
+ return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
+ struct blkio_cgroup, css);
+}
+EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
+
+struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
+{
+ return container_of(task_subsys_state(tsk, blkio_subsys_id),
+ struct blkio_cgroup, css);
+}
+EXPORT_SYMBOL_GPL(task_blkio_cgroup);
+
+static inline void
+blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
+{
+ struct blkio_policy_type *blkiop;
+
+ list_for_each_entry(blkiop, &blkio_list, list) {
+ /* If this policy does not own the blkg, do not send updates */
+ if (blkiop->plid != blkg->plid)
+ continue;
+ if (blkiop->ops.blkio_update_group_weight_fn)
+ blkiop->ops.blkio_update_group_weight_fn(blkg->key,
+ blkg, weight);
+ }
+}
+
+static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
+ int fileid)
+{
+ struct blkio_policy_type *blkiop;
+
+ list_for_each_entry(blkiop, &blkio_list, list) {
+
+ /* If this policy does not own the blkg, do not send updates */
+ if (blkiop->plid != blkg->plid)
+ continue;
+
+ if (fileid == BLKIO_THROTL_read_bps_device
+ && blkiop->ops.blkio_update_group_read_bps_fn)
+ blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
+ blkg, bps);
+
+ if (fileid == BLKIO_THROTL_write_bps_device
+ && blkiop->ops.blkio_update_group_write_bps_fn)
+ blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
+ blkg, bps);
+ }
+}
+
+static inline void blkio_update_group_iops(struct blkio_group *blkg,
+ unsigned int iops, int fileid)
+{
+ struct blkio_policy_type *blkiop;
+
+ list_for_each_entry(blkiop, &blkio_list, list) {
+
+ /* If this policy does not own the blkg, do not send updates */
+ if (blkiop->plid != blkg->plid)
+ continue;
+
+ if (fileid == BLKIO_THROTL_read_iops_device
+ && blkiop->ops.blkio_update_group_read_iops_fn)
+ blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
+ blkg, iops);
+
+ if (fileid == BLKIO_THROTL_write_iops_device
+ && blkiop->ops.blkio_update_group_write_iops_fn)
+ blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
+ blkg,iops);
+ }
+}
+
+/*
+ * Add to the appropriate stat variable depending on the request type.
+ * This should be called with the blkg->stats_lock held.
+ */
+static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
+ bool sync)
+{
+ if (direction)
+ stat[BLKIO_STAT_WRITE] += add;
+ else
+ stat[BLKIO_STAT_READ] += add;
+ if (sync)
+ stat[BLKIO_STAT_SYNC] += add;
+ else
+ stat[BLKIO_STAT_ASYNC] += add;
+}
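
Note that each request is counted twice here, once on the read/write axis and once on the sync/async axis, which is why the per-file totals computed later sum only the READ and WRITE buckets. A standalone mirror of the helper, for illustration only:

#include <stdint.h>
#include <stdio.h>

enum { STAT_READ, STAT_WRITE, STAT_SYNC, STAT_ASYNC, STAT_TOTAL };

/* Userspace mirror of blkio_add_stat() above. */
static void add_stat(uint64_t *stat, uint64_t add, int direction, int sync)
{
	stat[direction ? STAT_WRITE : STAT_READ] += add;
	stat[sync ? STAT_SYNC : STAT_ASYNC] += add;
}

int main(void)
{
	uint64_t queued[STAT_TOTAL] = { 0 };

	add_stat(queued, 1, 1, 1);	/* one synchronous write */
	printf("R=%llu W=%llu S=%llu A=%llu\n",
	       (unsigned long long)queued[STAT_READ],
	       (unsigned long long)queued[STAT_WRITE],
	       (unsigned long long)queued[STAT_SYNC],
	       (unsigned long long)queued[STAT_ASYNC]);
	return 0;	/* prints R=0 W=1 S=1 A=1 */
}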
+
+/*
+ * Decrements the appropriate stat variable if non-zero depending on the
+ * request type. Panics on value being zero.
+ * This should be called with the blkg->stats_lock held.
+ */
+static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
+{
+ if (direction) {
+ BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
+ stat[BLKIO_STAT_WRITE]--;
+ } else {
+ BUG_ON(stat[BLKIO_STAT_READ] == 0);
+ stat[BLKIO_STAT_READ]--;
+ }
+ if (sync) {
+ BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
+ stat[BLKIO_STAT_SYNC]--;
+ } else {
+ BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
+ stat[BLKIO_STAT_ASYNC]--;
+ }
+}
+
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+/* This should be called with the blkg->stats_lock held. */
+static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
+ struct blkio_group *curr_blkg)
+{
+ if (blkio_blkg_waiting(&blkg->stats))
+ return;
+ if (blkg == curr_blkg)
+ return;
+ blkg->stats.start_group_wait_time = sched_clock();
+ blkio_mark_blkg_waiting(&blkg->stats);
+}
+
+/* This should be called with the blkg->stats_lock held. */
+static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
+{
+ unsigned long long now;
+
+ if (!blkio_blkg_waiting(stats))
+ return;
+
+ now = sched_clock();
+ if (time_after64(now, stats->start_group_wait_time))
+ stats->group_wait_time += now - stats->start_group_wait_time;
+ blkio_clear_blkg_waiting(stats);
+}
+
+/* This should be called with the blkg->stats_lock held. */
+static void blkio_end_empty_time(struct blkio_group_stats *stats)
+{
+ unsigned long long now;
+
+ if (!blkio_blkg_empty(stats))
+ return;
+
+ now = sched_clock();
+ if (time_after64(now, stats->start_empty_time))
+ stats->empty_time += now - stats->start_empty_time;
+ blkio_clear_blkg_empty(stats);
+}
+
+void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ BUG_ON(blkio_blkg_idling(&blkg->stats));
+ blkg->stats.start_idle_time = sched_clock();
+ blkio_mark_blkg_idling(&blkg->stats);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
+
+void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
+{
+ unsigned long flags;
+ unsigned long long now;
+ struct blkio_group_stats *stats;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ stats = &blkg->stats;
+ if (blkio_blkg_idling(stats)) {
+ now = sched_clock();
+ if (time_after64(now, stats->start_idle_time))
+ stats->idle_time += now - stats->start_idle_time;
+ blkio_clear_blkg_idling(stats);
+ }
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
+
+void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
+{
+ unsigned long flags;
+ struct blkio_group_stats *stats;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ stats = &blkg->stats;
+ stats->avg_queue_size_sum +=
+ stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
+ stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
+ stats->avg_queue_size_samples++;
+ blkio_update_group_wait_time(stats);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
+
+void blkiocg_set_start_empty_time(struct blkio_group *blkg)
+{
+ unsigned long flags;
+ struct blkio_group_stats *stats;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ stats = &blkg->stats;
+
+ if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
+ stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+ return;
+ }
+
+ /*
+ * group is already marked empty. This can happen if cfqq got new
+ * request in parent group and moved to this group while being added
+ * to service tree. Just ignore the event and move on.
+ */
+ if(blkio_blkg_empty(stats)) {
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+ return;
+ }
+
+ stats->start_empty_time = sched_clock();
+ blkio_mark_blkg_empty(stats);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
+
+void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+ unsigned long dequeue)
+{
+ blkg->stats.dequeue += dequeue;
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
+#else
+static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
+ struct blkio_group *curr_blkg) {}
+static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
+#endif
+
+void blkiocg_update_io_add_stats(struct blkio_group *blkg,
+ struct blkio_group *curr_blkg, bool direction,
+ bool sync)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
+ sync);
+ blkio_end_empty_time(&blkg->stats);
+ blkio_set_start_group_wait_time(blkg, curr_blkg);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
+
+void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+ bool direction, bool sync)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
+ direction, sync);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
+
+void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
+ unsigned long unaccounted_time)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ blkg->stats.time += time;
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ blkg->stats.unaccounted_time += unaccounted_time;
+#endif
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
+
+/*
+ * should be called under rcu read lock or queue lock to make sure blkg pointer
+ * is valid.
+ */
+void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+ uint64_t bytes, bool direction, bool sync)
+{
+ struct blkio_group_stats_cpu *stats_cpu;
+ unsigned long flags;
+
+ /*
+ * Disabling interrupts to provide mutual exclusion between two
+ * writes on same cpu. It probably is not needed for 64bit. Not
+ * optimizing that case yet.
+ */
+ local_irq_save(flags);
+
+ stats_cpu = this_cpu_ptr(blkg->stats_cpu);
+
+ u64_stats_update_begin(&stats_cpu->syncp);
+ stats_cpu->sectors += bytes >> 9;
+ blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
+ 1, direction, sync);
+ blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
+ bytes, direction, sync);
+ u64_stats_update_end(&stats_cpu->syncp);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
+
+void blkiocg_update_completion_stats(struct blkio_group *blkg,
+ uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
+{
+ struct blkio_group_stats *stats;
+ unsigned long flags;
+ unsigned long long now = sched_clock();
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ stats = &blkg->stats;
+ if (time_after64(now, io_start_time))
+ blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
+ now - io_start_time, direction, sync);
+ if (time_after64(io_start_time, start_time))
+ blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
+ io_start_time - start_time, direction, sync);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
+
+/* Merged stats are per cpu. */
+void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
+ bool sync)
+{
+ struct blkio_group_stats_cpu *stats_cpu;
+ unsigned long flags;
+
+ /*
+ * Disabling interrupts to provide mutual exclusion between two
+ * writes on same cpu. It probably is not needed for 64bit. Not
+ * optimizing that case yet.
+ */
+ local_irq_save(flags);
+
+ stats_cpu = this_cpu_ptr(blkg->stats_cpu);
+
+ u64_stats_update_begin(&stats_cpu->syncp);
+ blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
+ direction, sync);
+ u64_stats_update_end(&stats_cpu->syncp);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
+
+/*
+ * This function allocates the per cpu stats for blkio_group. Should be called
+ * from sleepable context as alloc_percpu() requires that.
+ */
+int blkio_alloc_blkg_stats(struct blkio_group *blkg)
+{
+ /* Allocate memory for per cpu stats */
+ blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
+ if (!blkg->stats_cpu)
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
+
+void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
+ struct blkio_group *blkg, void *key, dev_t dev,
+ enum blkio_policy_id plid)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkcg->lock, flags);
+ spin_lock_init(&blkg->stats_lock);
+ rcu_assign_pointer(blkg->key, key);
+ blkg->blkcg_id = css_id(&blkcg->css);
+ hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
+ blkg->plid = plid;
+ spin_unlock_irqrestore(&blkcg->lock, flags);
+ /* Need to take css reference ? */
+ cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
+ blkg->dev = dev;
+}
+EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
+
+static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
+{
+ hlist_del_init_rcu(&blkg->blkcg_node);
+ blkg->blkcg_id = 0;
+}
+
+/*
+ * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
+ * indicating that blk_group was unhashed by the time we got to it.
+ */
+int blkiocg_del_blkio_group(struct blkio_group *blkg)
+{
+ struct blkio_cgroup *blkcg;
+ unsigned long flags;
+ struct cgroup_subsys_state *css;
+ int ret = 1;
+
+ rcu_read_lock();
+ css = css_lookup(&blkio_subsys, blkg->blkcg_id);
+ if (css) {
+ blkcg = container_of(css, struct blkio_cgroup, css);
+ spin_lock_irqsave(&blkcg->lock, flags);
+ if (!hlist_unhashed(&blkg->blkcg_node)) {
+ __blkiocg_del_blkio_group(blkg);
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&blkcg->lock, flags);
+ }
+
+ rcu_read_unlock();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
+
+/* called under rcu_read_lock(). */
+struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
+{
+ struct blkio_group *blkg;
+ struct hlist_node *n;
+ void *__key;
+
+ hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ __key = blkg->key;
+ if (__key == key)
+ return blkg;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
+
+static void blkio_reset_stats_cpu(struct blkio_group *blkg)
+{
+ struct blkio_group_stats_cpu *stats_cpu;
+ int i, j, k;
+ /*
+ * Note: On 64 bit arch this should not be an issue. This has the
+ * possibility of returning some inconsistent value on 32bit arch
+ * as 64bit update on 32bit is non atomic. Taking care of this
+ * corner case makes code very complicated, like sending IPIs to
+ * cpus, taking care of stats of offline cpus etc.
+ *
+ * reset stats is anyway more of a debug feature and this sounds a
+ * corner case. So I am not complicating the code yet until and
+ * unless this becomes a real issue.
+ */
+ for_each_possible_cpu(i) {
+ stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
+ stats_cpu->sectors = 0;
+ for(j = 0; j < BLKIO_STAT_CPU_NR; j++)
+ for (k = 0; k < BLKIO_STAT_TOTAL; k++)
+ stats_cpu->stat_arr_cpu[j][k] = 0;
+ }
+}
+
+static int
+blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
+{
+ struct blkio_cgroup *blkcg;
+ struct blkio_group *blkg;
+ struct blkio_group_stats *stats;
+ struct hlist_node *n;
+ uint64_t queued[BLKIO_STAT_TOTAL];
+ int i;
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ bool idling, waiting, empty;
+ unsigned long long now = sched_clock();
+#endif
+
+ blkcg = cgroup_to_blkio_cgroup(cgroup);
+ spin_lock_irq(&blkcg->lock);
+ hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ spin_lock(&blkg->stats_lock);
+ stats = &blkg->stats;
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ idling = blkio_blkg_idling(stats);
+ waiting = blkio_blkg_waiting(stats);
+ empty = blkio_blkg_empty(stats);
+#endif
+ for (i = 0; i < BLKIO_STAT_TOTAL; i++)
+ queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
+ memset(stats, 0, sizeof(struct blkio_group_stats));
+ for (i = 0; i < BLKIO_STAT_TOTAL; i++)
+ stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ if (idling) {
+ blkio_mark_blkg_idling(stats);
+ stats->start_idle_time = now;
+ }
+ if (waiting) {
+ blkio_mark_blkg_waiting(stats);
+ stats->start_group_wait_time = now;
+ }
+ if (empty) {
+ blkio_mark_blkg_empty(stats);
+ stats->start_empty_time = now;
+ }
+#endif
+ spin_unlock(&blkg->stats_lock);
+
+ /* Reset Per cpu stats which don't take blkg->stats_lock */
+ blkio_reset_stats_cpu(blkg);
+ }
+
+ spin_unlock_irq(&blkcg->lock);
+ return 0;
+}
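
This handler is wired to the blkio.reset_stats cgroup file further down; any value written to it triggers the reset (val is ignored). A hedged userspace example, assuming the usual /sys/fs/cgroup/blkio mount point:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed mount point -- adjust for your system. */
	int fd = open("/sys/fs/cgroup/blkio/blkio.reset_stats", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) < 0)	/* any value works; see handler above */
		perror("write");
	close(fd);
	return 0;
}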
+
+static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
+ int chars_left, bool diskname_only)
+{
+ snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
+ chars_left -= strlen(str);
+ if (chars_left <= 0) {
+ printk(KERN_WARNING
+ "Possibly incorrect cgroup stat display format");
+ return;
+ }
+ if (diskname_only)
+ return;
+ switch (type) {
+ case BLKIO_STAT_READ:
+ strlcat(str, " Read", chars_left);
+ break;
+ case BLKIO_STAT_WRITE:
+ strlcat(str, " Write", chars_left);
+ break;
+ case BLKIO_STAT_SYNC:
+ strlcat(str, " Sync", chars_left);
+ break;
+ case BLKIO_STAT_ASYNC:
+ strlcat(str, " Async", chars_left);
+ break;
+ case BLKIO_STAT_TOTAL:
+ strlcat(str, " Total", chars_left);
+ break;
+ default:
+ strlcat(str, " Invalid", chars_left);
+ }
+}
+
+static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
+ struct cgroup_map_cb *cb, dev_t dev)
+{
+ blkio_get_key_name(0, dev, str, chars_left, true);
+ cb->fill(cb, str, val);
+ return val;
+}
+
+
+static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
+ enum stat_type_cpu type, enum stat_sub_type sub_type)
+{
+ int cpu;
+ struct blkio_group_stats_cpu *stats_cpu;
+ u64 val = 0, tval;
+
+ for_each_possible_cpu(cpu) {
+ unsigned int start;
+ stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);
+
+ do {
+ start = u64_stats_fetch_begin(&stats_cpu->syncp);
+ if (type == BLKIO_STAT_CPU_SECTORS)
+ tval = stats_cpu->sectors;
+ else
+ tval = stats_cpu->stat_arr_cpu[type][sub_type];
+ } while(u64_stats_fetch_retry(&stats_cpu->syncp, start));
+
+ val += tval;
+ }
+
+ return val;
+}
+
+static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
+ struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
+{
+ uint64_t disk_total, val;
+ char key_str[MAX_KEY_LEN];
+ enum stat_sub_type sub_type;
+
+ if (type == BLKIO_STAT_CPU_SECTORS) {
+ val = blkio_read_stat_cpu(blkg, type, 0);
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
+ }
+
+ for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
+ sub_type++) {
+ blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
+ val = blkio_read_stat_cpu(blkg, type, sub_type);
+ cb->fill(cb, key_str, val);
+ }
+
+ disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
+ blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);
+
+ blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
+ cb->fill(cb, key_str, disk_total);
+ return disk_total;
+}
+
+/* This should be called with blkg->stats_lock held */
+static uint64_t blkio_get_stat(struct blkio_group *blkg,
+ struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
+{
+ uint64_t disk_total;
+ char key_str[MAX_KEY_LEN];
+ enum stat_sub_type sub_type;
+
+ if (type == BLKIO_STAT_TIME)
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+ blkg->stats.time, cb, dev);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ if (type == BLKIO_STAT_UNACCOUNTED_TIME)
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+ blkg->stats.unaccounted_time, cb, dev);
+ if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
+ uint64_t sum = blkg->stats.avg_queue_size_sum;
+ uint64_t samples = blkg->stats.avg_queue_size_samples;
+ if (samples)
+ do_div(sum, samples);
+ else
+ sum = 0;
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
+ }
+ if (type == BLKIO_STAT_GROUP_WAIT_TIME)
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+ blkg->stats.group_wait_time, cb, dev);
+ if (type == BLKIO_STAT_IDLE_TIME)
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+ blkg->stats.idle_time, cb, dev);
+ if (type == BLKIO_STAT_EMPTY_TIME)
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+ blkg->stats.empty_time, cb, dev);
+ if (type == BLKIO_STAT_DEQUEUE)
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+ blkg->stats.dequeue, cb, dev);
+#endif
+
+ for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
+ sub_type++) {
+ blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
+ cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
+ }
+ disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
+ blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
+ blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
+ cb->fill(cb, key_str, disk_total);
+ return disk_total;
+}
+
+static int blkio_policy_parse_and_set(char *buf,
+ struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
+{
+ struct gendisk *disk = NULL;
+ char *s[4], *p, *major_s = NULL, *minor_s = NULL;
+ unsigned long major, minor;
+ int i = 0, ret = -EINVAL;
+ int part;
+ dev_t dev;
+ u64 temp;
+
+ memset(s, 0, sizeof(s));
+
+ while ((p = strsep(&buf, " ")) != NULL) {
+ if (!*p)
+ continue;
+
+ s[i++] = p;
+
+ /* Prevent from inputting too many things */
+ if (i == 3)
+ break;
+ }
+
+ if (i != 2)
+ goto out;
+
+ p = strsep(&s[0], ":");
+ if (p != NULL)
+ major_s = p;
+ else
+ goto out;
+
+ minor_s = s[0];
+ if (!minor_s)
+ goto out;
+
+ if (strict_strtoul(major_s, 10, &major))
+ goto out;
+
+ if (strict_strtoul(minor_s, 10, &minor))
+ goto out;
+
+ dev = MKDEV(major, minor);
+
+ if (strict_strtoull(s[1], 10, &temp))
+ goto out;
+
+ /* For rule removal, do not check for device presence. */
+ if (temp) {
+ disk = get_gendisk(dev, &part);
+ if (!disk || part) {
+ ret = -ENODEV;
+ goto out;
+ }
+ }
+
+ newpn->dev = dev;
+
+ switch (plid) {
+ case BLKIO_POLICY_PROP:
+ if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
+ temp > BLKIO_WEIGHT_MAX)
+ goto out;
+
+ newpn->plid = plid;
+ newpn->fileid = fileid;
+ newpn->val.weight = temp;
+ break;
+ case BLKIO_POLICY_THROTL:
+ switch(fileid) {
+ case BLKIO_THROTL_read_bps_device:
+ case BLKIO_THROTL_write_bps_device:
+ newpn->plid = plid;
+ newpn->fileid = fileid;
+ newpn->val.bps = temp;
+ break;
+ case BLKIO_THROTL_read_iops_device:
+ case BLKIO_THROTL_write_iops_device:
+ if (temp > THROTL_IOPS_MAX)
+ goto out;
+
+ newpn->plid = plid;
+ newpn->fileid = fileid;
+ newpn->val.iops = (unsigned int)temp;
+ break;
+ }
+ break;
+ default:
+ BUG();
+ }
+ ret = 0;
+out:
+ put_disk(disk);
+ return ret;
+}
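
The accepted on-wire format is therefore "<major>:<minor> <value>", with a value of 0 meaning "remove this rule". An illustrative userspace mirror of the parsing step (using sscanf instead of the kernel's strict_strtoul helpers):

#include <stdio.h>

int main(void)
{
	const char *input = "8:16 1048576";
	unsigned int major, minor;
	unsigned long long value;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &value) != 3) {
		fprintf(stderr, "bad rule: %s\n", input);
		return 1;
	}
	printf("dev %u:%u -> %llu (0 would delete the rule)\n",
	       major, minor, value);
	return 0;
}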
+
+unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
+ dev_t dev)
+{
+ struct blkio_policy_node *pn;
+ unsigned long flags;
+ unsigned int weight;
+
+ spin_lock_irqsave(&blkcg->lock, flags);
+
+ pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
+ BLKIO_PROP_weight_device);
+ if (pn)
+ weight = pn->val.weight;
+ else
+ weight = blkcg->weight;
+
+ spin_unlock_irqrestore(&blkcg->lock, flags);
+
+ return weight;
+}
+EXPORT_SYMBOL_GPL(blkcg_get_weight);
+
+uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
+{
+ struct blkio_policy_node *pn;
+ unsigned long flags;
+ uint64_t bps = -1;
+
+ spin_lock_irqsave(&blkcg->lock, flags);
+ pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
+ BLKIO_THROTL_read_bps_device);
+ if (pn)
+ bps = pn->val.bps;
+ spin_unlock_irqrestore(&blkcg->lock, flags);
+
+ return bps;
+}
+
+uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
+{
+ struct blkio_policy_node *pn;
+ unsigned long flags;
+ uint64_t bps = -1;
+
+ spin_lock_irqsave(&blkcg->lock, flags);
+ pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
+ BLKIO_THROTL_write_bps_device);
+ if (pn)
+ bps = pn->val.bps;
+ spin_unlock_irqrestore(&blkcg->lock, flags);
+
+ return bps;
+}
+
+unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
+{
+ struct blkio_policy_node *pn;
+ unsigned long flags;
+ unsigned int iops = -1;
+
+ spin_lock_irqsave(&blkcg->lock, flags);
+ pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
+ BLKIO_THROTL_read_iops_device);
+ if (pn)
+ iops = pn->val.iops;
+ spin_unlock_irqrestore(&blkcg->lock, flags);
+
+ return iops;
+}
+
+unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
+{
+ struct blkio_policy_node *pn;
+ unsigned long flags;
+ unsigned int iops = -1;
+
+ spin_lock_irqsave(&blkcg->lock, flags);
+ pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
+ BLKIO_THROTL_write_iops_device);
+ if (pn)
+ iops = pn->val.iops;
+ spin_unlock_irqrestore(&blkcg->lock, flags);
+
+ return iops;
+}
+
+/* Checks whether user asked for deleting a policy rule */
+static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
+{
+ switch(pn->plid) {
+ case BLKIO_POLICY_PROP:
+ if (pn->val.weight == 0)
+ return 1;
+ break;
+ case BLKIO_POLICY_THROTL:
+ switch(pn->fileid) {
+ case BLKIO_THROTL_read_bps_device:
+ case BLKIO_THROTL_write_bps_device:
+ if (pn->val.bps == 0)
+ return 1;
+ break;
+ case BLKIO_THROTL_read_iops_device:
+ case BLKIO_THROTL_write_iops_device:
+ if (pn->val.iops == 0)
+ return 1;
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ return 0;
+}
+
+static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
+ struct blkio_policy_node *newpn)
+{
+ switch(oldpn->plid) {
+ case BLKIO_POLICY_PROP:
+ oldpn->val.weight = newpn->val.weight;
+ break;
+ case BLKIO_POLICY_THROTL:
+ switch(newpn->fileid) {
+ case BLKIO_THROTL_read_bps_device:
+ case BLKIO_THROTL_write_bps_device:
+ oldpn->val.bps = newpn->val.bps;
+ break;
+ case BLKIO_THROTL_read_iops_device:
+ case BLKIO_THROTL_write_iops_device:
+ oldpn->val.iops = newpn->val.iops;
+ }
+ break;
+ default:
+ BUG();
+ }
+}
+
+/*
+ * Some rules/values in blkg have changed. Propagate those to respective
+ * policies.
+ */
+static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
+ struct blkio_group *blkg, struct blkio_policy_node *pn)
+{
+ unsigned int weight, iops;
+ u64 bps;
+
+ switch(pn->plid) {
+ case BLKIO_POLICY_PROP:
+ weight = pn->val.weight ? pn->val.weight :
+ blkcg->weight;
+ blkio_update_group_weight(blkg, weight);
+ break;
+ case BLKIO_POLICY_THROTL:
+ switch(pn->fileid) {
+ case BLKIO_THROTL_read_bps_device:
+ case BLKIO_THROTL_write_bps_device:
+ bps = pn->val.bps ? pn->val.bps : (-1);
+ blkio_update_group_bps(blkg, bps, pn->fileid);
+ break;
+ case BLKIO_THROTL_read_iops_device:
+ case BLKIO_THROTL_write_iops_device:
+ iops = pn->val.iops ? pn->val.iops : (-1);
+ blkio_update_group_iops(blkg, iops, pn->fileid);
+ break;
+ }
+ break;
+ default:
+ BUG();
+ }
+}
+
+/*
+ * A policy node rule has been updated. Propagate this update to all the
+ * block groups which might be affected by this update.
+ */
+static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
+ struct blkio_policy_node *pn)
+{
+ struct blkio_group *blkg;
+ struct hlist_node *n;
+
+ spin_lock(&blkio_list_lock);
+ spin_lock_irq(&blkcg->lock);
+
+ hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ if (pn->dev != blkg->dev || pn->plid != blkg->plid)
+ continue;
+ blkio_update_blkg_policy(blkcg, blkg, pn);
+ }
+
+ spin_unlock_irq(&blkcg->lock);
+ spin_unlock(&blkio_list_lock);
+}
+
+static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
+ const char *buffer)
+{
+ int ret = 0;
+ char *buf;
+ struct blkio_policy_node *newpn, *pn;
+ struct blkio_cgroup *blkcg;
+ int keep_newpn = 0;
+ enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
+ int fileid = BLKIOFILE_ATTR(cft->private);
+
+ buf = kstrdup(buffer, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
+ if (!newpn) {
+ ret = -ENOMEM;
+ goto free_buf;
+ }
+
+ ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
+ if (ret)
+ goto free_newpn;
+
+ blkcg = cgroup_to_blkio_cgroup(cgrp);
+
+ spin_lock_irq(&blkcg->lock);
+
+ pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
+ if (!pn) {
+ if (!blkio_delete_rule_command(newpn)) {
+ blkio_policy_insert_node(blkcg, newpn);
+ keep_newpn = 1;
+ }
+ spin_unlock_irq(&blkcg->lock);
+ goto update_io_group;
+ }
+
+ if (blkio_delete_rule_command(newpn)) {
+ blkio_policy_delete_node(pn);
+ kfree(pn);
+ spin_unlock_irq(&blkcg->lock);
+ goto update_io_group;
+ }
+ spin_unlock_irq(&blkcg->lock);
+
+ blkio_update_policy_rule(pn, newpn);
+
+update_io_group:
+ blkio_update_policy_node_blkg(blkcg, newpn);
+
+free_newpn:
+ if (!keep_newpn)
+ kfree(newpn);
+free_buf:
+ kfree(buf);
+ return ret;
+}
+
+static void
+blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
+{
+ switch(pn->plid) {
+ case BLKIO_POLICY_PROP:
+ if (pn->fileid == BLKIO_PROP_weight_device)
+ seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
+ MINOR(pn->dev), pn->val.weight);
+ break;
+ case BLKIO_POLICY_THROTL:
+ switch(pn->fileid) {
+ case BLKIO_THROTL_read_bps_device:
+ case BLKIO_THROTL_write_bps_device:
+ seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
+ MINOR(pn->dev), pn->val.bps);
+ break;
+ case BLKIO_THROTL_read_iops_device:
+ case BLKIO_THROTL_write_iops_device:
+ seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
+ MINOR(pn->dev), pn->val.iops);
+ break;
+ }
+ break;
+ default:
+ BUG();
+ }
+}
+
+/* cgroup files which read their data from policy nodes end up here */
+static void blkio_read_policy_node_files(struct cftype *cft,
+ struct blkio_cgroup *blkcg, struct seq_file *m)
+{
+ struct blkio_policy_node *pn;
+
+ if (!list_empty(&blkcg->policy_list)) {
+ spin_lock_irq(&blkcg->lock);
+ list_for_each_entry(pn, &blkcg->policy_list, node) {
+ if (!pn_matches_cftype(cft, pn))
+ continue;
+ blkio_print_policy_node(m, pn);
+ }
+ spin_unlock_irq(&blkcg->lock);
+ }
+}
+
+static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *m)
+{
+ struct blkio_cgroup *blkcg;
+ enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
+ int name = BLKIOFILE_ATTR(cft->private);
+
+ blkcg = cgroup_to_blkio_cgroup(cgrp);
+
+ switch(plid) {
+ case BLKIO_POLICY_PROP:
+ switch(name) {
+ case BLKIO_PROP_weight_device:
+ blkio_read_policy_node_files(cft, blkcg, m);
+ return 0;
+ default:
+ BUG();
+ }
+ break;
+ case BLKIO_POLICY_THROTL:
+ switch(name){
+ case BLKIO_THROTL_read_bps_device:
+ case BLKIO_THROTL_write_bps_device:
+ case BLKIO_THROTL_read_iops_device:
+ case BLKIO_THROTL_write_iops_device:
+ blkio_read_policy_node_files(cft, blkcg, m);
+ return 0;
+ default:
+ BUG();
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ return 0;
+}
+
+static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
+ struct cftype *cft, struct cgroup_map_cb *cb,
+ enum stat_type type, bool show_total, bool pcpu)
+{
+ struct blkio_group *blkg;
+ struct hlist_node *n;
+ uint64_t cgroup_total = 0;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ if (blkg->dev) {
+ if (!cftype_blkg_same_policy(cft, blkg))
+ continue;
+ if (pcpu)
+ cgroup_total += blkio_get_stat_cpu(blkg, cb,
+ blkg->dev, type);
+ else {
+ spin_lock_irq(&blkg->stats_lock);
+ cgroup_total += blkio_get_stat(blkg, cb,
+ blkg->dev, type);
+ spin_unlock_irq(&blkg->stats_lock);
+ }
+ }
+ }
+ if (show_total)
+ cb->fill(cb, "Total", cgroup_total);
+ rcu_read_unlock();
+ return 0;
+}
+
+/* All map kind of cgroup file get serviced by this function */
+static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
+ struct cgroup_map_cb *cb)
+{
+ struct blkio_cgroup *blkcg;
+ enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
+ int name = BLKIOFILE_ATTR(cft->private);
+
+ blkcg = cgroup_to_blkio_cgroup(cgrp);
+
+ switch(plid) {
+ case BLKIO_POLICY_PROP:
+ switch(name) {
+ case BLKIO_PROP_time:
+ return blkio_read_blkg_stats(blkcg, cft, cb,
+ BLKIO_STAT_TIME, 0, 0);
+ case BLKIO_PROP_sectors:
+ return blkio_read_blkg_stats(blkcg, cft, cb,
+ BLKIO_STAT_CPU_SECTORS, 0, 1);
+ case BLKIO_PROP_io_service_bytes:
+ return blkio_read_blkg_stats(blkcg, cft, cb,
+ BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
+ case BLKIO_PROP_io_serviced:
+ return blkio_read_blkg_stats(blkcg, cft, cb,
+ BLKIO_STAT_CPU_SERVICED, 1, 1);
+ case BLKIO_PROP_io_service_time:
+ return blkio_read_blkg_stats(blkcg, cft, cb,
+ BLKIO_STAT_SERVICE_TIME, 1, 0);
+ case BLKIO_PROP_io_wait_time:
+ return blkio_read_blkg_stats(blkcg, cft, cb,
+ BLKIO_STAT_WAIT_TIME, 1, 0);
+ case BLKIO_PROP_io_merged:
+ return blkio_read_blkg_stats(blkcg, cft, cb,
+ BLKIO_STAT_CPU_MERGED, 1, 1);
+ case BLKIO_PROP_io_queued:
+ return blkio_read_blkg_stats(blkcg, cft, cb,
+ BLKIO_STAT_QUEUED, 1, 0);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ case BLKIO_PROP_unaccounted_time:
+ return blkio_read_blkg_stats(blkcg, cft, cb,
+ BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
+ case BLKIO_PROP_dequeue:
+ return blkio_read_blkg_stats(blkcg, cft, cb,
+ BLKIO_STAT_DEQUEUE, 0, 0);
+ case BLKIO_PROP_avg_queue_size:
+ return blkio_read_blkg_stats(blkcg, cft, cb,
+ BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
+ case BLKIO_PROP_group_wait_time:
+ return blkio_read_blkg_stats(blkcg, cft, cb,
+ BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
+ case BLKIO_PROP_idle_time:
+ return blkio_read_blkg_stats(blkcg, cft, cb,
+ BLKIO_STAT_IDLE_TIME, 0, 0);
+ case BLKIO_PROP_empty_time:
+ return blkio_read_blkg_stats(blkcg, cft, cb,
+ BLKIO_STAT_EMPTY_TIME, 0, 0);
+#endif
+ default:
+ BUG();
+ }
+ break;
+ case BLKIO_POLICY_THROTL:
+ switch(name){
+ case BLKIO_THROTL_io_service_bytes:
+ return blkio_read_blkg_stats(blkcg, cft, cb,
+ BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
+ case BLKIO_THROTL_io_serviced:
+ return blkio_read_blkg_stats(blkcg, cft, cb,
+ BLKIO_STAT_CPU_SERVICED, 1, 1);
+ default:
+ BUG();
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ return 0;
+}
+
+static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
+{
+ struct blkio_group *blkg;
+ struct hlist_node *n;
+ struct blkio_policy_node *pn;
+
+ if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
+ return -EINVAL;
+
+ spin_lock(&blkio_list_lock);
+ spin_lock_irq(&blkcg->lock);
+ blkcg->weight = (unsigned int)val;
+
+ hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ pn = blkio_policy_search_node(blkcg, blkg->dev,
+ BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
+ if (pn)
+ continue;
+
+ blkio_update_group_weight(blkg, blkcg->weight);
+ }
+ spin_unlock_irq(&blkcg->lock);
+ spin_unlock(&blkio_list_lock);
+ return 0;
+}
+
+static u64 blkiocg_file_read_u64 (struct cgroup *cgrp, struct cftype *cft) {
+ struct blkio_cgroup *blkcg;
+ enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
+ int name = BLKIOFILE_ATTR(cft->private);
+
+ blkcg = cgroup_to_blkio_cgroup(cgrp);
+
+ switch(plid) {
+ case BLKIO_POLICY_PROP:
+ switch(name) {
+ case BLKIO_PROP_weight:
+ return (u64)blkcg->weight;
+ }
+ break;
+ default:
+ BUG();
+ }
+ return 0;
+}
+
+static int
+blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
+{
+ struct blkio_cgroup *blkcg;
+ enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
+ int name = BLKIOFILE_ATTR(cft->private);
+
+ blkcg = cgroup_to_blkio_cgroup(cgrp);
+
+ switch(plid) {
+ case BLKIO_POLICY_PROP:
+ switch(name) {
+ case BLKIO_PROP_weight:
+ return blkio_weight_write(blkcg, val);
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ return 0;
+}
+
+struct cftype blkio_files[] = {
+ {
+ .name = "weight_device",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
+ BLKIO_PROP_weight_device),
+ .read_seq_string = blkiocg_file_read,
+ .write_string = blkiocg_file_write,
+ .max_write_len = 256,
+ },
+ {
+ .name = "weight",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
+ BLKIO_PROP_weight),
+ .read_u64 = blkiocg_file_read_u64,
+ .write_u64 = blkiocg_file_write_u64,
+ },
+ {
+ .name = "time",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
+ BLKIO_PROP_time),
+ .read_map = blkiocg_file_read_map,
+ },
+ {
+ .name = "sectors",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
+ BLKIO_PROP_sectors),
+ .read_map = blkiocg_file_read_map,
+ },
+ {
+ .name = "io_service_bytes",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
+ BLKIO_PROP_io_service_bytes),
+ .read_map = blkiocg_file_read_map,
+ },
+ {
+ .name = "io_serviced",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
+ BLKIO_PROP_io_serviced),
+ .read_map = blkiocg_file_read_map,
+ },
+ {
+ .name = "io_service_time",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
+ BLKIO_PROP_io_service_time),
+ .read_map = blkiocg_file_read_map,
+ },
+ {
+ .name = "io_wait_time",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
+ BLKIO_PROP_io_wait_time),
+ .read_map = blkiocg_file_read_map,
+ },
+ {
+ .name = "io_merged",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
+ BLKIO_PROP_io_merged),
+ .read_map = blkiocg_file_read_map,
+ },
+ {
+ .name = "io_queued",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
+ BLKIO_PROP_io_queued),
+ .read_map = blkiocg_file_read_map,
+ },
+ {
+ .name = "reset_stats",
+ .write_u64 = blkiocg_reset_stats,
+ },
+#ifdef CONFIG_BLK_DEV_THROTTLING
+ {
+ .name = "throttle.read_bps_device",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
+ BLKIO_THROTL_read_bps_device),
+ .read_seq_string = blkiocg_file_read,
+ .write_string = blkiocg_file_write,
+ .max_write_len = 256,
+ },
+
+ {
+ .name = "throttle.write_bps_device",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
+ BLKIO_THROTL_write_bps_device),
+ .read_seq_string = blkiocg_file_read,
+ .write_string = blkiocg_file_write,
+ .max_write_len = 256,
+ },
+
+ {
+ .name = "throttle.read_iops_device",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
+ BLKIO_THROTL_read_iops_device),
+ .read_seq_string = blkiocg_file_read,
+ .write_string = blkiocg_file_write,
+ .max_write_len = 256,
+ },
+
+ {
+ .name = "throttle.write_iops_device",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
+ BLKIO_THROTL_write_iops_device),
+ .read_seq_string = blkiocg_file_read,
+ .write_string = blkiocg_file_write,
+ .max_write_len = 256,
+ },
+ {
+ .name = "throttle.io_service_bytes",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
+ BLKIO_THROTL_io_service_bytes),
+ .read_map = blkiocg_file_read_map,
+ },
+ {
+ .name = "throttle.io_serviced",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
+ BLKIO_THROTL_io_serviced),
+ .read_map = blkiocg_file_read_map,
+ },
+#endif /* CONFIG_BLK_DEV_THROTTLING */
+
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ {
+ .name = "avg_queue_size",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
+ BLKIO_PROP_avg_queue_size),
+ .read_map = blkiocg_file_read_map,
+ },
+ {
+ .name = "group_wait_time",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
+ BLKIO_PROP_group_wait_time),
+ .read_map = blkiocg_file_read_map,
+ },
+ {
+ .name = "idle_time",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
+ BLKIO_PROP_idle_time),
+ .read_map = blkiocg_file_read_map,
+ },
+ {
+ .name = "empty_time",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
+ BLKIO_PROP_empty_time),
+ .read_map = blkiocg_file_read_map,
+ },
+ {
+ .name = "dequeue",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
+ BLKIO_PROP_dequeue),
+ .read_map = blkiocg_file_read_map,
+ },
+ {
+ .name = "unaccounted_time",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
+ BLKIO_PROP_unaccounted_time),
+ .read_map = blkiocg_file_read_map,
+ },
+#endif
+};
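
Each entry above binds a blkio.* file name to a handler plus a BLKIOFILE_PRIVATE tag, so reading, say, io_service_bytes yields one "major:minor Read/Write/Sync/Async/Total value" line per device followed by a cgroup-wide Total line. A small hedged reader (the mount point is an assumption):

#include <stdio.h>

int main(void)
{
	/* Assumed mount point -- adjust for your system. */
	const char *path = "/sys/fs/cgroup/blkio/blkio.io_service_bytes";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Lines look like "8:0 Read 4096", ending with a "Total" line. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}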
+
+static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
+{
+ return cgroup_add_files(cgroup, subsys, blkio_files,
+ ARRAY_SIZE(blkio_files));
+}
+
+static void blkiocg_destroy(struct cgroup *cgroup)
+{
+ struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
+ unsigned long flags;
+ struct blkio_group *blkg;
+ void *key;
+ struct blkio_policy_type *blkiop;
+ struct blkio_policy_node *pn, *pntmp;
+
+ rcu_read_lock();
+ do {
+ spin_lock_irqsave(&blkcg->lock, flags);
+
+ if (hlist_empty(&blkcg->blkg_list)) {
+ spin_unlock_irqrestore(&blkcg->lock, flags);
+ break;
+ }
+
+ blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
+ blkcg_node);
+ key = rcu_dereference(blkg->key);
+ __blkiocg_del_blkio_group(blkg);
+
+ spin_unlock_irqrestore(&blkcg->lock, flags);
+
+ /*
+ * This blkio_group is being unlinked as associated cgroup is
+ * going away. Let all the IO controlling policies know about
+ * this event.
+ */
+ spin_lock(&blkio_list_lock);
+ list_for_each_entry(blkiop, &blkio_list, list) {
+ if (blkiop->plid != blkg->plid)
+ continue;
+ blkiop->ops.blkio_unlink_group_fn(key, blkg);
+ }
+ spin_unlock(&blkio_list_lock);
+ } while (1);
+
+ list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
+ blkio_policy_delete_node(pn);
+ kfree(pn);
+ }
+
+ free_css_id(&blkio_subsys, &blkcg->css);
+ rcu_read_unlock();
+ if (blkcg != &blkio_root_cgroup)
+ kfree(blkcg);
+}
+
+static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
+{
+ struct blkio_cgroup *blkcg;
+ struct cgroup *parent = cgroup->parent;
+
+ if (!parent) {
+ blkcg = &blkio_root_cgroup;
+ goto done;
+ }
+
+ blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
+ if (!blkcg)
+ return ERR_PTR(-ENOMEM);
+
+ blkcg->weight = BLKIO_WEIGHT_DEFAULT;
+done:
+ spin_lock_init(&blkcg->lock);
+ INIT_HLIST_HEAD(&blkcg->blkg_list);
+
+ INIT_LIST_HEAD(&blkcg->policy_list);
+ return &blkcg->css;
+}
+
+/*
+ * We cannot support shared io contexts, as we have no means to support
+ * two tasks with the same ioc in two different groups without major rework
+ * of the main cic data structures. For now we allow a task to change
+ * its cgroup only if it's the only owner of its ioc.
+ */
+static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+{
+ struct task_struct *task;
+ struct io_context *ioc;
+ int ret = 0;
+
+ /* task_lock() is needed to avoid races with exit_io_context() */
+ cgroup_taskset_for_each(task, cgrp, tset) {
+ task_lock(task);
+ ioc = task->io_context;
+ if (ioc && atomic_read(&ioc->nr_tasks) > 1)
+ ret = -EINVAL;
+ task_unlock(task);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static void blkiocg_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+{
+ struct task_struct *task;
+ struct io_context *ioc;
+
+ cgroup_taskset_for_each(task, cgrp, tset) {
+ /* we don't lose anything even if ioc allocation fails */
+ ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
+ if (ioc) {
+ ioc_cgroup_changed(ioc);
+ put_io_context(ioc);
+ }
+ }
+}
+
+void blkio_policy_register(struct blkio_policy_type *blkiop)
+{
+ spin_lock(&blkio_list_lock);
+ list_add_tail(&blkiop->list, &blkio_list);
+ spin_unlock(&blkio_list_lock);
+}
+EXPORT_SYMBOL_GPL(blkio_policy_register);
+
+void blkio_policy_unregister(struct blkio_policy_type *blkiop)
+{
+ spin_lock(&blkio_list_lock);
+ list_del_init(&blkiop->list);
+ spin_unlock(&blkio_list_lock);
+}
+EXPORT_SYMBOL_GPL(blkio_policy_unregister);
+
+static int __init init_cgroup_blkio(void)
+{
+ return cgroup_load_subsys(&blkio_subsys);
+}
+
+static void __exit exit_cgroup_blkio(void)
+{
+ cgroup_unload_subsys(&blkio_subsys);
+}
+
+module_init(init_cgroup_blkio);
+module_exit(exit_cgroup_blkio);
+MODULE_LICENSE("GPL");
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
new file mode 100644
index 00000000..6f3ace7e
--- /dev/null
+++ b/block/blk-cgroup.h
@@ -0,0 +1,364 @@
+#ifndef _BLK_CGROUP_H
+#define _BLK_CGROUP_H
+/*
+ * Common Block IO controller cgroup interface
+ *
+ * Based on ideas and code from CFQ, CFS and BFQ:
+ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
+ *
+ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
+ * Paolo Valente <paolo.valente@unimore.it>
+ *
+ * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
+ * Nauman Rafique <nauman@google.com>
+ */
+
+#include <linux/cgroup.h>
+#include <linux/u64_stats_sync.h>
+
+enum blkio_policy_id {
+ BLKIO_POLICY_PROP = 0, /* Proportional Bandwidth division */
+ BLKIO_POLICY_THROTL, /* Throttling */
+};
+
+/* Max limits for throttle policy */
+#define THROTL_IOPS_MAX UINT_MAX
+
+#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
+
+#ifndef CONFIG_BLK_CGROUP
+/* When blk-cgroup is a module, its subsys_id isn't a compile-time constant */
+extern struct cgroup_subsys blkio_subsys;
+#define blkio_subsys_id blkio_subsys.subsys_id
+#endif
+
+enum stat_type {
+ /* Total time spent (in ns) between request dispatch to the driver and
+ * request completion for IOs done by this cgroup. This may not be
+ * accurate when NCQ is turned on. */
+ BLKIO_STAT_SERVICE_TIME = 0,
+ /* Total time spent waiting in scheduler queue in ns */
+ BLKIO_STAT_WAIT_TIME,
+ /* Number of IOs queued up */
+ BLKIO_STAT_QUEUED,
+ /* All the single valued stats go below this */
+ BLKIO_STAT_TIME,
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ /* Time not charged to this cgroup */
+ BLKIO_STAT_UNACCOUNTED_TIME,
+ BLKIO_STAT_AVG_QUEUE_SIZE,
+ BLKIO_STAT_IDLE_TIME,
+ BLKIO_STAT_EMPTY_TIME,
+ BLKIO_STAT_GROUP_WAIT_TIME,
+ BLKIO_STAT_DEQUEUE
+#endif
+};
+
+/* Per cpu stats */
+enum stat_type_cpu {
+ BLKIO_STAT_CPU_SECTORS,
+ /* Total bytes transferred */
+ BLKIO_STAT_CPU_SERVICE_BYTES,
+ /* Total IOs serviced, post merge */
+ BLKIO_STAT_CPU_SERVICED,
+ /* Number of IOs merged */
+ BLKIO_STAT_CPU_MERGED,
+ BLKIO_STAT_CPU_NR
+};
+
+enum stat_sub_type {
+ BLKIO_STAT_READ = 0,
+ BLKIO_STAT_WRITE,
+ BLKIO_STAT_SYNC,
+ BLKIO_STAT_ASYNC,
+ BLKIO_STAT_TOTAL
+};
+
+/* blkg state flags */
+enum blkg_state_flags {
+ BLKG_waiting = 0,
+ BLKG_idling,
+ BLKG_empty,
+};
+
+/* cgroup files owned by proportional weight policy */
+enum blkcg_file_name_prop {
+ BLKIO_PROP_weight = 1,
+ BLKIO_PROP_weight_device,
+ BLKIO_PROP_io_service_bytes,
+ BLKIO_PROP_io_serviced,
+ BLKIO_PROP_time,
+ BLKIO_PROP_sectors,
+ BLKIO_PROP_unaccounted_time,
+ BLKIO_PROP_io_service_time,
+ BLKIO_PROP_io_wait_time,
+ BLKIO_PROP_io_merged,
+ BLKIO_PROP_io_queued,
+ BLKIO_PROP_avg_queue_size,
+ BLKIO_PROP_group_wait_time,
+ BLKIO_PROP_idle_time,
+ BLKIO_PROP_empty_time,
+ BLKIO_PROP_dequeue,
+};
+
+/* cgroup files owned by throttle policy */
+enum blkcg_file_name_throtl {
+ BLKIO_THROTL_read_bps_device,
+ BLKIO_THROTL_write_bps_device,
+ BLKIO_THROTL_read_iops_device,
+ BLKIO_THROTL_write_iops_device,
+ BLKIO_THROTL_io_service_bytes,
+ BLKIO_THROTL_io_serviced,
+};
+
+struct blkio_cgroup {
+ struct cgroup_subsys_state css;
+ unsigned int weight;
+ spinlock_t lock;
+ struct hlist_head blkg_list;
+ struct list_head policy_list; /* list of blkio_policy_node */
+};
+
+struct blkio_group_stats {
+ /* total disk time and nr sectors dispatched by this group */
+ uint64_t time;
+ uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ /* Time not charged to this cgroup */
+ uint64_t unaccounted_time;
+
+ /* Sum of number of IOs queued across all samples */
+ uint64_t avg_queue_size_sum;
+ /* Count of samples taken for average */
+ uint64_t avg_queue_size_samples;
+ /* How many times this group has been removed from service tree */
+ unsigned long dequeue;
+
+ /* Total time spent waiting for it to be assigned a timeslice. */
+ uint64_t group_wait_time;
+ uint64_t start_group_wait_time;
+
+ /* Time spent idling for this blkio_group */
+ uint64_t idle_time;
+ uint64_t start_idle_time;
+ /*
+ * Total time during which this group had requests queued but was
+ * not the currently active queue.
+ */
+ uint64_t empty_time;
+ uint64_t start_empty_time;
+ uint16_t flags;
+#endif
+};
+
+/* Per cpu blkio group stats */
+struct blkio_group_stats_cpu {
+ uint64_t sectors;
+ uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL];
+ struct u64_stats_sync syncp;
+};
+
+struct blkio_group {
+ /* An rcu protected unique identifier for the group */
+ void *key;
+ struct hlist_node blkcg_node;
+ unsigned short blkcg_id;
+ /* Store cgroup path */
+ char path[128];
+ /* The device MKDEV(major, minor), this group has been created for */
+ dev_t dev;
+ /* policy which owns this blk group */
+ enum blkio_policy_id plid;
+
+ /* Need to serialize the stats in the case of reset/update */
+ spinlock_t stats_lock;
+ struct blkio_group_stats stats;
+ /* Per cpu stats pointer */
+ struct blkio_group_stats_cpu __percpu *stats_cpu;
+};
+
+struct blkio_policy_node {
+ struct list_head node;
+ dev_t dev;
+ /* This node belongs to max bw policy or proportional weight policy */
+ enum blkio_policy_id plid;
+ /* cgroup file to which this rule belongs to */
+ int fileid;
+
+ union {
+ unsigned int weight;
+ /*
+ * Rate read/write in terms of bytes per second
+ * Whether this rate represents read or write is determined
+ * by file type "fileid".
+ */
+ u64 bps;
+ unsigned int iops;
+ } val;
+};
+
+extern unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
+ dev_t dev);
+extern uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg,
+ dev_t dev);
+extern uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg,
+ dev_t dev);
+extern unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg,
+ dev_t dev);
+extern unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg,
+ dev_t dev);
+
+typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
+
+typedef void (blkio_update_group_weight_fn) (void *key,
+ struct blkio_group *blkg, unsigned int weight);
+typedef void (blkio_update_group_read_bps_fn) (void * key,
+ struct blkio_group *blkg, u64 read_bps);
+typedef void (blkio_update_group_write_bps_fn) (void *key,
+ struct blkio_group *blkg, u64 write_bps);
+typedef void (blkio_update_group_read_iops_fn) (void *key,
+ struct blkio_group *blkg, unsigned int read_iops);
+typedef void (blkio_update_group_write_iops_fn) (void *key,
+ struct blkio_group *blkg, unsigned int write_iops);
+
+struct blkio_policy_ops {
+ blkio_unlink_group_fn *blkio_unlink_group_fn;
+ blkio_update_group_weight_fn *blkio_update_group_weight_fn;
+ blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
+ blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
+ blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
+ blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
+};
+
+struct blkio_policy_type {
+ struct list_head list;
+ struct blkio_policy_ops ops;
+ enum blkio_policy_id plid;
+};
+
+/* Blkio controller policy registration */
+extern void blkio_policy_register(struct blkio_policy_type *);
+extern void blkio_policy_unregister(struct blkio_policy_type *);
+
+static inline char *blkg_path(struct blkio_group *blkg)
+{
+ return blkg->path;
+}
+
+#else
+
+struct blkio_group {
+};
+
+struct blkio_policy_type {
+};
+
+static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
+static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
+
+static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
+
+#endif
+
+#define BLKIO_WEIGHT_MIN 10
+#define BLKIO_WEIGHT_MAX 1000
+#define BLKIO_WEIGHT_DEFAULT 500
+
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg);
+void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+ unsigned long dequeue);
+void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg);
+void blkiocg_update_idle_time_stats(struct blkio_group *blkg);
+void blkiocg_set_start_empty_time(struct blkio_group *blkg);
+
+#define BLKG_FLAG_FNS(name) \
+static inline void blkio_mark_blkg_##name( \
+ struct blkio_group_stats *stats) \
+{ \
+ stats->flags |= (1 << BLKG_##name); \
+} \
+static inline void blkio_clear_blkg_##name( \
+ struct blkio_group_stats *stats) \
+{ \
+ stats->flags &= ~(1 << BLKG_##name); \
+} \
+static inline int blkio_blkg_##name(struct blkio_group_stats *stats) \
+{ \
+ return (stats->flags & (1 << BLKG_##name)) != 0; \
+} \
+
+BLKG_FLAG_FNS(waiting)
+BLKG_FLAG_FNS(idling)
+BLKG_FLAG_FNS(empty)
+#undef BLKG_FLAG_FNS
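
For reference, a sketch of the code generated above: BLKG_FLAG_FNS(waiting) expands to the following three helpers (the idling and empty variants are analogous).

static inline void blkio_mark_blkg_waiting(struct blkio_group_stats *stats)
{
	stats->flags |= (1 << BLKG_waiting);
}
static inline void blkio_clear_blkg_waiting(struct blkio_group_stats *stats)
{
	stats->flags &= ~(1 << BLKG_waiting);
}
static inline int blkio_blkg_waiting(struct blkio_group_stats *stats)
{
	return (stats->flags & (1 << BLKG_waiting)) != 0;
}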
+#else
+static inline void blkiocg_update_avg_queue_size_stats(
+ struct blkio_group *blkg) {}
+static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+ unsigned long dequeue) {}
+static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
+{}
+static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
+static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
+#endif
+
+#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
+extern struct blkio_cgroup blkio_root_cgroup;
+extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
+extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
+extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
+ struct blkio_group *blkg, void *key, dev_t dev,
+ enum blkio_policy_id plid);
+extern int blkio_alloc_blkg_stats(struct blkio_group *blkg);
+extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
+extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
+ void *key);
+void blkiocg_update_timeslice_used(struct blkio_group *blkg,
+ unsigned long time,
+ unsigned long unaccounted_time);
+void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
+ bool direction, bool sync);
+void blkiocg_update_completion_stats(struct blkio_group *blkg,
+ uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
+void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
+ bool sync);
+void blkiocg_update_io_add_stats(struct blkio_group *blkg,
+ struct blkio_group *curr_blkg, bool direction, bool sync);
+void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+ bool direction, bool sync);
+#else
+struct cgroup;
+static inline struct blkio_cgroup *
+cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
+static inline struct blkio_cgroup *
+task_blkio_cgroup(struct task_struct *tsk) { return NULL; }
+
+static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
+ struct blkio_group *blkg, void *key, dev_t dev,
+ enum blkio_policy_id plid) {}
+
+static inline int blkio_alloc_blkg_stats(struct blkio_group *blkg) { return 0; }
+
+static inline int
+blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
+
+static inline struct blkio_group *
+blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
+static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
+ unsigned long time,
+ unsigned long unaccounted_time)
+{}
+static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+ uint64_t bytes, bool direction, bool sync) {}
+static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
+ uint64_t start_time, uint64_t io_start_time, bool direction,
+ bool sync) {}
+static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
+ bool direction, bool sync) {}
+static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
+ struct blkio_group *curr_blkg, bool direction, bool sync) {}
+static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+ bool direction, bool sync) {}
+#endif
+#endif /* _BLK_CGROUP_H */
diff --git a/block/blk-core.c b/block/blk-core.c
new file mode 100644
index 00000000..1f61b748
--- /dev/null
+++ b/block/blk-core.c
@@ -0,0 +1,2908 @@
+/*
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
+ * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
+ * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
+ * - July2000
+ * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
+ */
+
+/*
+ * This handles all read/write requests to block devices
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/backing-dev.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/kernel_stat.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/writeback.h>
+#include <linux/task_io_accounting_ops.h>
+#include <linux/fault-inject.h>
+#include <linux/list_sort.h>
+#include <linux/delay.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/block.h>
+
+#include "blk.h"
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
+
+DEFINE_IDA(blk_queue_ida);
+
+/*
+ * For the allocated request tables
+ */
+static struct kmem_cache *request_cachep;
+
+/*
+ * For queue allocation
+ */
+struct kmem_cache *blk_requestq_cachep;
+
+/*
+ * Controlling structure to kblockd
+ */
+static struct workqueue_struct *kblockd_workqueue;
+
+static void drive_stat_acct(struct request *rq, int new_io)
+{
+ struct hd_struct *part;
+ int rw = rq_data_dir(rq);
+ int cpu;
+
+ if (!blk_do_io_stat(rq))
+ return;
+
+ cpu = part_stat_lock();
+
+ if (!new_io) {
+ part = rq->part;
+ part_stat_inc(cpu, part, merges[rw]);
+ } else {
+ part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
+ if (!hd_struct_try_get(part)) {
+ /*
+ * The partition is already being removed,
+ * the request will be accounted on the disk only
+ *
+ * We take a reference on disk->part0 although that
+ * partition will never be deleted, so we can treat
+ * it as any other partition.
+ */
+ part = &rq->rq_disk->part0;
+ hd_struct_get(part);
+ }
+ part_round_stats(cpu, part);
+ part_inc_in_flight(part, rw);
+ rq->part = part;
+ }
+
+ part_stat_unlock();
+}
+
+void blk_queue_congestion_threshold(struct request_queue *q)
+{
+ int nr;
+
+ nr = q->nr_requests - (q->nr_requests / 8) + 1;
+ if (nr > q->nr_requests)
+ nr = q->nr_requests;
+ q->nr_congestion_on = nr;
+
+ nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
+ if (nr < 1)
+ nr = 1;
+ q->nr_congestion_off = nr;
+}
+
+/**
+ * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
+ * @bdev: device
+ *
+ * Locates the passed device's request queue and returns the address of its
+ * backing_dev_info
+ *
+ * Will return NULL if the request queue cannot be located.
+ */
+struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
+{
+ struct backing_dev_info *ret = NULL;
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ if (q)
+ ret = &q->backing_dev_info;
+ return ret;
+}
+EXPORT_SYMBOL(blk_get_backing_dev_info);
+
+void blk_rq_init(struct request_queue *q, struct request *rq)
+{
+ memset(rq, 0, sizeof(*rq));
+
+ INIT_LIST_HEAD(&rq->queuelist);
+ INIT_LIST_HEAD(&rq->timeout_list);
+ rq->cpu = -1;
+ rq->q = q;
+ rq->__sector = (sector_t) -1;
+ INIT_HLIST_NODE(&rq->hash);
+ RB_CLEAR_NODE(&rq->rb_node);
+ rq->cmd = rq->__cmd;
+ rq->cmd_len = BLK_MAX_CDB;
+ rq->tag = -1;
+ rq->ref_count = 1;
+ rq->start_time = jiffies;
+ set_start_time_ns(rq);
+ rq->part = NULL;
+}
+EXPORT_SYMBOL(blk_rq_init);
+
+static void req_bio_endio(struct request *rq, struct bio *bio,
+ unsigned int nbytes, int error)
+{
+ if (error)
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ error = -EIO;
+
+ if (unlikely(nbytes > bio->bi_size)) {
+ printk(KERN_ERR "%s: want %u bytes done, %u left\n",
+ __func__, nbytes, bio->bi_size);
+ nbytes = bio->bi_size;
+ }
+
+ if (unlikely(rq->cmd_flags & REQ_QUIET))
+ set_bit(BIO_QUIET, &bio->bi_flags);
+
+ bio->bi_size -= nbytes;
+ bio->bi_sector += (nbytes >> 9);
+
+ if (bio_integrity(bio))
+ bio_integrity_advance(bio, nbytes);
+
+ /* don't actually finish bio if it's part of flush sequence */
+ if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+ bio_endio(bio, error);
+}
+
+void blk_dump_rq_flags(struct request *rq, char *msg)
+{
+ int bit;
+
+ printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
+ rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
+ rq->cmd_flags);
+
+ printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
+ (unsigned long long)blk_rq_pos(rq),
+ blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
+ printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
+ rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
+
+ if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
+ printk(KERN_INFO " cdb: ");
+ for (bit = 0; bit < BLK_MAX_CDB; bit++)
+ printk("%02x ", rq->cmd[bit]);
+ printk("\n");
+ }
+}
+EXPORT_SYMBOL(blk_dump_rq_flags);
+
+static void blk_delay_work(struct work_struct *work)
+{
+ struct request_queue *q;
+
+ q = container_of(work, struct request_queue, delay_work.work);
+ spin_lock_irq(q->queue_lock);
+ __blk_run_queue(q);
+ spin_unlock_irq(q->queue_lock);
+}
+
+/**
+ * blk_delay_queue - restart queueing after defined interval
+ * @q: The &struct request_queue in question
+ * @msecs: Delay in msecs
+ *
+ * Description:
+ * Sometimes queueing needs to be postponed for a little while, to allow
+ * resources to come back. This function will make sure that queueing is
+ * restarted around the specified time.
+ */
+void blk_delay_queue(struct request_queue *q, unsigned long msecs)
+{
+ queue_delayed_work(kblockd_workqueue, &q->delay_work,
+ msecs_to_jiffies(msecs));
+}
+EXPORT_SYMBOL(blk_delay_queue);
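
A usage sketch (example_* names are hypothetical driver code): a request_fn that temporarily runs out of a hardware resource can requeue the request and back off with blk_delay_queue(), letting kblockd rerun the queue later.

static void example_delay_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (!example_hw_slot_available()) {
			/* put the request back and retry in ~3ms */
			blk_requeue_request(q, rq);
			blk_delay_queue(q, 3);
			return;
		}
		example_issue(rq);
	}
}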
+
+/**
+ * blk_start_queue - restart a previously stopped queue
+ * @q: The &struct request_queue in question
+ *
+ * Description:
+ * blk_start_queue() will clear the stop flag on the queue, and call
+ * the request_fn for the queue if it was in a stopped state when
+ * entered. Also see blk_stop_queue(). Queue lock must be held.
+ **/
+void blk_start_queue(struct request_queue *q)
+{
+ WARN_ON(!irqs_disabled());
+
+ queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+ __blk_run_queue(q);
+}
+EXPORT_SYMBOL(blk_start_queue);
+
+/**
+ * blk_stop_queue - stop a queue
+ * @q: The &struct request_queue in question
+ *
+ * Description:
+ * The Linux block layer assumes that a block driver will consume all
+ * entries on the request queue when the request_fn strategy is called.
+ * Often this will not happen, because of hardware limitations (queue
+ * depth settings). If a device driver gets a 'queue full' response,
+ * or if it simply chooses not to queue more I/O at one point, it can
+ * call this function to prevent the request_fn from being called until
+ * the driver has signalled it's ready to go again. This happens by calling
+ * blk_start_queue() to restart queue operations. Queue lock must be held.
+ **/
+void blk_stop_queue(struct request_queue *q)
+{
+ __cancel_delayed_work(&q->delay_work);
+ queue_flag_set(QUEUE_FLAG_STOPPED, q);
+}
+EXPORT_SYMBOL(blk_stop_queue);
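
A sketch of the stop/start pairing described above (example_* names are hypothetical): the driver stops the queue on a queue-full condition and restarts it from its completion path, both under the queue lock.

static void example_stop_request_fn(struct request_queue *q)
{
	while (example_device_can_queue()) {
		struct request *rq = blk_fetch_request(q);

		if (!rq)
			return;
		example_issue(rq);
	}
	blk_stop_queue(q);	/* device full, wait for a completion */
}

static void example_complete(struct request_queue *q, struct request *rq)
{
	/* called with q->queue_lock held and irqs disabled */
	__blk_end_request_all(rq, 0);
	blk_start_queue(q);	/* room again, resume request_fn calls */
}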
+
+/**
+ * blk_sync_queue - cancel any pending callbacks on a queue
+ * @q: the queue
+ *
+ * Description:
+ * The block layer may perform asynchronous callback activity
+ * on a queue, such as calling the unplug function after a timeout.
+ * A block device may call blk_sync_queue to ensure that any
+ * such activity is cancelled, thus allowing it to release resources
+ * that the callbacks might use. The caller must already have made sure
+ * that its ->make_request_fn will not re-add plugging prior to calling
+ * this function.
+ *
+ * This function does not cancel any asynchronous activity arising
+ * out of elevator or throttling code. That would require elevator_exit()
+ * and blk_throtl_exit() to be called with queue lock initialized.
+ *
+ */
+void blk_sync_queue(struct request_queue *q)
+{
+ del_timer_sync(&q->timeout);
+ cancel_delayed_work_sync(&q->delay_work);
+}
+EXPORT_SYMBOL(blk_sync_queue);
+
+/**
+ * __blk_run_queue - run a single device queue
+ * @q: The queue to run
+ *
+ * Description:
+ * See @blk_run_queue. This variant must be called with the queue lock
+ * held and interrupts disabled.
+ */
+void __blk_run_queue(struct request_queue *q)
+{
+ if (unlikely(blk_queue_stopped(q)))
+ return;
+
+ q->request_fn(q);
+}
+EXPORT_SYMBOL(__blk_run_queue);
+
+/**
+ * blk_run_queue_async - run a single device queue in workqueue context
+ * @q: The queue to run
+ *
+ * Description:
+ * Tells kblockd to perform the equivalent of @blk_run_queue on behalf
+ * of us.
+ */
+void blk_run_queue_async(struct request_queue *q)
+{
+ if (likely(!blk_queue_stopped(q))) {
+ __cancel_delayed_work(&q->delay_work);
+ queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+ }
+}
+EXPORT_SYMBOL(blk_run_queue_async);
+
+/**
+ * blk_run_queue - run a single device queue
+ * @q: The queue to run
+ *
+ * Description:
+ * Invoke request handling on this queue, if it has pending work to do.
+ * May be used to restart queueing when a request has completed.
+ */
+void blk_run_queue(struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __blk_run_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(blk_run_queue);
+
+void blk_put_queue(struct request_queue *q)
+{
+ kobject_put(&q->kobj);
+}
+EXPORT_SYMBOL(blk_put_queue);
+
+/**
+ * blk_drain_queue - drain requests from request_queue
+ * @q: queue to drain
+ * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
+ *
+ * Drain requests from @q. If @drain_all is set, all requests are drained.
+ * If not, only ELVPRIV requests are drained. The caller is responsible
+ * for ensuring that no new requests which need to be drained are queued.
+ */
+void blk_drain_queue(struct request_queue *q, bool drain_all)
+{
+ while (true) {
+ bool drain = false;
+ int i;
+
+ spin_lock_irq(q->queue_lock);
+
+ elv_drain_elevator(q);
+ if (drain_all)
+ blk_throtl_drain(q);
+
+ /*
+ * This function might be called on a queue which failed
+ * driver init after queue creation. Some drivers
+ * (e.g. fd) get unhappy in such cases. Kick the queue iff the
+ * dispatch queue has something on it.
+ */
+ if (!list_empty(&q->queue_head))
+ __blk_run_queue(q);
+
+ drain |= q->rq.elvpriv;
+
+ /*
+ * Unfortunately, requests are queued at and tracked from
+ * multiple places and there's no single counter which can
+ * be drained. Check all the queues and counters.
+ */
+ if (drain_all) {
+ drain |= !list_empty(&q->queue_head);
+ for (i = 0; i < 2; i++) {
+ drain |= q->rq.count[i];
+ drain |= q->in_flight[i];
+ drain |= !list_empty(&q->flush_queue[i]);
+ }
+ }
+
+ spin_unlock_irq(q->queue_lock);
+
+ if (!drain)
+ break;
+ msleep(10);
+ }
+}
+
+/**
+ * blk_cleanup_queue - shutdown a request queue
+ * @q: request queue to shutdown
+ *
+ * Mark @q DEAD, drain all pending requests, destroy and put it. All
+ * future requests will be failed immediately with -ENODEV.
+ */
+void blk_cleanup_queue(struct request_queue *q)
+{
+ spinlock_t *lock = q->queue_lock;
+
+ /* mark @q DEAD, no new request or merges will be allowed afterwards */
+ mutex_lock(&q->sysfs_lock);
+ queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
+
+ spin_lock_irq(lock);
+ queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+ queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+ queue_flag_set(QUEUE_FLAG_DEAD, q);
+
+ if (q->queue_lock != &q->__queue_lock)
+ q->queue_lock = &q->__queue_lock;
+
+ spin_unlock_irq(lock);
+ mutex_unlock(&q->sysfs_lock);
+
+ /*
+ * Drain all requests queued before DEAD marking. The caller might
+ * be trying to tear down @q before its elevator is initialized, in
+ * which case we don't want to call into draining.
+ */
+ if (q->elevator)
+ blk_drain_queue(q, true);
+
+ /* @q won't process any more request, flush async actions */
+ del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+ blk_sync_queue(q);
+
+ /* @q is and will stay empty, shutdown and put */
+ blk_put_queue(q);
+}
+EXPORT_SYMBOL(blk_cleanup_queue);
+
+static int blk_init_free_list(struct request_queue *q)
+{
+ struct request_list *rl = &q->rq;
+
+ if (unlikely(rl->rq_pool))
+ return 0;
+
+ rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
+ rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
+ rl->elvpriv = 0;
+ init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
+ init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
+
+ rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
+ mempool_free_slab, request_cachep, q->node);
+
+ if (!rl->rq_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
+{
+ return blk_alloc_queue_node(gfp_mask, -1);
+}
+EXPORT_SYMBOL(blk_alloc_queue);
+
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+{
+ struct request_queue *q;
+ int err;
+
+ q = kmem_cache_alloc_node(blk_requestq_cachep,
+ gfp_mask | __GFP_ZERO, node_id);
+ if (!q)
+ return NULL;
+
+ q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
+ if (q->id < 0)
+ goto fail_q;
+
+ q->backing_dev_info.ra_pages =
+ (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+ q->backing_dev_info.state = 0;
+ q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
+ q->backing_dev_info.name = "block";
+ q->node = node_id;
+
+ err = bdi_init(&q->backing_dev_info);
+ if (err)
+ goto fail_id;
+
+ if (blk_throtl_init(q))
+ goto fail_id;
+
+ setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
+ laptop_mode_timer_fn, (unsigned long) q);
+ setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+ INIT_LIST_HEAD(&q->timeout_list);
+ INIT_LIST_HEAD(&q->icq_list);
+ INIT_LIST_HEAD(&q->flush_queue[0]);
+ INIT_LIST_HEAD(&q->flush_queue[1]);
+ INIT_LIST_HEAD(&q->flush_data_in_flight);
+ INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
+
+ kobject_init(&q->kobj, &blk_queue_ktype);
+
+ mutex_init(&q->sysfs_lock);
+ spin_lock_init(&q->__queue_lock);
+
+ /*
+ * By default initialize queue_lock to internal lock and driver can
+ * override it later if need be.
+ */
+ q->queue_lock = &q->__queue_lock;
+
+ return q;
+
+fail_id:
+ ida_simple_remove(&blk_queue_ida, q->id);
+fail_q:
+ kmem_cache_free(blk_requestq_cachep, q);
+ return NULL;
+}
+EXPORT_SYMBOL(blk_alloc_queue_node);
+
+/**
+ * blk_init_queue - prepare a request queue for use with a block device
+ * @rfn: The function to be called to process requests that have been
+ * placed on the queue.
+ * @lock: Request queue spin lock
+ *
+ * Description:
+ * If a block device wishes to use the standard request handling procedures,
+ * which sorts requests and coalesces adjacent requests, then it must
+ * call blk_init_queue(). The function @rfn will be called when there
+ * are requests on the queue that need to be processed. If the device
+ * supports plugging, then @rfn may not be called immediately when requests
+ * are available on the queue, but may be called at some time later instead.
+ * Plugged queues are generally unplugged when a buffer belonging to one
+ * of the requests on the queue is needed, or due to memory pressure.
+ *
+ * @rfn is not required, or even expected, to remove all requests off the
+ * queue, but only as many as it can handle at a time. If it does leave
+ * requests on the queue, it is responsible for arranging that the requests
+ * get dealt with eventually.
+ *
+ * The queue spin lock must be held while manipulating the requests on the
+ * request queue; this lock will be taken also from interrupt context, so irq
+ * disabling is needed for it.
+ *
+ * Function returns a pointer to the initialized request queue, or %NULL if
+ * it didn't succeed.
+ *
+ * Note:
+ * blk_init_queue() must be paired with a blk_cleanup_queue() call
+ * when the block device is deactivated (such as at module unload).
+ **/
+
+struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
+{
+ return blk_init_queue_node(rfn, lock, -1);
+}
+EXPORT_SYMBOL(blk_init_queue);
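
A minimal sketch of the pairing required by the note above (example_* names are hypothetical): a driver allocates its queue at probe time with blk_init_queue() and tears it down with blk_cleanup_queue() on removal.

static DEFINE_SPINLOCK(example_lock);
static struct request_queue *example_queue;

static int __init example_probe(void)
{
	/* example_request_fn is the driver's request handler */
	example_queue = blk_init_queue(example_request_fn, &example_lock);
	if (!example_queue)
		return -ENOMEM;
	blk_queue_logical_block_size(example_queue, 512);
	return 0;
}

static void __exit example_remove(void)
{
	blk_cleanup_queue(example_queue);	/* paired with blk_init_queue() */
}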
+
+struct request_queue *
+blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
+{
+ struct request_queue *uninit_q, *q;
+
+ uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+ if (!uninit_q)
+ return NULL;
+
+ q = blk_init_allocated_queue(uninit_q, rfn, lock);
+ if (!q)
+ blk_cleanup_queue(uninit_q);
+
+ return q;
+}
+EXPORT_SYMBOL(blk_init_queue_node);
+
+struct request_queue *
+blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
+ spinlock_t *lock)
+{
+ if (!q)
+ return NULL;
+
+ if (blk_init_free_list(q))
+ return NULL;
+
+ q->request_fn = rfn;
+ q->prep_rq_fn = NULL;
+ q->unprep_rq_fn = NULL;
+ q->queue_flags = QUEUE_FLAG_DEFAULT;
+
+ /* Override internal queue lock with supplied lock pointer */
+ if (lock)
+ q->queue_lock = lock;
+
+ /*
+ * This also sets hw/phys segments, boundary and size
+ */
+ blk_queue_make_request(q, blk_queue_bio);
+
+ q->sg_reserved_size = INT_MAX;
+
+ /*
+ * all done
+ */
+ if (!elevator_init(q, NULL)) {
+ blk_queue_congestion_threshold(q);
+ return q;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(blk_init_allocated_queue);
+
+bool blk_get_queue(struct request_queue *q)
+{
+ if (likely(!blk_queue_dead(q))) {
+ __blk_get_queue(q);
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(blk_get_queue);
+
+static inline void blk_free_request(struct request_queue *q, struct request *rq)
+{
+ if (rq->cmd_flags & REQ_ELVPRIV) {
+ elv_put_request(q, rq);
+ if (rq->elv.icq)
+ put_io_context(rq->elv.icq->ioc);
+ }
+
+ mempool_free(rq, q->rq.rq_pool);
+}
+
+static struct request *
+blk_alloc_request(struct request_queue *q, struct io_cq *icq,
+ unsigned int flags, gfp_t gfp_mask)
+{
+ struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+
+ if (!rq)
+ return NULL;
+
+ blk_rq_init(q, rq);
+
+ rq->cmd_flags = flags | REQ_ALLOCED;
+
+ if (flags & REQ_ELVPRIV) {
+ rq->elv.icq = icq;
+ if (unlikely(elv_set_request(q, rq, gfp_mask))) {
+ mempool_free(rq, q->rq.rq_pool);
+ return NULL;
+ }
+ /* @rq->elv.icq holds on to io_context until @rq is freed */
+ if (icq)
+ get_io_context(icq->ioc);
+ }
+
+ return rq;
+}
+
+/*
+ * ioc_batching returns true if the ioc is a valid batching io_context and
+ * should be given priority access to a request.
+ */
+static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
+{
+ if (!ioc)
+ return 0;
+
+ /*
+ * Make sure the process is able to allocate at least 1 request
+ * even if the batch times out, otherwise we could theoretically
+ * lose wakeups.
+ */
+ return ioc->nr_batch_requests == q->nr_batching ||
+ (ioc->nr_batch_requests > 0
+ && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
+}
+
+/*
+ * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
+ * will cause the process to be a "batcher" on all queues in the system. This
+ * is the behaviour we want though - once it gets a wakeup it should be given
+ * a nice run.
+ */
+static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
+{
+ if (!ioc || ioc_batching(q, ioc))
+ return;
+
+ ioc->nr_batch_requests = q->nr_batching;
+ ioc->last_waited = jiffies;
+}
+
+static void __freed_request(struct request_queue *q, int sync)
+{
+ struct request_list *rl = &q->rq;
+
+ if (rl->count[sync] < queue_congestion_off_threshold(q))
+ blk_clear_queue_congested(q, sync);
+
+ if (rl->count[sync] + 1 <= q->nr_requests) {
+ if (waitqueue_active(&rl->wait[sync]))
+ wake_up(&rl->wait[sync]);
+
+ blk_clear_queue_full(q, sync);
+ }
+}
+
+/*
+ * A request has just been released. Account for it, update the full and
+ * congestion status, wake up any waiters. Called under q->queue_lock.
+ */
+static void freed_request(struct request_queue *q, unsigned int flags)
+{
+ struct request_list *rl = &q->rq;
+ int sync = rw_is_sync(flags);
+
+ rl->count[sync]--;
+ if (flags & REQ_ELVPRIV)
+ rl->elvpriv--;
+
+ __freed_request(q, sync);
+
+ if (unlikely(rl->starved[sync ^ 1]))
+ __freed_request(q, sync ^ 1);
+}
+
+/*
+ * Determine if elevator data should be initialized when allocating the
+ * request associated with @bio.
+ */
+static bool blk_rq_should_init_elevator(struct bio *bio)
+{
+ if (!bio)
+ return true;
+
+ /*
+ * Flush requests do not use the elevator so skip initialization.
+ * This allows a request to share the flush and elevator data.
+ */
+ if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
+ return false;
+
+ return true;
+}
+
+/**
+ * get_request - get a free request
+ * @q: request_queue to allocate request from
+ * @rw_flags: RW and SYNC flags
+ * @bio: bio to allocate request for (can be %NULL)
+ * @gfp_mask: allocation mask
+ *
+ * Get a free request from @q. This function may fail under memory
+ * pressure or if @q is dead.
+ *
+ * Must be called with @q->queue_lock held and,
+ * Returns %NULL on failure, with @q->queue_lock held.
+ * Returns !%NULL on success, with @q->queue_lock *not held*.
+ */
+static struct request *get_request(struct request_queue *q, int rw_flags,
+ struct bio *bio, gfp_t gfp_mask)
+{
+ struct request *rq = NULL;
+ struct request_list *rl = &q->rq;
+ struct elevator_type *et;
+ struct io_context *ioc;
+ struct io_cq *icq = NULL;
+ const bool is_sync = rw_is_sync(rw_flags) != 0;
+ bool retried = false;
+ int may_queue;
+retry:
+ et = q->elevator->type;
+ ioc = current->io_context;
+
+ if (unlikely(blk_queue_dead(q)))
+ return NULL;
+
+ may_queue = elv_may_queue(q, rw_flags);
+ if (may_queue == ELV_MQUEUE_NO)
+ goto rq_starved;
+
+ if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
+ if (rl->count[is_sync]+1 >= q->nr_requests) {
+ /*
+ * We want ioc to record batching state. If it's
+ * not already there, creating a new one requires
+ * dropping queue_lock, which in turn requires
+ * retesting conditions to avoid queue hang.
+ */
+ if (!ioc && !retried) {
+ spin_unlock_irq(q->queue_lock);
+ create_io_context(current, gfp_mask, q->node);
+ spin_lock_irq(q->queue_lock);
+ retried = true;
+ goto retry;
+ }
+
+ /*
+ * The queue will fill after this allocation, so set
+ * it as full, and mark this process as "batching".
+ * This process will be allowed to complete a batch of
+ * requests, others will be blocked.
+ */
+ if (!blk_queue_full(q, is_sync)) {
+ ioc_set_batching(q, ioc);
+ blk_set_queue_full(q, is_sync);
+ } else {
+ if (may_queue != ELV_MQUEUE_MUST
+ && !ioc_batching(q, ioc)) {
+ /*
+ * The queue is full and the allocating
+ * process is not a "batcher", and not
+ * exempted by the IO scheduler
+ */
+ goto out;
+ }
+ }
+ }
+ blk_set_queue_congested(q, is_sync);
+ }
+
+ /*
+ * Only allow batching queuers to allocate up to 50% over the defined
+ * limit of requests, otherwise we could have thousands of requests
+ * allocated with any setting of ->nr_requests
+ */
+ if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
+ goto out;
+
+ rl->count[is_sync]++;
+ rl->starved[is_sync] = 0;
+
+ /*
+ * Decide whether the new request will be managed by elevator. If
+ * so, mark @rw_flags and increment elvpriv. Non-zero elvpriv will
+ * prevent the current elevator from being destroyed until the new
+ * request is freed. This guarantees icq's won't be destroyed and
+ * makes creating new ones safe.
+ *
+ * Also, lookup icq while holding queue_lock. If it doesn't exist,
+ * it will be created after releasing queue_lock.
+ */
+ if (blk_rq_should_init_elevator(bio) &&
+ !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
+ rw_flags |= REQ_ELVPRIV;
+ rl->elvpriv++;
+ if (et->icq_cache && ioc)
+ icq = ioc_lookup_icq(ioc, q);
+ }
+
+ if (blk_queue_io_stat(q))
+ rw_flags |= REQ_IO_STAT;
+ spin_unlock_irq(q->queue_lock);
+
+ /* create icq if missing */
+ if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
+ icq = ioc_create_icq(q, gfp_mask);
+ if (!icq)
+ goto fail_icq;
+ }
+
+ rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
+
+fail_icq:
+ if (unlikely(!rq)) {
+ /*
+ * Allocation failed presumably due to memory. Undo anything
+ * we might have messed up.
+ *
+ * Allocating task should really be put onto the front of the
+ * wait queue, but this is pretty rare.
+ */
+ spin_lock_irq(q->queue_lock);
+ freed_request(q, rw_flags);
+
+ /*
+ * in the very unlikely event that allocation failed and no
+ * requests for this direction was pending, mark us starved
+ * so that freeing of a request in the other direction will
+ * notice us. another possible fix would be to split the
+ * rq mempool into READ and WRITE
+ */
+rq_starved:
+ if (unlikely(rl->count[is_sync] == 0))
+ rl->starved[is_sync] = 1;
+
+ goto out;
+ }
+
+ /*
+ * ioc may be NULL here, and ioc_batching will be false. That's
+ * OK, if the queue is under the request limit then requests need
+ * not count toward the nr_batch_requests limit. There will always
+ * be some limit enforced by BLK_BATCH_TIME.
+ */
+ if (ioc_batching(q, ioc))
+ ioc->nr_batch_requests--;
+
+ trace_block_getrq(q, bio, rw_flags & 1);
+out:
+ return rq;
+}
+
+/**
+ * get_request_wait - get a free request with retry
+ * @q: request_queue to allocate request from
+ * @rw_flags: RW and SYNC flags
+ * @bio: bio to allocate request for (can be %NULL)
+ *
+ * Get a free request from @q. This function keeps retrying under memory
+ * pressure and fails iff @q is dead.
+ *
+ * Must be called with @q->queue_lock held and,
+ * Returns %NULL on failure, with @q->queue_lock held.
+ * Returns !%NULL on success, with @q->queue_lock *not held*.
+ */
+static struct request *get_request_wait(struct request_queue *q, int rw_flags,
+ struct bio *bio)
+{
+ const bool is_sync = rw_is_sync(rw_flags) != 0;
+ struct request *rq;
+
+ rq = get_request(q, rw_flags, bio, GFP_NOIO);
+ while (!rq) {
+ DEFINE_WAIT(wait);
+ struct request_list *rl = &q->rq;
+
+ if (unlikely(blk_queue_dead(q)))
+ return NULL;
+
+ prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
+ TASK_UNINTERRUPTIBLE);
+
+ trace_block_sleeprq(q, bio, rw_flags & 1);
+
+ spin_unlock_irq(q->queue_lock);
+ io_schedule();
+
+ /*
+ * After sleeping, we become a "batching" process and
+ * will be able to allocate at least one request, and
+ * up to a big batch of them for a small period of time.
+ * See ioc_batching, ioc_set_batching
+ */
+ create_io_context(current, GFP_NOIO, q->node);
+ ioc_set_batching(q, current->io_context);
+
+ spin_lock_irq(q->queue_lock);
+ finish_wait(&rl->wait[is_sync], &wait);
+
+ rq = get_request(q, rw_flags, bio, GFP_NOIO);
+ };
+
+ return rq;
+}
+
+struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
+{
+ struct request *rq;
+
+ BUG_ON(rw != READ && rw != WRITE);
+
+ spin_lock_irq(q->queue_lock);
+ if (gfp_mask & __GFP_WAIT)
+ rq = get_request_wait(q, rw, NULL);
+ else
+ rq = get_request(q, rw, NULL, gfp_mask);
+ if (!rq)
+ spin_unlock_irq(q->queue_lock);
+ /* q->queue_lock is unlocked at this point */
+
+ return rq;
+}
+EXPORT_SYMBOL(blk_get_request);
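
A sketch of typical use (example_* names are hypothetical): an ioctl path allocates a request, fills in a SCSI CDB, and runs it synchronously with blk_execute_rq() from blk-exec.c.

static int example_send_cdb(struct request_queue *q, struct gendisk *disk,
			    unsigned char *cdb, unsigned int cdb_len)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENODEV;	/* only fails here if the queue is dead */

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	memcpy(rq->cmd, cdb, cdb_len);	/* cdb_len must be <= BLK_MAX_CDB */
	rq->cmd_len = cdb_len;
	rq->timeout = 60 * HZ;

	err = blk_execute_rq(q, disk, rq, 0);
	blk_put_request(rq);
	return err;
}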
+
+/**
+ * blk_make_request - given a bio, allocate a corresponding struct request.
+ * @q: target request queue
+ * @bio: The bio describing the memory mappings that will be submitted for IO.
+ * It may be a chained-bio properly constructed by block/bio layer.
+ * @gfp_mask: gfp flags to be used for memory allocation
+ *
+ * blk_make_request is the parallel of generic_make_request for BLOCK_PC
+ * type commands. Where the struct request needs to be farther initialized by
+ * the caller. It is passed a &struct bio, which describes the memory info of
+ * the I/O transfer.
+ *
+ * The caller of blk_make_request must make sure that bi_io_vec
+ * are set to describe the memory buffers. That bio_data_dir() will return
+ * the needed direction of the request. (And all bio's in the passed bio-chain
+ * are properly set accordingly)
+ *
+ * If called under non-sleepable conditions, mapped bio buffers must not
+ * need bouncing, by calling the appropriate masked or flagged allocator,
+ * suitable for the target device. Otherwise the call to blk_queue_bounce will
+ * BUG.
+ *
+ * WARNING: When allocating/cloning a bio-chain, careful consideration should be
+ * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
+ * anything but the first bio in the chain. Otherwise you risk waiting for IO
+ * completion of a bio that hasn't been submitted yet, thus resulting in a
+ * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
+ * of bio_alloc(), as that avoids the mempool deadlock.
+ * If possible a big IO should be split into smaller parts when allocation
+ * fails. Partial allocation should not be an error, or you risk a live-lock.
+ */
+struct request *blk_make_request(struct request_queue *q, struct bio *bio,
+ gfp_t gfp_mask)
+{
+ struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
+
+ if (unlikely(!rq))
+ return ERR_PTR(-ENOMEM);
+
+ for_each_bio(bio) {
+ struct bio *bounce_bio = bio;
+ int ret;
+
+ blk_queue_bounce(q, &bounce_bio);
+ ret = blk_rq_append_bio(q, rq, bounce_bio);
+ if (unlikely(ret)) {
+ blk_put_request(rq);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return rq;
+}
+EXPORT_SYMBOL(blk_make_request);
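
A sketch of the intended use (example_* names and the buffer are hypothetical): map a kernel buffer into a bio, turn it into a BLOCK_PC request with blk_make_request(), and execute it.

static int example_blockpc_io(struct request_queue *q, struct gendisk *disk,
			      void *buf, unsigned int len)
{
	struct bio *bio;
	struct request *rq;
	int err;

	bio = bio_map_kern(q, buf, len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	rq = blk_make_request(q, bio, GFP_KERNEL);
	if (IS_ERR(rq)) {
		bio_put(bio);
		return PTR_ERR(rq);
	}

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* the caller initializes rq->cmd / rq->cmd_len here, as noted above */
	err = blk_execute_rq(q, disk, rq, 0);
	blk_put_request(rq);
	return err;
}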
+
+/**
+ * blk_requeue_request - put a request back on queue
+ * @q: request queue where request should be inserted
+ * @rq: request to be inserted
+ *
+ * Description:
+ * Drivers often keep queueing requests until the hardware cannot accept
+ * more, when that condition happens we need to put the request back
+ * on the queue. Must be called with queue lock held.
+ */
+void blk_requeue_request(struct request_queue *q, struct request *rq)
+{
+ blk_delete_timer(rq);
+ blk_clear_rq_complete(rq);
+ trace_block_rq_requeue(q, rq);
+
+ if (blk_rq_tagged(rq))
+ blk_queue_end_tag(q, rq);
+
+ BUG_ON(blk_queued_rq(rq));
+
+ elv_requeue_request(q, rq);
+}
+EXPORT_SYMBOL(blk_requeue_request);
+
+static void add_acct_request(struct request_queue *q, struct request *rq,
+ int where)
+{
+ drive_stat_acct(rq, 1);
+ __elv_add_request(q, rq, where);
+}
+
+static void part_round_stats_single(int cpu, struct hd_struct *part,
+ unsigned long now)
+{
+ if (now == part->stamp)
+ return;
+
+ if (part_in_flight(part)) {
+ __part_stat_add(cpu, part, time_in_queue,
+ part_in_flight(part) * (now - part->stamp));
+ __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
+ }
+ part->stamp = now;
+}
+
+/**
+ * part_round_stats() - Round off the performance stats on a struct disk_stats.
+ * @cpu: cpu number for stats access
+ * @part: target partition
+ *
+ * The average IO queue length and utilisation statistics are maintained
+ * by observing the current state of the queue length and the amount of
+ * time it has been in this state for.
+ *
+ * Normally, that accounting is done on IO completion, but that can result
+ * in more than a second's worth of IO being accounted for within any one
+ * second, leading to >100% utilisation. To deal with that, we call this
+ * function to do a round-off before returning the results when reading
+ * /proc/diskstats. This accounts immediately for all queue usage up to
+ * the current jiffies and restarts the counters again.
+ */
+void part_round_stats(int cpu, struct hd_struct *part)
+{
+ unsigned long now = jiffies;
+
+ if (part->partno)
+ part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
+ part_round_stats_single(cpu, part, now);
+}
+EXPORT_SYMBOL_GPL(part_round_stats);
+
+/*
+ * queue lock must be held
+ */
+void __blk_put_request(struct request_queue *q, struct request *req)
+{
+ if (unlikely(!q))
+ return;
+ if (unlikely(--req->ref_count))
+ return;
+
+ elv_completed_request(q, req);
+
+ /* this is a bio leak */
+ WARN_ON(req->bio != NULL);
+
+ /*
+ * Request may not have originated from ll_rw_blk. if not,
+ * it didn't come out of our reserved rq pools
+ */
+ if (req->cmd_flags & REQ_ALLOCED) {
+ unsigned int flags = req->cmd_flags;
+
+ BUG_ON(!list_empty(&req->queuelist));
+ BUG_ON(!hlist_unhashed(&req->hash));
+
+ blk_free_request(q, req);
+ freed_request(q, flags);
+ }
+}
+EXPORT_SYMBOL_GPL(__blk_put_request);
+
+void blk_put_request(struct request *req)
+{
+ unsigned long flags;
+ struct request_queue *q = req->q;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __blk_put_request(q, req);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(blk_put_request);
+
+/**
+ * blk_add_request_payload - add a payload to a request
+ * @rq: request to update
+ * @page: page backing the payload
+ * @len: length of the payload.
+ *
+ * This allows a block driver to add a payload to an already submitted
+ * request later on. The driver needs to take care of freeing the payload
+ * itself.
+ *
+ * Note that this is a quite horrible hack and nothing but handling of
+ * discard requests should ever use it.
+ */
+void blk_add_request_payload(struct request *rq, struct page *page,
+ unsigned int len)
+{
+ struct bio *bio = rq->bio;
+
+ bio->bi_io_vec->bv_page = page;
+ bio->bi_io_vec->bv_offset = 0;
+ bio->bi_io_vec->bv_len = len;
+
+ bio->bi_size = len;
+ bio->bi_vcnt = 1;
+ bio->bi_phys_segments = 1;
+
+ rq->__data_len = rq->resid_len = len;
+ rq->nr_phys_segments = 1;
+ rq->buffer = bio_data(bio);
+}
+EXPORT_SYMBOL_GPL(blk_add_request_payload);
+
+static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
+ struct bio *bio)
+{
+ const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+
+ if (!ll_back_merge_fn(q, req, bio))
+ return false;
+
+ trace_block_bio_backmerge(q, bio);
+
+ if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
+ blk_rq_set_mixed_merge(req);
+
+ req->biotail->bi_next = bio;
+ req->biotail = bio;
+ req->__data_len += bio->bi_size;
+ req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
+
+ drive_stat_acct(req, 0);
+ return true;
+}
+
+static bool bio_attempt_front_merge(struct request_queue *q,
+ struct request *req, struct bio *bio)
+{
+ const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+
+ if (!ll_front_merge_fn(q, req, bio))
+ return false;
+
+ trace_block_bio_frontmerge(q, bio);
+
+ if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
+ blk_rq_set_mixed_merge(req);
+
+ bio->bi_next = req->bio;
+ req->bio = bio;
+
+ /*
+ * may not be valid. If the low level driver said
+ * it didn't need a bounce buffer then it better
+ * not touch req->buffer either...
+ */
+ req->buffer = bio_data(bio);
+ req->__sector = bio->bi_sector;
+ req->__data_len += bio->bi_size;
+ req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
+
+ drive_stat_acct(req, 0);
+ return true;
+}
+
+/**
+ * attempt_plug_merge - try to merge with %current's plugged list
+ * @q: request_queue new bio is being queued at
+ * @bio: new bio being queued
+ * @request_count: out parameter for number of traversed plugged requests
+ *
+ * Determine whether @bio being queued on @q can be merged with a request
+ * on %current's plugged list. Returns %true if merge was successful,
+ * otherwise %false.
+ *
+ * Plugging coalesces IOs from the same issuer for the same purpose without
+ * going through @q->queue_lock. As such it's more of an issuing mechanism
+ * than scheduling, and the request, while it may have elvpriv data, is not
+ * added to the elevator at this point. In addition, we don't have
+ * reliable access to the elevator outside queue lock. Only check basic
+ * merging parameters without querying the elevator.
+ */
+static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
+ unsigned int *request_count)
+{
+ struct blk_plug *plug;
+ struct request *rq;
+ bool ret = false;
+
+ plug = current->plug;
+ if (!plug)
+ goto out;
+ *request_count = 0;
+
+ list_for_each_entry_reverse(rq, &plug->list, queuelist) {
+ int el_ret;
+
+ if (rq->q == q)
+ (*request_count)++;
+
+ if (rq->q != q || !blk_rq_merge_ok(rq, bio))
+ continue;
+
+ el_ret = blk_try_merge(rq, bio);
+ if (el_ret == ELEVATOR_BACK_MERGE) {
+ ret = bio_attempt_back_merge(q, rq, bio);
+ if (ret)
+ break;
+ } else if (el_ret == ELEVATOR_FRONT_MERGE) {
+ ret = bio_attempt_front_merge(q, rq, bio);
+ if (ret)
+ break;
+ }
+ }
+out:
+ return ret;
+}
+
+void init_request_from_bio(struct request *req, struct bio *bio)
+{
+ req->cmd_type = REQ_TYPE_FS;
+
+ req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
+ if (bio->bi_rw & REQ_RAHEAD)
+ req->cmd_flags |= REQ_FAILFAST_MASK;
+
+ req->errors = 0;
+ req->__sector = bio->bi_sector;
+ req->ioprio = bio_prio(bio);
+ blk_rq_bio_prep(req->q, req, bio);
+}
+
+void blk_queue_bio(struct request_queue *q, struct bio *bio)
+{
+ const bool sync = !!(bio->bi_rw & REQ_SYNC);
+ struct blk_plug *plug;
+ int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
+ struct request *req;
+ unsigned int request_count = 0;
+
+ /*
+ * low level driver can indicate that it wants pages above a
+ * certain limit bounced to low memory (ie for highmem, or even
+ * ISA dma in theory)
+ */
+ blk_queue_bounce(q, &bio);
+
+ if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
+ spin_lock_irq(q->queue_lock);
+ where = ELEVATOR_INSERT_FLUSH;
+ goto get_rq;
+ }
+
+ /*
+ * Check if we can merge with the plugged list before grabbing
+ * any locks.
+ */
+ if (attempt_plug_merge(q, bio, &request_count))
+ return;
+
+ spin_lock_irq(q->queue_lock);
+
+ el_ret = elv_merge(q, &req, bio);
+ if (el_ret == ELEVATOR_BACK_MERGE) {
+ if (bio_attempt_back_merge(q, req, bio)) {
+ elv_bio_merged(q, req, bio);
+ if (!attempt_back_merge(q, req))
+ elv_merged_request(q, req, el_ret);
+ goto out_unlock;
+ }
+ } else if (el_ret == ELEVATOR_FRONT_MERGE) {
+ if (bio_attempt_front_merge(q, req, bio)) {
+ elv_bio_merged(q, req, bio);
+ if (!attempt_front_merge(q, req))
+ elv_merged_request(q, req, el_ret);
+ goto out_unlock;
+ }
+ }
+
+get_rq:
+ /*
+ * This sync check and mask will be re-done in init_request_from_bio(),
+ * but we need to set it earlier to expose the sync flag to the
+ * rq allocator and io schedulers.
+ */
+ rw_flags = bio_data_dir(bio);
+ if (sync)
+ rw_flags |= REQ_SYNC;
+
+ /*
+ * Grab a free request. This might sleep but cannot fail.
+ * Returns with the queue unlocked.
+ */
+ req = get_request_wait(q, rw_flags, bio);
+ if (unlikely(!req)) {
+ bio_endio(bio, -ENODEV); /* @q is dead */
+ goto out_unlock;
+ }
+
+ /*
+ * After dropping the lock and possibly sleeping here, our request
+ * may now be mergeable after it had proven unmergeable (above).
+ * We don't worry about that case for efficiency. It won't happen
+ * often, and the elevators are able to handle it.
+ */
+ init_request_from_bio(req, bio);
+
+ if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
+ req->cpu = raw_smp_processor_id();
+
+ plug = current->plug;
+ if (plug) {
+ /*
+ * If this is the first request added after a plug, fire
+ * of a plug trace. If others have been added before, check
+ * if we have multiple devices in this plug. If so, make a
+ * note to sort the list before dispatch.
+ */
+ if (list_empty(&plug->list))
+ trace_block_plug(q);
+ else {
+ if (!plug->should_sort) {
+ struct request *__rq;
+
+ __rq = list_entry_rq(plug->list.prev);
+ if (__rq->q != q)
+ plug->should_sort = 1;
+ }
+ if (request_count >= BLK_MAX_REQUEST_COUNT) {
+ blk_flush_plug_list(plug, false);
+ trace_block_plug(q);
+ }
+ }
+ list_add_tail(&req->queuelist, &plug->list);
+ drive_stat_acct(req, 1);
+ } else {
+ spin_lock_irq(q->queue_lock);
+ add_acct_request(q, req, where);
+ __blk_run_queue(q);
+out_unlock:
+ spin_unlock_irq(q->queue_lock);
+ }
+}
+EXPORT_SYMBOL_GPL(blk_queue_bio); /* for device mapper only */
+
+/*
+ * If bio->bi_dev is a partition, remap the location
+ */
+static inline void blk_partition_remap(struct bio *bio)
+{
+ struct block_device *bdev = bio->bi_bdev;
+
+ if (bio_sectors(bio) && bdev != bdev->bd_contains) {
+ struct hd_struct *p = bdev->bd_part;
+
+ bio->bi_sector += p->start_sect;
+ bio->bi_bdev = bdev->bd_contains;
+
+ trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
+ bdev->bd_dev,
+ bio->bi_sector - p->start_sect);
+ }
+}
+
+static void handle_bad_sector(struct bio *bio)
+{
+ char b[BDEVNAME_SIZE];
+
+ printk(KERN_INFO "attempt to access beyond end of device\n");
+ printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
+ bdevname(bio->bi_bdev, b),
+ bio->bi_rw,
+ (unsigned long long)bio->bi_sector + bio_sectors(bio),
+ (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
+
+ set_bit(BIO_EOF, &bio->bi_flags);
+}
+
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+
+static DECLARE_FAULT_ATTR(fail_make_request);
+
+static int __init setup_fail_make_request(char *str)
+{
+ return setup_fault_attr(&fail_make_request, str);
+}
+__setup("fail_make_request=", setup_fail_make_request);
+
+static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
+{
+ return part->make_it_fail && should_fail(&fail_make_request, bytes);
+}
+
+static int __init fail_make_request_debugfs(void)
+{
+ struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
+ NULL, &fail_make_request);
+
+ return IS_ERR(dir) ? PTR_ERR(dir) : 0;
+}
+
+late_initcall(fail_make_request_debugfs);
+
+#else /* CONFIG_FAIL_MAKE_REQUEST */
+
+static inline bool should_fail_request(struct hd_struct *part,
+ unsigned int bytes)
+{
+ return false;
+}
+
+#endif /* CONFIG_FAIL_MAKE_REQUEST */
+
+/*
+ * Check whether this bio extends beyond the end of the device.
+ */
+static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
+{
+ sector_t maxsector;
+
+ if (!nr_sectors)
+ return 0;
+
+ /* Test device or partition size, when known. */
+ maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
+ if (maxsector) {
+ sector_t sector = bio->bi_sector;
+
+ if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+ /*
+ * This may well happen - the kernel calls bread()
+ * without checking the size of the device, e.g., when
+ * mounting a device.
+ */
+ handle_bad_sector(bio);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static noinline_for_stack bool
+generic_make_request_checks(struct bio *bio)
+{
+ struct request_queue *q;
+ int nr_sectors = bio_sectors(bio);
+ int err = -EIO;
+ char b[BDEVNAME_SIZE];
+ struct hd_struct *part;
+
+ might_sleep();
+
+ if (bio_check_eod(bio, nr_sectors))
+ goto end_io;
+
+ q = bdev_get_queue(bio->bi_bdev);
+ if (unlikely(!q)) {
+ printk(KERN_ERR
+ "generic_make_request: Trying to access "
+ "nonexistent block-device %s (%Lu)\n",
+ bdevname(bio->bi_bdev, b),
+ (long long) bio->bi_sector);
+ goto end_io;
+ }
+
+ if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
+ nr_sectors > queue_max_hw_sectors(q))) {
+ printk(KERN_ERR "bio too big device %s (%u > %u)\n",
+ bdevname(bio->bi_bdev, b),
+ bio_sectors(bio),
+ queue_max_hw_sectors(q));
+ goto end_io;
+ }
+
+ part = bio->bi_bdev->bd_part;
+ if (should_fail_request(part, bio->bi_size) ||
+ should_fail_request(&part_to_disk(part)->part0,
+ bio->bi_size))
+ goto end_io;
+
+ /*
+ * If this device has partitions, remap block n
+ * of partition p to block n+start(p) of the disk.
+ */
+ blk_partition_remap(bio);
+
+ if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
+ goto end_io;
+
+ if (bio_check_eod(bio, nr_sectors))
+ goto end_io;
+
+ /*
+ * Filter flush bio's early so that make_request based
+ * drivers without flush support don't have to worry
+ * about them.
+ */
+ if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+ bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
+ if (!nr_sectors) {
+ err = 0;
+ goto end_io;
+ }
+ }
+
+ if ((bio->bi_rw & REQ_DISCARD) &&
+ (!blk_queue_discard(q) ||
+ ((bio->bi_rw & REQ_SECURE) &&
+ !blk_queue_secdiscard(q)))) {
+ err = -EOPNOTSUPP;
+ goto end_io;
+ }
+
+ if (blk_throtl_bio(q, bio))
+ return false; /* throttled, will be resubmitted later */
+
+ trace_block_bio_queue(q, bio);
+ return true;
+
+end_io:
+ bio_endio(bio, err);
+ return false;
+}
+
+/**
+ * generic_make_request - hand a buffer to its device driver for I/O
+ * @bio: The bio describing the location in memory and on the device.
+ *
+ * generic_make_request() is used to make I/O requests of block
+ * devices. It is passed a &struct bio, which describes the I/O that needs
+ * to be done.
+ *
+ * generic_make_request() does not return any status. The
+ * success/failure status of the request, along with notification of
+ * completion, is delivered asynchronously through the bio->bi_end_io
+ * function described (one day) elsewhere.
+ *
+ * The caller of generic_make_request must make sure that bi_io_vec
+ * are set to describe the memory buffer, and that bi_dev and bi_sector are
+ * set to describe the device address, and the
+ * bi_end_io and optionally bi_private are set to describe how
+ * completion notification should be signaled.
+ *
+ * generic_make_request and the drivers it calls may use bi_next if this
+ * bio happens to be merged with someone else, and may resubmit the bio to
+ * a lower device by calling into generic_make_request recursively, which
+ * means the bio should NOT be touched after the call to ->make_request_fn.
+ */
+void generic_make_request(struct bio *bio)
+{
+ struct bio_list bio_list_on_stack;
+
+ if (!generic_make_request_checks(bio))
+ return;
+
+ /*
+ * We only want one ->make_request_fn to be active at a time, else
+ * stack usage with stacked devices could be a problem. So use
+ * current->bio_list to keep a list of requests submitted by a
+ * make_request_fn function. current->bio_list is also used as a
+ * flag to say if generic_make_request is currently active in this
+ * task or not. If it is NULL, then no make_request is active. If
+ * it is non-NULL, then a make_request is active, and new requests
+ * should be added at the tail
+ */
+ if (current->bio_list) {
+ bio_list_add(current->bio_list, bio);
+ return;
+ }
+
+ /* following loop may be a bit non-obvious, and so deserves some
+ * explanation.
+ * Before entering the loop, bio->bi_next is NULL (as all callers
+ * ensure that) so we have a list with a single bio.
+ * We pretend that we have just taken it off a longer list, so
+ * we assign bio_list to a pointer to the bio_list_on_stack,
+ * thus initialising the bio_list of new bios to be
+ * added. ->make_request() may indeed add some more bios
+ * through a recursive call to generic_make_request. If it
+ * did, we find a non-NULL value in bio_list and re-enter the loop
+ * from the top. In this case we really did just take the bio
+ * off the top of the list (no pretending) and so remove it from
+ * bio_list, and call into ->make_request() again.
+ */
+ BUG_ON(bio->bi_next);
+ bio_list_init(&bio_list_on_stack);
+ current->bio_list = &bio_list_on_stack;
+ do {
+ struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+
+ q->make_request_fn(q, bio);
+
+ bio = bio_list_pop(current->bio_list);
+ } while (bio);
+ current->bio_list = NULL; /* deactivate */
+}
+EXPORT_SYMBOL(generic_make_request);
+
+/**
+ * submit_bio - submit a bio to the block device layer for I/O
+ * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
+ * @bio: The &struct bio which describes the I/O
+ *
+ * submit_bio() is very similar in purpose to generic_make_request(), and
+ * uses that function to do most of the work. Both are fairly rough
+ * interfaces; @bio must be presetup and ready for I/O.
+ *
+ */
+void submit_bio(int rw, struct bio *bio)
+{
+ int count = bio_sectors(bio);
+
+ bio->bi_rw |= rw;
+
+ /*
+ * If it's a regular read/write or a barrier with data attached,
+ * go through the normal accounting stuff before submission.
+ */
+ if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
+ if (rw & WRITE) {
+ count_vm_events(PGPGOUT, count);
+ } else {
+ task_io_account_read(bio->bi_size);
+ count_vm_events(PGPGIN, count);
+ }
+
+ if (unlikely(block_dump)) {
+ char b[BDEVNAME_SIZE];
+ printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
+ current->comm, task_pid_nr(current),
+ (rw & WRITE) ? "WRITE" : "READ",
+ (unsigned long long)bio->bi_sector,
+ bdevname(bio->bi_bdev, b),
+ count);
+ }
+ }
+
+ generic_make_request(bio);
+}
+EXPORT_SYMBOL(submit_bio);
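+
+/*
+ * Illustrative sketch (not part of this patch): a typical submitter
+ * allocates a bio, describes the buffer and target sector, sets a
+ * completion handler and hands the bio to submit_bio(). The bdev, sector,
+ * page and example_end_io names are hypothetical.
+ *
+ *        bio = bio_alloc(GFP_NOIO, 1);
+ *        bio->bi_bdev = bdev;
+ *        bio->bi_sector = sector;
+ *        bio->bi_end_io = example_end_io;
+ *        bio_add_page(bio, page, PAGE_SIZE, 0);
+ *        submit_bio(READ, bio);
+ */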
+
+/**
+ * blk_rq_check_limits - Helper function to check a request for the queue limit
+ * @q: the queue
+ * @rq: the request being checked
+ *
+ * Description:
+ * @rq may have been made based on weaker limitations of upper-level queues
+ * in request stacking drivers, and it may violate the limitation of @q.
+ * Since the block layer and the underlying device driver trust @rq
+ * after it is inserted into @q, it should be checked against @q before
+ * the insertion using this generic function.
+ *
+ * This function should also be useful for request stacking drivers
+ * in cases like the following, so it is exported.
+ * Request stacking drivers like request-based dm may change the queue
+ * limits while requests are in the queue (e.g. dm's table swapping).
+ * Such request stacking drivers should check those requests against
+ * the new queue limits again when they dispatch those requests,
+ * although such checks are also done against the old queue limits
+ * when submitting requests.
+ */
+int blk_rq_check_limits(struct request_queue *q, struct request *rq)
+{
+ if (rq->cmd_flags & REQ_DISCARD)
+ return 0;
+
+ if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
+ blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
+ printk(KERN_ERR "%s: over max size limit.\n", __func__);
+ return -EIO;
+ }
+
+ /*
+ * queue's settings related to segment counting like q->bounce_pfn
+ * may differ from that of other stacking queues.
+ * Recalculate it to check the request correctly on this queue's
+ * limitation.
+ */
+ blk_recalc_rq_segments(rq);
+ if (rq->nr_phys_segments > queue_max_segments(q)) {
+ printk(KERN_ERR "%s: over max segments limit.\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_rq_check_limits);
+
+/**
+ * blk_insert_cloned_request - Helper for stacking drivers to submit a request
+ * @q: the queue to submit the request
+ * @rq: the request being queued
+ */
+int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+{
+ unsigned long flags;
+ int where = ELEVATOR_INSERT_BACK;
+
+ if (blk_rq_check_limits(q, rq))
+ return -EIO;
+
+ if (rq->rq_disk &&
+ should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
+ return -EIO;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (unlikely(blk_queue_dead(q))) {
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ return -ENODEV;
+ }
+
+ /*
+ * Submitting request must be dequeued before calling this function
+ * because it will be linked to another request_queue
+ */
+ BUG_ON(blk_queued_rq(rq));
+
+ if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
+ where = ELEVATOR_INSERT_FLUSH;
+
+ add_acct_request(q, rq, where);
+ if (where == ELEVATOR_INSERT_FLUSH)
+ __blk_run_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
+
+/**
+ * blk_rq_err_bytes - determine number of bytes till the next failure boundary
+ * @rq: request to examine
+ *
+ * Description:
+ * A request could be a merge of IOs which require different failure
+ * handling. This function determines the number of bytes which
+ * can be failed from the beginning of the request without
+ * crossing into an area which needs to be retried further.
+ *
+ * Return:
+ * The number of bytes to fail.
+ *
+ * Context:
+ * queue_lock must be held.
+ */
+unsigned int blk_rq_err_bytes(const struct request *rq)
+{
+ unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+ unsigned int bytes = 0;
+ struct bio *bio;
+
+ if (!(rq->cmd_flags & REQ_MIXED_MERGE))
+ return blk_rq_bytes(rq);
+
+ /*
+ * Currently the only 'mixing' which can happen is between
+ * different failfast types. We can safely fail portions
+ * which have all the failfast bits that the first one has -
+ * the ones which are at least as eager to fail as the first
+ * one.
+ */
+ for (bio = rq->bio; bio; bio = bio->bi_next) {
+ if ((bio->bi_rw & ff) != ff)
+ break;
+ bytes += bio->bi_size;
+ }
+
+ /* this could lead to infinite loop */
+ BUG_ON(blk_rq_bytes(rq) && !bytes);
+ return bytes;
+}
+EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
+
+static void blk_account_io_completion(struct request *req, unsigned int bytes)
+{
+ if (blk_do_io_stat(req)) {
+ const int rw = rq_data_dir(req);
+ struct hd_struct *part;
+ int cpu;
+
+ cpu = part_stat_lock();
+ part = req->part;
+ part_stat_add(cpu, part, sectors[rw], bytes >> 9);
+ part_stat_unlock();
+ }
+}
+
+static void blk_account_io_done(struct request *req)
+{
+ /*
+ * Account IO completion. flush_rq isn't accounted as a
+ * normal IO on queueing nor completion. Accounting the
+ * containing request is enough.
+ */
+ if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
+ unsigned long duration = jiffies - req->start_time;
+ const int rw = rq_data_dir(req);
+ struct hd_struct *part;
+ int cpu;
+
+ cpu = part_stat_lock();
+ part = req->part;
+
+ part_stat_inc(cpu, part, ios[rw]);
+ part_stat_add(cpu, part, ticks[rw], duration);
+ part_round_stats(cpu, part);
+ part_dec_in_flight(part, rw);
+
+ hd_struct_put(part);
+ part_stat_unlock();
+ }
+}
+
+/**
+ * blk_peek_request - peek at the top of a request queue
+ * @q: request queue to peek at
+ *
+ * Description:
+ * Return the request at the top of @q. The returned request
+ * should be started using blk_start_request() before LLD starts
+ * processing it.
+ *
+ * Return:
+ * Pointer to the request at the top of @q if available. Null
+ * otherwise.
+ *
+ * Context:
+ * queue_lock must be held.
+ */
+struct request *blk_peek_request(struct request_queue *q)
+{
+ struct request *rq;
+ int ret;
+
+ while ((rq = __elv_next_request(q)) != NULL) {
+ if (!(rq->cmd_flags & REQ_STARTED)) {
+ /*
+ * This is the first time the device driver
+ * sees this request (possibly after
+ * requeueing). Notify IO scheduler.
+ */
+ if (rq->cmd_flags & REQ_SORTED)
+ elv_activate_rq(q, rq);
+
+ /*
+ * just mark it as started even if we don't start
+ * it; a request that has been delayed should
+ * not be passed by new incoming requests
+ */
+ rq->cmd_flags |= REQ_STARTED;
+ trace_block_rq_issue(q, rq);
+ }
+
+ if (!q->boundary_rq || q->boundary_rq == rq) {
+ q->end_sector = rq_end_sector(rq);
+ q->boundary_rq = NULL;
+ }
+
+ if (rq->cmd_flags & REQ_DONTPREP)
+ break;
+
+ if (q->dma_drain_size && blk_rq_bytes(rq)) {
+ /*
+ * make sure space for the drain appears. We
+ * know we can do this because max_hw_segments
+ * has been adjusted to be one fewer than the
+ * device can handle
+ */
+ rq->nr_phys_segments++;
+ }
+
+ if (!q->prep_rq_fn)
+ break;
+
+ ret = q->prep_rq_fn(q, rq);
+ if (ret == BLKPREP_OK) {
+ break;
+ } else if (ret == BLKPREP_DEFER) {
+ /*
+ * the request may have been (partially) prepped.
+ * we need to keep this request in the front to
+ * avoid resource deadlock. REQ_STARTED will
+ * prevent other fs requests from passing this one.
+ */
+ if (q->dma_drain_size && blk_rq_bytes(rq) &&
+ !(rq->cmd_flags & REQ_DONTPREP)) {
+ /*
+ * remove the space for the drain we added
+ * so that we don't add it again
+ */
+ --rq->nr_phys_segments;
+ }
+
+ rq = NULL;
+ break;
+ } else if (ret == BLKPREP_KILL) {
+ rq->cmd_flags |= REQ_QUIET;
+ /*
+ * Mark this request as started so we don't trigger
+ * any debug logic in the end I/O path.
+ */
+ blk_start_request(rq);
+ __blk_end_request_all(rq, -EIO);
+ } else {
+ printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
+ break;
+ }
+ }
+
+ return rq;
+}
+EXPORT_SYMBOL(blk_peek_request);
+
+void blk_dequeue_request(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+
+ BUG_ON(list_empty(&rq->queuelist));
+ BUG_ON(ELV_ON_HASH(rq));
+
+ list_del_init(&rq->queuelist);
+
+ /*
+ * the time frame between a request being removed from the lists
+ * and when it is freed is accounted as I/O in progress at
+ * the driver side.
+ */
+ if (blk_account_rq(rq)) {
+ q->in_flight[rq_is_sync(rq)]++;
+ set_io_start_time_ns(rq);
+ }
+}
+
+/**
+ * blk_start_request - start request processing on the driver
+ * @req: request to dequeue
+ *
+ * Description:
+ * Dequeue @req and start timeout timer on it. This hands off the
+ * request to the driver.
+ *
+ * Block internal functions which don't want to start the timer should
+ * call blk_dequeue_request().
+ *
+ * Context:
+ * queue_lock must be held.
+ */
+void blk_start_request(struct request *req)
+{
+ blk_dequeue_request(req);
+
+ /*
+ * We are now handing the request to the hardware, initialize
+ * resid_len to full count and add the timeout handler.
+ */
+ req->resid_len = blk_rq_bytes(req);
+ if (unlikely(blk_bidi_rq(req)))
+ req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
+
+ blk_add_timer(req);
+}
+EXPORT_SYMBOL(blk_start_request);
+
+/**
+ * blk_fetch_request - fetch a request from a request queue
+ * @q: request queue to fetch a request from
+ *
+ * Description:
+ * Return the request at the top of @q. The request is started on
+ * return and LLD can start processing it immediately.
+ *
+ * Return:
+ * Pointer to the request at the top of @q if available. Null
+ * otherwise.
+ *
+ * Context:
+ * queue_lock must be held.
+ */
+struct request *blk_fetch_request(struct request_queue *q)
+{
+ struct request *rq;
+
+ rq = blk_peek_request(q);
+ if (rq)
+ blk_start_request(rq);
+ return rq;
+}
+EXPORT_SYMBOL(blk_fetch_request);
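+
+/*
+ * Illustrative sketch (not part of this patch): a trivial request_fn that
+ * fetches started requests and completes each one synchronously;
+ * example_handle() stands in for the driver's actual transfer code. The
+ * queue lock is held when request_fn is invoked, so __blk_end_request_all()
+ * may be used directly.
+ *
+ *        static void example_request_fn(struct request_queue *q)
+ *        {
+ *                struct request *rq;
+ *
+ *                while ((rq = blk_fetch_request(q)) != NULL) {
+ *                        int error = example_handle(rq);
+ *
+ *                        __blk_end_request_all(rq, error);
+ *                }
+ *        }
+ */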
+
+/**
+ * blk_update_request - Special helper function for request stacking drivers
+ * @req: the request being processed
+ * @error: %0 for success, < %0 for error
+ * @nr_bytes: number of bytes to complete @req
+ *
+ * Description:
+ * Ends I/O on a number of bytes attached to @req, but doesn't complete
+ * the request structure even if @req doesn't have leftover.
+ * If @req has leftover, sets it up for the next range of segments.
+ *
+ * This special helper function is only for request stacking drivers
+ * (e.g. request-based dm) so that they can handle partial completion.
+ * Actual device drivers should use blk_end_request instead.
+ *
+ * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
+ * %false return from this function.
+ *
+ * Return:
+ * %false - this request doesn't have any more data
+ * %true - this request has more data
+ **/
+bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
+{
+ int total_bytes, bio_nbytes, next_idx = 0;
+ struct bio *bio;
+
+ if (!req->bio)
+ return false;
+
+ trace_block_rq_complete(req->q, req);
+
+ /*
+ * For fs requests, rq is just a carrier of independent bios
+ * and each partial completion should be handled separately.
+ * Reset per-request error on each partial completion.
+ *
+ * TODO: tj: This is too subtle. It would be better to let
+ * low level drivers do what they see fit.
+ */
+ if (req->cmd_type == REQ_TYPE_FS)
+ req->errors = 0;
+
+ if (error && req->cmd_type == REQ_TYPE_FS &&
+ !(req->cmd_flags & REQ_QUIET)) {
+ char *error_type;
+
+ switch (error) {
+ case -ENOLINK:
+ error_type = "recoverable transport";
+ break;
+ case -EREMOTEIO:
+ error_type = "critical target";
+ break;
+ case -EBADE:
+ error_type = "critical nexus";
+ break;
+ case -EIO:
+ default:
+ error_type = "I/O";
+ break;
+ }
+ printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
+ error_type, req->rq_disk ? req->rq_disk->disk_name : "?",
+ (unsigned long long)blk_rq_pos(req));
+ }
+
+ blk_account_io_completion(req, nr_bytes);
+
+ total_bytes = bio_nbytes = 0;
+ while ((bio = req->bio) != NULL) {
+ int nbytes;
+
+ if (nr_bytes >= bio->bi_size) {
+ req->bio = bio->bi_next;
+ nbytes = bio->bi_size;
+ req_bio_endio(req, bio, nbytes, error);
+ next_idx = 0;
+ bio_nbytes = 0;
+ } else {
+ int idx = bio->bi_idx + next_idx;
+
+ if (unlikely(idx >= bio->bi_vcnt)) {
+ blk_dump_rq_flags(req, "__end_that");
+ printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
+ __func__, idx, bio->bi_vcnt);
+ break;
+ }
+
+ nbytes = bio_iovec_idx(bio, idx)->bv_len;
+ BIO_BUG_ON(nbytes > bio->bi_size);
+
+ /*
+ * not a complete bvec done
+ */
+ if (unlikely(nbytes > nr_bytes)) {
+ bio_nbytes += nr_bytes;
+ total_bytes += nr_bytes;
+ break;
+ }
+
+ /*
+ * advance to the next vector
+ */
+ next_idx++;
+ bio_nbytes += nbytes;
+ }
+
+ total_bytes += nbytes;
+ nr_bytes -= nbytes;
+
+ bio = req->bio;
+ if (bio) {
+ /*
+ * end more in this run, or just return 'not-done'
+ */
+ if (unlikely(nr_bytes <= 0))
+ break;
+ }
+ }
+
+ /*
+ * completely done
+ */
+ if (!req->bio) {
+ /*
+ * Reset counters so that the request stacking driver
+ * can find how many bytes remain in the request
+ * later.
+ */
+ req->__data_len = 0;
+ return false;
+ }
+
+ /*
+ * if the request wasn't completed, update state
+ */
+ if (bio_nbytes) {
+ req_bio_endio(req, bio, bio_nbytes, error);
+ bio->bi_idx += next_idx;
+ bio_iovec(bio)->bv_offset += nr_bytes;
+ bio_iovec(bio)->bv_len -= nr_bytes;
+ }
+
+ req->__data_len -= total_bytes;
+ req->buffer = bio_data(req->bio);
+
+ /* update sector only for requests with clear definition of sector */
+ if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
+ req->__sector += total_bytes >> 9;
+
+ /* mixed attributes always follow the first bio */
+ if (req->cmd_flags & REQ_MIXED_MERGE) {
+ req->cmd_flags &= ~REQ_FAILFAST_MASK;
+ req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
+ }
+
+ /*
+ * If total number of sectors is less than the first segment
+ * size, something has gone terribly wrong.
+ */
+ if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
+ blk_dump_rq_flags(req, "request botched");
+ req->__data_len = blk_rq_cur_bytes(req);
+ }
+
+ /* recalculate the number of segments */
+ blk_recalc_rq_segments(req);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(blk_update_request);
+
+static bool blk_update_bidi_request(struct request *rq, int error,
+ unsigned int nr_bytes,
+ unsigned int bidi_bytes)
+{
+ if (blk_update_request(rq, error, nr_bytes))
+ return true;
+
+ /* Bidi request must be completed as a whole */
+ if (unlikely(blk_bidi_rq(rq)) &&
+ blk_update_request(rq->next_rq, error, bidi_bytes))
+ return true;
+
+ if (blk_queue_add_random(rq->q))
+ add_disk_randomness(rq->rq_disk);
+
+ return false;
+}
+
+/**
+ * blk_unprep_request - unprepare a request
+ * @req: the request
+ *
+ * This function makes a request ready for complete resubmission (or
+ * completion). It happens only after all error handling is complete,
+ * so represents the appropriate moment to deallocate any resources
+ * that were allocated to the request in the prep_rq_fn. The queue
+ * lock is held when calling this.
+ */
+void blk_unprep_request(struct request *req)
+{
+ struct request_queue *q = req->q;
+
+ req->cmd_flags &= ~REQ_DONTPREP;
+ if (q->unprep_rq_fn)
+ q->unprep_rq_fn(q, req);
+}
+EXPORT_SYMBOL_GPL(blk_unprep_request);
+
+/*
+ * queue lock must be held
+ */
+static void blk_finish_request(struct request *req, int error)
+{
+ if (blk_rq_tagged(req))
+ blk_queue_end_tag(req->q, req);
+
+ BUG_ON(blk_queued_rq(req));
+
+ if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
+ laptop_io_completion(&req->q->backing_dev_info);
+
+ blk_delete_timer(req);
+
+ if (req->cmd_flags & REQ_DONTPREP)
+ blk_unprep_request(req);
+
+
+ blk_account_io_done(req);
+
+ if (req->end_io)
+ req->end_io(req, error);
+ else {
+ if (blk_bidi_rq(req))
+ __blk_put_request(req->next_rq->q, req->next_rq);
+
+ __blk_put_request(req->q, req);
+ }
+}
+
+/**
+ * blk_end_bidi_request - Complete a bidi request
+ * @rq: the request to complete
+ * @error: %0 for success, < %0 for error
+ * @nr_bytes: number of bytes to complete @rq
+ * @bidi_bytes: number of bytes to complete @rq->next_rq
+ *
+ * Description:
+ * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
+ * Drivers that support bidi can safely call this member for any
+ * type of request, bidi or uni. In the latter case @bidi_bytes is
+ * just ignored.
+ *
+ * Return:
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
+ **/
+static bool blk_end_bidi_request(struct request *rq, int error,
+ unsigned int nr_bytes, unsigned int bidi_bytes)
+{
+ struct request_queue *q = rq->q;
+ unsigned long flags;
+
+ if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
+ return true;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_finish_request(rq, error);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return false;
+}
+
+/**
+ * __blk_end_bidi_request - Complete a bidi request with queue lock held
+ * @rq: the request to complete
+ * @error: %0 for success, < %0 for error
+ * @nr_bytes: number of bytes to complete @rq
+ * @bidi_bytes: number of bytes to complete @rq->next_rq
+ *
+ * Description:
+ * Identical to blk_end_bidi_request() except that queue lock is
+ * assumed to be locked on entry and remains so on return.
+ *
+ * Return:
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
+ **/
+bool __blk_end_bidi_request(struct request *rq, int error,
+ unsigned int nr_bytes, unsigned int bidi_bytes)
+{
+ if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
+ return true;
+
+ blk_finish_request(rq, error);
+
+ return false;
+}
+
+/**
+ * blk_end_request - Helper function for drivers to complete the request.
+ * @rq: the request being processed
+ * @error: %0 for success, < %0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ * Ends I/O on a number of bytes attached to @rq.
+ * If @rq has leftover, sets it up for the next range of segments.
+ *
+ * Return:
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
+ **/
+bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+{
+ return blk_end_bidi_request(rq, error, nr_bytes, 0);
+}
+EXPORT_SYMBOL(blk_end_request);
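+
+/*
+ * Illustrative sketch (not part of this patch): a driver completing a
+ * request piecewise, e.g. from its interrupt handler, without holding the
+ * queue lock; bytes_done is whatever the hardware reported for this
+ * interrupt.
+ *
+ *        if (!blk_end_request(rq, error, bytes_done)) {
+ *                ... the request is fully completed, rq must not be used again
+ *        } else {
+ *                ... more data remains, keep rq and continue the transfer
+ *        }
+ */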
+
+/**
+ * blk_end_request_all - Helper function for drivers to finish the request.
+ * @rq: the request to finish
+ * @error: %0 for success, < %0 for error
+ *
+ * Description:
+ * Completely finish @rq.
+ */
+void blk_end_request_all(struct request *rq, int error)
+{
+ bool pending;
+ unsigned int bidi_bytes = 0;
+
+ if (unlikely(blk_bidi_rq(rq)))
+ bidi_bytes = blk_rq_bytes(rq->next_rq);
+
+ pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
+ BUG_ON(pending);
+}
+EXPORT_SYMBOL(blk_end_request_all);
+
+/**
+ * blk_end_request_cur - Helper function to finish the current request chunk.
+ * @rq: the request to finish the current chunk for
+ * @error: %0 for success, < %0 for error
+ *
+ * Description:
+ * Complete the current consecutively mapped chunk from @rq.
+ *
+ * Return:
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
+ */
+bool blk_end_request_cur(struct request *rq, int error)
+{
+ return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
+}
+EXPORT_SYMBOL(blk_end_request_cur);
+
+/**
+ * blk_end_request_err - Finish a request till the next failure boundary.
+ * @rq: the request to finish till the next failure boundary for
+ * @error: must be negative errno
+ *
+ * Description:
+ * Complete @rq till the next failure boundary.
+ *
+ * Return:
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
+ */
+bool blk_end_request_err(struct request *rq, int error)
+{
+ WARN_ON(error >= 0);
+ return blk_end_request(rq, error, blk_rq_err_bytes(rq));
+}
+EXPORT_SYMBOL_GPL(blk_end_request_err);
+
+/**
+ * __blk_end_request - Helper function for drivers to complete the request.
+ * @rq: the request being processed
+ * @error: %0 for success, < %0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ * Must be called with queue lock held unlike blk_end_request().
+ *
+ * Return:
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
+ **/
+bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+{
+ return __blk_end_bidi_request(rq, error, nr_bytes, 0);
+}
+EXPORT_SYMBOL(__blk_end_request);
+
+/**
+ * __blk_end_request_all - Helper function for drivers to finish the request.
+ * @rq: the request to finish
+ * @error: %0 for success, < %0 for error
+ *
+ * Description:
+ * Completely finish @rq. Must be called with queue lock held.
+ */
+void __blk_end_request_all(struct request *rq, int error)
+{
+ bool pending;
+ unsigned int bidi_bytes = 0;
+
+ if (unlikely(blk_bidi_rq(rq)))
+ bidi_bytes = blk_rq_bytes(rq->next_rq);
+
+ pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
+ BUG_ON(pending);
+}
+EXPORT_SYMBOL(__blk_end_request_all);
+
+/**
+ * __blk_end_request_cur - Helper function to finish the current request chunk.
+ * @rq: the request to finish the current chunk for
+ * @error: %0 for success, < %0 for error
+ *
+ * Description:
+ * Complete the current consecutively mapped chunk from @rq. Must
+ * be called with queue lock held.
+ *
+ * Return:
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
+ */
+bool __blk_end_request_cur(struct request *rq, int error)
+{
+ return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
+}
+EXPORT_SYMBOL(__blk_end_request_cur);
+
+/**
+ * __blk_end_request_err - Finish a request till the next failure boundary.
+ * @rq: the request to finish till the next failure boundary for
+ * @error: must be negative errno
+ *
+ * Description:
+ * Complete @rq till the next failure boundary. Must be called
+ * with queue lock held.
+ *
+ * Return:
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
+ */
+bool __blk_end_request_err(struct request *rq, int error)
+{
+ WARN_ON(error >= 0);
+ return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
+}
+EXPORT_SYMBOL_GPL(__blk_end_request_err);
+
+void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+ struct bio *bio)
+{
+ /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
+ rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
+
+ if (bio_has_data(bio)) {
+ rq->nr_phys_segments = bio_phys_segments(q, bio);
+ rq->buffer = bio_data(bio);
+ }
+ rq->__data_len = bio->bi_size;
+ rq->bio = rq->biotail = bio;
+
+ if (bio->bi_bdev)
+ rq->rq_disk = bio->bi_bdev->bd_disk;
+}
+
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+/**
+ * rq_flush_dcache_pages - Helper function to flush all pages in a request
+ * @rq: the request to be flushed
+ *
+ * Description:
+ * Flush all pages in @rq.
+ */
+void rq_flush_dcache_pages(struct request *rq)
+{
+ struct req_iterator iter;
+ struct bio_vec *bvec;
+
+ rq_for_each_segment(bvec, rq, iter)
+ flush_dcache_page(bvec->bv_page);
+}
+EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
+#endif
+
+/**
+ * blk_lld_busy - Check if underlying low-level drivers of a device are busy
+ * @q : the queue of the device being checked
+ *
+ * Description:
+ * Check if underlying low-level drivers of a device are busy.
+ * If the drivers want to export their busy state, they must set their
+ * own exporting function using blk_queue_lld_busy() first.
+ *
+ * Basically, this function is used only by request stacking drivers
+ * to stop dispatching requests to underlying devices when underlying
+ * devices are busy. This behavior helps more I/O merging on the queue
+ * of the request stacking driver and prevents I/O throughput regression
+ * on burst I/O load.
+ *
+ * Return:
+ * 0 - Not busy (The request stacking driver should dispatch the request)
+ * 1 - Busy (The request stacking driver should stop dispatching requests)
+ */
+int blk_lld_busy(struct request_queue *q)
+{
+ if (q->lld_busy_fn)
+ return q->lld_busy_fn(q);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_lld_busy);
+
+/**
+ * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
+ * @rq: the clone request to be cleaned up
+ *
+ * Description:
+ * Free all bios in @rq for a cloned request.
+ */
+void blk_rq_unprep_clone(struct request *rq)
+{
+ struct bio *bio;
+
+ while ((bio = rq->bio) != NULL) {
+ rq->bio = bio->bi_next;
+
+ bio_put(bio);
+ }
+}
+EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
+
+/*
+ * Copy attributes of the original request to the clone request.
+ * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
+ */
+static void __blk_rq_prep_clone(struct request *dst, struct request *src)
+{
+ dst->cpu = src->cpu;
+ dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
+ dst->cmd_type = src->cmd_type;
+ dst->__sector = blk_rq_pos(src);
+ dst->__data_len = blk_rq_bytes(src);
+ dst->nr_phys_segments = src->nr_phys_segments;
+ dst->ioprio = src->ioprio;
+ dst->extra_len = src->extra_len;
+}
+
+/**
+ * blk_rq_prep_clone - Helper function to setup clone request
+ * @rq: the request to be setup
+ * @rq_src: original request to be cloned
+ * @bs: bio_set that bios for clone are allocated from
+ * @gfp_mask: memory allocation mask for bio
+ * @bio_ctr: setup function to be called for each clone bio.
+ * Returns %0 for success, non %0 for failure.
+ * @data: private data to be passed to @bio_ctr
+ *
+ * Description:
+ * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
+ * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
+ * are not copied, and copying such parts is the caller's responsibility.
+ * Also, pages which the original bios are pointing to are not copied
+ * and the cloned bios just point to the same pages.
+ * So cloned bios must be completed before original bios, which means
+ * the caller must complete @rq before @rq_src.
+ */
+int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+ struct bio_set *bs, gfp_t gfp_mask,
+ int (*bio_ctr)(struct bio *, struct bio *, void *),
+ void *data)
+{
+ struct bio *bio, *bio_src;
+
+ if (!bs)
+ bs = fs_bio_set;
+
+ blk_rq_init(NULL, rq);
+
+ __rq_for_each_bio(bio_src, rq_src) {
+ bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
+ if (!bio)
+ goto free_and_out;
+
+ __bio_clone(bio, bio_src);
+
+ if (bio_integrity(bio_src) &&
+ bio_integrity_clone(bio, bio_src, gfp_mask, bs))
+ goto free_and_out;
+
+ if (bio_ctr && bio_ctr(bio, bio_src, data))
+ goto free_and_out;
+
+ if (rq->bio) {
+ rq->biotail->bi_next = bio;
+ rq->biotail = bio;
+ } else
+ rq->bio = rq->biotail = bio;
+ }
+
+ __blk_rq_prep_clone(rq, rq_src);
+
+ return 0;
+
+free_and_out:
+ if (bio)
+ bio_free(bio, bs);
+ blk_rq_unprep_clone(rq);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
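+
+/*
+ * Illustrative sketch (not part of this patch): how a request-based
+ * stacking driver might clone an incoming request and dispatch the clone
+ * to a lower queue. clone (preallocated by the driver), lower_q and
+ * example_clone_end_io are hypothetical.
+ *
+ *        if (blk_rq_prep_clone(clone, rq, NULL, GFP_ATOMIC, NULL, NULL))
+ *                return -ENOMEM;
+ *        clone->end_io = example_clone_end_io;
+ *        clone->end_io_data = rq;
+ *        error = blk_insert_cloned_request(lower_q, clone);
+ */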
+
+int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
+{
+ return queue_work(kblockd_workqueue, work);
+}
+EXPORT_SYMBOL(kblockd_schedule_work);
+
+int kblockd_schedule_delayed_work(struct request_queue *q,
+ struct delayed_work *dwork, unsigned long delay)
+{
+ return queue_delayed_work(kblockd_workqueue, dwork, delay);
+}
+EXPORT_SYMBOL(kblockd_schedule_delayed_work);
+
+#define PLUG_MAGIC 0x91827364
+
+/**
+ * blk_start_plug - initialize blk_plug and track it inside the task_struct
+ * @plug: The &struct blk_plug that needs to be initialized
+ *
+ * Description:
+ * Tracking blk_plug inside the task_struct will help with auto-flushing the
+ * pending I/O should the task end up blocking between blk_start_plug() and
+ * blk_finish_plug(). This is important from a performance perspective, but
+ * also ensures that we don't deadlock. For instance, if the task is blocking
+ * for a memory allocation, memory reclaim could end up wanting to free a
+ * page belonging to that request that is currently residing in our private
+ * plug. By flushing the pending I/O when the process goes to sleep, we avoid
+ * this kind of deadlock.
+ */
+void blk_start_plug(struct blk_plug *plug)
+{
+ struct task_struct *tsk = current;
+
+ plug->magic = PLUG_MAGIC;
+ INIT_LIST_HEAD(&plug->list);
+ INIT_LIST_HEAD(&plug->cb_list);
+ plug->should_sort = 0;
+
+ /*
+ * If this is a nested plug, don't actually assign it. It will be
+ * flushed on its own.
+ */
+ if (!tsk->plug) {
+ /*
+ * Store ordering should not be needed here, since a potential
+ * preempt will imply a full memory barrier
+ */
+ tsk->plug = plug;
+ }
+}
+EXPORT_SYMBOL(blk_start_plug);
+
+static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct request *rqa = container_of(a, struct request, queuelist);
+ struct request *rqb = container_of(b, struct request, queuelist);
+
+ return !(rqa->q <= rqb->q);
+}
+
+/*
+ * If 'from_schedule' is true, then postpone the dispatch of requests
+ * until a safe kblockd context. We do this to avoid accidentally large
+ * additional stack usage in driver dispatch, in places where the original
+ * plugger did not intend it.
+ */
+static void queue_unplugged(struct request_queue *q, unsigned int depth,
+ bool from_schedule)
+ __releases(q->queue_lock)
+{
+ trace_block_unplug(q, depth, !from_schedule);
+
+ /*
+ * Don't mess with dead queue.
+ */
+ if (unlikely(blk_queue_dead(q))) {
+ spin_unlock(q->queue_lock);
+ return;
+ }
+
+ /*
+ * If we are punting this to kblockd, then we can safely drop
+ * the queue_lock before waking kblockd (which needs to take
+ * this lock).
+ */
+ if (from_schedule) {
+ spin_unlock(q->queue_lock);
+ blk_run_queue_async(q);
+ } else {
+ __blk_run_queue(q);
+ spin_unlock(q->queue_lock);
+ }
+
+}
+
+static void flush_plug_callbacks(struct blk_plug *plug)
+{
+ LIST_HEAD(callbacks);
+
+ if (list_empty(&plug->cb_list))
+ return;
+
+ list_splice_init(&plug->cb_list, &callbacks);
+
+ while (!list_empty(&callbacks)) {
+ struct blk_plug_cb *cb = list_first_entry(&callbacks,
+ struct blk_plug_cb,
+ list);
+ list_del(&cb->list);
+ cb->callback(cb);
+ }
+}
+
+void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+{
+ struct request_queue *q;
+ unsigned long flags;
+ struct request *rq;
+ LIST_HEAD(list);
+ unsigned int depth;
+
+ BUG_ON(plug->magic != PLUG_MAGIC);
+
+ flush_plug_callbacks(plug);
+ if (list_empty(&plug->list))
+ return;
+
+ list_splice_init(&plug->list, &list);
+
+ if (plug->should_sort) {
+ list_sort(NULL, &list, plug_rq_cmp);
+ plug->should_sort = 0;
+ }
+
+ q = NULL;
+ depth = 0;
+
+ /*
+ * Save and disable interrupts here, to avoid doing it for every
+ * queue lock we have to take.
+ */
+ local_irq_save(flags);
+ while (!list_empty(&list)) {
+ rq = list_entry_rq(list.next);
+ list_del_init(&rq->queuelist);
+ BUG_ON(!rq->q);
+ if (rq->q != q) {
+ /*
+ * This drops the queue lock
+ */
+ if (q)
+ queue_unplugged(q, depth, from_schedule);
+ q = rq->q;
+ depth = 0;
+ spin_lock(q->queue_lock);
+ }
+
+ /*
+ * Short-circuit if @q is dead
+ */
+ if (unlikely(blk_queue_dead(q))) {
+ __blk_end_request_all(rq, -ENODEV);
+ continue;
+ }
+
+ /*
+ * rq is already accounted, so use raw insert
+ */
+ if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
+ __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
+ else
+ __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
+
+ depth++;
+ }
+
+ /*
+ * This drops the queue lock
+ */
+ if (q)
+ queue_unplugged(q, depth, from_schedule);
+
+ local_irq_restore(flags);
+}
+
+void blk_finish_plug(struct blk_plug *plug)
+{
+ blk_flush_plug_list(plug, false);
+
+ if (plug == current->plug)
+ current->plug = NULL;
+}
+EXPORT_SYMBOL(blk_finish_plug);
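+
+/*
+ * Illustrative sketch (not part of this patch): a submitter batching several
+ * bios under one plug so they are merged and dispatched together when the
+ * plug is finished (or when the task blocks); bio1 and bio2 are hypothetical.
+ *
+ *        struct blk_plug plug;
+ *
+ *        blk_start_plug(&plug);
+ *        submit_bio(READ, bio1);
+ *        submit_bio(READ, bio2);
+ *        blk_finish_plug(&plug);
+ */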
+
+int __init blk_dev_init(void)
+{
+ BUILD_BUG_ON(__REQ_NR_BITS > 8 *
+ sizeof(((struct request *)0)->cmd_flags));
+
+ /* used for unplugging and affects IO latency/throughput - HIGHPRI */
+ kblockd_workqueue = alloc_workqueue("kblockd",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ if (!kblockd_workqueue)
+ panic("Failed to create kblockd\n");
+
+ request_cachep = kmem_cache_create("blkdev_requests",
+ sizeof(struct request), 0, SLAB_PANIC, NULL);
+
+ blk_requestq_cachep = kmem_cache_create("blkdev_queue",
+ sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
+
+ return 0;
+}
diff --git a/block/blk-exec.c b/block/blk-exec.c
new file mode 100644
index 00000000..fb2cbd55
--- /dev/null
+++ b/block/blk-exec.c
@@ -0,0 +1,121 @@
+/*
+ * Functions for executing prepared requests on a request queue
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+
+#include "blk.h"
+
+/*
+ * for max sense size
+ */
+#include <scsi/scsi_cmnd.h>
+
+/**
+ * blk_end_sync_rq - executes a completion event on a request
+ * @rq: request to complete
+ * @error: end I/O status of the request
+ */
+static void blk_end_sync_rq(struct request *rq, int error)
+{
+ struct completion *waiting = rq->end_io_data;
+
+ rq->end_io_data = NULL;
+ __blk_put_request(rq->q, rq);
+
+ /*
+ * complete last, if this is a stack request the process (and thus
+ * the rq pointer) could be invalid right after this complete()
+ */
+ complete(waiting);
+}
+
+/**
+ * blk_execute_rq_nowait - insert a request into queue for execution
+ * @q: queue to insert the request in
+ * @bd_disk: matching gendisk
+ * @rq: request to insert
+ * @at_head: insert request at head or tail of queue
+ * @done: I/O completion handler
+ *
+ * Description:
+ * Insert a fully prepared request at the back of the I/O scheduler queue
+ * for execution. Don't wait for completion.
+ */
+void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
+ struct request *rq, int at_head,
+ rq_end_io_fn *done)
+{
+ int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+
+ WARN_ON(irqs_disabled());
+ spin_lock_irq(q->queue_lock);
+
+ if (unlikely(blk_queue_dead(q))) {
+ spin_unlock_irq(q->queue_lock);
+ rq->errors = -ENXIO;
+ if (rq->end_io)
+ rq->end_io(rq, rq->errors);
+ return;
+ }
+
+ rq->rq_disk = bd_disk;
+ rq->end_io = done;
+ __elv_add_request(q, rq, where);
+ __blk_run_queue(q);
+ /* the queue is stopped so it won't be run */
+ if (rq->cmd_type == REQ_TYPE_PM_RESUME)
+ q->request_fn(q);
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
+
+/**
+ * blk_execute_rq - insert a request into queue for execution
+ * @q: queue to insert the request in
+ * @bd_disk: matching gendisk
+ * @rq: request to insert
+ * @at_head: insert request at head or tail of queue
+ *
+ * Description:
+ * Insert a fully prepared request at the back of the I/O scheduler queue
+ * for execution and wait for completion.
+ */
+int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
+ struct request *rq, int at_head)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ char sense[SCSI_SENSE_BUFFERSIZE];
+ int err = 0;
+ unsigned long hang_check;
+
+ /*
+ * we need an extra reference to the request, so we can look at
+ * it after io completion
+ */
+ rq->ref_count++;
+
+ if (!rq->sense) {
+ memset(sense, 0, sizeof(sense));
+ rq->sense = sense;
+ rq->sense_len = 0;
+ }
+
+ rq->end_io_data = &wait;
+ blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
+
+ /* Prevent hang_check timer from firing at us during very long I/O */
+ hang_check = sysctl_hung_task_timeout_secs;
+ if (hang_check)
+ while (!wait_for_completion_timeout(&wait, hang_check * (HZ/2)));
+ else
+ wait_for_completion(&wait);
+
+ if (rq->errors)
+ err = -EIO;
+
+ return err;
+}
+EXPORT_SYMBOL(blk_execute_rq);
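+
+/*
+ * Illustrative sketch (not part of this patch): issuing a synchronous
+ * pass-through command; cmd[] is a hypothetical command buffer and error
+ * handling is omitted for brevity.
+ *
+ *        rq = blk_get_request(q, READ, GFP_KERNEL);
+ *        rq->cmd_type = REQ_TYPE_BLOCK_PC;
+ *        rq->cmd_len = sizeof(cmd);
+ *        memcpy(rq->cmd, cmd, rq->cmd_len);
+ *        rq->timeout = 60 * HZ;
+ *        if (blk_execute_rq(q, bd_disk, rq, 0))
+ *                pr_err("example: command failed, errors=%d\n", rq->errors);
+ *        blk_put_request(rq);
+ */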
diff --git a/block/blk-flush.c b/block/blk-flush.c
new file mode 100644
index 00000000..720ad607
--- /dev/null
+++ b/block/blk-flush.c
@@ -0,0 +1,455 @@
+/*
+ * Functions to sequence FLUSH and FUA writes.
+ *
+ * Copyright (C) 2011 Max Planck Institute for Gravitational Physics
+ * Copyright (C) 2011 Tejun Heo <tj@kernel.org>
+ *
+ * This file is released under the GPLv2.
+ *
+ * REQ_{FLUSH|FUA} requests are decomposed into sequences consisting of three
+ * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
+ * properties and hardware capability.
+ *
+ * If a request doesn't have data, only REQ_FLUSH makes sense, which
+ * indicates a simple flush request. If there is data, REQ_FLUSH indicates
+ * that the device cache should be flushed before the data is executed, and
+ * REQ_FUA means that the data must be on non-volatile media on request
+ * completion.
+ *
+ * If the device doesn't have writeback cache, FLUSH and FUA don't make any
+ * difference. The requests are either completed immediately if there's no
+ * data or executed as normal requests otherwise.
+ *
+ * If the device has writeback cache and supports FUA, REQ_FLUSH is
+ * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
+ *
+ * If the device has writeback cache and doesn't support FUA, REQ_FLUSH is
+ * translated to PREFLUSH and REQ_FUA to POSTFLUSH.
+ *
+ * The actual execution of flush is double buffered. Whenever a request
+ * needs to execute PRE or POSTFLUSH, it queues at
+ * q->flush_queue[q->flush_pending_idx]. Once certain criteria are met, a
+ * flush is issued and the pending_idx is toggled. When the flush
+ * completes, all the requests which were pending are advanced to the next
+ * step. This allows arbitrary merging of different types of FLUSH/FUA
+ * requests.
+ *
+ * Currently, the following conditions are used to determine when to issue
+ * flush.
+ *
+ * C1. At any given time, only one flush shall be in progress. This makes
+ * double buffering sufficient.
+ *
+ * C2. Flush is deferred if any request is executing DATA of its sequence.
+ * This avoids issuing separate POSTFLUSHes for requests which shared
+ * PREFLUSH.
+ *
+ * C3. The second condition is ignored if there is a request which has
+ * waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid
+ * starvation in the unlikely case where there is a continuous stream of
+ * FUA (without FLUSH) requests.
+ *
+ * For devices which support FUA, it isn't clear whether C2 (and thus C3)
+ * is beneficial.
+ *
+ * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
+ * Once while executing DATA and again after the whole sequence is
+ * complete. The first completion updates the contained bio but doesn't
+ * finish it so that the bio submitter is notified only after the whole
+ * sequence is complete. This is implemented by testing REQ_FLUSH_SEQ in
+ * req_bio_endio().
+ *
+ * The above peculiarity requires that each FLUSH/FUA request has only one
+ * bio attached to it, which is guaranteed as they aren't allowed to be
+ * merged in the usual way.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/gfp.h>
+
+#include "blk.h"
+
+/* FLUSH/FUA sequences */
+enum {
+ REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */
+ REQ_FSEQ_DATA = (1 << 1), /* data write in progress */
+ REQ_FSEQ_POSTFLUSH = (1 << 2), /* post-flushing in progress */
+ REQ_FSEQ_DONE = (1 << 3),
+
+ REQ_FSEQ_ACTIONS = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
+ REQ_FSEQ_POSTFLUSH,
+
+ /*
+ * If flush has been pending longer than the following timeout,
+ * it's issued even if flush_data requests are still in flight.
+ */
+ FLUSH_PENDING_TIMEOUT = 5 * HZ,
+};
+
+static bool blk_kick_flush(struct request_queue *q);
+
+static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
+{
+ unsigned int policy = 0;
+
+ if (blk_rq_sectors(rq))
+ policy |= REQ_FSEQ_DATA;
+
+ if (fflags & REQ_FLUSH) {
+ if (rq->cmd_flags & REQ_FLUSH)
+ policy |= REQ_FSEQ_PREFLUSH;
+ if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
+ policy |= REQ_FSEQ_POSTFLUSH;
+ }
+ return policy;
+}
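+
+/*
+ * Worked example of the policy above (not part of this patch): on a queue
+ * whose flush_flags are REQ_FLUSH only (writeback cache, no FUA), a
+ * REQ_FLUSH|REQ_FUA write with data yields
+ * REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH, while the same
+ * request on a FUA-capable queue (flush_flags == REQ_FLUSH|REQ_FUA) yields
+ * only REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA, since REQ_FUA is then passed down
+ * with the data.
+ */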
+
+static unsigned int blk_flush_cur_seq(struct request *rq)
+{
+ return 1 << ffz(rq->flush.seq);
+}
+
+static void blk_flush_restore_request(struct request *rq)
+{
+ /*
+ * After flush data completion, @rq->bio is %NULL but we need to
+ * complete the bio again. @rq->biotail is guaranteed to equal the
+ * original @rq->bio. Restore it.
+ */
+ rq->bio = rq->biotail;
+
+ /* make @rq a normal request */
+ rq->cmd_flags &= ~REQ_FLUSH_SEQ;
+ rq->end_io = rq->flush.saved_end_io;
+}
+
+/**
+ * blk_flush_complete_seq - complete flush sequence
+ * @rq: FLUSH/FUA request being sequenced
+ * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
+ * @error: whether an error occurred
+ *
+ * @rq just completed @seq part of its flush sequence, record the
+ * completion and trigger the next step.
+ *
+ * CONTEXT:
+ * spin_lock_irq(q->queue_lock)
+ *
+ * RETURNS:
+ * %true if requests were added to the dispatch queue, %false otherwise.
+ */
+static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
+ int error)
+{
+ struct request_queue *q = rq->q;
+ struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
+ bool queued = false;
+
+ BUG_ON(rq->flush.seq & seq);
+ rq->flush.seq |= seq;
+
+ if (likely(!error))
+ seq = blk_flush_cur_seq(rq);
+ else
+ seq = REQ_FSEQ_DONE;
+
+ switch (seq) {
+ case REQ_FSEQ_PREFLUSH:
+ case REQ_FSEQ_POSTFLUSH:
+ /* queue for flush */
+ if (list_empty(pending))
+ q->flush_pending_since = jiffies;
+ list_move_tail(&rq->flush.list, pending);
+ break;
+
+ case REQ_FSEQ_DATA:
+ list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
+ list_add(&rq->queuelist, &q->queue_head);
+ queued = true;
+ break;
+
+ case REQ_FSEQ_DONE:
+ /*
+ * @rq was previously adjusted by blk_flush_issue() for
+ * flush sequencing and may already have gone through the
+ * flush data request completion path. Restore @rq for
+ * normal completion and end it.
+ */
+ BUG_ON(!list_empty(&rq->queuelist));
+ list_del_init(&rq->flush.list);
+ blk_flush_restore_request(rq);
+ __blk_end_request_all(rq, error);
+ break;
+
+ default:
+ BUG();
+ }
+
+ return blk_kick_flush(q) | queued;
+}
+
+static void flush_end_io(struct request *flush_rq, int error)
+{
+ struct request_queue *q = flush_rq->q;
+ struct list_head *running = &q->flush_queue[q->flush_running_idx];
+ bool queued = false;
+ struct request *rq, *n;
+
+ BUG_ON(q->flush_pending_idx == q->flush_running_idx);
+
+ /* account completion of the flush request */
+ q->flush_running_idx ^= 1;
+ elv_completed_request(q, flush_rq);
+
+ /* and push the waiting requests to the next stage */
+ list_for_each_entry_safe(rq, n, running, flush.list) {
+ unsigned int seq = blk_flush_cur_seq(rq);
+
+ BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
+ queued |= blk_flush_complete_seq(rq, seq, error);
+ }
+
+ /*
+ * Kick the queue to avoid stall for two cases:
+ * 1. Moving a request silently to empty queue_head may stall the
+ * queue.
+ * 2. When a flush request is running in a non-queueable queue, the
+ * queue is held. Restart the queue after the flush request is finished
+ * to avoid a stall.
+ * This function is called from request completion path and calling
+ * directly into request_fn may confuse the driver. Always use
+ * kblockd.
+ */
+ if (queued || q->flush_queue_delayed)
+ blk_run_queue_async(q);
+ q->flush_queue_delayed = 0;
+}
+
+/**
+ * blk_kick_flush - consider issuing flush request
+ * @q: request_queue being kicked
+ *
+ * Flush related states of @q have changed, consider issuing flush request.
+ * Please read the comment at the top of this file for more info.
+ *
+ * CONTEXT:
+ * spin_lock_irq(q->queue_lock)
+ *
+ * RETURNS:
+ * %true if flush was issued, %false otherwise.
+ */
+static bool blk_kick_flush(struct request_queue *q)
+{
+ struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
+ struct request *first_rq =
+ list_first_entry(pending, struct request, flush.list);
+
+ /* C1 described at the top of this file */
+ if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
+ return false;
+
+ /* C2 and C3 */
+ if (!list_empty(&q->flush_data_in_flight) &&
+ time_before(jiffies,
+ q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
+ return false;
+
+ /*
+ * Issue flush and toggle pending_idx. This makes pending_idx
+ * different from running_idx, which means flush is in flight.
+ */
+ blk_rq_init(q, &q->flush_rq);
+ q->flush_rq.cmd_type = REQ_TYPE_FS;
+ q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+ q->flush_rq.rq_disk = first_rq->rq_disk;
+ q->flush_rq.end_io = flush_end_io;
+
+ q->flush_pending_idx ^= 1;
+ list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
+ return true;
+}
+
+static void flush_data_end_io(struct request *rq, int error)
+{
+ struct request_queue *q = rq->q;
+
+ /*
+ * After populating an empty queue, kick it to avoid stall. Read
+ * the comment in flush_end_io().
+ */
+ if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
+ blk_run_queue_async(q);
+}
+
+/**
+ * blk_insert_flush - insert a new FLUSH/FUA request
+ * @rq: request to insert
+ *
+ * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
+ * @rq is being submitted. Analyze what needs to be done and put it on the
+ * right queue.
+ *
+ * CONTEXT:
+ * spin_lock_irq(q->queue_lock)
+ */
+void blk_insert_flush(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+ unsigned int fflags = q->flush_flags; /* may change, cache */
+ unsigned int policy = blk_flush_policy(fflags, rq);
+
+ /*
+ * @policy now records what operations need to be done. Adjust
+ * REQ_FLUSH and FUA for the driver.
+ */
+ rq->cmd_flags &= ~REQ_FLUSH;
+ if (!(fflags & REQ_FUA))
+ rq->cmd_flags &= ~REQ_FUA;
+
+ /*
+ * An empty flush handed down from a stacking driver may
+ * translate into nothing if the underlying device does not
+ * advertise a write-back cache. In this case, simply
+ * complete the request.
+ */
+ if (!policy) {
+ __blk_end_bidi_request(rq, 0, 0, 0);
+ return;
+ }
+
+ BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */
+
+ /*
+ * If there's data but flush is not necessary, the request can be
+ * processed directly without going through flush machinery. Queue
+ * for normal execution.
+ */
+ if ((policy & REQ_FSEQ_DATA) &&
+ !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
+ list_add_tail(&rq->queuelist, &q->queue_head);
+ return;
+ }
+
+ /*
+ * @rq should go through flush machinery. Mark it part of flush
+ * sequence and submit for further processing.
+ */
+ memset(&rq->flush, 0, sizeof(rq->flush));
+ INIT_LIST_HEAD(&rq->flush.list);
+ rq->cmd_flags |= REQ_FLUSH_SEQ;
+ rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
+ rq->end_io = flush_data_end_io;
+
+ blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
+}
+
+/**
+ * blk_abort_flushes - @q is being aborted, abort flush requests
+ * @q: request_queue being aborted
+ *
+ * To be called from elv_abort_queue(). @q is being aborted. Prepare all
+ * FLUSH/FUA requests for abortion.
+ *
+ * CONTEXT:
+ * spin_lock_irq(q->queue_lock)
+ */
+void blk_abort_flushes(struct request_queue *q)
+{
+ struct request *rq, *n;
+ int i;
+
+ /*
+ * Requests in flight for data are already owned by the dispatch
+ * queue or the device driver. Just restore for normal completion.
+ */
+ list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
+ list_del_init(&rq->flush.list);
+ blk_flush_restore_request(rq);
+ }
+
+ /*
+ * We need to give away requests on flush queues. Restore for
+ * normal completion and put them on the dispatch queue.
+ */
+ for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
+ list_for_each_entry_safe(rq, n, &q->flush_queue[i],
+ flush.list) {
+ list_del_init(&rq->flush.list);
+ blk_flush_restore_request(rq);
+ list_add_tail(&rq->queuelist, &q->queue_head);
+ }
+ }
+}
+
+static void bio_end_flush(struct bio *bio, int err)
+{
+ if (err)
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ if (bio->bi_private)
+ complete(bio->bi_private);
+ bio_put(bio);
+}
+
+/**
+ * blkdev_issue_flush - queue a flush
+ * @bdev: blockdev to issue flush for
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ * @error_sector: error sector
+ *
+ * Description:
+ * Issue a flush for the block device in question. Caller can supply
+ * room for storing the error offset in case of a flush error, if they
+ * wish to. The flush is issued synchronously; this function returns
+ * only after the flush bio has completed.
+ */
+int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
+ sector_t *error_sector)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct request_queue *q;
+ struct bio *bio;
+ int ret = 0;
+
+ if (bdev->bd_disk == NULL)
+ return -ENXIO;
+
+ q = bdev_get_queue(bdev);
+ if (!q)
+ return -ENXIO;
+
+ /*
+ * some block devices may not have their queue correctly set up here
+ * (e.g. loop device without a backing file) and so issuing a flush
+ * here will panic. Ensure there is a request function before issuing
+ * the flush.
+ */
+ if (!q->make_request_fn)
+ return -ENXIO;
+
+ bio = bio_alloc(gfp_mask, 0);
+ bio->bi_end_io = bio_end_flush;
+ bio->bi_bdev = bdev;
+ bio->bi_private = &wait;
+
+ bio_get(bio);
+ submit_bio(WRITE_FLUSH, bio);
+ wait_for_completion(&wait);
+
+ /*
+ * The driver must store the error location in ->bi_sector, if
+ * it supports it. For non-stacked drivers, this should be
+ * copied from blk_rq_pos(rq).
+ */
+ if (error_sector)
+ *error_sector = bio->bi_sector;
+
+ if (!bio_flagged(bio, BIO_UPTODATE))
+ ret = -EIO;
+
+ bio_put(bio);
+ return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_flush);
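+
+/*
+ * Illustrative sketch (not part of this patch): a filesystem flushing the
+ * volatile cache of its backing device after writing metadata, not caring
+ * about the error sector; sb is a hypothetical super_block pointer.
+ *
+ *        ret = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
+ */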
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
new file mode 100644
index 00000000..da2a818c
--- /dev/null
+++ b/block/blk-integrity.c
@@ -0,0 +1,448 @@
+/*
+ * blk-integrity.c - Block layer data integrity extensions
+ *
+ * Copyright (C) 2007, 2008 Oracle Corporation
+ * Written by: Martin K. Petersen <martin.petersen@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ *
+ */
+
+#include <linux/blkdev.h>
+#include <linux/mempool.h>
+#include <linux/bio.h>
+#include <linux/scatterlist.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+
+#include "blk.h"
+
+static struct kmem_cache *integrity_cachep;
+
+static const char *bi_unsupported_name = "unsupported";
+
+/**
+ * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
+ * @q: request queue
+ * @bio: bio with integrity metadata attached
+ *
+ * Description: Returns the number of elements required in a
+ * scatterlist corresponding to the integrity metadata in a bio.
+ */
+int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
+{
+ struct bio_vec *iv, *ivprv = NULL;
+ unsigned int segments = 0;
+ unsigned int seg_size = 0;
+ unsigned int i = 0;
+
+ bio_for_each_integrity_vec(iv, bio, i) {
+
+ if (ivprv) {
+ if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
+ goto new_segment;
+
+ if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
+ goto new_segment;
+
+ if (seg_size + iv->bv_len > queue_max_segment_size(q))
+ goto new_segment;
+
+ seg_size += iv->bv_len;
+ } else {
+new_segment:
+ segments++;
+ seg_size = iv->bv_len;
+ }
+
+ ivprv = iv;
+ }
+
+ return segments;
+}
+EXPORT_SYMBOL(blk_rq_count_integrity_sg);
+
+/**
+ * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
+ * @q: request queue
+ * @bio: bio with integrity metadata attached
+ * @sglist: target scatterlist
+ *
+ * Description: Map the integrity vectors in @bio into a
+ * scatterlist. The scatterlist must be big enough to hold all
+ * elements. I.e. sized using blk_rq_count_integrity_sg().
+ */
+int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
+ struct scatterlist *sglist)
+{
+ struct bio_vec *iv, *ivprv = NULL;
+ struct scatterlist *sg = NULL;
+ unsigned int segments = 0;
+ unsigned int i = 0;
+
+ bio_for_each_integrity_vec(iv, bio, i) {
+
+ if (ivprv) {
+ if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
+ goto new_segment;
+
+ if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
+ goto new_segment;
+
+ if (sg->length + iv->bv_len > queue_max_segment_size(q))
+ goto new_segment;
+
+ sg->length += iv->bv_len;
+ } else {
+new_segment:
+ if (!sg)
+ sg = sglist;
+ else {
+ sg->page_link &= ~0x02;
+ sg = sg_next(sg);
+ }
+
+ sg_set_page(sg, iv->bv_page, iv->bv_len, iv->bv_offset);
+ segments++;
+ }
+
+ ivprv = iv;
+ }
+
+ if (sg)
+ sg_mark_end(sg);
+
+ return segments;
+}
+EXPORT_SYMBOL(blk_rq_map_integrity_sg);
+
+/**
+ * blk_integrity_compare - Compare integrity profile of two disks
+ * @gd1: Disk to compare
+ * @gd2: Disk to compare
+ *
+ * Description: Meta-devices like DM and MD need to verify that all
+ * sub-devices use the same integrity format before advertising to
+ * upper layers that they can send/receive integrity metadata. This
+ * function can be used to check whether two gendisk devices have
+ * compatible integrity formats.
+ */
+int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2)
+{
+ struct blk_integrity *b1 = gd1->integrity;
+ struct blk_integrity *b2 = gd2->integrity;
+
+ if (!b1 && !b2)
+ return 0;
+
+ if (!b1 || !b2)
+ return -1;
+
+ if (b1->sector_size != b2->sector_size) {
+ printk(KERN_ERR "%s: %s/%s sector sz %u != %u\n", __func__,
+ gd1->disk_name, gd2->disk_name,
+ b1->sector_size, b2->sector_size);
+ return -1;
+ }
+
+ if (b1->tuple_size != b2->tuple_size) {
+ printk(KERN_ERR "%s: %s/%s tuple sz %u != %u\n", __func__,
+ gd1->disk_name, gd2->disk_name,
+ b1->tuple_size, b2->tuple_size);
+ return -1;
+ }
+
+ if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) {
+ printk(KERN_ERR "%s: %s/%s tag sz %u != %u\n", __func__,
+ gd1->disk_name, gd2->disk_name,
+ b1->tag_size, b2->tag_size);
+ return -1;
+ }
+
+ if (strcmp(b1->name, b2->name)) {
+ printk(KERN_ERR "%s: %s/%s type %s != %s\n", __func__,
+ gd1->disk_name, gd2->disk_name,
+ b1->name, b2->name);
+ return -1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(blk_integrity_compare);
+
+int blk_integrity_merge_rq(struct request_queue *q, struct request *req,
+ struct request *next)
+{
+ if (blk_integrity_rq(req) != blk_integrity_rq(next))
+ return -1;
+
+ if (req->nr_integrity_segments + next->nr_integrity_segments >
+ q->limits.max_integrity_segments)
+ return -1;
+
+ return 0;
+}
+EXPORT_SYMBOL(blk_integrity_merge_rq);
+
+int blk_integrity_merge_bio(struct request_queue *q, struct request *req,
+ struct bio *bio)
+{
+ int nr_integrity_segs;
+ struct bio *next = bio->bi_next;
+
+ bio->bi_next = NULL;
+ nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
+ bio->bi_next = next;
+
+ if (req->nr_integrity_segments + nr_integrity_segs >
+ q->limits.max_integrity_segments)
+ return -1;
+
+ req->nr_integrity_segments += nr_integrity_segs;
+
+ return 0;
+}
+EXPORT_SYMBOL(blk_integrity_merge_bio);
+
+struct integrity_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct blk_integrity *, char *);
+ ssize_t (*store)(struct blk_integrity *, const char *, size_t);
+};
+
+static ssize_t integrity_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *page)
+{
+ struct blk_integrity *bi =
+ container_of(kobj, struct blk_integrity, kobj);
+ struct integrity_sysfs_entry *entry =
+ container_of(attr, struct integrity_sysfs_entry, attr);
+
+ return entry->show(bi, page);
+}
+
+static ssize_t integrity_attr_store(struct kobject *kobj,
+ struct attribute *attr, const char *page,
+ size_t count)
+{
+ struct blk_integrity *bi =
+ container_of(kobj, struct blk_integrity, kobj);
+ struct integrity_sysfs_entry *entry =
+ container_of(attr, struct integrity_sysfs_entry, attr);
+ ssize_t ret = 0;
+
+ if (entry->store)
+ ret = entry->store(bi, page, count);
+
+ return ret;
+}
+
+static ssize_t integrity_format_show(struct blk_integrity *bi, char *page)
+{
+ if (bi != NULL && bi->name != NULL)
+ return sprintf(page, "%s\n", bi->name);
+ else
+ return sprintf(page, "none\n");
+}
+
+static ssize_t integrity_tag_size_show(struct blk_integrity *bi, char *page)
+{
+ if (bi != NULL)
+ return sprintf(page, "%u\n", bi->tag_size);
+ else
+ return sprintf(page, "0\n");
+}
+
+static ssize_t integrity_read_store(struct blk_integrity *bi,
+ const char *page, size_t count)
+{
+ char *p = (char *) page;
+ unsigned long val = simple_strtoul(p, &p, 10);
+
+ if (val)
+ bi->flags |= INTEGRITY_FLAG_READ;
+ else
+ bi->flags &= ~INTEGRITY_FLAG_READ;
+
+ return count;
+}
+
+static ssize_t integrity_read_show(struct blk_integrity *bi, char *page)
+{
+ return sprintf(page, "%d\n", (bi->flags & INTEGRITY_FLAG_READ) != 0);
+}
+
+static ssize_t integrity_write_store(struct blk_integrity *bi,
+ const char *page, size_t count)
+{
+ char *p = (char *) page;
+ unsigned long val = simple_strtoul(p, &p, 10);
+
+ if (val)
+ bi->flags |= INTEGRITY_FLAG_WRITE;
+ else
+ bi->flags &= ~INTEGRITY_FLAG_WRITE;
+
+ return count;
+}
+
+static ssize_t integrity_write_show(struct blk_integrity *bi, char *page)
+{
+ return sprintf(page, "%d\n", (bi->flags & INTEGRITY_FLAG_WRITE) != 0);
+}
+
+static struct integrity_sysfs_entry integrity_format_entry = {
+ .attr = { .name = "format", .mode = S_IRUGO },
+ .show = integrity_format_show,
+};
+
+static struct integrity_sysfs_entry integrity_tag_size_entry = {
+ .attr = { .name = "tag_size", .mode = S_IRUGO },
+ .show = integrity_tag_size_show,
+};
+
+static struct integrity_sysfs_entry integrity_read_entry = {
+ .attr = { .name = "read_verify", .mode = S_IRUGO | S_IWUSR },
+ .show = integrity_read_show,
+ .store = integrity_read_store,
+};
+
+static struct integrity_sysfs_entry integrity_write_entry = {
+ .attr = { .name = "write_generate", .mode = S_IRUGO | S_IWUSR },
+ .show = integrity_write_show,
+ .store = integrity_write_store,
+};
+
+static struct attribute *integrity_attrs[] = {
+ &integrity_format_entry.attr,
+ &integrity_tag_size_entry.attr,
+ &integrity_read_entry.attr,
+ &integrity_write_entry.attr,
+ NULL,
+};
+
+static const struct sysfs_ops integrity_ops = {
+ .show = &integrity_attr_show,
+ .store = &integrity_attr_store,
+};
+
+static int __init blk_dev_integrity_init(void)
+{
+ integrity_cachep = kmem_cache_create("blkdev_integrity",
+ sizeof(struct blk_integrity),
+ 0, SLAB_PANIC, NULL);
+ return 0;
+}
+subsys_initcall(blk_dev_integrity_init);
+
+static void blk_integrity_release(struct kobject *kobj)
+{
+ struct blk_integrity *bi =
+ container_of(kobj, struct blk_integrity, kobj);
+
+ kmem_cache_free(integrity_cachep, bi);
+}
+
+static struct kobj_type integrity_ktype = {
+ .default_attrs = integrity_attrs,
+ .sysfs_ops = &integrity_ops,
+ .release = blk_integrity_release,
+};
+
+bool blk_integrity_is_initialized(struct gendisk *disk)
+{
+ struct blk_integrity *bi = blk_get_integrity(disk);
+
+ return (bi && bi->name && strcmp(bi->name, bi_unsupported_name) != 0);
+}
+EXPORT_SYMBOL(blk_integrity_is_initialized);
+
+/**
+ * blk_integrity_register - Register a gendisk as being integrity-capable
+ * @disk: struct gendisk pointer to make integrity-aware
+ * @template: optional integrity profile to register
+ *
+ * Description: When a device needs to advertise itself as being able
+ * to send/receive integrity metadata it must use this function to
+ * register the capability with the block layer. The template is a
+ * blk_integrity struct with values appropriate for the underlying
+ * hardware. If template is NULL the new profile is allocated but
+ * not filled out. See Documentation/block/data-integrity.txt.
+ */
+int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
+{
+ struct blk_integrity *bi;
+
+ BUG_ON(disk == NULL);
+
+ if (disk->integrity == NULL) {
+ bi = kmem_cache_alloc(integrity_cachep,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!bi)
+ return -1;
+
+ if (kobject_init_and_add(&bi->kobj, &integrity_ktype,
+ &disk_to_dev(disk)->kobj,
+ "%s", "integrity")) {
+ kmem_cache_free(integrity_cachep, bi);
+ return -1;
+ }
+
+ kobject_uevent(&bi->kobj, KOBJ_ADD);
+
+ bi->flags |= INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE;
+ bi->sector_size = queue_logical_block_size(disk->queue);
+ disk->integrity = bi;
+ } else
+ bi = disk->integrity;
+
+ /* Use the provided profile as template */
+ if (template != NULL) {
+ bi->name = template->name;
+ bi->generate_fn = template->generate_fn;
+ bi->verify_fn = template->verify_fn;
+ bi->tuple_size = template->tuple_size;
+ bi->set_tag_fn = template->set_tag_fn;
+ bi->get_tag_fn = template->get_tag_fn;
+ bi->tag_size = template->tag_size;
+ } else
+ bi->name = bi_unsupported_name;
+
+ return 0;
+}
+EXPORT_SYMBOL(blk_integrity_register);
+
+/**
+ * blk_integrity_unregister - Remove block integrity profile
+ * @disk: disk whose integrity profile to deallocate
+ *
+ * Description: This function frees all memory used by the block
+ * integrity profile. To be called at device teardown.
+ */
+void blk_integrity_unregister(struct gendisk *disk)
+{
+ struct blk_integrity *bi;
+
+ if (!disk || !disk->integrity)
+ return;
+
+ bi = disk->integrity;
+
+ kobject_uevent(&bi->kobj, KOBJ_REMOVE);
+ kobject_del(&bi->kobj);
+ kobject_put(&bi->kobj);
+ disk->integrity = NULL;
+}
+EXPORT_SYMBOL(blk_integrity_unregister);
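
Illustrative sketch (placeholder names, not from this tree): a driver that advertises protection information would register a blk_integrity profile once the gendisk and its queue exist, and unregister it at teardown.

static struct blk_integrity example_integrity = {
	.name		= "EXAMPLE-DIF",	/* placeholder profile name */
	.tuple_size	= 8,			/* bytes of PI per sector */
	.tag_size	= 0,
	/* .generate_fn / .verify_fn would point at the driver's PI helpers */
};

/* at probe time */
blk_integrity_register(disk, &example_integrity);
/* at teardown */
blk_integrity_unregister(disk);
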
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
new file mode 100644
index 00000000..fb95dd2f
--- /dev/null
+++ b/block/blk-ioc.c
@@ -0,0 +1,459 @@
+/*
+ * Functions related to io context handling
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
+#include <linux/slab.h>
+
+#include "blk.h"
+
+/*
+ * For io context allocations
+ */
+static struct kmem_cache *iocontext_cachep;
+
+/**
+ * get_io_context - increment reference count to io_context
+ * @ioc: io_context to get
+ *
+ * Increment reference count to @ioc.
+ */
+void get_io_context(struct io_context *ioc)
+{
+ BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
+ atomic_long_inc(&ioc->refcount);
+}
+EXPORT_SYMBOL(get_io_context);
+
+static void icq_free_icq_rcu(struct rcu_head *head)
+{
+ struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
+
+ kmem_cache_free(icq->__rcu_icq_cache, icq);
+}
+
+/* Exit an icq. Called with both ioc and q locked. */
+static void ioc_exit_icq(struct io_cq *icq)
+{
+ struct elevator_type *et = icq->q->elevator->type;
+
+ if (icq->flags & ICQ_EXITED)
+ return;
+
+ if (et->ops.elevator_exit_icq_fn)
+ et->ops.elevator_exit_icq_fn(icq);
+
+ icq->flags |= ICQ_EXITED;
+}
+
+/* Release an icq. Called with both ioc and q locked. */
+static void ioc_destroy_icq(struct io_cq *icq)
+{
+ struct io_context *ioc = icq->ioc;
+ struct request_queue *q = icq->q;
+ struct elevator_type *et = q->elevator->type;
+
+ lockdep_assert_held(&ioc->lock);
+ lockdep_assert_held(q->queue_lock);
+
+ radix_tree_delete(&ioc->icq_tree, icq->q->id);
+ hlist_del_init(&icq->ioc_node);
+ list_del_init(&icq->q_node);
+
+ /*
+ * Both setting lookup hint to and clearing it from @icq are done
+ * under queue_lock. If it's not pointing to @icq now, it never
+ * will. Hint assignment itself can race safely.
+ */
+ if (rcu_dereference_raw(ioc->icq_hint) == icq)
+ rcu_assign_pointer(ioc->icq_hint, NULL);
+
+ ioc_exit_icq(icq);
+
+ /*
+ * @icq->q might have gone away by the time RCU callback runs
+ * making it impossible to determine icq_cache. Record it in @icq.
+ */
+ icq->__rcu_icq_cache = et->icq_cache;
+ call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
+}
+
+/*
+ * Slow path for ioc release in put_io_context(). Performs double-lock
+ * dancing to unlink all icq's and then frees ioc.
+ */
+static void ioc_release_fn(struct work_struct *work)
+{
+ struct io_context *ioc = container_of(work, struct io_context,
+ release_work);
+ unsigned long flags;
+
+ /*
+ * Exiting icq may call into put_io_context() through elevator
+ * which will trigger lockdep warning. The ioc's are guaranteed to
+ * be different, use a different locking subclass here. Use
+ * irqsave variant as there's no spin_lock_irq_nested().
+ */
+ spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+
+ while (!hlist_empty(&ioc->icq_list)) {
+ struct io_cq *icq = hlist_entry(ioc->icq_list.first,
+ struct io_cq, ioc_node);
+ struct request_queue *q = icq->q;
+
+ if (spin_trylock(q->queue_lock)) {
+ ioc_destroy_icq(icq);
+ spin_unlock(q->queue_lock);
+ } else {
+ spin_unlock_irqrestore(&ioc->lock, flags);
+ cpu_relax();
+ spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+ }
+ }
+
+ spin_unlock_irqrestore(&ioc->lock, flags);
+
+ kmem_cache_free(iocontext_cachep, ioc);
+}
+
+/**
+ * put_io_context - put a reference of io_context
+ * @ioc: io_context to put
+ *
+ * Decrement reference count of @ioc and release it if the count reaches
+ * zero.
+ */
+void put_io_context(struct io_context *ioc)
+{
+ unsigned long flags;
+ bool free_ioc = false;
+
+ if (ioc == NULL)
+ return;
+
+ BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
+
+ /*
+ * Releasing ioc requires reverse order double locking and we may
+ * already be holding a queue_lock. Do it asynchronously from wq.
+ */
+ if (atomic_long_dec_and_test(&ioc->refcount)) {
+ spin_lock_irqsave(&ioc->lock, flags);
+ if (!hlist_empty(&ioc->icq_list))
+ schedule_work(&ioc->release_work);
+ else
+ free_ioc = true;
+ spin_unlock_irqrestore(&ioc->lock, flags);
+ }
+
+ if (free_ioc)
+ kmem_cache_free(iocontext_cachep, ioc);
+}
+EXPORT_SYMBOL(put_io_context);
+
+/* Called by the exiting task */
+void exit_io_context(struct task_struct *task)
+{
+ struct io_context *ioc;
+ struct io_cq *icq;
+ struct hlist_node *n;
+ unsigned long flags;
+
+ task_lock(task);
+ ioc = task->io_context;
+ task->io_context = NULL;
+ task_unlock(task);
+
+ if (!atomic_dec_and_test(&ioc->nr_tasks)) {
+ put_io_context(ioc);
+ return;
+ }
+
+ /*
+ * Need ioc lock to walk icq_list and q lock to exit icq. Perform
+ * reverse double locking. Read comment in ioc_release_fn() for
+ * explanation on the nested locking annotation.
+ */
+retry:
+ spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+ hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
+ if (icq->flags & ICQ_EXITED)
+ continue;
+ if (spin_trylock(icq->q->queue_lock)) {
+ ioc_exit_icq(icq);
+ spin_unlock(icq->q->queue_lock);
+ } else {
+ spin_unlock_irqrestore(&ioc->lock, flags);
+ cpu_relax();
+ goto retry;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->lock, flags);
+
+ put_io_context(ioc);
+}
+
+/**
+ * ioc_clear_queue - break any ioc association with the specified queue
+ * @q: request_queue being cleared
+ *
+ * Walk @q->icq_list and exit all io_cq's. Must be called with @q locked.
+ */
+void ioc_clear_queue(struct request_queue *q)
+{
+ lockdep_assert_held(q->queue_lock);
+
+ while (!list_empty(&q->icq_list)) {
+ struct io_cq *icq = list_entry(q->icq_list.next,
+ struct io_cq, q_node);
+ struct io_context *ioc = icq->ioc;
+
+ spin_lock(&ioc->lock);
+ ioc_destroy_icq(icq);
+ spin_unlock(&ioc->lock);
+ }
+}
+
+void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
+ int node)
+{
+ struct io_context *ioc;
+
+ ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
+ node);
+ if (unlikely(!ioc))
+ return;
+
+ /* initialize */
+ atomic_long_set(&ioc->refcount, 1);
+ atomic_set(&ioc->nr_tasks, 1);
+ spin_lock_init(&ioc->lock);
+ INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
+ INIT_HLIST_HEAD(&ioc->icq_list);
+ INIT_WORK(&ioc->release_work, ioc_release_fn);
+
+ /*
+ * Try to install. ioc shouldn't be installed if someone else
+ * already did or @task, which isn't %current, is exiting. Note
+ * that we need to allow ioc creation on exiting %current as exit
+ * path may issue IOs from e.g. exit_files(). The exit path is
+ * responsible for not issuing IO after exit_io_context().
+ */
+ task_lock(task);
+ if (!task->io_context &&
+ (task == current || !(task->flags & PF_EXITING)))
+ task->io_context = ioc;
+ else
+ kmem_cache_free(iocontext_cachep, ioc);
+ task_unlock(task);
+}
+
+/**
+ * get_task_io_context - get io_context of a task
+ * @task: task of interest
+ * @gfp_flags: allocation flags, used if allocation is necessary
+ * @node: allocation node, used if allocation is necessary
+ *
+ * Return io_context of @task. If it doesn't exist, it is created with
+ * @gfp_flags and @node. The returned io_context has its reference count
+ * incremented.
+ *
+ * This function always goes through task_lock() and it's better to use
+ * %current->io_context + get_io_context() for %current.
+ */
+struct io_context *get_task_io_context(struct task_struct *task,
+ gfp_t gfp_flags, int node)
+{
+ struct io_context *ioc;
+
+ might_sleep_if(gfp_flags & __GFP_WAIT);
+
+ do {
+ task_lock(task);
+ ioc = task->io_context;
+ if (likely(ioc)) {
+ get_io_context(ioc);
+ task_unlock(task);
+ return ioc;
+ }
+ task_unlock(task);
+ } while (create_io_context(task, gfp_flags, node));
+
+ return NULL;
+}
+EXPORT_SYMBOL(get_task_io_context);
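
For example (sketch, error handling trimmed), a caller attributing I/O to another task pairs this with put_io_context():

struct io_context *ioc;

ioc = get_task_io_context(task, GFP_KERNEL, NUMA_NO_NODE);
if (ioc) {
	/* inspect or adjust ioc here, e.g. ioc->ioprio */
	put_io_context(ioc);		/* drop the reference taken above */
}
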
+
+/**
+ * ioc_lookup_icq - lookup io_cq from ioc
+ * @ioc: the associated io_context
+ * @q: the associated request_queue
+ *
+ * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called
+ * with @q->queue_lock held.
+ */
+struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
+{
+ struct io_cq *icq;
+
+ lockdep_assert_held(q->queue_lock);
+
+ /*
+ * icq's are indexed from @ioc using radix tree and hint pointer,
+ * both of which are protected with RCU. All removals are done
+ * holding both q and ioc locks, and we're holding q lock - if we
+	 * find an icq which points to us, it's guaranteed to be valid.
+ */
+ rcu_read_lock();
+ icq = rcu_dereference(ioc->icq_hint);
+ if (icq && icq->q == q)
+ goto out;
+
+ icq = radix_tree_lookup(&ioc->icq_tree, q->id);
+ if (icq && icq->q == q)
+ rcu_assign_pointer(ioc->icq_hint, icq); /* allowed to race */
+ else
+ icq = NULL;
+out:
+ rcu_read_unlock();
+ return icq;
+}
+EXPORT_SYMBOL(ioc_lookup_icq);
+
+/**
+ * ioc_create_icq - create and link io_cq
+ * @q: request_queue of interest
+ * @gfp_mask: allocation mask
+ *
+ * Make sure an io_cq linking %current->io_context and @q exists. If the
+ * io_context and/or the icq don't exist, they will be created using @gfp_mask.
+ *
+ * The caller is responsible for ensuring that the io_context won't go away
+ * and that @q is alive and will stay alive until this function returns.
+ */
+struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
+{
+ struct elevator_type *et = q->elevator->type;
+ struct io_context *ioc;
+ struct io_cq *icq;
+
+ /* allocate stuff */
+ ioc = create_io_context(current, gfp_mask, q->node);
+ if (!ioc)
+ return NULL;
+
+ icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
+ q->node);
+ if (!icq)
+ return NULL;
+
+ if (radix_tree_preload(gfp_mask) < 0) {
+ kmem_cache_free(et->icq_cache, icq);
+ return NULL;
+ }
+
+ icq->ioc = ioc;
+ icq->q = q;
+ INIT_LIST_HEAD(&icq->q_node);
+ INIT_HLIST_NODE(&icq->ioc_node);
+
+ /* lock both q and ioc and try to link @icq */
+ spin_lock_irq(q->queue_lock);
+ spin_lock(&ioc->lock);
+
+ if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
+ hlist_add_head(&icq->ioc_node, &ioc->icq_list);
+ list_add(&icq->q_node, &q->icq_list);
+ if (et->ops.elevator_init_icq_fn)
+ et->ops.elevator_init_icq_fn(icq);
+ } else {
+ kmem_cache_free(et->icq_cache, icq);
+ icq = ioc_lookup_icq(ioc, q);
+ if (!icq)
+ printk(KERN_ERR "cfq: icq link failed!\n");
+ }
+
+ spin_unlock(&ioc->lock);
+ spin_unlock_irq(q->queue_lock);
+ radix_tree_preload_end();
+ return icq;
+}
+
+void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
+{
+ struct io_cq *icq;
+ struct hlist_node *n;
+
+ hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
+ icq->flags |= flags;
+}
+
+/**
+ * ioc_ioprio_changed - notify ioprio change
+ * @ioc: io_context of interest
+ * @ioprio: new ioprio
+ *
+ * @ioc's ioprio has changed to @ioprio. Set %ICQ_IOPRIO_CHANGED for all
+ * icq's. iosched is responsible for checking the bit and applying it on
+ * request issue path.
+ */
+void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->lock, flags);
+ ioc->ioprio = ioprio;
+ ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
+ spin_unlock_irqrestore(&ioc->lock, flags);
+}
+
+/**
+ * ioc_cgroup_changed - notify cgroup change
+ * @ioc: io_context of interest
+ *
+ * @ioc's cgroup has changed. Set %ICQ_CGROUP_CHANGED for all icq's.
+ * iosched is responsible for checking the bit and applying it on request
+ * issue path.
+ */
+void ioc_cgroup_changed(struct io_context *ioc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->lock, flags);
+ ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
+ spin_unlock_irqrestore(&ioc->lock, flags);
+}
+EXPORT_SYMBOL(ioc_cgroup_changed);
+
+/**
+ * icq_get_changed - fetch and clear icq changed mask
+ * @icq: icq of interest
+ *
+ * Fetch and clear ICQ_*_CHANGED bits from @icq. Grabs and releases
+ * @icq->ioc->lock.
+ */
+unsigned icq_get_changed(struct io_cq *icq)
+{
+ unsigned int changed = 0;
+ unsigned long flags;
+
+ if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
+ spin_lock_irqsave(&icq->ioc->lock, flags);
+ changed = icq->flags & ICQ_CHANGED_MASK;
+ icq->flags &= ~ICQ_CHANGED_MASK;
+ spin_unlock_irqrestore(&icq->ioc->lock, flags);
+ }
+ return changed;
+}
+EXPORT_SYMBOL(icq_get_changed);
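
An I/O scheduler consumes these bits on its request issue path, roughly as below (sketch; the example_* helpers are placeholders, cfq-iosched.c is the in-tree user):

unsigned int changed = icq_get_changed(icq);

if (changed & ICQ_IOPRIO_CHANGED)
	example_update_ioprio(icq);	/* re-read icq->ioc->ioprio */
if (changed & ICQ_CGROUP_CHANGED)
	example_update_cgroup(icq);	/* refresh the cached cgroup association */
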
+
+static int __init blk_ioc_init(void)
+{
+ iocontext_cachep = kmem_cache_create("blkdev_ioc",
+ sizeof(struct io_context), 0, SLAB_PANIC, NULL);
+ return 0;
+}
+subsys_initcall(blk_ioc_init);
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
new file mode 100644
index 00000000..58916afb
--- /dev/null
+++ b/block/blk-iopoll.c
@@ -0,0 +1,227 @@
+/*
+ * Functions related to interrupt-poll handling in the block layer. This
+ * is similar to NAPI for network devices.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+#include <linux/blk-iopoll.h>
+#include <linux/delay.h>
+
+#include "blk.h"
+
+int blk_iopoll_enabled = 1;
+EXPORT_SYMBOL(blk_iopoll_enabled);
+
+static unsigned int blk_iopoll_budget __read_mostly = 256;
+
+static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
+
+/**
+ * blk_iopoll_sched - Schedule a run of the iopoll handler
+ * @iop: The parent iopoll structure
+ *
+ * Description:
+ * Add this blk_iopoll structure to the pending poll list and trigger the
+ * raise of the blk iopoll softirq. The driver must already have gotten a
+ * successful return from blk_iopoll_sched_prep() before calling this.
+ **/
+void blk_iopoll_sched(struct blk_iopoll *iop)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ list_add_tail(&iop->list, &__get_cpu_var(blk_cpu_iopoll));
+ __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(blk_iopoll_sched);
+
+/**
+ * __blk_iopoll_complete - Mark this @iop as un-polled again
+ * @iop: The parent iopoll structure
+ *
+ * Description:
+ * See blk_iopoll_complete(). This function must be called with interrupts
+ * disabled.
+ **/
+void __blk_iopoll_complete(struct blk_iopoll *iop)
+{
+ list_del(&iop->list);
+ smp_mb__before_clear_bit();
+ clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
+}
+EXPORT_SYMBOL(__blk_iopoll_complete);
+
+/**
+ * blk_iopoll_complete - Mark this @iop as un-polled again
+ * @iop: The parent iopoll structure
+ *
+ * Description:
+ * If a driver consumes less than the assigned budget in its run of the
+ * iopoll handler, it'll end the polled mode by calling this function. The
+ * iopoll handler will not be invoked again before blk_iopoll_sched_prep()
+ * is called.
+ **/
+void blk_iopoll_complete(struct blk_iopoll *iopoll)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __blk_iopoll_complete(iopoll);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(blk_iopoll_complete);
+
+static void blk_iopoll_softirq(struct softirq_action *h)
+{
+ struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
+ int rearm = 0, budget = blk_iopoll_budget;
+ unsigned long start_time = jiffies;
+
+ local_irq_disable();
+
+ while (!list_empty(list)) {
+ struct blk_iopoll *iop;
+ int work, weight;
+
+ /*
+ * If softirq window is exhausted then punt.
+ */
+ if (budget <= 0 || time_after(jiffies, start_time)) {
+ rearm = 1;
+ break;
+ }
+
+ local_irq_enable();
+
+ /* Even though interrupts have been re-enabled, this
+ * access is safe because interrupts can only add new
+ * entries to the tail of this list, and only ->poll()
+ * calls can remove this head entry from the list.
+ */
+ iop = list_entry(list->next, struct blk_iopoll, list);
+
+ weight = iop->weight;
+ work = 0;
+ if (test_bit(IOPOLL_F_SCHED, &iop->state))
+ work = iop->poll(iop, weight);
+
+ budget -= work;
+
+ local_irq_disable();
+
+ /*
+		 * Drivers must not modify the iopoll state if they
+		 * consume their assigned weight (or more; some drivers can't
+		 * easily just stop processing, they have to complete an
+		 * entire mask of commands). In such cases this code
+		 * still "owns" the iopoll instance and therefore can
+		 * move the instance around on the list at will.
+ */
+ if (work >= weight) {
+ if (blk_iopoll_disable_pending(iop))
+ __blk_iopoll_complete(iop);
+ else
+ list_move_tail(&iop->list, list);
+ }
+ }
+
+ if (rearm)
+ __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+
+ local_irq_enable();
+}
+
+/**
+ * blk_iopoll_disable - Disable iopoll on this @iop
+ * @iop: The parent iopoll structure
+ *
+ * Description:
+ * Disable io polling and wait for any pending callbacks to have completed.
+ **/
+void blk_iopoll_disable(struct blk_iopoll *iop)
+{
+ set_bit(IOPOLL_F_DISABLE, &iop->state);
+ while (test_and_set_bit(IOPOLL_F_SCHED, &iop->state))
+ msleep(1);
+ clear_bit(IOPOLL_F_DISABLE, &iop->state);
+}
+EXPORT_SYMBOL(blk_iopoll_disable);
+
+/**
+ * blk_iopoll_enable - Enable iopoll on this @iop
+ * @iop: The parent iopoll structure
+ *
+ * Description:
+ *     Enable iopoll on this @iop. Note that this does not schedule a run
+ *     of the handler; it only marks the iopoll as active.
+ **/
+void blk_iopoll_enable(struct blk_iopoll *iop)
+{
+ BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state));
+ smp_mb__before_clear_bit();
+ clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
+}
+EXPORT_SYMBOL(blk_iopoll_enable);
+
+/**
+ * blk_iopoll_init - Initialize this @iop
+ * @iop: The parent iopoll structure
+ * @weight: The default weight (or command completion budget)
+ * @poll_fn: The handler to invoke
+ *
+ * Description:
+ * Initialize this blk_iopoll structure. Before being actively used, the
+ * driver must call blk_iopoll_enable().
+ **/
+void blk_iopoll_init(struct blk_iopoll *iop, int weight, blk_iopoll_fn *poll_fn)
+{
+ memset(iop, 0, sizeof(*iop));
+ INIT_LIST_HEAD(&iop->list);
+ iop->weight = weight;
+ iop->poll = poll_fn;
+ set_bit(IOPOLL_F_SCHED, &iop->state);
+}
+EXPORT_SYMBOL(blk_iopoll_init);
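
A minimal driver wiring might look like the sketch below; struct example_dev, the helper comments and the weight of 64 are assumptions, not from this tree.

#include <linux/blk-iopoll.h>
#include <linux/interrupt.h>

struct example_dev {
	struct blk_iopoll	iopoll;
	/* hardware state ... */
};

static int example_poll(struct blk_iopoll *iop, int budget)
{
	int done = 0;

	/* reap up to @budget completions from the hardware, counting them in done */

	if (done < budget)
		blk_iopoll_complete(iop);	/* under budget: leave polled mode */
	return done;
}

static irqreturn_t example_irq(int irq, void *data)
{
	struct example_dev *dev = data;

	/* mask further completion interrupts here, then hand off to iopoll */
	if (blk_iopoll_sched_prep(&dev->iopoll))
		blk_iopoll_sched(&dev->iopoll);
	return IRQ_HANDLED;
}

static void example_setup_iopoll(struct example_dev *dev)
{
	blk_iopoll_init(&dev->iopoll, 64, example_poll);
	blk_iopoll_enable(&dev->iopoll);
}
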
+
+static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ /*
+ * If a CPU goes away, splice its entries to the current CPU
+ * and trigger a run of the softirq
+ */
+ if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+ int cpu = (unsigned long) hcpu;
+
+ local_irq_disable();
+ list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
+ &__get_cpu_var(blk_cpu_iopoll));
+ __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+ local_irq_enable();
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
+ .notifier_call = blk_iopoll_cpu_notify,
+};
+
+static __init int blk_iopoll_setup(void)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));
+
+ open_softirq(BLOCK_IOPOLL_SOFTIRQ, blk_iopoll_softirq);
+ register_hotcpu_notifier(&blk_iopoll_cpu_notifier);
+ return 0;
+}
+subsys_initcall(blk_iopoll_setup);
diff --git a/block/blk-lib.c b/block/blk-lib.c
new file mode 100644
index 00000000..2b461b49
--- /dev/null
+++ b/block/blk-lib.c
@@ -0,0 +1,178 @@
+/*
+ * Functions related to generic block layer helpers
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+
+#include "blk.h"
+
+struct bio_batch {
+ atomic_t done;
+ unsigned long flags;
+ struct completion *wait;
+};
+
+static void bio_batch_end_io(struct bio *bio, int err)
+{
+ struct bio_batch *bb = bio->bi_private;
+
+ if (err && (err != -EOPNOTSUPP))
+ clear_bit(BIO_UPTODATE, &bb->flags);
+ if (atomic_dec_and_test(&bb->done))
+ complete(bb->wait);
+ bio_put(bio);
+}
+
+/**
+ * blkdev_issue_discard - queue a discard
+ * @bdev: blockdev to issue discard for
+ * @sector: start sector
+ * @nr_sects: number of sectors to discard
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ * @flags: BLKDEV_IFL_* flags to control behaviour
+ *
+ * Description:
+ * Issue a discard request for the sectors in question.
+ */
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct request_queue *q = bdev_get_queue(bdev);
+ int type = REQ_WRITE | REQ_DISCARD;
+ unsigned int max_discard_sectors;
+ struct bio_batch bb;
+ struct bio *bio;
+ int ret = 0;
+
+ if (!q)
+ return -ENXIO;
+
+ if (!blk_queue_discard(q))
+ return -EOPNOTSUPP;
+
+ /*
+ * Ensure that max_discard_sectors is of the proper
+ * granularity
+ */
+ max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+ if (unlikely(!max_discard_sectors)) {
+ /* Avoid infinite loop below. Being cautious never hurts. */
+ return -EOPNOTSUPP;
+ } else if (q->limits.discard_granularity) {
+ unsigned int disc_sects = q->limits.discard_granularity >> 9;
+
+ max_discard_sectors &= ~(disc_sects - 1);
+ }
+
+ if (flags & BLKDEV_DISCARD_SECURE) {
+ if (!blk_queue_secdiscard(q))
+ return -EOPNOTSUPP;
+ type |= REQ_SECURE;
+ }
+
+ atomic_set(&bb.done, 1);
+ bb.flags = 1 << BIO_UPTODATE;
+ bb.wait = &wait;
+
+ while (nr_sects) {
+ bio = bio_alloc(gfp_mask, 1);
+ if (!bio) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ bio->bi_sector = sector;
+ bio->bi_end_io = bio_batch_end_io;
+ bio->bi_bdev = bdev;
+ bio->bi_private = &bb;
+
+ if (nr_sects > max_discard_sectors) {
+ bio->bi_size = max_discard_sectors << 9;
+ nr_sects -= max_discard_sectors;
+ sector += max_discard_sectors;
+ } else {
+ bio->bi_size = nr_sects << 9;
+ nr_sects = 0;
+ }
+
+ atomic_inc(&bb.done);
+ submit_bio(type, bio);
+ }
+
+ /* Wait for bios in-flight */
+ if (!atomic_dec_and_test(&bb.done))
+ wait_for_completion(&wait);
+
+ if (!test_bit(BIO_UPTODATE, &bb.flags))
+ ret = -EIO;
+
+ return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_discard);
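
For instance, a filesystem discarding a freed extent might call it like this (bdev, start and nr_sectors are assumed to be in scope; values illustrative):

int err = blkdev_issue_discard(bdev, start, nr_sectors, GFP_NOFS, 0);

if (err && err != -EOPNOTSUPP)
	pr_warn("discard of %llu sectors at %llu failed: %d\n",
		(unsigned long long)nr_sectors,
		(unsigned long long)start, err);
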
+
+/**
+ * blkdev_issue_zeroout - generate a number of zero-filled write bios
+ * @bdev: blockdev to issue
+ * @sector: start sector
+ * @nr_sects: number of sectors to write
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ *
+ * Description:
+ *  Generate and issue a number of bios with zero-filled pages.
+ */
+
+int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask)
+{
+ int ret;
+ struct bio *bio;
+ struct bio_batch bb;
+ unsigned int sz;
+ DECLARE_COMPLETION_ONSTACK(wait);
+
+ atomic_set(&bb.done, 1);
+ bb.flags = 1 << BIO_UPTODATE;
+ bb.wait = &wait;
+
+ ret = 0;
+ while (nr_sects != 0) {
+ bio = bio_alloc(gfp_mask,
+ min(nr_sects, (sector_t)BIO_MAX_PAGES));
+ if (!bio) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ bio->bi_sector = sector;
+ bio->bi_bdev = bdev;
+ bio->bi_end_io = bio_batch_end_io;
+ bio->bi_private = &bb;
+
+ while (nr_sects != 0) {
+			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
+ ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
+ nr_sects -= ret >> 9;
+ sector += ret >> 9;
+ if (ret < (sz << 9))
+ break;
+ }
+ ret = 0;
+ atomic_inc(&bb.done);
+ submit_bio(WRITE, bio);
+ }
+
+ /* Wait for bios in-flight */
+ if (!atomic_dec_and_test(&bb.done))
+ wait_for_completion(&wait);
+
+ if (!test_bit(BIO_UPTODATE, &bb.flags))
+		/* One of the bios in the batch completed with an error. */
+ ret = -EIO;
+
+ return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_zeroout);
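
Usage mirrors blkdev_issue_discard(), e.g. (variables assumed to be in scope):

int err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_NOFS);
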
diff --git a/block/blk-map.c b/block/blk-map.c
new file mode 100644
index 00000000..623e1cd4
--- /dev/null
+++ b/block/blk-map.c
@@ -0,0 +1,331 @@
+/*
+ * Functions related to mapping data to requests
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <scsi/sg.h> /* for struct sg_iovec */
+
+#include "blk.h"
+
+int blk_rq_append_bio(struct request_queue *q, struct request *rq,
+ struct bio *bio)
+{
+ if (!rq->bio)
+ blk_rq_bio_prep(q, rq, bio);
+ else if (!ll_back_merge_fn(q, rq, bio))
+ return -EINVAL;
+ else {
+ rq->biotail->bi_next = bio;
+ rq->biotail = bio;
+
+ rq->__data_len += bio->bi_size;
+ }
+ return 0;
+}
+
+static int __blk_rq_unmap_user(struct bio *bio)
+{
+ int ret = 0;
+
+ if (bio) {
+ if (bio_flagged(bio, BIO_USER_MAPPED))
+ bio_unmap_user(bio);
+ else
+ ret = bio_uncopy_user(bio);
+ }
+
+ return ret;
+}
+
+static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
+ struct rq_map_data *map_data, void __user *ubuf,
+ unsigned int len, gfp_t gfp_mask)
+{
+ unsigned long uaddr;
+ struct bio *bio, *orig_bio;
+ int reading, ret;
+
+ reading = rq_data_dir(rq) == READ;
+
+ /*
+ * if alignment requirement is satisfied, map in user pages for
+ * direct dma. else, set up kernel bounce buffers
+ */
+ uaddr = (unsigned long) ubuf;
+ if (blk_rq_aligned(q, uaddr, len) && !map_data)
+ bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
+ else
+ bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
+
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ if (map_data && map_data->null_mapped)
+ bio->bi_flags |= (1 << BIO_NULL_MAPPED);
+
+ orig_bio = bio;
+ blk_queue_bounce(q, &bio);
+
+ /*
+ * We link the bounce buffer in and could have to traverse it
+ * later so we have to get a ref to prevent it from being freed
+ */
+ bio_get(bio);
+
+ ret = blk_rq_append_bio(q, rq, bio);
+ if (!ret)
+ return bio->bi_size;
+
+	/* if it was bounced we must call the end io function */
+ bio_endio(bio, 0);
+ __blk_rq_unmap_user(orig_bio);
+ bio_put(bio);
+ return ret;
+}
+
+/**
+ * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
+ * @q: request queue where request should be inserted
+ * @rq: request structure to fill
+ * @map_data: pointer to the rq_map_data holding pages (if necessary)
+ * @ubuf: the user buffer
+ * @len: length of user data
+ * @gfp_mask: memory allocation flags
+ *
+ * Description:
+ * Data will be mapped directly for zero copy I/O, if possible. Otherwise
+ * a kernel bounce buffer is used.
+ *
+ * A matching blk_rq_unmap_user() must be issued at the end of I/O, while
+ * still in process context.
+ *
+ * Note: The mapped bio may need to be bounced through blk_queue_bounce()
+ * before being submitted to the device, as pages mapped may be out of
+ *    reach. It is the caller's responsibility to make sure this happens. The
+ * original bio must be passed back in to blk_rq_unmap_user() for proper
+ * unmapping.
+ */
+int blk_rq_map_user(struct request_queue *q, struct request *rq,
+ struct rq_map_data *map_data, void __user *ubuf,
+ unsigned long len, gfp_t gfp_mask)
+{
+ unsigned long bytes_read = 0;
+ struct bio *bio = NULL;
+ int ret;
+
+ if (len > (queue_max_hw_sectors(q) << 9))
+ return -EINVAL;
+ if (!len)
+ return -EINVAL;
+
+ if (!ubuf && (!map_data || !map_data->null_mapped))
+ return -EINVAL;
+
+ while (bytes_read != len) {
+ unsigned long map_len, end, start;
+
+ map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
+ end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
+ >> PAGE_SHIFT;
+ start = (unsigned long)ubuf >> PAGE_SHIFT;
+
+ /*
+ * A bad offset could cause us to require BIO_MAX_PAGES + 1
+ * pages. If this happens we just lower the requested
+ * mapping len by a page so that we can fit
+ */
+ if (end - start > BIO_MAX_PAGES)
+ map_len -= PAGE_SIZE;
+
+ ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
+ gfp_mask);
+ if (ret < 0)
+ goto unmap_rq;
+ if (!bio)
+ bio = rq->bio;
+ bytes_read += ret;
+ ubuf += ret;
+
+ if (map_data)
+ map_data->offset += ret;
+ }
+
+ if (!bio_flagged(bio, BIO_USER_MAPPED))
+ rq->cmd_flags |= REQ_COPY_USER;
+
+ rq->buffer = NULL;
+ return 0;
+unmap_rq:
+ blk_rq_unmap_user(bio);
+ rq->bio = NULL;
+ return ret;
+}
+EXPORT_SYMBOL(blk_rq_map_user);
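
A typical SG_IO-style caller (sketch loosely modelled on the in-tree ioctl paths; error handling simplified) pairs this with blk_execute_rq() and blk_rq_unmap_user():

static int example_send_pc_command(struct request_queue *q,
				   struct gendisk *bd_disk,
				   void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, WRITE, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* fill in rq->cmd[], rq->timeout, sense buffer, etc. here */

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (!ret) {
		bio = rq->bio;		/* save: completion may change rq->bio */
		blk_execute_rq(q, bd_disk, rq, 0);
		ret = blk_rq_unmap_user(bio);
	}
	blk_put_request(rq);
	return ret;
}
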
+
+/**
+ * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
+ * @q: request queue where request should be inserted
+ * @rq: request to map data to
+ * @map_data: pointer to the rq_map_data holding pages (if necessary)
+ * @iov: pointer to the iovec
+ * @iov_count: number of elements in the iovec
+ * @len: I/O byte count
+ * @gfp_mask: memory allocation flags
+ *
+ * Description:
+ * Data will be mapped directly for zero copy I/O, if possible. Otherwise
+ * a kernel bounce buffer is used.
+ *
+ * A matching blk_rq_unmap_user() must be issued at the end of I/O, while
+ * still in process context.
+ *
+ * Note: The mapped bio may need to be bounced through blk_queue_bounce()
+ * before being submitted to the device, as pages mapped may be out of
+ *    reach. It is the caller's responsibility to make sure this happens. The
+ * original bio must be passed back in to blk_rq_unmap_user() for proper
+ * unmapping.
+ */
+int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
+ struct rq_map_data *map_data, struct sg_iovec *iov,
+ int iov_count, unsigned int len, gfp_t gfp_mask)
+{
+ struct bio *bio;
+ int i, read = rq_data_dir(rq) == READ;
+ int unaligned = 0;
+
+ if (!iov || iov_count <= 0)
+ return -EINVAL;
+
+ for (i = 0; i < iov_count; i++) {
+ unsigned long uaddr = (unsigned long)iov[i].iov_base;
+
+ if (!iov[i].iov_len)
+ return -EINVAL;
+
+ /*
+ * Keep going so we check length of all segments
+ */
+ if (uaddr & queue_dma_alignment(q))
+ unaligned = 1;
+ }
+
+ if (unaligned || (q->dma_pad_mask & len) || map_data)
+ bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
+ gfp_mask);
+ else
+ bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);
+
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ if (bio->bi_size != len) {
+ /*
+ * Grab an extra reference to this bio, as bio_unmap_user()
+ * expects to be able to drop it twice as it happens on the
+ * normal IO completion path
+ */
+ bio_get(bio);
+ bio_endio(bio, 0);
+ __blk_rq_unmap_user(bio);
+ return -EINVAL;
+ }
+
+ if (!bio_flagged(bio, BIO_USER_MAPPED))
+ rq->cmd_flags |= REQ_COPY_USER;
+
+ blk_queue_bounce(q, &bio);
+ bio_get(bio);
+ blk_rq_bio_prep(q, rq, bio);
+ rq->buffer = NULL;
+ return 0;
+}
+EXPORT_SYMBOL(blk_rq_map_user_iov);
+
+/**
+ * blk_rq_unmap_user - unmap a request with user data
+ * @bio: start of bio list
+ *
+ * Description:
+ * Unmap a rq previously mapped by blk_rq_map_user(). The caller must
+ * supply the original rq->bio from the blk_rq_map_user() return, since
+ * the I/O completion may have changed rq->bio.
+ */
+int blk_rq_unmap_user(struct bio *bio)
+{
+ struct bio *mapped_bio;
+ int ret = 0, ret2;
+
+ while (bio) {
+ mapped_bio = bio;
+ if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
+ mapped_bio = bio->bi_private;
+
+ ret2 = __blk_rq_unmap_user(mapped_bio);
+ if (ret2 && !ret)
+ ret = ret2;
+
+ mapped_bio = bio;
+ bio = bio->bi_next;
+ bio_put(mapped_bio);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(blk_rq_unmap_user);
+
+/**
+ * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
+ * @q: request queue where request should be inserted
+ * @rq: request to fill
+ * @kbuf: the kernel buffer
+ * @len: length of user data
+ * @gfp_mask: memory allocation flags
+ *
+ * Description:
+ * Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer is used. Can be called multiple times to append multiple
+ * buffers.
+ */
+int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
+ unsigned int len, gfp_t gfp_mask)
+{
+ int reading = rq_data_dir(rq) == READ;
+ unsigned long addr = (unsigned long) kbuf;
+ int do_copy = 0;
+ struct bio *bio;
+ int ret;
+
+ if (len > (queue_max_hw_sectors(q) << 9))
+ return -EINVAL;
+ if (!len || !kbuf)
+ return -EINVAL;
+
+ do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
+ if (do_copy)
+ bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
+ else
+ bio = bio_map_kern(q, kbuf, len, gfp_mask);
+
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ if (!reading)
+ bio->bi_rw |= REQ_WRITE;
+
+ if (do_copy)
+ rq->cmd_flags |= REQ_COPY_USER;
+
+ ret = blk_rq_append_bio(q, rq, bio);
+ if (unlikely(ret)) {
+ /* request is too big */
+ bio_put(bio);
+ return ret;
+ }
+
+ blk_queue_bounce(q, &rq->bio);
+ rq->buffer = NULL;
+ return 0;
+}
+EXPORT_SYMBOL(blk_rq_map_kern);
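
For a kernel-resident buffer the pattern is the same, minus the unmap step, e.g. (rq and q as in the blk_rq_map_user() sketch above; the error label is hypothetical):

ret = blk_rq_map_kern(q, rq, buffer, bufflen, GFP_NOIO);
if (ret)
	goto out_put_request;
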
diff --git a/block/blk-merge.c b/block/blk-merge.c
new file mode 100644
index 00000000..160035f5
--- /dev/null
+++ b/block/blk-merge.c
@@ -0,0 +1,510 @@
+/*
+ * Functions related to segment and merge handling
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+
+#include "blk.h"
+
+static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
+ struct bio *bio)
+{
+ struct bio_vec *bv, *bvprv = NULL;
+ int cluster, i, high, highprv = 1;
+ unsigned int seg_size, nr_phys_segs;
+ struct bio *fbio, *bbio;
+
+ if (!bio)
+ return 0;
+
+ fbio = bio;
+ cluster = blk_queue_cluster(q);
+ seg_size = 0;
+ nr_phys_segs = 0;
+ for_each_bio(bio) {
+ bio_for_each_segment(bv, bio, i) {
+ /*
+ * the trick here is making sure that a high page is
+ * never considered part of another segment, since that
+ * might change with the bounce page.
+ */
+ high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
+ if (high || highprv)
+ goto new_segment;
+ if (cluster) {
+ if (seg_size + bv->bv_len
+ > queue_max_segment_size(q))
+ goto new_segment;
+ if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+ goto new_segment;
+ if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+ goto new_segment;
+
+ seg_size += bv->bv_len;
+ bvprv = bv;
+ continue;
+ }
+new_segment:
+ if (nr_phys_segs == 1 && seg_size >
+ fbio->bi_seg_front_size)
+ fbio->bi_seg_front_size = seg_size;
+
+ nr_phys_segs++;
+ bvprv = bv;
+ seg_size = bv->bv_len;
+ highprv = high;
+ }
+ bbio = bio;
+ }
+
+ if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
+ fbio->bi_seg_front_size = seg_size;
+ if (seg_size > bbio->bi_seg_back_size)
+ bbio->bi_seg_back_size = seg_size;
+
+ return nr_phys_segs;
+}
+
+void blk_recalc_rq_segments(struct request *rq)
+{
+ rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
+}
+
+void blk_recount_segments(struct request_queue *q, struct bio *bio)
+{
+ struct bio *nxt = bio->bi_next;
+
+ bio->bi_next = NULL;
+ bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
+ bio->bi_next = nxt;
+ bio->bi_flags |= (1 << BIO_SEG_VALID);
+}
+EXPORT_SYMBOL(blk_recount_segments);
+
+static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
+ struct bio *nxt)
+{
+ if (!blk_queue_cluster(q))
+ return 0;
+
+ if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
+ queue_max_segment_size(q))
+ return 0;
+
+ if (!bio_has_data(bio))
+ return 1;
+
+ if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+ return 0;
+
+ /*
+ * bio and nxt are contiguous in memory; check if the queue allows
+ * these two to be merged into one
+ */
+ if (BIO_SEG_BOUNDARY(q, bio, nxt))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * map a request to scatterlist, return number of sg entries setup. Caller
+ * must make sure sg can hold rq->nr_phys_segments entries
+ */
+int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ struct scatterlist *sglist)
+{
+ struct bio_vec *bvec, *bvprv;
+ struct req_iterator iter;
+ struct scatterlist *sg;
+ int nsegs, cluster;
+
+ nsegs = 0;
+ cluster = blk_queue_cluster(q);
+
+ /*
+ * for each bio in rq
+ */
+ bvprv = NULL;
+ sg = NULL;
+ rq_for_each_segment(bvec, rq, iter) {
+ int nbytes = bvec->bv_len;
+
+ if (bvprv && cluster) {
+ if (sg->length + nbytes > queue_max_segment_size(q))
+ goto new_segment;
+
+ if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
+ goto new_segment;
+ if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
+ goto new_segment;
+
+ sg->length += nbytes;
+ } else {
+new_segment:
+ if (!sg)
+ sg = sglist;
+ else {
+ /*
+ * If the driver previously mapped a shorter
+ * list, we could see a termination bit
+ * prematurely unless it fully inits the sg
+ * table on each mapping. We KNOW that there
+ * must be more entries here or the driver
+ * would be buggy, so force clear the
+ * termination bit to avoid doing a full
+ * sg_init_table() in drivers for each command.
+ */
+ sg->page_link &= ~0x02;
+ sg = sg_next(sg);
+ }
+
+ sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
+ nsegs++;
+ }
+ bvprv = bvec;
+ } /* segments in rq */
+
+
+ if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
+ (blk_rq_bytes(rq) & q->dma_pad_mask)) {
+ unsigned int pad_len =
+ (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
+
+ sg->length += pad_len;
+ rq->extra_len += pad_len;
+ }
+
+ if (q->dma_drain_size && q->dma_drain_needed(rq)) {
+ if (rq->cmd_flags & REQ_WRITE)
+ memset(q->dma_drain_buffer, 0, q->dma_drain_size);
+
+ sg->page_link &= ~0x02;
+ sg = sg_next(sg);
+ sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
+ q->dma_drain_size,
+ ((unsigned long)q->dma_drain_buffer) &
+ (PAGE_SIZE - 1));
+ nsegs++;
+ rq->extra_len += q->dma_drain_size;
+ }
+
+ if (sg)
+ sg_mark_end(sg);
+
+ return nsegs;
+}
+EXPORT_SYMBOL(blk_rq_map_sg);
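
A request-based driver calls this before setting up DMA, e.g. (sketch; EXAMPLE_MAX_SEGS and pdev are placeholders):

#define EXAMPLE_MAX_SEGS	128	/* must be >= queue_max_segments(q) */

struct scatterlist sgl[EXAMPLE_MAX_SEGS];
int nents;

sg_init_table(sgl, EXAMPLE_MAX_SEGS);
nents = blk_rq_map_sg(q, rq, sgl);
nents = dma_map_sg(&pdev->dev, sgl, nents,
		   rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
/* program the controller with the nents mapped segments */
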
+
+static inline int ll_new_hw_segment(struct request_queue *q,
+ struct request *req,
+ struct bio *bio)
+{
+ int nr_phys_segs = bio_phys_segments(q, bio);
+
+ if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
+ goto no_merge;
+
+ if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
+ goto no_merge;
+
+ /*
+ * This will form the start of a new hw segment. Bump both
+ * counters.
+ */
+ req->nr_phys_segments += nr_phys_segs;
+ return 1;
+
+no_merge:
+ req->cmd_flags |= REQ_NOMERGE;
+ if (req == q->last_merge)
+ q->last_merge = NULL;
+ return 0;
+}
+
+int ll_back_merge_fn(struct request_queue *q, struct request *req,
+ struct bio *bio)
+{
+ unsigned short max_sectors;
+
+ if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
+ max_sectors = queue_max_hw_sectors(q);
+ else
+ max_sectors = queue_max_sectors(q);
+
+ if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
+ req->cmd_flags |= REQ_NOMERGE;
+ if (req == q->last_merge)
+ q->last_merge = NULL;
+ return 0;
+ }
+ if (!bio_flagged(req->biotail, BIO_SEG_VALID))
+ blk_recount_segments(q, req->biotail);
+ if (!bio_flagged(bio, BIO_SEG_VALID))
+ blk_recount_segments(q, bio);
+
+ return ll_new_hw_segment(q, req, bio);
+}
+
+int ll_front_merge_fn(struct request_queue *q, struct request *req,
+ struct bio *bio)
+{
+ unsigned short max_sectors;
+
+ if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
+ max_sectors = queue_max_hw_sectors(q);
+ else
+ max_sectors = queue_max_sectors(q);
+
+
+ if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
+ req->cmd_flags |= REQ_NOMERGE;
+ if (req == q->last_merge)
+ q->last_merge = NULL;
+ return 0;
+ }
+ if (!bio_flagged(bio, BIO_SEG_VALID))
+ blk_recount_segments(q, bio);
+ if (!bio_flagged(req->bio, BIO_SEG_VALID))
+ blk_recount_segments(q, req->bio);
+
+ return ll_new_hw_segment(q, req, bio);
+}
+
+static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
+ struct request *next)
+{
+ int total_phys_segments;
+ unsigned int seg_size =
+ req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
+
+ /*
+	 * First check whether either of the requests is a re-queued
+	 * request. They can't be merged if they are.
+ */
+ if (req->special || next->special)
+ return 0;
+
+ /*
+ * Will it become too large?
+ */
+ if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
+ return 0;
+
+ total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
+ if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
+ if (req->nr_phys_segments == 1)
+ req->bio->bi_seg_front_size = seg_size;
+ if (next->nr_phys_segments == 1)
+ next->biotail->bi_seg_back_size = seg_size;
+ total_phys_segments--;
+ }
+
+ if (total_phys_segments > queue_max_segments(q))
+ return 0;
+
+ if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
+ return 0;
+
+ /* Merge is OK... */
+ req->nr_phys_segments = total_phys_segments;
+ return 1;
+}
+
+/**
+ * blk_rq_set_mixed_merge - mark a request as mixed merge
+ * @rq: request to mark as mixed merge
+ *
+ * Description:
+ * @rq is about to be mixed merged. Make sure the attributes
+ * which can be mixed are set in each bio and mark @rq as mixed
+ * merged.
+ */
+void blk_rq_set_mixed_merge(struct request *rq)
+{
+ unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+ struct bio *bio;
+
+ if (rq->cmd_flags & REQ_MIXED_MERGE)
+ return;
+
+ /*
+ * @rq will no longer represent mixable attributes for all the
+ * contained bios. It will just track those of the first one.
+	 * Distribute the attributes to each bio.
+ */
+ for (bio = rq->bio; bio; bio = bio->bi_next) {
+ WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
+ (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
+ bio->bi_rw |= ff;
+ }
+ rq->cmd_flags |= REQ_MIXED_MERGE;
+}
+
+static void blk_account_io_merge(struct request *req)
+{
+ if (blk_do_io_stat(req)) {
+ struct hd_struct *part;
+ int cpu;
+
+ cpu = part_stat_lock();
+ part = req->part;
+
+ part_round_stats(cpu, part);
+ part_dec_in_flight(part, rq_data_dir(req));
+
+ hd_struct_put(part);
+ part_stat_unlock();
+ }
+}
+
+/*
+ * Has to be called with the request spinlock acquired
+ */
+static int attempt_merge(struct request_queue *q, struct request *req,
+ struct request *next)
+{
+ if (!rq_mergeable(req) || !rq_mergeable(next))
+ return 0;
+
+ /*
+ * Don't merge file system requests and discard requests
+ */
+ if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
+ return 0;
+
+ /*
+ * Don't merge discard requests and secure discard requests
+ */
+ if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
+ return 0;
+
+ /*
+ * not contiguous
+ */
+ if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
+ return 0;
+
+ if (rq_data_dir(req) != rq_data_dir(next)
+ || req->rq_disk != next->rq_disk
+ || next->special)
+ return 0;
+
+ /*
+ * If we are allowed to merge, then append bio list
+ * from next to rq and release next. merge_requests_fn
+ * will have updated segment counts, update sector
+ * counts here.
+ */
+ if (!ll_merge_requests_fn(q, req, next))
+ return 0;
+
+ /*
+ * If failfast settings disagree or any of the two is already
+ * a mixed merge, mark both as mixed before proceeding. This
+ * makes sure that all involved bios have mixable attributes
+ * set properly.
+ */
+ if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
+ (req->cmd_flags & REQ_FAILFAST_MASK) !=
+ (next->cmd_flags & REQ_FAILFAST_MASK)) {
+ blk_rq_set_mixed_merge(req);
+ blk_rq_set_mixed_merge(next);
+ }
+
+ /*
+ * At this point we have either done a back merge
+ * or front merge. We need the smaller start_time of
+ * the merged requests to be the current request
+ * for accounting purposes.
+ */
+ if (time_after(req->start_time, next->start_time))
+ req->start_time = next->start_time;
+
+ req->biotail->bi_next = next->bio;
+ req->biotail = next->biotail;
+
+ req->__data_len += blk_rq_bytes(next);
+
+ elv_merge_requests(q, req, next);
+
+ /*
+ * 'next' is going away, so update stats accordingly
+ */
+ blk_account_io_merge(next);
+
+ req->ioprio = ioprio_best(req->ioprio, next->ioprio);
+ if (blk_rq_cpu_valid(next))
+ req->cpu = next->cpu;
+
+	/* ownership of bio passed from next to req */
+ next->bio = NULL;
+ __blk_put_request(q, next);
+ return 1;
+}
+
+int attempt_back_merge(struct request_queue *q, struct request *rq)
+{
+ struct request *next = elv_latter_request(q, rq);
+
+ if (next)
+ return attempt_merge(q, rq, next);
+
+ return 0;
+}
+
+int attempt_front_merge(struct request_queue *q, struct request *rq)
+{
+ struct request *prev = elv_former_request(q, rq);
+
+ if (prev)
+ return attempt_merge(q, prev, rq);
+
+ return 0;
+}
+
+int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+ struct request *next)
+{
+ return attempt_merge(q, rq, next);
+}
+
+bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
+{
+ if (!rq_mergeable(rq))
+ return false;
+
+ /* don't merge file system requests and discard requests */
+ if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
+ return false;
+
+ /* don't merge discard requests and secure discard requests */
+ if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
+ return false;
+
+ /* different data direction or already started, don't merge */
+ if (bio_data_dir(bio) != rq_data_dir(rq))
+ return false;
+
+ /* must be same device and not a special request */
+ if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+ return false;
+
+ /* only merge integrity protected bio into ditto rq */
+ if (bio_integrity(bio) != blk_integrity_rq(rq))
+ return false;
+
+ return true;
+}
+
+int blk_try_merge(struct request *rq, struct bio *bio)
+{
+ if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+ return ELEVATOR_BACK_MERGE;
+ else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+ return ELEVATOR_FRONT_MERGE;
+ return ELEVATOR_NO_MERGE;
+}
diff --git a/block/blk-settings.c b/block/blk-settings.c
new file mode 100644
index 00000000..d3234fc4
--- /dev/null
+++ b/block/blk-settings.c
@@ -0,0 +1,822 @@
+/*
+ * Functions related to setting various queue properties from drivers
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
+#include <linux/gcd.h>
+#include <linux/lcm.h>
+#include <linux/jiffies.h>
+#include <linux/gfp.h>
+
+#include "blk.h"
+
+unsigned long blk_max_low_pfn;
+EXPORT_SYMBOL(blk_max_low_pfn);
+
+unsigned long blk_max_pfn;
+
+/**
+ * blk_queue_prep_rq - set a prepare_request function for queue
+ * @q: queue
+ * @pfn: prepare_request function
+ *
+ * It's possible for a queue to register a prepare_request callback which
+ * is invoked before the request is handed to the request_fn. The goal of
+ * the function is to prepare a request for I/O, it can be used to build a
+ * cdb from the request data for instance.
+ *
+ */
+void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
+{
+ q->prep_rq_fn = pfn;
+}
+EXPORT_SYMBOL(blk_queue_prep_rq);
+
+/**
+ * blk_queue_unprep_rq - set an unprepare_request function for queue
+ * @q: queue
+ * @ufn: unprepare_request function
+ *
+ * It's possible for a queue to register an unprepare_request callback
+ * which is invoked before the request is finally completed. The goal
+ * of the function is to deallocate any data that was allocated in the
+ * prepare_request callback.
+ *
+ */
+void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
+{
+ q->unprep_rq_fn = ufn;
+}
+EXPORT_SYMBOL(blk_queue_unprep_rq);
+
+/**
+ * blk_queue_merge_bvec - set a merge_bvec function for queue
+ * @q: queue
+ * @mbfn: merge_bvec_fn
+ *
+ * Usually queues have static limitations on the max sectors or segments that
+ * we can put in a request. Stacking drivers may have some settings that
+ * are dynamic, and thus we have to query the queue whether it is ok to
+ * add a new bio_vec to a bio at a given offset or not. If the block device
+ * has such limitations, it needs to register a merge_bvec_fn to control
+ * the size of bios sent to it. Note that a block device *must* allow a
+ * single page to be added to an empty bio. The block device driver may want
+ * to use the bio_split() function to deal with these bios. By default
+ * no merge_bvec_fn is defined for a queue, and only the fixed limits are
+ * honored.
+ */
+void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
+{
+ q->merge_bvec_fn = mbfn;
+}
+EXPORT_SYMBOL(blk_queue_merge_bvec);
+
+void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
+{
+ q->softirq_done_fn = fn;
+}
+EXPORT_SYMBOL(blk_queue_softirq_done);
+
+void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
+{
+ q->rq_timeout = timeout;
+}
+EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
+
+void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
+{
+ q->rq_timed_out_fn = fn;
+}
+EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
+
+void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
+{
+ q->lld_busy_fn = fn;
+}
+EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
+
+/**
+ * blk_set_default_limits - reset limits to default values
+ * @lim: the queue_limits structure to reset
+ *
+ * Description:
+ * Returns a queue_limit struct to its default state.
+ */
+void blk_set_default_limits(struct queue_limits *lim)
+{
+ lim->max_segments = BLK_MAX_SEGMENTS;
+ lim->max_integrity_segments = 0;
+ lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
+ lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
+ lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+ lim->max_discard_sectors = 0;
+ lim->discard_granularity = 0;
+ lim->discard_alignment = 0;
+ lim->discard_misaligned = 0;
+ lim->discard_zeroes_data = 0;
+ lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
+ lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
+ lim->alignment_offset = 0;
+ lim->io_opt = 0;
+ lim->misaligned = 0;
+ lim->cluster = 1;
+}
+EXPORT_SYMBOL(blk_set_default_limits);
+
+/**
+ * blk_set_stacking_limits - set default limits for stacking devices
+ * @lim: the queue_limits structure to reset
+ *
+ * Description:
+ * Returns a queue_limit struct to its default state. Should be used
+ * by stacking drivers like DM that have no internal limits.
+ */
+void blk_set_stacking_limits(struct queue_limits *lim)
+{
+ blk_set_default_limits(lim);
+
+ /* Inherit limits from component devices */
+ lim->discard_zeroes_data = 1;
+ lim->max_segments = USHRT_MAX;
+ lim->max_hw_sectors = UINT_MAX;
+
+ lim->max_sectors = BLK_DEF_MAX_SECTORS;
+}
+EXPORT_SYMBOL(blk_set_stacking_limits);
+
+/**
+ * blk_queue_make_request - define an alternate make_request function for a device
+ * @q: the request queue for the device to be affected
+ * @mfn: the alternate make_request function
+ *
+ * Description:
+ * The normal way for &struct bios to be passed to a device
+ * driver is for them to be collected into requests on a request
+ * queue, and then to allow the device driver to select requests
+ * off that queue when it is ready. This works well for many block
+ * devices. However some block devices (typically virtual devices
+ * such as md or lvm) do not benefit from the processing on the
+ * request queue, and are served best by having the requests passed
+ * directly to them. This can be achieved by providing a function
+ * to blk_queue_make_request().
+ *
+ * Caveat:
+ * The driver that does this *must* be able to deal appropriately
+ * with buffers in "highmemory". This can be accomplished by either calling
+ * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
+ * blk_queue_bounce() to create a buffer in normal memory.
+ **/
+void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
+{
+ /*
+ * set defaults
+ */
+ q->nr_requests = BLKDEV_MAX_RQ;
+
+ q->make_request_fn = mfn;
+ blk_queue_dma_alignment(q, 511);
+ blk_queue_congestion_threshold(q);
+ q->nr_batching = BLK_BATCH_REQ;
+
+ blk_set_default_limits(&q->limits);
+
+ /*
+ * by default assume old behaviour and bounce for any highmem page
+ */
+ blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
+}
+EXPORT_SYMBOL(blk_queue_make_request);
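
A bio-based (virtual) driver would use it at setup time roughly as follows (sketch; assumes the void-returning make_request_fn prototype used elsewhere in this tree):

static void example_make_request(struct request_queue *q, struct bio *bio)
{
	/* handle the bio directly instead of queueing requests, then complete it */
	bio_endio(bio, 0);
}

static struct request_queue *example_init_queue(void)
{
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

	if (q)
		blk_queue_make_request(q, example_make_request);
	return q;
}
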
+
+/**
+ * blk_queue_bounce_limit - set bounce buffer limit for queue
+ * @q: the request queue for the device
+ * @dma_mask: the maximum address the device can handle
+ *
+ * Description:
+ * Different hardware can have different requirements as to what pages
+ * it can do I/O directly to. A low level driver can call
+ * blk_queue_bounce_limit to have lower memory pages allocated as bounce
+ * buffers for doing I/O to pages residing above @dma_mask.
+ **/
+void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
+{
+ unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
+ int dma = 0;
+
+ q->bounce_gfp = GFP_NOIO;
+#if BITS_PER_LONG == 64
+ /*
+ * Assume anything <= 4GB can be handled by IOMMU. Actually
+ * some IOMMUs can handle everything, but I don't know of a
+ * way to test this here.
+ */
+ if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+ dma = 1;
+ q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
+#else
+ if (b_pfn < blk_max_low_pfn)
+ dma = 1;
+ q->limits.bounce_pfn = b_pfn;
+#endif
+ if (dma) {
+ init_emergency_isa_pool();
+ q->bounce_gfp = GFP_NOIO | GFP_DMA;
+ q->limits.bounce_pfn = b_pfn;
+ }
+}
+EXPORT_SYMBOL(blk_queue_bounce_limit);
+
+/**
+ * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
+ * @limits: the queue limits
+ * @max_hw_sectors: max hardware sectors in the usual 512b unit
+ *
+ * Description:
+ * Enables a low level driver to set a hard upper limit,
+ * max_hw_sectors, on the size of requests. max_hw_sectors is set by
+ * the device driver based upon the combined capabilities of I/O
+ * controller and storage device.
+ *
+ * max_sectors is a soft limit imposed by the block layer for
+ * filesystem type requests. This value can be overridden on a
+ * per-device basis in /sys/block/<device>/queue/max_sectors_kb.
+ * The soft limit can not exceed max_hw_sectors.
+ **/
+void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
+{
+ if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
+ max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+ printk(KERN_INFO "%s: set to minimum %d\n",
+ __func__, max_hw_sectors);
+ }
+
+ limits->max_hw_sectors = max_hw_sectors;
+ limits->max_sectors = min_t(unsigned int, max_hw_sectors,
+ BLK_DEF_MAX_SECTORS);
+}
+EXPORT_SYMBOL(blk_limits_max_hw_sectors);
+
+/**
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
+ * @q: the request queue for the device
+ * @max_hw_sectors: max hardware sectors in the usual 512b unit
+ *
+ * Description:
+ * See description for blk_limits_max_hw_sectors().
+ **/
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+{
+ blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
+}
+EXPORT_SYMBOL(blk_queue_max_hw_sectors);
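+
+/*
+ * Editor's illustration (hypothetical driver, not from the original
+ * file): a controller limited to 128KB per command would advertise
+ * that as 256 sectors of 512 bytes each.
+ */
+#if 0 /* usage sketch only */
+static void exampledrv_set_transfer_limit(struct request_queue *q)
+{
+ /* 128KB / 512 bytes = 256 sectors per request at most */
+ blk_queue_max_hw_sectors(q, 256);
+}
+#endif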
+
+/**
+ * blk_queue_max_discard_sectors - set max sectors for a single discard
+ * @q: the request queue for the device
+ * @max_discard_sectors: maximum number of sectors to discard
+ **/
+void blk_queue_max_discard_sectors(struct request_queue *q,
+ unsigned int max_discard_sectors)
+{
+ q->limits.max_discard_sectors = max_discard_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_discard_sectors);
+
+/**
+ * blk_queue_max_segments - set max hw segments for a request for this queue
+ * @q: the request queue for the device
+ * @max_segments: max number of segments
+ *
+ * Description:
+ * Enables a low level driver to set an upper limit on the number of
+ * hw data segments in a request.
+ **/
+void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
+{
+ if (!max_segments) {
+ max_segments = 1;
+ printk(KERN_INFO "%s: set to minimum %d\n",
+ __func__, max_segments);
+ }
+
+ q->limits.max_segments = max_segments;
+}
+EXPORT_SYMBOL(blk_queue_max_segments);
+
+/**
+ * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
+ * @q: the request queue for the device
+ * @max_size: max size of segment in bytes
+ *
+ * Description:
+ * Enables a low level driver to set an upper limit on the size of a
+ * coalesced segment
+ **/
+void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
+{
+ if (max_size < PAGE_CACHE_SIZE) {
+ max_size = PAGE_CACHE_SIZE;
+ printk(KERN_INFO "%s: set to minimum %d\n",
+ __func__, max_size);
+ }
+
+ q->limits.max_segment_size = max_size;
+}
+EXPORT_SYMBOL(blk_queue_max_segment_size);
+
+/**
+ * blk_queue_logical_block_size - set logical block size for the queue
+ * @q: the request queue for the device
+ * @size: the logical block size, in bytes
+ *
+ * Description:
+ * This should be set to the lowest possible block size that the
+ * storage device can address. The default of 512 covers most
+ * hardware.
+ **/
+void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
+{
+ q->limits.logical_block_size = size;
+
+ if (q->limits.physical_block_size < size)
+ q->limits.physical_block_size = size;
+
+ if (q->limits.io_min < q->limits.physical_block_size)
+ q->limits.io_min = q->limits.physical_block_size;
+}
+EXPORT_SYMBOL(blk_queue_logical_block_size);
+
+/**
+ * blk_queue_physical_block_size - set physical block size for the queue
+ * @q: the request queue for the device
+ * @size: the physical block size, in bytes
+ *
+ * Description:
+ * This should be set to the lowest possible sector size that the
+ * hardware can operate on without reverting to read-modify-write
+ * operations.
+ */
+void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
+{
+ q->limits.physical_block_size = size;
+
+ if (q->limits.physical_block_size < q->limits.logical_block_size)
+ q->limits.physical_block_size = q->limits.logical_block_size;
+
+ if (q->limits.io_min < q->limits.physical_block_size)
+ q->limits.io_min = q->limits.physical_block_size;
+}
+EXPORT_SYMBOL(blk_queue_physical_block_size);
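+
+/*
+ * Editor's illustration (hypothetical 512-emulation drive, not from the
+ * original file): a disk that accepts 512-byte logical I/O but uses
+ * 4096-byte internal sectors would be described like this. Setting the
+ * physical block size also raises io_min accordingly.
+ */
+#if 0 /* usage sketch only */
+static void exampledrv_set_block_sizes(struct request_queue *q)
+{
+ blk_queue_logical_block_size(q, 512);
+ blk_queue_physical_block_size(q, 4096);
+}
+#endif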
+
+/**
+ * blk_queue_alignment_offset - set physical block alignment offset
+ * @q: the request queue for the device
+ * @offset: alignment offset in bytes
+ *
+ * Description:
+ * Some devices are naturally misaligned to compensate for things like
+ * the legacy DOS partition table 63-sector offset. Low-level drivers
+ * should call this function for devices whose first sector is not
+ * naturally aligned.
+ */
+void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
+{
+ q->limits.alignment_offset =
+ offset & (q->limits.physical_block_size - 1);
+ q->limits.misaligned = 0;
+}
+EXPORT_SYMBOL(blk_queue_alignment_offset);
+
+/**
+ * blk_limits_io_min - set minimum request size for a device
+ * @limits: the queue limits
+ * @min: smallest I/O size in bytes
+ *
+ * Description:
+ * Some devices have an internal block size bigger than the reported
+ * hardware sector size. This function can be used to signal the
+ * smallest I/O the device can perform without incurring a performance
+ * penalty.
+ */
+void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
+{
+ limits->io_min = min;
+
+ if (limits->io_min < limits->logical_block_size)
+ limits->io_min = limits->logical_block_size;
+
+ if (limits->io_min < limits->physical_block_size)
+ limits->io_min = limits->physical_block_size;
+}
+EXPORT_SYMBOL(blk_limits_io_min);
+
+/**
+ * blk_queue_io_min - set minimum request size for the queue
+ * @q: the request queue for the device
+ * @min: smallest I/O size in bytes
+ *
+ * Description:
+ * Storage devices may report a granularity or preferred minimum I/O
+ * size which is the smallest request the device can perform without
+ * incurring a performance penalty. For disk drives this is often the
+ * physical block size. For RAID arrays it is often the stripe chunk
+ * size. A properly aligned multiple of minimum_io_size is the
+ * preferred request size for workloads where a high number of I/O
+ * operations is desired.
+ */
+void blk_queue_io_min(struct request_queue *q, unsigned int min)
+{
+ blk_limits_io_min(&q->limits, min);
+}
+EXPORT_SYMBOL(blk_queue_io_min);
+
+/**
+ * blk_limits_io_opt - set optimal request size for a device
+ * @limits: the queue limits
+ * @opt: optimal I/O size in bytes
+ *
+ * Description:
+ * Storage devices may report an optimal I/O size, which is the
+ * device's preferred unit for sustained I/O. This is rarely reported
+ * for disk drives. For RAID arrays it is usually the stripe width or
+ * the internal track size. A properly aligned multiple of
+ * optimal_io_size is the preferred request size for workloads where
+ * sustained throughput is desired.
+ */
+void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
+{
+ limits->io_opt = opt;
+}
+EXPORT_SYMBOL(blk_limits_io_opt);
+
+/**
+ * blk_queue_io_opt - set optimal request size for the queue
+ * @q: the request queue for the device
+ * @opt: optimal request size in bytes
+ *
+ * Description:
+ * Storage devices may report an optimal I/O size, which is the
+ * device's preferred unit for sustained I/O. This is rarely reported
+ * for disk drives. For RAID arrays it is usually the stripe width or
+ * the internal track size. A properly aligned multiple of
+ * optimal_io_size is the preferred request size for workloads where
+ * sustained throughput is desired.
+ */
+void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
+{
+ blk_limits_io_opt(&q->limits, opt);
+}
+EXPORT_SYMBOL(blk_queue_io_opt);
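+
+/*
+ * Editor's illustration (hypothetical RAID driver, not from the
+ * original file): a striped array with a 64KB chunk across four data
+ * disks could report its preferred I/O granularity like this.
+ */
+#if 0 /* usage sketch only */
+static void exampleraid_set_io_hints(struct request_queue *q)
+{
+ blk_queue_io_min(q, 64 * 1024); /* one stripe chunk */
+ blk_queue_io_opt(q, 4 * 64 * 1024); /* one full stripe */
+}
+#endif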
+
+/**
+ * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
+ * @t: the stacking driver (top)
+ * @b: the underlying device (bottom)
+ **/
+void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
+{
+ blk_stack_limits(&t->limits, &b->limits, 0);
+}
+EXPORT_SYMBOL(blk_queue_stack_limits);
+
+/**
+ * blk_stack_limits - adjust queue_limits for stacked devices
+ * @t: the stacking driver limits (top device)
+ * @b: the underlying queue limits (bottom, component device)
+ * @start: first data sector within component device
+ *
+ * Description:
+ * This function is used by stacking drivers like MD and DM to ensure
+ * that all component devices have compatible block sizes and
+ * alignments. The stacking driver must provide a queue_limits
+ * struct (top) and then iteratively call the stacking function for
+ * all component (bottom) devices. The stacking function will
+ * attempt to combine the values and ensure proper alignment.
+ *
+ * Returns 0 if the top and bottom queue_limits are compatible. The
+ * top device's block sizes and alignment offsets may be adjusted to
+ * ensure alignment with the bottom device. If no compatible sizes
+ * and alignments exist, -1 is returned and the resulting top
+ * queue_limits will have the misaligned flag set to indicate that
+ * the alignment_offset is undefined.
+ */
+int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ sector_t start)
+{
+ unsigned int top, bottom, alignment, ret = 0;
+
+ t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+ t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+ t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
+
+ t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
+ b->seg_boundary_mask);
+
+ t->max_segments = min_not_zero(t->max_segments, b->max_segments);
+ t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
+ b->max_integrity_segments);
+
+ t->max_segment_size = min_not_zero(t->max_segment_size,
+ b->max_segment_size);
+
+ t->misaligned |= b->misaligned;
+
+ alignment = queue_limit_alignment_offset(b, start);
+
+ /* Bottom device has different alignment. Check that it is
+ * compatible with the current top alignment.
+ */
+ if (t->alignment_offset != alignment) {
+
+ top = max(t->physical_block_size, t->io_min)
+ + t->alignment_offset;
+ bottom = max(b->physical_block_size, b->io_min) + alignment;
+
+ /* Verify that top and bottom intervals line up */
+ if (max(top, bottom) & (min(top, bottom) - 1)) {
+ t->misaligned = 1;
+ ret = -1;
+ }
+ }
+
+ t->logical_block_size = max(t->logical_block_size,
+ b->logical_block_size);
+
+ t->physical_block_size = max(t->physical_block_size,
+ b->physical_block_size);
+
+ t->io_min = max(t->io_min, b->io_min);
+ t->io_opt = lcm(t->io_opt, b->io_opt);
+
+ t->cluster &= b->cluster;
+ t->discard_zeroes_data &= b->discard_zeroes_data;
+
+ /* Physical block size a multiple of the logical block size? */
+ if (t->physical_block_size & (t->logical_block_size - 1)) {
+ t->physical_block_size = t->logical_block_size;
+ t->misaligned = 1;
+ ret = -1;
+ }
+
+ /* Minimum I/O a multiple of the physical block size? */
+ if (t->io_min & (t->physical_block_size - 1)) {
+ t->io_min = t->physical_block_size;
+ t->misaligned = 1;
+ ret = -1;
+ }
+
+ /* Optimal I/O a multiple of the physical block size? */
+ if (t->io_opt & (t->physical_block_size - 1)) {
+ t->io_opt = 0;
+ t->misaligned = 1;
+ ret = -1;
+ }
+
+ /* Find lowest common alignment_offset */
+ t->alignment_offset = lcm(t->alignment_offset, alignment)
+ & (max(t->physical_block_size, t->io_min) - 1);
+
+ /* Verify that new alignment_offset is on a logical block boundary */
+ if (t->alignment_offset & (t->logical_block_size - 1)) {
+ t->misaligned = 1;
+ ret = -1;
+ }
+
+ /* Discard alignment and granularity */
+ if (b->discard_granularity) {
+ alignment = queue_limit_discard_alignment(b, start);
+
+ if (t->discard_granularity != 0 &&
+ t->discard_alignment != alignment) {
+ top = t->discard_granularity + t->discard_alignment;
+ bottom = b->discard_granularity + alignment;
+
+ /* Verify that top and bottom intervals line up */
+ if (max(top, bottom) & (min(top, bottom) - 1))
+ t->discard_misaligned = 1;
+ }
+
+ t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
+ b->max_discard_sectors);
+ t->discard_granularity = max(t->discard_granularity,
+ b->discard_granularity);
+ t->discard_alignment = lcm(t->discard_alignment, alignment) &
+ (t->discard_granularity - 1);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(blk_stack_limits);
+
+/**
+ * bdev_stack_limits - adjust queue limits for stacked drivers
+ * @t: the stacking driver limits (top device)
+ * @bdev: the component block_device (bottom)
+ * @start: first data sector within component device
+ *
+ * Description:
+ * Merges queue limits for a top device and a block_device. Returns
+ * 0 if alignment didn't change. Returns -1 if adding the bottom
+ * device caused misalignment.
+ */
+int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
+ sector_t start)
+{
+ struct request_queue *bq = bdev_get_queue(bdev);
+
+ start += get_start_sect(bdev);
+
+ return blk_stack_limits(t, &bq->limits, start);
+}
+EXPORT_SYMBOL(bdev_stack_limits);
+
+/**
+ * disk_stack_limits - adjust queue limits for stacked drivers
+ * @disk: MD/DM gendisk (top)
+ * @bdev: the underlying block device (bottom)
+ * @offset: offset to beginning of data within component device
+ *
+ * Description:
+ * Merges the limits for a top level gendisk and a bottom level
+ * block_device.
+ */
+void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+ sector_t offset)
+{
+ struct request_queue *t = disk->queue;
+
+ if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
+ char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
+
+ disk_name(disk, 0, top);
+ bdevname(bdev, bottom);
+
+ printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
+ top, bottom);
+ }
+}
+EXPORT_SYMBOL(disk_stack_limits);
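+
+/*
+ * Editor's illustration (hypothetical stacking driver, not from the
+ * original file): an MD/DM-style driver folds each component device's
+ * limits into its own gendisk, typically once per component.
+ */
+#if 0 /* usage sketch only */
+static void examplestack_compute_limits(struct gendisk *top,
+ struct block_device **parts, int nr_parts)
+{
+ int i;
+
+ /* warns if any component leaves the top device misaligned */
+ for (i = 0; i < nr_parts; i++)
+ disk_stack_limits(top, parts[i], 0);
+}
+#endif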
+
+/**
+ * blk_queue_dma_pad - set pad mask
+ * @q: the request queue for the device
+ * @mask: pad mask
+ *
+ * Set dma pad mask.
+ *
+ * Appending a pad buffer to a request modifies the last entry of a
+ * scatter list so that it includes the pad buffer.
+ **/
+void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
+{
+ q->dma_pad_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_dma_pad);
+
+/**
+ * blk_queue_update_dma_pad - update pad mask
+ * @q: the request queue for the device
+ * @mask: pad mask
+ *
+ * Update dma pad mask.
+ *
+ * Appending a pad buffer to a request modifies the last entry of a
+ * scatter list so that it includes the pad buffer.
+ **/
+void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
+{
+ if (mask > q->dma_pad_mask)
+ q->dma_pad_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_update_dma_pad);
+
+/**
+ * blk_queue_dma_drain - Set up a drain buffer for excess dma.
+ * @q: the request queue for the device
+ * @dma_drain_needed: fn which returns non-zero if drain is necessary
+ * @buf: physically contiguous buffer
+ * @size: size of the buffer in bytes
+ *
+ * Some devices have excess DMA problems and can't simply discard (or
+ * zero fill) the unwanted piece of the transfer. They have to have a
+ * real area of memory to transfer it into. The use case for this is
+ * ATAPI devices in DMA mode. If the packet command causes a transfer
+ * bigger than the transfer size some HBAs will lock up if there
+ * aren't DMA elements to contain the excess transfer. What this API
+ * does is adjust the queue so that the buf is always appended
+ * silently to the scatterlist.
+ *
+ * Note: This routine adjusts max_hw_segments to make room for appending
+ * the drain buffer. If you call blk_queue_max_segments() after calling
+ * this routine, you must set the limit to one fewer than your device
+ * can support otherwise there won't be room for the drain buffer.
+ */
+int blk_queue_dma_drain(struct request_queue *q,
+ dma_drain_needed_fn *dma_drain_needed,
+ void *buf, unsigned int size)
+{
+ if (queue_max_segments(q) < 2)
+ return -EINVAL;
+ /* make room for appending the drain */
+ blk_queue_max_segments(q, queue_max_segments(q) - 1);
+ q->dma_drain_needed = dma_drain_needed;
+ q->dma_drain_buffer = buf;
+ q->dma_drain_size = size;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
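+
+/*
+ * Editor's illustration (hypothetical ATAPI-style driver, not from the
+ * original file): reserve a small drain buffer so over-length packet
+ * transfers have somewhere to land. example_needs_drain() is a made-up
+ * predicate; a real driver would key off its command type.
+ */
+#if 0 /* usage sketch only */
+static int example_needs_drain(struct request *rq)
+{
+ return rq->cmd_type == REQ_TYPE_BLOCK_PC; /* packet commands only */
+}
+
+static int exampledrv_setup_drain(struct request_queue *q)
+{
+ void *buf = kmalloc(512, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+ return blk_queue_dma_drain(q, example_needs_drain, buf, 512);
+}
+#endif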
+
+/**
+ * blk_queue_segment_boundary - set boundary rules for segment merging
+ * @q: the request queue for the device
+ * @mask: the memory boundary mask
+ **/
+void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
+{
+ if (mask < PAGE_CACHE_SIZE - 1) {
+ mask = PAGE_CACHE_SIZE - 1;
+ printk(KERN_INFO "%s: set to minimum %lx\n",
+ __func__, mask);
+ }
+
+ q->limits.seg_boundary_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_segment_boundary);
+
+/**
+ * blk_queue_dma_alignment - set dma length and memory alignment
+ * @q: the request queue for the device
+ * @mask: alignment mask
+ *
+ * Description:
+ *    Set required memory and length alignment for direct DMA transactions.
+ *    This is used when building direct I/O requests for the queue.
+ *
+ **/
+void blk_queue_dma_alignment(struct request_queue *q, int mask)
+{
+ q->dma_alignment = mask;
+}
+EXPORT_SYMBOL(blk_queue_dma_alignment);
+
+/**
+ * blk_queue_update_dma_alignment - update dma length and memory alignment
+ * @q: the request queue for the device
+ * @mask: alignment mask
+ *
+ * Description:
+ *    Update required memory and length alignment for direct DMA transactions.
+ * If the requested alignment is larger than the current alignment, then
+ * the current queue alignment is updated to the new value, otherwise it
+ * is left alone. The design of this is to allow multiple objects
+ * (driver, device, transport etc) to set their respective
+ * alignments without having them interfere.
+ *
+ **/
+void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
+{
+ BUG_ON(mask > PAGE_SIZE);
+
+ if (mask > q->dma_alignment)
+ q->dma_alignment = mask;
+}
+EXPORT_SYMBOL(blk_queue_update_dma_alignment);
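+
+/*
+ * Editor's illustration (hypothetical host/transport pair, not from the
+ * original file): each layer raises the alignment it needs without
+ * clobbering what the other layer asked for.
+ */
+#if 0 /* usage sketch only */
+static void example_host_init(struct request_queue *q)
+{
+ blk_queue_update_dma_alignment(q, 3); /* needs 4-byte alignment */
+}
+
+static void example_transport_init(struct request_queue *q)
+{
+ blk_queue_update_dma_alignment(q, 511); /* needs 512-byte alignment */
+}
+#endif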
+
+/**
+ * blk_queue_flush - configure queue's cache flush capability
+ * @q: the request queue for the device
+ * @flush: 0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
+ *
+ * Tell block layer cache flush capability of @q. If it supports
+ * flushing, REQ_FLUSH should be set. If it supports bypassing
+ * write cache for individual writes, REQ_FUA should be set.
+ */
+void blk_queue_flush(struct request_queue *q, unsigned int flush)
+{
+ WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
+
+ if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
+ flush &= ~REQ_FUA;
+
+ q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
+}
+EXPORT_SYMBOL_GPL(blk_queue_flush);
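+
+/*
+ * Editor's illustration (hypothetical driver, not from the original
+ * file): a device with a volatile write cache that also honours FUA
+ * writes advertises both capabilities; a write-through device would
+ * simply not call blk_queue_flush() at all.
+ */
+#if 0 /* usage sketch only */
+static void exampledrv_set_cache_flags(struct request_queue *q)
+{
+ blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+}
+#endif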
+
+void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
+{
+ q->flush_not_queueable = !queueable;
+}
+EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
+
+static int __init blk_settings_init(void)
+{
+ blk_max_low_pfn = max_low_pfn - 1;
+ blk_max_pfn = max_pfn - 1;
+ return 0;
+}
+subsys_initcall(blk_settings_init);
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
new file mode 100644
index 00000000..467c8de8
--- /dev/null
+++ b/block/blk-softirq.c
@@ -0,0 +1,186 @@
+/*
+ * Functions related to softirq rq completions
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+#include <linux/sched.h>
+
+#include "blk.h"
+
+static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+
+/*
+ * Softirq action handler - move entries to local list and loop over them
+ * while passing them to the queue registered handler.
+ */
+static void blk_done_softirq(struct softirq_action *h)
+{
+ struct list_head *cpu_list, local_list;
+
+ local_irq_disable();
+ cpu_list = &__get_cpu_var(blk_cpu_done);
+ list_replace_init(cpu_list, &local_list);
+ local_irq_enable();
+
+ while (!list_empty(&local_list)) {
+ struct request *rq;
+
+ rq = list_entry(local_list.next, struct request, csd.list);
+ list_del_init(&rq->csd.list);
+ rq->q->softirq_done_fn(rq);
+ }
+}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+static void trigger_softirq(void *data)
+{
+ struct request *rq = data;
+ unsigned long flags;
+ struct list_head *list;
+
+ local_irq_save(flags);
+ list = &__get_cpu_var(blk_cpu_done);
+ list_add_tail(&rq->csd.list, list);
+
+ if (list->next == &rq->csd.list)
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Setup and invoke a run of 'trigger_softirq' on the given cpu.
+ */
+static int raise_blk_irq(int cpu, struct request *rq)
+{
+ if (cpu_online(cpu)) {
+ struct call_single_data *data = &rq->csd;
+
+ data->func = trigger_softirq;
+ data->info = rq;
+ data->flags = 0;
+
+ __smp_call_function_single(cpu, data, 0);
+ return 0;
+ }
+
+ return 1;
+}
+#else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
+static int raise_blk_irq(int cpu, struct request *rq)
+{
+ return 1;
+}
+#endif
+
+static int __cpuinit blk_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ /*
+ * If a CPU goes away, splice its entries to the current CPU
+ * and trigger a run of the softirq
+ */
+ if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+ int cpu = (unsigned long) hcpu;
+
+ local_irq_disable();
+ list_splice_init(&per_cpu(blk_cpu_done, cpu),
+ &__get_cpu_var(blk_cpu_done));
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ local_irq_enable();
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata blk_cpu_notifier = {
+ .notifier_call = blk_cpu_notify,
+};
+
+void __blk_complete_request(struct request *req)
+{
+ int ccpu, cpu;
+ struct request_queue *q = req->q;
+ unsigned long flags;
+ bool shared = false;
+
+ BUG_ON(!q->softirq_done_fn);
+
+ local_irq_save(flags);
+ cpu = smp_processor_id();
+
+ /*
+ * Select completion CPU
+ */
+ if (req->cpu != -1) {
+ ccpu = req->cpu;
+ if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
+ shared = cpus_share_cache(cpu, ccpu);
+ } else
+ ccpu = cpu;
+
+ /*
+ * If the current CPU and the requested CPU share a cache, run the
+ * softirq on the current CPU. One might think this is just like
+ * QUEUE_FLAG_SAME_FORCE, but it is not: blk_complete_request() runs
+ * in interrupt context, and the I/O controller currently doesn't
+ * support multiple interrupts, so the current CPU is effectively
+ * unique. This avoids sending an IPI from the current CPU to the
+ * first CPU of a group.
+ */
+ if (ccpu == cpu || shared) {
+ struct list_head *list;
+do_local:
+ list = &__get_cpu_var(blk_cpu_done);
+ list_add_tail(&req->csd.list, list);
+
+ /*
+ * if the list only contains our just added request,
+ * signal a raise of the softirq. If there are already
+ * entries there, someone already raised the irq but it
+ * hasn't run yet.
+ */
+ if (list->next == &req->csd.list)
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ } else if (raise_blk_irq(ccpu, req))
+ goto do_local;
+
+ local_irq_restore(flags);
+}
+
+/**
+ * blk_complete_request - end I/O on a request
+ * @req: the request being processed
+ *
+ * Description:
+ * Ends all I/O on a request. It does not handle partial completions,
+ * unless the driver actually implements this in its completion callback
+ * through requeueing. The actual completion happens out-of-order,
+ * through a softirq handler. The user must have registered a completion
+ * callback through blk_queue_softirq_done().
+ **/
+void blk_complete_request(struct request *req)
+{
+ if (unlikely(blk_should_fake_timeout(req->q)))
+ return;
+ if (!blk_mark_rq_complete(req))
+ __blk_complete_request(req);
+}
+EXPORT_SYMBOL(blk_complete_request);
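+
+/*
+ * Editor's illustration (hypothetical driver, not from the original
+ * file): the hard-irq handler only marks the request complete and lets
+ * the block softirq finish it via the callback registered with
+ * blk_queue_softirq_done(). exampledrv_current_request() is a made-up
+ * helper standing in for however the driver tracks its active request.
+ */
+#if 0 /* usage sketch only */
+static void exampledrv_softirq_done(struct request *rq)
+{
+ blk_end_request_all(rq, 0); /* request finished without error */
+}
+
+static irqreturn_t exampledrv_irq(int irq, void *dev_id)
+{
+ struct request *rq = exampledrv_current_request(dev_id);
+
+ blk_complete_request(rq);
+ return IRQ_HANDLED;
+}
+#endif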
+
+static __init int blk_softirq_init(void)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+
+ open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
+ register_hotcpu_notifier(&blk_cpu_notifier);
+ return 0;
+}
+subsys_initcall(blk_softirq_init);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
new file mode 100644
index 00000000..cf150011
--- /dev/null
+++ b/block/blk-sysfs.c
@@ -0,0 +1,567 @@
+/*
+ * Functions related to sysfs handling
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/blktrace_api.h>
+
+#include "blk.h"
+
+struct queue_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct request_queue *, char *);
+ ssize_t (*store)(struct request_queue *, const char *, size_t);
+};
+
+static ssize_t
+queue_var_show(unsigned long var, char *page)
+{
+ return sprintf(page, "%lu\n", var);
+}
+
+static ssize_t
+queue_var_store(unsigned long *var, const char *page, size_t count)
+{
+ char *p = (char *) page;
+
+ *var = simple_strtoul(p, &p, 10);
+ return count;
+}
+
+static ssize_t queue_requests_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(q->nr_requests, (page));
+}
+
+static ssize_t
+queue_requests_store(struct request_queue *q, const char *page, size_t count)
+{
+ struct request_list *rl = &q->rq;
+ unsigned long nr;
+ int ret;
+
+ if (!q->request_fn)
+ return -EINVAL;
+
+ ret = queue_var_store(&nr, page, count);
+ if (nr < BLKDEV_MIN_RQ)
+ nr = BLKDEV_MIN_RQ;
+
+ spin_lock_irq(q->queue_lock);
+ q->nr_requests = nr;
+ blk_queue_congestion_threshold(q);
+
+ if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
+ blk_set_queue_congested(q, BLK_RW_SYNC);
+ else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
+ blk_clear_queue_congested(q, BLK_RW_SYNC);
+
+ if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
+ blk_set_queue_congested(q, BLK_RW_ASYNC);
+ else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
+ blk_clear_queue_congested(q, BLK_RW_ASYNC);
+
+ if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
+ blk_set_queue_full(q, BLK_RW_SYNC);
+ } else {
+ blk_clear_queue_full(q, BLK_RW_SYNC);
+ wake_up(&rl->wait[BLK_RW_SYNC]);
+ }
+
+ if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
+ blk_set_queue_full(q, BLK_RW_ASYNC);
+ } else {
+ blk_clear_queue_full(q, BLK_RW_ASYNC);
+ wake_up(&rl->wait[BLK_RW_ASYNC]);
+ }
+ spin_unlock_irq(q->queue_lock);
+ return ret;
+}
+
+static ssize_t queue_ra_show(struct request_queue *q, char *page)
+{
+ unsigned long ra_kb = q->backing_dev_info.ra_pages <<
+ (PAGE_CACHE_SHIFT - 10);
+
+ return queue_var_show(ra_kb, (page));
+}
+
+static ssize_t
+queue_ra_store(struct request_queue *q, const char *page, size_t count)
+{
+ unsigned long ra_kb;
+ ssize_t ret = queue_var_store(&ra_kb, page, count);
+
+ q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+
+ return ret;
+}
+
+static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
+{
+ int max_sectors_kb = queue_max_sectors(q) >> 1;
+
+ return queue_var_show(max_sectors_kb, (page));
+}
+
+static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(queue_max_segments(q), (page));
+}
+
+static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(q->limits.max_integrity_segments, (page));
+}
+
+static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
+{
+ if (blk_queue_cluster(q))
+ return queue_var_show(queue_max_segment_size(q), (page));
+
+ return queue_var_show(PAGE_CACHE_SIZE, (page));
+}
+
+static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(queue_logical_block_size(q), page);
+}
+
+static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(queue_physical_block_size(q), page);
+}
+
+static ssize_t queue_io_min_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(queue_io_min(q), page);
+}
+
+static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(queue_io_opt(q), page);
+}
+
+static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(q->limits.discard_granularity, page);
+}
+
+static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
+{
+ return sprintf(page, "%llu\n",
+ (unsigned long long)q->limits.max_discard_sectors << 9);
+}
+
+static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(queue_discard_zeroes_data(q), page);
+}
+
+static ssize_t
+queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
+{
+ unsigned long max_sectors_kb,
+ max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
+ page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
+ ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
+
+ if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
+ return -EINVAL;
+
+ spin_lock_irq(q->queue_lock);
+ q->limits.max_sectors = max_sectors_kb << 1;
+ spin_unlock_irq(q->queue_lock);
+
+ return ret;
+}
+
+static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
+{
+ int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
+
+ return queue_var_show(max_hw_sectors_kb, (page));
+}
+
+#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
+static ssize_t \
+queue_show_##name(struct request_queue *q, char *page) \
+{ \
+ int bit; \
+ bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
+ return queue_var_show(neg ? !bit : bit, page); \
+} \
+static ssize_t \
+queue_store_##name(struct request_queue *q, const char *page, size_t count) \
+{ \
+ unsigned long val; \
+ ssize_t ret; \
+ ret = queue_var_store(&val, page, count); \
+ if (neg) \
+ val = !val; \
+ \
+ spin_lock_irq(q->queue_lock); \
+ if (val) \
+ queue_flag_set(QUEUE_FLAG_##flag, q); \
+ else \
+ queue_flag_clear(QUEUE_FLAG_##flag, q); \
+ spin_unlock_irq(q->queue_lock); \
+ return ret; \
+}
+
+QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
+QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
+QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
+#undef QUEUE_SYSFS_BIT_FNS
+
+static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
+{
+ return queue_var_show((blk_queue_nomerges(q) << 1) |
+ blk_queue_noxmerges(q), page);
+}
+
+static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ unsigned long nm;
+ ssize_t ret = queue_var_store(&nm, page, count);
+
+ spin_lock_irq(q->queue_lock);
+ queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
+ queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
+ if (nm == 2)
+ queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+ else if (nm)
+ queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+ spin_unlock_irq(q->queue_lock);
+
+ return ret;
+}
+
+static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
+{
+ bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
+ bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);
+
+ return queue_var_show(set << force, page);
+}
+
+static ssize_t
+queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
+{
+ ssize_t ret = -EINVAL;
+#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+ unsigned long val;
+
+ ret = queue_var_store(&val, page, count);
+ spin_lock_irq(q->queue_lock);
+ if (val == 2) {
+ queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+ queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
+ } else if (val == 1) {
+ queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+ queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+ } else if (val == 0) {
+ queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
+ queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+ }
+ spin_unlock_irq(q->queue_lock);
+#endif
+ return ret;
+}
+
+static struct queue_sysfs_entry queue_requests_entry = {
+ .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_requests_show,
+ .store = queue_requests_store,
+};
+
+static struct queue_sysfs_entry queue_ra_entry = {
+ .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_ra_show,
+ .store = queue_ra_store,
+};
+
+static struct queue_sysfs_entry queue_max_sectors_entry = {
+ .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_max_sectors_show,
+ .store = queue_max_sectors_store,
+};
+
+static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
+ .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
+ .show = queue_max_hw_sectors_show,
+};
+
+static struct queue_sysfs_entry queue_max_segments_entry = {
+ .attr = {.name = "max_segments", .mode = S_IRUGO },
+ .show = queue_max_segments_show,
+};
+
+static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
+ .attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
+ .show = queue_max_integrity_segments_show,
+};
+
+static struct queue_sysfs_entry queue_max_segment_size_entry = {
+ .attr = {.name = "max_segment_size", .mode = S_IRUGO },
+ .show = queue_max_segment_size_show,
+};
+
+static struct queue_sysfs_entry queue_iosched_entry = {
+ .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
+ .show = elv_iosched_show,
+ .store = elv_iosched_store,
+};
+
+static struct queue_sysfs_entry queue_hw_sector_size_entry = {
+ .attr = {.name = "hw_sector_size", .mode = S_IRUGO },
+ .show = queue_logical_block_size_show,
+};
+
+static struct queue_sysfs_entry queue_logical_block_size_entry = {
+ .attr = {.name = "logical_block_size", .mode = S_IRUGO },
+ .show = queue_logical_block_size_show,
+};
+
+static struct queue_sysfs_entry queue_physical_block_size_entry = {
+ .attr = {.name = "physical_block_size", .mode = S_IRUGO },
+ .show = queue_physical_block_size_show,
+};
+
+static struct queue_sysfs_entry queue_io_min_entry = {
+ .attr = {.name = "minimum_io_size", .mode = S_IRUGO },
+ .show = queue_io_min_show,
+};
+
+static struct queue_sysfs_entry queue_io_opt_entry = {
+ .attr = {.name = "optimal_io_size", .mode = S_IRUGO },
+ .show = queue_io_opt_show,
+};
+
+static struct queue_sysfs_entry queue_discard_granularity_entry = {
+ .attr = {.name = "discard_granularity", .mode = S_IRUGO },
+ .show = queue_discard_granularity_show,
+};
+
+static struct queue_sysfs_entry queue_discard_max_entry = {
+ .attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
+ .show = queue_discard_max_show,
+};
+
+static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
+ .attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
+ .show = queue_discard_zeroes_data_show,
+};
+
+static struct queue_sysfs_entry queue_nonrot_entry = {
+ .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_show_nonrot,
+ .store = queue_store_nonrot,
+};
+
+static struct queue_sysfs_entry queue_nomerges_entry = {
+ .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_nomerges_show,
+ .store = queue_nomerges_store,
+};
+
+static struct queue_sysfs_entry queue_rq_affinity_entry = {
+ .attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_rq_affinity_show,
+ .store = queue_rq_affinity_store,
+};
+
+static struct queue_sysfs_entry queue_iostats_entry = {
+ .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_show_iostats,
+ .store = queue_store_iostats,
+};
+
+static struct queue_sysfs_entry queue_random_entry = {
+ .attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_show_random,
+ .store = queue_store_random,
+};
+
+static struct attribute *default_attrs[] = {
+ &queue_requests_entry.attr,
+ &queue_ra_entry.attr,
+ &queue_max_hw_sectors_entry.attr,
+ &queue_max_sectors_entry.attr,
+ &queue_max_segments_entry.attr,
+ &queue_max_integrity_segments_entry.attr,
+ &queue_max_segment_size_entry.attr,
+ &queue_iosched_entry.attr,
+ &queue_hw_sector_size_entry.attr,
+ &queue_logical_block_size_entry.attr,
+ &queue_physical_block_size_entry.attr,
+ &queue_io_min_entry.attr,
+ &queue_io_opt_entry.attr,
+ &queue_discard_granularity_entry.attr,
+ &queue_discard_max_entry.attr,
+ &queue_discard_zeroes_data_entry.attr,
+ &queue_nonrot_entry.attr,
+ &queue_nomerges_entry.attr,
+ &queue_rq_affinity_entry.attr,
+ &queue_iostats_entry.attr,
+ &queue_random_entry.attr,
+ NULL,
+};
+
+#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
+
+static ssize_t
+queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+ struct queue_sysfs_entry *entry = to_queue(attr);
+ struct request_queue *q =
+ container_of(kobj, struct request_queue, kobj);
+ ssize_t res;
+
+ if (!entry->show)
+ return -EIO;
+ mutex_lock(&q->sysfs_lock);
+ if (blk_queue_dead(q)) {
+ mutex_unlock(&q->sysfs_lock);
+ return -ENOENT;
+ }
+ res = entry->show(q, page);
+ mutex_unlock(&q->sysfs_lock);
+ return res;
+}
+
+static ssize_t
+queue_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *page, size_t length)
+{
+ struct queue_sysfs_entry *entry = to_queue(attr);
+ struct request_queue *q;
+ ssize_t res;
+
+ if (!entry->store)
+ return -EIO;
+
+ q = container_of(kobj, struct request_queue, kobj);
+ mutex_lock(&q->sysfs_lock);
+ if (blk_queue_dead(q)) {
+ mutex_unlock(&q->sysfs_lock);
+ return -ENOENT;
+ }
+ res = entry->store(q, page, length);
+ mutex_unlock(&q->sysfs_lock);
+ return res;
+}
+
+/**
+ * blk_release_queue: - release a &struct request_queue when it is no longer needed
+ * @kobj: the kobj belonging to the request queue to be released
+ *
+ * Description:
+ * blk_release_queue is the pair to blk_init_queue() or
+ * blk_queue_make_request(). It should be called when a request queue is
+ * being released; typically when a block device is being de-registered.
+ * Currently, its primary task is to free all the &struct request
+ * structures that were allocated to the queue and the queue itself.
+ *
+ * Caveat:
+ * Hopefully the low level driver will have finished any
+ * outstanding requests first...
+ **/
+static void blk_release_queue(struct kobject *kobj)
+{
+ struct request_queue *q =
+ container_of(kobj, struct request_queue, kobj);
+ struct request_list *rl = &q->rq;
+
+ blk_sync_queue(q);
+
+ if (q->elevator) {
+ spin_lock_irq(q->queue_lock);
+ ioc_clear_queue(q);
+ spin_unlock_irq(q->queue_lock);
+ elevator_exit(q->elevator);
+ }
+
+ blk_throtl_exit(q);
+
+ if (rl->rq_pool)
+ mempool_destroy(rl->rq_pool);
+
+ if (q->queue_tags)
+ __blk_queue_free_tags(q);
+
+ blk_throtl_release(q);
+ blk_trace_shutdown(q);
+
+ bdi_destroy(&q->backing_dev_info);
+
+ ida_simple_remove(&blk_queue_ida, q->id);
+ kmem_cache_free(blk_requestq_cachep, q);
+}
+
+static const struct sysfs_ops queue_sysfs_ops = {
+ .show = queue_attr_show,
+ .store = queue_attr_store,
+};
+
+struct kobj_type blk_queue_ktype = {
+ .sysfs_ops = &queue_sysfs_ops,
+ .default_attrs = default_attrs,
+ .release = blk_release_queue,
+};
+
+int blk_register_queue(struct gendisk *disk)
+{
+ int ret;
+ struct device *dev = disk_to_dev(disk);
+ struct request_queue *q = disk->queue;
+
+ if (WARN_ON(!q))
+ return -ENXIO;
+
+ ret = blk_trace_init_sysfs(dev);
+ if (ret)
+ return ret;
+
+ ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
+ if (ret < 0) {
+ blk_trace_remove_sysfs(dev);
+ return ret;
+ }
+
+ kobject_uevent(&q->kobj, KOBJ_ADD);
+
+ if (!q->request_fn)
+ return 0;
+
+ ret = elv_register_queue(q);
+ if (ret) {
+ kobject_uevent(&q->kobj, KOBJ_REMOVE);
+ kobject_del(&q->kobj);
+ blk_trace_remove_sysfs(dev);
+ kobject_put(&dev->kobj);
+ return ret;
+ }
+
+ return 0;
+}
+
+void blk_unregister_queue(struct gendisk *disk)
+{
+ struct request_queue *q = disk->queue;
+
+ if (WARN_ON(!q))
+ return;
+
+ if (q->request_fn)
+ elv_unregister_queue(q);
+
+ kobject_uevent(&q->kobj, KOBJ_REMOVE);
+ kobject_del(&q->kobj);
+ blk_trace_remove_sysfs(disk_to_dev(disk));
+ kobject_put(&disk_to_dev(disk)->kobj);
+}
diff --git a/block/blk-tag.c b/block/blk-tag.c
new file mode 100644
index 00000000..4af6f5cc
--- /dev/null
+++ b/block/blk-tag.c
@@ -0,0 +1,399 @@
+/*
+ * Functions related to tagged command queuing
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+
+#include "blk.h"
+
+/**
+ * blk_queue_find_tag - find a request by its tag and queue
+ * @q: The request queue for the device
+ * @tag: The tag of the request
+ *
+ * Notes:
+ * Should be used when a device returns a tag and you want to match
+ * it with a request.
+ *
+ * No locks need be held.
+ **/
+struct request *blk_queue_find_tag(struct request_queue *q, int tag)
+{
+ return blk_map_queue_find_tag(q->queue_tags, tag);
+}
+EXPORT_SYMBOL(blk_queue_find_tag);
+
+/**
+ * __blk_free_tags - release a given set of tag maintenance info
+ * @bqt: the tag map to free
+ *
+ * Tries to free the specified @bqt. Returns true if it was
+ * actually freed and false if there are still references using it
+ */
+static int __blk_free_tags(struct blk_queue_tag *bqt)
+{
+ int retval;
+
+ retval = atomic_dec_and_test(&bqt->refcnt);
+ if (retval) {
+ BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
+ bqt->max_depth);
+
+ kfree(bqt->tag_index);
+ bqt->tag_index = NULL;
+
+ kfree(bqt->tag_map);
+ bqt->tag_map = NULL;
+
+ kfree(bqt);
+ }
+
+ return retval;
+}
+
+/**
+ * __blk_queue_free_tags - release tag maintenance info
+ * @q: the request queue for the device
+ *
+ * Notes:
+ * blk_cleanup_queue() will take care of calling this function, if tagging
+ * has been used. So there's no need to call this directly.
+ **/
+void __blk_queue_free_tags(struct request_queue *q)
+{
+ struct blk_queue_tag *bqt = q->queue_tags;
+
+ if (!bqt)
+ return;
+
+ __blk_free_tags(bqt);
+
+ q->queue_tags = NULL;
+ queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
+}
+
+/**
+ * blk_free_tags - release a given set of tag maintenance info
+ * @bqt: the tag map to free
+ *
+ * For an externally managed @bqt, this frees the map. Callers of this
+ * function must guarantee to have released all the queues that
+ * might have been using this tag map.
+ */
+void blk_free_tags(struct blk_queue_tag *bqt)
+{
+ if (unlikely(!__blk_free_tags(bqt)))
+ BUG();
+}
+EXPORT_SYMBOL(blk_free_tags);
+
+/**
+ * blk_queue_free_tags - release tag maintenance info
+ * @q: the request queue for the device
+ *
+ * Notes:
+ * This is used to disable tagged queuing on a device while leaving
+ * the queue functional.
+ **/
+void blk_queue_free_tags(struct request_queue *q)
+{
+ queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
+}
+EXPORT_SYMBOL(blk_queue_free_tags);
+
+static int
+init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
+{
+ struct request **tag_index;
+ unsigned long *tag_map;
+ int nr_ulongs;
+
+ if (q && depth > q->nr_requests * 2) {
+ depth = q->nr_requests * 2;
+ printk(KERN_ERR "%s: adjusted depth to %d\n",
+ __func__, depth);
+ }
+
+ tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
+ if (!tag_index)
+ goto fail;
+
+ nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
+ tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
+ if (!tag_map)
+ goto fail;
+
+ tags->real_max_depth = depth;
+ tags->max_depth = depth;
+ tags->tag_index = tag_index;
+ tags->tag_map = tag_map;
+
+ return 0;
+fail:
+ kfree(tag_index);
+ return -ENOMEM;
+}
+
+static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
+ int depth)
+{
+ struct blk_queue_tag *tags;
+
+ tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
+ if (!tags)
+ goto fail;
+
+ if (init_tag_map(q, tags, depth))
+ goto fail;
+
+ atomic_set(&tags->refcnt, 1);
+ return tags;
+fail:
+ kfree(tags);
+ return NULL;
+}
+
+/**
+ * blk_init_tags - initialize the tag info for an external tag map
+ * @depth: the maximum queue depth supported
+ **/
+struct blk_queue_tag *blk_init_tags(int depth)
+{
+ return __blk_queue_init_tags(NULL, depth);
+}
+EXPORT_SYMBOL(blk_init_tags);
+
+/**
+ * blk_queue_init_tags - initialize the queue tag info
+ * @q: the request queue for the device
+ * @depth: the maximum queue depth supported
+ * @tags: the tag to use
+ *
+ * Queue lock must be held here if the function is called to resize an
+ * existing map.
+ **/
+int blk_queue_init_tags(struct request_queue *q, int depth,
+ struct blk_queue_tag *tags)
+{
+ int rc;
+
+ BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
+
+ if (!tags && !q->queue_tags) {
+ tags = __blk_queue_init_tags(q, depth);
+
+ if (!tags)
+ goto fail;
+ } else if (q->queue_tags) {
+ rc = blk_queue_resize_tags(q, depth);
+ if (rc)
+ return rc;
+ queue_flag_set(QUEUE_FLAG_QUEUED, q);
+ return 0;
+ } else
+ atomic_inc(&tags->refcnt);
+
+ /*
+ * assign it, all done
+ */
+ q->queue_tags = tags;
+ queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
+ INIT_LIST_HEAD(&q->tag_busy_list);
+ return 0;
+fail:
+ kfree(tags);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(blk_queue_init_tags);
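+
+/*
+ * Editor's illustration (hypothetical driver, not from the original
+ * file): enable tagged queuing with a private map of 64 tags. A driver
+ * sharing one tag space across several queues would allocate it once
+ * with blk_init_tags() and pass that map here for each queue instead
+ * of NULL.
+ */
+#if 0 /* usage sketch only */
+static int exampledrv_enable_tcq(struct request_queue *q)
+{
+ return blk_queue_init_tags(q, 64, NULL);
+}
+#endif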
+
+/**
+ * blk_queue_resize_tags - change the queueing depth
+ * @q: the request queue for the device
+ * @new_depth: the new max command queueing depth
+ *
+ * Notes:
+ * Must be called with the queue lock held.
+ **/
+int blk_queue_resize_tags(struct request_queue *q, int new_depth)
+{
+ struct blk_queue_tag *bqt = q->queue_tags;
+ struct request **tag_index;
+ unsigned long *tag_map;
+ int max_depth, nr_ulongs;
+
+ if (!bqt)
+ return -ENXIO;
+
+ /*
+ * If we already have a large enough real_max_depth, just
+ * adjust max_depth. *NOTE* as requests with tag values
+ * between new_depth and real_max_depth can be in flight, the
+ * tag map cannot be shrunk blindly here.
+ */
+ if (new_depth <= bqt->real_max_depth) {
+ bqt->max_depth = new_depth;
+ return 0;
+ }
+
+ /*
+ * Currently cannot replace a shared tag map with a new
+ * one, so error out if this is the case
+ */
+ if (atomic_read(&bqt->refcnt) != 1)
+ return -EBUSY;
+
+ /*
+ * save the old state info, so we can copy it back
+ */
+ tag_index = bqt->tag_index;
+ tag_map = bqt->tag_map;
+ max_depth = bqt->real_max_depth;
+
+ if (init_tag_map(q, bqt, new_depth))
+ return -ENOMEM;
+
+ memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
+ nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
+ memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
+
+ kfree(tag_index);
+ kfree(tag_map);
+ return 0;
+}
+EXPORT_SYMBOL(blk_queue_resize_tags);
+
+/**
+ * blk_queue_end_tag - end tag operations for a request
+ * @q: the request queue for the device
+ * @rq: the request that has completed
+ *
+ * Description:
+ * Typically called when end_that_request_first() returns %0, meaning
+ * all transfers have been done for a request. It's important to call
+ * this function before end_that_request_last(), as that will put the
+ * request back on the free list thus corrupting the internal tag list.
+ *
+ * Notes:
+ * queue lock must be held.
+ **/
+void blk_queue_end_tag(struct request_queue *q, struct request *rq)
+{
+ struct blk_queue_tag *bqt = q->queue_tags;
+ unsigned tag = rq->tag; /* negative tags invalid */
+
+ BUG_ON(tag >= bqt->real_max_depth);
+
+ list_del_init(&rq->queuelist);
+ rq->cmd_flags &= ~REQ_QUEUED;
+ rq->tag = -1;
+
+ if (unlikely(bqt->tag_index[tag] == NULL))
+ printk(KERN_ERR "%s: tag %d is missing\n",
+ __func__, tag);
+
+ bqt->tag_index[tag] = NULL;
+
+ if (unlikely(!test_bit(tag, bqt->tag_map))) {
+ printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
+ __func__, tag);
+ return;
+ }
+ /*
+ * The tag_map bit acts as a lock for tag_index[bit], so we need
+ * unlock memory barrier semantics.
+ */
+ clear_bit_unlock(tag, bqt->tag_map);
+}
+EXPORT_SYMBOL(blk_queue_end_tag);
+
+/**
+ * blk_queue_start_tag - find a free tag and assign it
+ * @q: the request queue for the device
+ * @rq: the block request that needs tagging
+ *
+ * Description:
+ * This can either be used as a stand-alone helper, or possibly be
+ * assigned as the queue &prep_rq_fn (in which case &struct request
+ * automagically gets a tag assigned). Note that this function
+ * assumes that any type of request can be queued! If this is not
+ * true for your device, you must check the request type before
+ * calling this function. The request will also be removed from
+ * the request queue, so it's the driver's responsibility to re-add
+ * it if it should need to be restarted for some reason.
+ *
+ * Notes:
+ * queue lock must be held.
+ **/
+int blk_queue_start_tag(struct request_queue *q, struct request *rq)
+{
+ struct blk_queue_tag *bqt = q->queue_tags;
+ unsigned max_depth;
+ int tag;
+
+ if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
+ printk(KERN_ERR
+ "%s: request %p for device [%s] already tagged %d",
+ __func__, rq,
+ rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
+ BUG();
+ }
+
+ /*
+ * Protect against shared tag maps, as we may not have exclusive
+ * access to the tag map.
+ *
+ * We reserve a few tags just for sync IO, since we don't want
+ * to starve sync IO on behalf of flooding async IO.
+ */
+ max_depth = bqt->max_depth;
+ if (!rq_is_sync(rq) && max_depth > 1) {
+ max_depth -= 2;
+ if (!max_depth)
+ max_depth = 1;
+ if (q->in_flight[BLK_RW_ASYNC] > max_depth)
+ return 1;
+ }
+
+ do {
+ tag = find_first_zero_bit(bqt->tag_map, max_depth);
+ if (tag >= max_depth)
+ return 1;
+
+ } while (test_and_set_bit_lock(tag, bqt->tag_map));
+ /*
+ * We need lock ordering semantics given by test_and_set_bit_lock.
+ * See blk_queue_end_tag for details.
+ */
+
+ rq->cmd_flags |= REQ_QUEUED;
+ rq->tag = tag;
+ bqt->tag_index[tag] = rq;
+ blk_start_request(rq);
+ list_add(&rq->queuelist, &q->tag_busy_list);
+ return 0;
+}
+EXPORT_SYMBOL(blk_queue_start_tag);
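+
+/*
+ * Editor's illustration (hypothetical driver, not from the original
+ * file): a tagged driver's request_fn tries to tag each request before
+ * issuing it and backs off when no tag (or no async headroom) is
+ * available. exampledrv_issue() is a made-up helper that hands rq->tag
+ * to the hardware.
+ */
+#if 0 /* usage sketch only */
+static void exampledrv_request_fn(struct request_queue *q)
+{
+ struct request *rq;
+
+ while ((rq = blk_peek_request(q)) != NULL) {
+ if (blk_queue_start_tag(q, rq))
+ break; /* no tag free, retry on next run */
+ exampledrv_issue(q, rq);
+ }
+}
+#endif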
+
+/**
+ * blk_queue_invalidate_tags - invalidate all pending tags
+ * @q: the request queue for the device
+ *
+ * Description:
+ * Hardware conditions may dictate a need to stop all pending requests.
+ * In this case, we will safely clear the block side of the tag queue and
+ * re-add all requests to the request queue in the right order.
+ *
+ * Notes:
+ * queue lock must be held.
+ **/
+void blk_queue_invalidate_tags(struct request_queue *q)
+{
+ struct list_head *tmp, *n;
+
+ list_for_each_safe(tmp, n, &q->tag_busy_list)
+ blk_requeue_request(q, list_entry_rq(tmp));
+}
+EXPORT_SYMBOL(blk_queue_invalidate_tags);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
new file mode 100644
index 00000000..f2ddb946
--- /dev/null
+++ b/block/blk-throttle.c
@@ -0,0 +1,1330 @@
+/*
+ * Interface for controlling IO bandwidth on a request queue
+ *
+ * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/blktrace_api.h>
+#include "blk-cgroup.h"
+#include "blk.h"
+
+/* Max dispatch from a group in 1 round */
+static int throtl_grp_quantum = 8;
+
+/* Total max dispatch from all groups in one round */
+static int throtl_quantum = 32;
+
+/* Throttling is performed over a 100ms slice, after which the slice is renewed */
+static unsigned long throtl_slice = HZ/10; /* 100 ms */
+
+/* A workqueue to queue throttle related work */
+static struct workqueue_struct *kthrotld_workqueue;
+static void throtl_schedule_delayed_work(struct throtl_data *td,
+ unsigned long delay);
+
+struct throtl_rb_root {
+ struct rb_root rb;
+ struct rb_node *left;
+ unsigned int count;
+ unsigned long min_disptime;
+};
+
+#define THROTL_RB_ROOT (struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
+ .count = 0, .min_disptime = 0}
+
+#define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
+
+struct throtl_grp {
+ /* List of throtl groups on the request queue */
+ struct hlist_node tg_node;
+
+ /* active throtl group service_tree member */
+ struct rb_node rb_node;
+
+ /*
+ * Dispatch time in jiffies. This is the estimated time when the group
+ * will unthrottle and be ready to dispatch more bios. It is used as
+ * the key to sort active groups in the service tree.
+ */
+ unsigned long disptime;
+
+ struct blkio_group blkg;
+ atomic_t ref;
+ unsigned int flags;
+
+ /* Two lists for READ and WRITE */
+ struct bio_list bio_lists[2];
+
+ /* Number of queued bios on READ and WRITE lists */
+ unsigned int nr_queued[2];
+
+ /* bytes per second rate limits */
+ uint64_t bps[2];
+
+ /* IOPS limits */
+ unsigned int iops[2];
+
+ /* Number of bytes dispatched in the current slice */
+ uint64_t bytes_disp[2];
+ /* Number of bios dispatched in the current slice */
+ unsigned int io_disp[2];
+
+ /* When did we start a new slice */
+ unsigned long slice_start[2];
+ unsigned long slice_end[2];
+
+ /* Some throttle limits got updated for the group */
+ int limits_changed;
+
+ struct rcu_head rcu_head;
+};
+
+struct throtl_data
+{
+ /* List of throtl groups */
+ struct hlist_head tg_list;
+
+ /* service tree for active throtl groups */
+ struct throtl_rb_root tg_service_tree;
+
+ struct throtl_grp *root_tg;
+ struct request_queue *queue;
+
+ /* Total Number of queued bios on READ and WRITE lists */
+ unsigned int nr_queued[2];
+
+ /*
+ * number of total undestroyed groups
+ */
+ unsigned int nr_undestroyed_grps;
+
+ /* Work for dispatching throttled bios */
+ struct delayed_work throtl_work;
+
+ int limits_changed;
+};
+
+enum tg_state_flags {
+ THROTL_TG_FLAG_on_rr = 0, /* on round-robin busy list */
+};
+
+#define THROTL_TG_FNS(name) \
+static inline void throtl_mark_tg_##name(struct throtl_grp *tg) \
+{ \
+ (tg)->flags |= (1 << THROTL_TG_FLAG_##name); \
+} \
+static inline void throtl_clear_tg_##name(struct throtl_grp *tg) \
+{ \
+ (tg)->flags &= ~(1 << THROTL_TG_FLAG_##name); \
+} \
+static inline int throtl_tg_##name(const struct throtl_grp *tg) \
+{ \
+ return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0; \
+}
+
+THROTL_TG_FNS(on_rr);
+
+#define throtl_log_tg(td, tg, fmt, args...) \
+ blk_add_trace_msg((td)->queue, "throtl %s " fmt, \
+ blkg_path(&(tg)->blkg), ##args); \
+
+#define throtl_log(td, fmt, args...) \
+ blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
+
+static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
+{
+ if (blkg)
+ return container_of(blkg, struct throtl_grp, blkg);
+
+ return NULL;
+}
+
+static inline unsigned int total_nr_queued(struct throtl_data *td)
+{
+ return td->nr_queued[0] + td->nr_queued[1];
+}
+
+static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
+{
+ atomic_inc(&tg->ref);
+ return tg;
+}
+
+static void throtl_free_tg(struct rcu_head *head)
+{
+ struct throtl_grp *tg;
+
+ tg = container_of(head, struct throtl_grp, rcu_head);
+ free_percpu(tg->blkg.stats_cpu);
+ kfree(tg);
+}
+
+static void throtl_put_tg(struct throtl_grp *tg)
+{
+ BUG_ON(atomic_read(&tg->ref) <= 0);
+ if (!atomic_dec_and_test(&tg->ref))
+ return;
+
+ /*
+ * A group is freed in an RCU manner. But holding the RCU read lock does
+ * not mean that one can access all the fields of blkg and assume they
+ * are valid. For example, don't try to follow the throtl_data and
+ * request queue links.
+ *
+ * Having a reference to blkg under RCU only allows access to values
+ * local to the group, like group stats and group rate limits.
+ */
+ call_rcu(&tg->rcu_head, throtl_free_tg);
+}
+
+static void throtl_init_group(struct throtl_grp *tg)
+{
+ INIT_HLIST_NODE(&tg->tg_node);
+ RB_CLEAR_NODE(&tg->rb_node);
+ bio_list_init(&tg->bio_lists[0]);
+ bio_list_init(&tg->bio_lists[1]);
+ tg->limits_changed = false;
+
+ /* Practically unlimited BW */
+ tg->bps[0] = tg->bps[1] = -1;
+ tg->iops[0] = tg->iops[1] = -1;
+
+ /*
+ * Take the initial reference that will be released on destroy.
+ * This can be thought of as a joint reference by the cgroup and the
+ * request queue, which will be dropped by either the request queue
+ * exit or the cgroup deletion path, depending on who exits first.
+ */
+ atomic_set(&tg->ref, 1);
+}
+
+/* Should be called with rcu read lock held (needed for blkcg) */
+static void
+throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
+{
+ hlist_add_head(&tg->tg_node, &td->tg_list);
+ td->nr_undestroyed_grps++;
+}
+
+static void
+__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
+{
+ struct backing_dev_info *bdi = &td->queue->backing_dev_info;
+ unsigned int major, minor;
+
+ if (!tg || tg->blkg.dev)
+ return;
+
+ /*
+ * Fill in device details for a group which might not have been
+ * filled in at group creation time, as the queue was being
+ * instantiated and the driver had not attached a device yet.
+ */
+ if (bdi->dev && dev_name(bdi->dev)) {
+ sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
+ tg->blkg.dev = MKDEV(major, minor);
+ }
+}
+
+/*
+ * Should be called without the queue lock held. Here the queue lock will
+ * be taken rarely: only once during the lifetime of a group, if need be.
+ */
+static void
+throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
+{
+ if (!tg || tg->blkg.dev)
+ return;
+
+ spin_lock_irq(td->queue->queue_lock);
+ __throtl_tg_fill_dev_details(td, tg);
+ spin_unlock_irq(td->queue->queue_lock);
+}
+
+static void throtl_init_add_tg_lists(struct throtl_data *td,
+ struct throtl_grp *tg, struct blkio_cgroup *blkcg)
+{
+ __throtl_tg_fill_dev_details(td, tg);
+
+ /* Add group onto cgroup list */
+ blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
+ tg->blkg.dev, BLKIO_POLICY_THROTL);
+
+ tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
+ tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
+ tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
+ tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);
+
+ throtl_add_group_to_td_list(td, tg);
+}
+
+/* Should be called without queue lock and outside of rcu period */
+static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
+{
+ struct throtl_grp *tg = NULL;
+ int ret;
+
+ tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
+ if (!tg)
+ return NULL;
+
+ ret = blkio_alloc_blkg_stats(&tg->blkg);
+
+ if (ret) {
+ kfree(tg);
+ return NULL;
+ }
+
+ throtl_init_group(tg);
+ return tg;
+}
+
+static struct
+throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
+{
+ struct throtl_grp *tg = NULL;
+ void *key = td;
+
+ /*
+ * This is the common case when there are no blkio cgroups.
+ * Avoid lookup in this case
+ */
+ if (blkcg == &blkio_root_cgroup)
+ tg = td->root_tg;
+ else
+ tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
+
+ __throtl_tg_fill_dev_details(td, tg);
+ return tg;
+}
+
+static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
+{
+ struct throtl_grp *tg = NULL, *__tg = NULL;
+ struct blkio_cgroup *blkcg;
+ struct request_queue *q = td->queue;
+
+ /* no throttling for dead queue */
+ if (unlikely(blk_queue_dead(q)))
+ return NULL;
+
+ rcu_read_lock();
+ blkcg = task_blkio_cgroup(current);
+ tg = throtl_find_tg(td, blkcg);
+ if (tg) {
+ rcu_read_unlock();
+ return tg;
+ }
+
+ /*
+ * Need to allocate a group. Group allocation also needs allocation
+ * of per-cpu stats, which in turn takes a mutex and can block. Hence
+ * we need to drop the rcu lock and queue_lock before we call alloc.
+ */
+ rcu_read_unlock();
+ spin_unlock_irq(q->queue_lock);
+
+ tg = throtl_alloc_tg(td);
+
+ /* Group allocated and queue is still alive. Take the lock. */
+ spin_lock_irq(q->queue_lock);
+
+ /* Make sure @q is still alive */
+ if (unlikely(blk_queue_dead(q))) {
+ kfree(tg);
+ return NULL;
+ }
+
+ /*
+ * Initialize the new group. After sleeping, read the blkcg again.
+ */
+ rcu_read_lock();
+ blkcg = task_blkio_cgroup(current);
+
+ /*
+ * If some other thread already allocated the group while we were
+ * not holding the queue lock, free up our group.
+ */
+ __tg = throtl_find_tg(td, blkcg);
+
+ if (__tg) {
+ kfree(tg);
+ rcu_read_unlock();
+ return __tg;
+ }
+
+ /* Group allocation failed. Account the IO to root group */
+ if (!tg) {
+ tg = td->root_tg;
+ return tg;
+ }
+
+ throtl_init_add_tg_lists(td, tg, blkcg);
+ rcu_read_unlock();
+ return tg;
+}
+
+static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
+{
+ /* Service tree is empty */
+ if (!root->count)
+ return NULL;
+
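+ /* root->left caches the leftmost (earliest disptime) node; refresh it lazily */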
+ if (!root->left)
+ root->left = rb_first(&root->rb);
+
+ if (root->left)
+ return rb_entry_tg(root->left);
+
+ return NULL;
+}
+
+static void rb_erase_init(struct rb_node *n, struct rb_root *root)
+{
+ rb_erase(n, root);
+ RB_CLEAR_NODE(n);
+}
+
+static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
+{
+ if (root->left == n)
+ root->left = NULL;
+ rb_erase_init(n, &root->rb);
+ --root->count;
+}
+
+static void update_min_dispatch_time(struct throtl_rb_root *st)
+{
+ struct throtl_grp *tg;
+
+ tg = throtl_rb_first(st);
+ if (!tg)
+ return;
+
+ st->min_disptime = tg->disptime;
+}
+
+static void
+tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
+{
+ struct rb_node **node = &st->rb.rb_node;
+ struct rb_node *parent = NULL;
+ struct throtl_grp *__tg;
+ unsigned long key = tg->disptime;
+ int left = 1;
+
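+ /* Insert keyed by dispatch time; track whether the new node stays leftmost */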
+ while (*node != NULL) {
+ parent = *node;
+ __tg = rb_entry_tg(parent);
+
+ if (time_before(key, __tg->disptime))
+ node = &parent->rb_left;
+ else {
+ node = &parent->rb_right;
+ left = 0;
+ }
+ }
+
+ if (left)
+ st->left = &tg->rb_node;
+
+ rb_link_node(&tg->rb_node, parent, node);
+ rb_insert_color(&tg->rb_node, &st->rb);
+}
+
+static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
+{
+ struct throtl_rb_root *st = &td->tg_service_tree;
+
+ tg_service_tree_add(st, tg);
+ throtl_mark_tg_on_rr(tg);
+ st->count++;
+}
+
+static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
+{
+ if (!throtl_tg_on_rr(tg))
+ __throtl_enqueue_tg(td, tg);
+}
+
+static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
+{
+ throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
+ throtl_clear_tg_on_rr(tg);
+}
+
+static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
+{
+ if (throtl_tg_on_rr(tg))
+ __throtl_dequeue_tg(td, tg);
+}
+
+static void throtl_schedule_next_dispatch(struct throtl_data *td)
+{
+ struct throtl_rb_root *st = &td->tg_service_tree;
+
+ /*
+ * If there are more bios pending, schedule more work.
+ */
+ if (!total_nr_queued(td))
+ return;
+
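+ /* Bios are queued, so the service tree must contain at least one group */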
+ BUG_ON(!st->count);
+
+ update_min_dispatch_time(st);
+
+ if (time_before_eq(st->min_disptime, jiffies))
+ throtl_schedule_delayed_work(td, 0);
+ else
+ throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
+}
+
+static inline void
+throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
+{
+ tg->bytes_disp[rw] = 0;
+ tg->io_disp[rw] = 0;
+ tg->slice_start[rw] = jiffies;
+ tg->slice_end[rw] = jiffies + throtl_slice;
+ throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
+ rw == READ ? 'R' : 'W', tg->slice_start[rw],
+ tg->slice_end[rw], jiffies);
+}
+
+static inline void throtl_set_slice_end(struct throtl_data *td,
+ struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
+{
+ tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
+}
+
+static inline void throtl_extend_slice(struct throtl_data *td,
+ struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
+{
+ tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
+ throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
+ rw == READ ? 'R' : 'W', tg->slice_start[rw],
+ tg->slice_end[rw], jiffies);
+}
+
+/* Determine if previously allocated or extended slice is complete or not */
+static bool
+throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
+{
+ if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
+ return 0;
+
+ return 1;
+}
+
+/* Trim the used slices and adjust slice start accordingly */
+static inline void
+throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
+{
+ unsigned long nr_slices, time_elapsed, io_trim;
+ u64 bytes_trim, tmp;
+
+ BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
+
+ /*
+ * If bps are unlimited (-1), then time slices don't get
+ * renewed. Don't try to trim the slice if the slice has been used up;
+ * a new slice will start when appropriate.
+ */
+ if (throtl_slice_used(td, tg, rw))
+ return;
+
+ /*
+ * A bio has been dispatched. Also adjust slice_end. It might happen
+ * that initially the cgroup limit was very low, resulting in a high
+ * slice_end, but later the limit was bumped up and the bio was
+ * dispatched sooner; then we need to reduce slice_end. A bogus high
+ * slice_end is bad because it does not allow a new slice to start.
+ */
+
+ throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);
+
+ time_elapsed = jiffies - tg->slice_start[rw];
+
+ nr_slices = time_elapsed / throtl_slice;
+
+ if (!nr_slices)
+ return;
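+ /* Bytes and ios that could have been dispatched in the elapsed whole slices */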
+ tmp = tg->bps[rw] * throtl_slice * nr_slices;
+ do_div(tmp, HZ);
+ bytes_trim = tmp;
+
+ io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;
+
+ if (!bytes_trim && !io_trim)
+ return;
+
+ if (tg->bytes_disp[rw] >= bytes_trim)
+ tg->bytes_disp[rw] -= bytes_trim;
+ else
+ tg->bytes_disp[rw] = 0;
+
+ if (tg->io_disp[rw] >= io_trim)
+ tg->io_disp[rw] -= io_trim;
+ else
+ tg->io_disp[rw] = 0;
+
+ tg->slice_start[rw] += nr_slices * throtl_slice;
+
+ throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
+ " start=%lu end=%lu jiffies=%lu",
+ rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
+ tg->slice_start[rw], tg->slice_end[rw], jiffies);
+}
+
+static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
+ struct bio *bio, unsigned long *wait)
+{
+ bool rw = bio_data_dir(bio);
+ unsigned int io_allowed;
+ unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
+ u64 tmp;
+
+ jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
+
+ /* Slice has just started. Consider one slice interval */
+ if (!jiffy_elapsed)
+ jiffy_elapsed_rnd = throtl_slice;
+
+ jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
+
+ /*
+ * jiffy_elapsed_rnd should not be a big value: the minimum iops can be
+ * 1, so at most the elapsed jiffies should be equivalent to 1 second,
+ * as we will allow dispatch after 1 second and after that the slice
+ * should have been trimmed.
+ */
+
+ tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
+ do_div(tmp, HZ);
+
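+ /* io_allowed is an unsigned int; clamp in case iops * elapsed time overflows it */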
+ if (tmp > UINT_MAX)
+ io_allowed = UINT_MAX;
+ else
+ io_allowed = tmp;
+
+ if (tg->io_disp[rw] + 1 <= io_allowed) {
+ if (wait)
+ *wait = 0;
+ return 1;
+ }
+
+ /* Calc approx time to dispatch */
+ jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;
+
+ if (jiffy_wait > jiffy_elapsed)
+ jiffy_wait = jiffy_wait - jiffy_elapsed;
+ else
+ jiffy_wait = 1;
+
+ if (wait)
+ *wait = jiffy_wait;
+ return 0;
+}
+
+static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
+ struct bio *bio, unsigned long *wait)
+{
+ bool rw = bio_data_dir(bio);
+ u64 bytes_allowed, extra_bytes, tmp;
+ unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
+
+ jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
+
+ /* Slice has just started. Consider one slice interval */
+ if (!jiffy_elapsed)
+ jiffy_elapsed_rnd = throtl_slice;
+
+ jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
+
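+ /* Bytes allowed so far in this (rounded-up) slice at the configured bps */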
+ tmp = tg->bps[rw] * jiffy_elapsed_rnd;
+ do_div(tmp, HZ);
+ bytes_allowed = tmp;
+
+ if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
+ if (wait)
+ *wait = 0;
+ return 1;
+ }
+
+ /* Calc approx time to dispatch */
+ extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
+ jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
+
+ if (!jiffy_wait)
+ jiffy_wait = 1;
+
+ /*
+ * This wait time does not take into account the rounding up we
+ * did. Add that time as well.
+ */
+ jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
+ if (wait)
+ *wait = jiffy_wait;
+ return 0;
+}
+
+static bool tg_no_rule_group(struct throtl_grp *tg, bool rw)
+{
+ if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
+ return 1;
+ return 0;
+}
+
+/*
+ * Returns whether one can dispatch a bio or not. Also returns the approximate
+ * number of jiffies to wait before this bio is within the IO rate and can be
+ * dispatched.
+ */
+static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
+ struct bio *bio, unsigned long *wait)
+{
+ bool rw = bio_data_dir(bio);
+ unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
+
+ /*
+ * Currently the whole state machine of the group depends on the first
+ * bio queued in the group's bio list. So one should not be calling
+ * this function with a different bio if there are other bios
+ * queued.
+ */
+ BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));
+
+ /* If tg->bps = -1, then BW is unlimited */
+ if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
+ if (wait)
+ *wait = 0;
+ return 1;
+ }
+
+ /*
+ * If the previous slice expired, start a new one; otherwise renew/extend
+ * the existing slice to make sure it is at least a throtl_slice interval
+ * long from now.
+ */
+ if (throtl_slice_used(td, tg, rw))
+ throtl_start_new_slice(td, tg, rw);
+ else {
+ if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
+ throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
+ }
+
+ if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
+ && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
+ if (wait)
+ *wait = 0;
+ return 1;
+ }
+
+ max_wait = max(bps_wait, iops_wait);
+
+ if (wait)
+ *wait = max_wait;
+
+ if (time_before(tg->slice_end[rw], jiffies + max_wait))
+ throtl_extend_slice(td, tg, rw, jiffies + max_wait);
+
+ return 0;
+}
+
+static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
+{
+ bool rw = bio_data_dir(bio);
+ bool sync = rw_is_sync(bio->bi_rw);
+
+ /* Charge the bio to the group */
+ tg->bytes_disp[rw] += bio->bi_size;
+ tg->io_disp[rw]++;
+
+ blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
+}
+
+static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
+ struct bio *bio)
+{
+ bool rw = bio_data_dir(bio);
+
+ bio_list_add(&tg->bio_lists[rw], bio);
+ /* Take a bio reference on tg */
+ throtl_ref_get_tg(tg);
+ tg->nr_queued[rw]++;
+ td->nr_queued[rw]++;
+ throtl_enqueue_tg(td, tg);
+}
+
+static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
+{
+ unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
+ struct bio *bio;
+
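+ /* Compute how long the head bio in each direction still has to wait */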
+ if ((bio = bio_list_peek(&tg->bio_lists[READ])))
+ tg_may_dispatch(td, tg, bio, &read_wait);
+
+ if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
+ tg_may_dispatch(td, tg, bio, &write_wait);
+
+ min_wait = min(read_wait, write_wait);
+ disptime = jiffies + min_wait;
+
+ /* Update dispatch time */
+ throtl_dequeue_tg(td, tg);
+ tg->disptime = disptime;
+ throtl_enqueue_tg(td, tg);
+}
+
+static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
+ bool rw, struct bio_list *bl)
+{
+ struct bio *bio;
+
+ bio = bio_list_pop(&tg->bio_lists[rw]);
+ tg->nr_queued[rw]--;
+ /* Drop bio reference on tg */
+ throtl_put_tg(tg);
+
+ BUG_ON(td->nr_queued[rw] <= 0);
+ td->nr_queued[rw]--;
+
+ throtl_charge_bio(tg, bio);
+ bio_list_add(bl, bio);
+ bio->bi_rw |= REQ_THROTTLED;
+
+ throtl_trim_slice(td, tg, rw);
+}
+
+static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
+ struct bio_list *bl)
+{
+ unsigned int nr_reads = 0, nr_writes = 0;
+ unsigned int max_nr_reads = throtl_grp_quantum*3/4;
+ unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
+ struct bio *bio;
+
+ /* Try to dispatch 75% READS and 25% WRITES */
+
+ while ((bio = bio_list_peek(&tg->bio_lists[READ]))
+ && tg_may_dispatch(td, tg, bio, NULL)) {
+
+ tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
+ nr_reads++;
+
+ if (nr_reads >= max_nr_reads)
+ break;
+ }
+
+ while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
+ && tg_may_dispatch(td, tg, bio, NULL)) {
+
+ tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
+ nr_writes++;
+
+ if (nr_writes >= max_nr_writes)
+ break;
+ }
+
+ return nr_reads + nr_writes;
+}
+
+static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
+{
+ unsigned int nr_disp = 0;
+ struct throtl_grp *tg;
+ struct throtl_rb_root *st = &td->tg_service_tree;
+
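+ /* Walk groups in dispatch-time order; stop at the first group not yet due */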
+ while (1) {
+ tg = throtl_rb_first(st);
+
+ if (!tg)
+ break;
+
+ if (time_before(jiffies, tg->disptime))
+ break;
+
+ throtl_dequeue_tg(td, tg);
+
+ nr_disp += throtl_dispatch_tg(td, tg, bl);
+
+ if (tg->nr_queued[0] || tg->nr_queued[1]) {
+ tg_update_disptime(td, tg);
+ throtl_enqueue_tg(td, tg);
+ }
+
+ if (nr_disp >= throtl_quantum)
+ break;
+ }
+
+ return nr_disp;
+}
+
+static void throtl_process_limit_change(struct throtl_data *td)
+{
+ struct throtl_grp *tg;
+ struct hlist_node *pos, *n;
+
+ if (!td->limits_changed)
+ return;
+
+ xchg(&td->limits_changed, false);
+
+ throtl_log(td, "limits changed");
+
+ hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
+ if (!tg->limits_changed)
+ continue;
+
+ if (!xchg(&tg->limits_changed, false))
+ continue;
+
+ throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
+ " riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
+ tg->iops[READ], tg->iops[WRITE]);
+
+ /*
+ * Restart the slices for both READ and WRITE. It
+ * might happen that a group's limits are dropped
+ * suddenly and we don't want to account recently
+ * dispatched IO against the new low rate.
+ */
+ throtl_start_new_slice(td, tg, 0);
+ throtl_start_new_slice(td, tg, 1);
+
+ if (throtl_tg_on_rr(tg))
+ tg_update_disptime(td, tg);
+ }
+}
+
+/* Dispatch throttled bios. Should be called without queue lock held. */
+static int throtl_dispatch(struct request_queue *q)
+{
+ struct throtl_data *td = q->td;
+ unsigned int nr_disp = 0;
+ struct bio_list bio_list_on_stack;
+ struct bio *bio;
+ struct blk_plug plug;
+
+ spin_lock_irq(q->queue_lock);
+
+ throtl_process_limit_change(td);
+
+ if (!total_nr_queued(td))
+ goto out;
+
+ bio_list_init(&bio_list_on_stack);
+
+ throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
+ total_nr_queued(td), td->nr_queued[READ],
+ td->nr_queued[WRITE]);
+
+ nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);
+
+ if (nr_disp)
+ throtl_log(td, "bios disp=%u", nr_disp);
+
+ throtl_schedule_next_dispatch(td);
+out:
+ spin_unlock_irq(q->queue_lock);
+
+ /*
+ * If we dispatched some requests, unplug the queue to ensure
+ * immediate dispatch.
+ */
+ if (nr_disp) {
+ blk_start_plug(&plug);
+ while ((bio = bio_list_pop(&bio_list_on_stack)))
+ generic_make_request(bio);
+ blk_finish_plug(&plug);
+ }
+ return nr_disp;
+}
+
+void blk_throtl_work(struct work_struct *work)
+{
+ struct throtl_data *td = container_of(work, struct throtl_data,
+ throtl_work.work);
+ struct request_queue *q = td->queue;
+
+ throtl_dispatch(q);
+}
+
+/* Call with queue lock held */
+static void
+throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
+{
+
+ struct delayed_work *dwork = &td->throtl_work;
+
+ /* schedule work if limits changed even if no bio is queued */
+ if (total_nr_queued(td) || td->limits_changed) {
+ /*
+ * We might have a work item scheduled to be executed in the future.
+ * Cancel that and schedule a new one.
+ */
+ __cancel_delayed_work(dwork);
+ queue_delayed_work(kthrotld_workqueue, dwork, delay);
+ throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
+ delay, jiffies);
+ }
+}
+
+static void
+throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
+{
+ /* Something is wrong if we are trying to remove the same group twice */
+ BUG_ON(hlist_unhashed(&tg->tg_node));
+
+ hlist_del_init(&tg->tg_node);
+
+ /*
+ * Put the reference taken at the time of creation so that when all
+ * queues are gone, the group can be destroyed.
+ */
+ throtl_put_tg(tg);
+ td->nr_undestroyed_grps--;
+}
+
+static void throtl_release_tgs(struct throtl_data *td)
+{
+ struct hlist_node *pos, *n;
+ struct throtl_grp *tg;
+
+ hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
+ /*
+ * If the cgroup removal path got to the blkio_group first and removed
+ * it from the cgroup list, then it will take care of destroying
+ * the throtl_grp as well.
+ */
+ if (!blkiocg_del_blkio_group(&tg->blkg))
+ throtl_destroy_tg(td, tg);
+ }
+}
+
+/*
+ * Blk cgroup controller notification saying that blkio_group object is being
+ * delinked as the associated cgroup object is going away. That also means that
+ * no new IO will come into this group. So get rid of this group as soon as
+ * any pending IO in the group is finished.
+ *
+ * This function is called under rcu_read_lock(). key is the rcu protected
+ * pointer. That means "key" is a valid throtl_data pointer as long as we
+ * hold the rcu read lock.
+ *
+ * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
+ * it should not be NULL: even if the queue was going away, the cgroup
+ * deletion path got to it first.
+ */
+void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
+{
+ unsigned long flags;
+ struct throtl_data *td = key;
+
+ spin_lock_irqsave(td->queue->queue_lock, flags);
+ throtl_destroy_tg(td, tg_of_blkg(blkg));
+ spin_unlock_irqrestore(td->queue->queue_lock, flags);
+}
+
+static void throtl_update_blkio_group_common(struct throtl_data *td,
+ struct throtl_grp *tg)
+{
+ xchg(&tg->limits_changed, true);
+ xchg(&td->limits_changed, true);
+ /* Schedule a work now to process the limit change */
+ throtl_schedule_delayed_work(td, 0);
+}
+
+/*
+ * For all update functions, key should be a valid pointer because these
+ * update functions are called under blkcg_lock; that means blkg is
+ * valid and in turn key is valid. The queue exit path cannot race because
+ * of blkcg_lock.
+ *
+ * We cannot take the queue lock in update functions, as taking the queue
+ * lock under blkcg_lock is not allowed. On other paths we take blkcg_lock
+ * under queue_lock.
+ */
+static void throtl_update_blkio_group_read_bps(void *key,
+ struct blkio_group *blkg, u64 read_bps)
+{
+ struct throtl_data *td = key;
+ struct throtl_grp *tg = tg_of_blkg(blkg);
+
+ tg->bps[READ] = read_bps;
+ throtl_update_blkio_group_common(td, tg);
+}
+
+static void throtl_update_blkio_group_write_bps(void *key,
+ struct blkio_group *blkg, u64 write_bps)
+{
+ struct throtl_data *td = key;
+ struct throtl_grp *tg = tg_of_blkg(blkg);
+
+ tg->bps[WRITE] = write_bps;
+ throtl_update_blkio_group_common(td, tg);
+}
+
+static void throtl_update_blkio_group_read_iops(void *key,
+ struct blkio_group *blkg, unsigned int read_iops)
+{
+ struct throtl_data *td = key;
+ struct throtl_grp *tg = tg_of_blkg(blkg);
+
+ tg->iops[READ] = read_iops;
+ throtl_update_blkio_group_common(td, tg);
+}
+
+static void throtl_update_blkio_group_write_iops(void *key,
+ struct blkio_group *blkg, unsigned int write_iops)
+{
+ struct throtl_data *td = key;
+ struct throtl_grp *tg = tg_of_blkg(blkg);
+
+ tg->iops[WRITE] = write_iops;
+ throtl_update_blkio_group_common(td, tg);
+}
+
+static void throtl_shutdown_wq(struct request_queue *q)
+{
+ struct throtl_data *td = q->td;
+
+ cancel_delayed_work_sync(&td->throtl_work);
+}
+
+static struct blkio_policy_type blkio_policy_throtl = {
+ .ops = {
+ .blkio_unlink_group_fn = throtl_unlink_blkio_group,
+ .blkio_update_group_read_bps_fn =
+ throtl_update_blkio_group_read_bps,
+ .blkio_update_group_write_bps_fn =
+ throtl_update_blkio_group_write_bps,
+ .blkio_update_group_read_iops_fn =
+ throtl_update_blkio_group_read_iops,
+ .blkio_update_group_write_iops_fn =
+ throtl_update_blkio_group_write_iops,
+ },
+ .plid = BLKIO_POLICY_THROTL,
+};
+
+bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
+{
+ struct throtl_data *td = q->td;
+ struct throtl_grp *tg;
+ bool rw = bio_data_dir(bio), update_disptime = true;
+ struct blkio_cgroup *blkcg;
+ bool throttled = false;
+
+ if (bio->bi_rw & REQ_THROTTLED) {
+ bio->bi_rw &= ~REQ_THROTTLED;
+ goto out;
+ }
+
+ /*
+ * A throtl_grp pointer retrieved under rcu can be used to access
+ * basic fields like stats and io rates. If a group has no rules,
+ * just update the dispatch stats in a lockless manner and return.
+ */
+
+ rcu_read_lock();
+ blkcg = task_blkio_cgroup(current);
+ tg = throtl_find_tg(td, blkcg);
+ if (tg) {
+ throtl_tg_fill_dev_details(td, tg);
+
+ if (tg_no_rule_group(tg, rw)) {
+ blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
+ rw, rw_is_sync(bio->bi_rw));
+ rcu_read_unlock();
+ goto out;
+ }
+ }
+ rcu_read_unlock();
+
+ /*
+ * Either the group has not been allocated yet or it is not an
+ * unlimited IO group.
+ */
+ spin_lock_irq(q->queue_lock);
+ tg = throtl_get_tg(td);
+ if (unlikely(!tg))
+ goto out_unlock;
+
+ if (tg->nr_queued[rw]) {
+ /*
+ * There is already another bio queued in the same direction. No
+ * need to update dispatch time.
+ */
+ update_disptime = false;
+ goto queue_bio;
+
+ }
+
+ /* Bio is within the rate limit of the group */
+ if (tg_may_dispatch(td, tg, bio, NULL)) {
+ throtl_charge_bio(tg, bio);
+
+ /*
+ * We need to trim the slice even when bios are not being queued,
+ * otherwise it might happen that a bio is not queued for
+ * a long time and the slice keeps on extending and trim is not
+ * called for a long time. Then, if limits are reduced suddenly,
+ * we take into account all the IO dispatched so far at the new
+ * low rate and newly queued IO gets a really long dispatch
+ * time.
+ *
+ * So keep on trimming slice even if bio is not queued.
+ */
+ throtl_trim_slice(td, tg, rw);
+ goto out_unlock;
+ }
+
+queue_bio:
+ throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
+ " iodisp=%u iops=%u queued=%d/%d",
+ rw == READ ? 'R' : 'W',
+ tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
+ tg->io_disp[rw], tg->iops[rw],
+ tg->nr_queued[READ], tg->nr_queued[WRITE]);
+
+ throtl_add_bio_tg(q->td, tg, bio);
+ throttled = true;
+
+ if (update_disptime) {
+ tg_update_disptime(td, tg);
+ throtl_schedule_next_dispatch(td);
+ }
+
+out_unlock:
+ spin_unlock_irq(q->queue_lock);
+out:
+ return throttled;
+}
+
+/**
+ * blk_throtl_drain - drain throttled bios
+ * @q: request_queue to drain throttled bios for
+ *
+ * Dispatch all currently throttled bios on @q through ->make_request_fn().
+ */
+void blk_throtl_drain(struct request_queue *q)
+ __releases(q->queue_lock) __acquires(q->queue_lock)
+{
+ struct throtl_data *td = q->td;
+ struct throtl_rb_root *st = &td->tg_service_tree;
+ struct throtl_grp *tg;
+ struct bio_list bl;
+ struct bio *bio;
+
+ queue_lockdep_assert_held(q);
+
+ bio_list_init(&bl);
+
+ while ((tg = throtl_rb_first(st))) {
+ throtl_dequeue_tg(td, tg);
+
+ while ((bio = bio_list_peek(&tg->bio_lists[READ])))
+ tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
+ while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
+ tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
+ }
+ spin_unlock_irq(q->queue_lock);
+
+ while ((bio = bio_list_pop(&bl)))
+ generic_make_request(bio);
+
+ spin_lock_irq(q->queue_lock);
+}
+
+int blk_throtl_init(struct request_queue *q)
+{
+ struct throtl_data *td;
+ struct throtl_grp *tg;
+
+ td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
+ if (!td)
+ return -ENOMEM;
+
+ INIT_HLIST_HEAD(&td->tg_list);
+ td->tg_service_tree = THROTL_RB_ROOT;
+ td->limits_changed = false;
+ INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
+
+ /* Alloc and init the root group. */
+ td->queue = q;
+ tg = throtl_alloc_tg(td);
+
+ if (!tg) {
+ kfree(td);
+ return -ENOMEM;
+ }
+
+ td->root_tg = tg;
+
+ rcu_read_lock();
+ throtl_init_add_tg_lists(td, tg, &blkio_root_cgroup);
+ rcu_read_unlock();
+
+ /* Attach throtl data to request queue */
+ q->td = td;
+ return 0;
+}
+
+void blk_throtl_exit(struct request_queue *q)
+{
+ struct throtl_data *td = q->td;
+ bool wait = false;
+
+ BUG_ON(!td);
+
+ throtl_shutdown_wq(q);
+
+ spin_lock_irq(q->queue_lock);
+ throtl_release_tgs(td);
+
+ /* If there are other groups */
+ if (td->nr_undestroyed_grps > 0)
+ wait = true;
+
+ spin_unlock_irq(q->queue_lock);
+
+ /*
+ * Wait for tg->blkg->key accessors to exit their grace periods.
+ * Do this wait only if there are other undestroyed groups out
+ * there (other than the root group). This can happen if the cgroup
+ * deletion path claimed the responsibility of cleaning up a group before
+ * the queue cleanup code got to the group.
+ *
+ * Do not call synchronize_rcu() unconditionally, as there are drivers
+ * which create/delete request queues hundreds of times during scan/boot
+ * and synchronize_rcu() can take significant time and slow down boot.
+ */
+ if (wait)
+ synchronize_rcu();
+
+ /*
+ * Just to be safe: if, after the previous flush, somebody updated
+ * limits through the cgroup and another work item got queued, cancel
+ * it.
+ */
+ throtl_shutdown_wq(q);
+}
+
+void blk_throtl_release(struct request_queue *q)
+{
+ kfree(q->td);
+}
+
+static int __init throtl_init(void)
+{
+ kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
+ if (!kthrotld_workqueue)
+ panic("Failed to create kthrotld\n");
+
+ blkio_policy_register(&blkio_policy_throtl);
+ return 0;
+}
+
+module_init(throtl_init);
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
new file mode 100644
index 00000000..78035488
--- /dev/null
+++ b/block/blk-timeout.c
@@ -0,0 +1,240 @@
+/*
+ * Functions related to generic timeout handling of requests.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/fault-inject.h>
+
+#include "blk.h"
+
+#ifdef CONFIG_FAIL_IO_TIMEOUT
+
+static DECLARE_FAULT_ATTR(fail_io_timeout);
+
+static int __init setup_fail_io_timeout(char *str)
+{
+ return setup_fault_attr(&fail_io_timeout, str);
+}
+__setup("fail_io_timeout=", setup_fail_io_timeout);
+
+int blk_should_fake_timeout(struct request_queue *q)
+{
+ if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
+ return 0;
+
+ return should_fail(&fail_io_timeout, 1);
+}
+
+static int __init fail_io_timeout_debugfs(void)
+{
+ struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
+ NULL, &fail_io_timeout);
+
+ return IS_ERR(dir) ? PTR_ERR(dir) : 0;
+}
+
+late_initcall(fail_io_timeout_debugfs);
+
+ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);
+
+ return sprintf(buf, "%d\n", set != 0);
+}
+
+ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ int val;
+
+ if (count) {
+ struct request_queue *q = disk->queue;
+ char *p = (char *) buf;
+
+ val = simple_strtoul(p, &p, 10);
+ spin_lock_irq(q->queue_lock);
+ if (val)
+ queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
+ else
+ queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
+ spin_unlock_irq(q->queue_lock);
+ }
+
+ return count;
+}
+
+#endif /* CONFIG_FAIL_IO_TIMEOUT */
+
+/*
+ * blk_delete_timer - Delete/cancel timer for a given request.
+ * @req: request that we are canceling timer for
+ *
+ */
+void blk_delete_timer(struct request *req)
+{
+ list_del_init(&req->timeout_list);
+}
+
+static void blk_rq_timed_out(struct request *req)
+{
+ struct request_queue *q = req->q;
+ enum blk_eh_timer_return ret;
+
+ ret = q->rq_timed_out_fn(req);
+ switch (ret) {
+ case BLK_EH_HANDLED:
+ __blk_complete_request(req);
+ break;
+ case BLK_EH_RESET_TIMER:
+ blk_clear_rq_complete(req);
+ blk_add_timer(req);
+ break;
+ case BLK_EH_NOT_HANDLED:
+ /*
+ * LLD handles this for now but in the future
+ * we can send a request msg to abort the command
+ * and we can move more of the generic scsi eh code to
+ * the blk layer.
+ */
+ break;
+ default:
+ printk(KERN_ERR "block: bad eh return: %d\n", ret);
+ break;
+ }
+}
+
+void blk_rq_timed_out_timer(unsigned long data)
+{
+ struct request_queue *q = (struct request_queue *) data;
+ unsigned long flags, next = 0;
+ struct request *rq, *tmp;
+ int next_set = 0;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
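+ /* Expire requests past their deadline and remember the earliest remaining one */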
+ list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
+ if (time_after_eq(jiffies, rq->deadline)) {
+ list_del_init(&rq->timeout_list);
+
+ /*
+ * Check if we raced with end io completion
+ */
+ if (blk_mark_rq_complete(rq))
+ continue;
+ blk_rq_timed_out(rq);
+ } else if (!next_set || time_after(next, rq->deadline)) {
+ next = rq->deadline;
+ next_set = 1;
+ }
+ }
+
+ if (next_set)
+ mod_timer(&q->timeout, round_jiffies_up(next));
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/**
+ * blk_abort_request -- Request recovery for the specified command
+ * @req: pointer to the request of interest
+ *
+ * This function requests that the block layer start recovery for the
+ * request by deleting the timer and calling the q's timeout function.
+ * LLDDs who implement their own error recovery MAY ignore the timeout
+ * event if they generated blk_abort_req. Must hold queue lock.
+ */
+void blk_abort_request(struct request *req)
+{
+ if (blk_mark_rq_complete(req))
+ return;
+ blk_delete_timer(req);
+ blk_rq_timed_out(req);
+}
+EXPORT_SYMBOL_GPL(blk_abort_request);
+
+/**
+ * blk_add_timer - Start timeout timer for a single request
+ * @req: request that is about to start running.
+ *
+ * Notes:
+ * Each request has its own timer, and as it is added to the queue, we
+ * set up the timer. When the request completes, we cancel the timer.
+ */
+void blk_add_timer(struct request *req)
+{
+ struct request_queue *q = req->q;
+ unsigned long expiry;
+
+ if (!q->rq_timed_out_fn)
+ return;
+
+ BUG_ON(!list_empty(&req->timeout_list));
+ BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
+
+ /*
+ * Some LLDs, like scsi, peek at the timeout to prevent a
+ * command from being retried forever.
+ */
+ if (!req->timeout)
+ req->timeout = q->rq_timeout;
+
+ req->deadline = jiffies + req->timeout;
+ list_add_tail(&req->timeout_list, &q->timeout_list);
+
+ /*
+ * If the timer isn't already pending or this timeout is earlier
+ * than an existing one, modify the timer. Round up to next nearest
+ * second.
+ */
+ expiry = round_jiffies_up(req->deadline);
+
+ if (!timer_pending(&q->timeout) ||
+ time_before(expiry, q->timeout.expires))
+ mod_timer(&q->timeout, expiry);
+}
+
+/**
+ * blk_abort_queue -- Abort all requests on a given queue
+ * @queue: pointer to queue
+ *
+ */
+void blk_abort_queue(struct request_queue *q)
+{
+ unsigned long flags;
+ struct request *rq, *tmp;
+ LIST_HEAD(list);
+
+ /*
+ * Not a request based block device, nothing to abort
+ */
+ if (!q->request_fn)
+ return;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ elv_abort_queue(q);
+
+ /*
+ * Splice entries to local list, to avoid deadlocking if entries
+ * get readded to the timeout list by error handling
+ */
+ list_splice_init(&q->timeout_list, &list);
+
+ list_for_each_entry_safe(rq, tmp, &list, timeout_list)
+ blk_abort_request(rq);
+
+ /*
+ * Occasionally, blk_abort_request() will return without
+ * deleting the element from the list. Make sure we add those back
+ * instead of leaving them on the local stack list.
+ */
+ list_splice(&list, &q->timeout_list);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+}
+EXPORT_SYMBOL_GPL(blk_abort_queue);
diff --git a/block/blk.h b/block/blk.h
new file mode 100644
index 00000000..d45be871
--- /dev/null
+++ b/block/blk.h
@@ -0,0 +1,237 @@
+#ifndef BLK_INTERNAL_H
+#define BLK_INTERNAL_H
+
+#include <linux/idr.h>
+
+/* Amount of time in which a process may batch requests */
+#define BLK_BATCH_TIME (HZ/50UL)
+
+/* Number of requests a "batching" process may submit */
+#define BLK_BATCH_REQ 32
+
+extern struct kmem_cache *blk_requestq_cachep;
+extern struct kobj_type blk_queue_ktype;
+extern struct ida blk_queue_ida;
+
+static inline void __blk_get_queue(struct request_queue *q)
+{
+ kobject_get(&q->kobj);
+}
+
+void init_request_from_bio(struct request *req, struct bio *bio);
+void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+ struct bio *bio);
+int blk_rq_append_bio(struct request_queue *q, struct request *rq,
+ struct bio *bio);
+void blk_drain_queue(struct request_queue *q, bool drain_all);
+void blk_dequeue_request(struct request *rq);
+void __blk_queue_free_tags(struct request_queue *q);
+bool __blk_end_bidi_request(struct request *rq, int error,
+ unsigned int nr_bytes, unsigned int bidi_bytes);
+
+void blk_rq_timed_out_timer(unsigned long data);
+void blk_delete_timer(struct request *);
+void blk_add_timer(struct request *);
+void __generic_unplug_device(struct request_queue *);
+
+/*
+ * Internal atomic flags for request handling
+ */
+enum rq_atomic_flags {
+ REQ_ATOM_COMPLETE = 0,
+};
+
+/*
+ * EH timer and IO completion will both attempt to 'grab' the request;
+ * make sure that only one of them succeeds.
+ */
+static inline int blk_mark_rq_complete(struct request *rq)
+{
+ return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+}
+
+static inline void blk_clear_rq_complete(struct request *rq)
+{
+ clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+}
+
+/*
+ * Internal elevator interface
+ */
+#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
+
+void blk_insert_flush(struct request *rq);
+void blk_abort_flushes(struct request_queue *q);
+
+static inline struct request *__elv_next_request(struct request_queue *q)
+{
+ struct request *rq;
+
+ while (1) {
+ if (!list_empty(&q->queue_head)) {
+ rq = list_entry_rq(q->queue_head.next);
+ return rq;
+ }
+
+ /*
+ * A flush request is running and flush requests aren't queueable
+ * in the drive, so we can hold the queue until the flush request is
+ * finished. Even if we don't do this, the driver can't dispatch the next
+ * requests and will requeue them. And this can improve
+ * throughput too. For example, we have requests flush1, write1,
+ * flush2. flush1 is dispatched, then the queue is held, and write1
+ * isn't inserted into the queue. After flush1 is finished, flush2
+ * will be dispatched. Since the disk cache is already clean,
+ * flush2 will finish very soon, so it looks like flush2 was
+ * folded into flush1.
+ * Since the queue is held, a flag is set to indicate the queue
+ * should be restarted later. Please see flush_end_io() for
+ * details.
+ */
+ if (q->flush_pending_idx != q->flush_running_idx &&
+ !queue_flush_queueable(q)) {
+ q->flush_queue_delayed = 1;
+ return NULL;
+ }
+ if (unlikely(blk_queue_dead(q)) ||
+ !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
+ return NULL;
+ }
+}
+
+static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->type->ops.elevator_activate_req_fn)
+ e->type->ops.elevator_activate_req_fn(q, rq);
+}
+
+static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->type->ops.elevator_deactivate_req_fn)
+ e->type->ops.elevator_deactivate_req_fn(q, rq);
+}
+
+#ifdef CONFIG_FAIL_IO_TIMEOUT
+int blk_should_fake_timeout(struct request_queue *);
+ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
+ssize_t part_timeout_store(struct device *, struct device_attribute *,
+ const char *, size_t);
+#else
+static inline int blk_should_fake_timeout(struct request_queue *q)
+{
+ return 0;
+}
+#endif
+
+int ll_back_merge_fn(struct request_queue *q, struct request *req,
+ struct bio *bio);
+int ll_front_merge_fn(struct request_queue *q, struct request *req,
+ struct bio *bio);
+int attempt_back_merge(struct request_queue *q, struct request *rq);
+int attempt_front_merge(struct request_queue *q, struct request *rq);
+int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+ struct request *next);
+void blk_recalc_rq_segments(struct request *rq);
+void blk_rq_set_mixed_merge(struct request *rq);
+bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
+int blk_try_merge(struct request *rq, struct bio *bio);
+
+void blk_queue_congestion_threshold(struct request_queue *q);
+
+int blk_dev_init(void);
+
+void elv_quiesce_start(struct request_queue *q);
+void elv_quiesce_end(struct request_queue *q);
+
+
+/*
+ * Return the threshold (number of used requests) at which the queue is
+ * considered to be congested. It includes a little hysteresis to keep the
+ * context switch rate down.
+ */
+static inline int queue_congestion_on_threshold(struct request_queue *q)
+{
+ return q->nr_congestion_on;
+}
+
+/*
+ * The threshold at which a queue is considered to be uncongested
+ */
+static inline int queue_congestion_off_threshold(struct request_queue *q)
+{
+ return q->nr_congestion_off;
+}
+
+/*
+ * Contribute to IO statistics IFF:
+ *
+ * a) it's attached to a gendisk, and
+ * b) the queue had IO stats enabled when this request was started, and
+ * c) it's a file system request or a discard request
+ */
+static inline int blk_do_io_stat(struct request *rq)
+{
+ return rq->rq_disk &&
+ (rq->cmd_flags & REQ_IO_STAT) &&
+ (rq->cmd_type == REQ_TYPE_FS ||
+ (rq->cmd_flags & REQ_DISCARD));
+}
+
+/*
+ * Internal io_context interface
+ */
+void get_io_context(struct io_context *ioc);
+struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
+struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
+void ioc_clear_queue(struct request_queue *q);
+
+void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
+ int node);
+
+/**
+ * create_io_context - try to create task->io_context
+ * @task: target task
+ * @gfp_mask: allocation mask
+ * @node: allocation node
+ *
+ * If @task->io_context is %NULL, allocate a new io_context and install it.
+ * Returns the current @task->io_context which may be %NULL if allocation
+ * failed.
+ *
+ * Note that this function can't be called with IRQ disabled because
+ * task_lock which protects @task->io_context is IRQ-unsafe.
+ */
+static inline struct io_context *create_io_context(struct task_struct *task,
+ gfp_t gfp_mask, int node)
+{
+ WARN_ON_ONCE(irqs_disabled());
+ if (unlikely(!task->io_context))
+ create_io_context_slowpath(task, gfp_mask, node);
+ return task->io_context;
+}
+
+/*
+ * Internal throttling interface
+ */
+#ifdef CONFIG_BLK_DEV_THROTTLING
+extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
+extern void blk_throtl_drain(struct request_queue *q);
+extern int blk_throtl_init(struct request_queue *q);
+extern void blk_throtl_exit(struct request_queue *q);
+extern void blk_throtl_release(struct request_queue *q);
+#else /* CONFIG_BLK_DEV_THROTTLING */
+static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
+{
+ return false;
+}
+static inline void blk_throtl_drain(struct request_queue *q) { }
+static inline int blk_throtl_init(struct request_queue *q) { return 0; }
+static inline void blk_throtl_exit(struct request_queue *q) { }
+static inline void blk_throtl_release(struct request_queue *q) { }
+#endif /* CONFIG_BLK_DEV_THROTTLING */
+
+#endif /* BLK_INTERNAL_H */
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
new file mode 100644
index 00000000..7ad49c88
--- /dev/null
+++ b/block/bsg-lib.c
@@ -0,0 +1,298 @@
+/*
+ * BSG helper library
+ *
+ * Copyright (C) 2008 James Smart, Emulex Corporation
+ * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2011 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/bsg-lib.h>
+#include <linux/export.h>
+#include <scsi/scsi_cmnd.h>
+
+/**
+ * bsg_destroy_job - routine to teardown/delete a bsg job
+ * @job: bsg_job that is to be torn down
+ */
+static void bsg_destroy_job(struct bsg_job *job)
+{
+ put_device(job->dev); /* release reference for the request */
+
+ kfree(job->request_payload.sg_list);
+ kfree(job->reply_payload.sg_list);
+ kfree(job);
+}
+
+/**
+ * bsg_job_done - completion routine for bsg requests
+ * @job: bsg_job that is complete
+ * @result: job reply result
+ * @reply_payload_rcv_len: length of payload recvd
+ *
+ * The LLD should call this when the bsg job has completed.
+ */
+void bsg_job_done(struct bsg_job *job, int result,
+ unsigned int reply_payload_rcv_len)
+{
+ struct request *req = job->req;
+ struct request *rsp = req->next_rq;
+ int err;
+
+ err = job->req->errors = result;
+ if (err < 0)
+ /* we're only returning the result field in the reply */
+ job->req->sense_len = sizeof(u32);
+ else
+ job->req->sense_len = job->reply_len;
+ /* we assume all request payload was transferred, residual == 0 */
+ req->resid_len = 0;
+
+ if (rsp) {
+ WARN_ON(reply_payload_rcv_len > rsp->resid_len);
+
+ /* set reply (bidi) residual */
+ rsp->resid_len -= min(reply_payload_rcv_len, rsp->resid_len);
+ }
+ blk_complete_request(req);
+}
+EXPORT_SYMBOL_GPL(bsg_job_done);
+
+/**
+ * bsg_softirq_done - softirq done routine for destroying the bsg requests
+ * @rq: BSG request that holds the job to be destroyed
+ */
+static void bsg_softirq_done(struct request *rq)
+{
+ struct bsg_job *job = rq->special;
+
+ blk_end_request_all(rq, rq->errors);
+ bsg_destroy_job(job);
+}
+
+static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
+{
+ size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);
+
+ BUG_ON(!req->nr_phys_segments);
+
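+ /* One scatterlist entry per physical segment of the request */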
+ buf->sg_list = kzalloc(sz, GFP_KERNEL);
+ if (!buf->sg_list)
+ return -ENOMEM;
+ sg_init_table(buf->sg_list, req->nr_phys_segments);
+ buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
+ buf->payload_len = blk_rq_bytes(req);
+ return 0;
+}
+
+/**
+ * bsg_create_job - create the bsg_job structure for the bsg request
+ * @dev: device that is being sent the bsg request
+ * @req: BSG request that needs a job structure
+ */
+static int bsg_create_job(struct device *dev, struct request *req)
+{
+ struct request *rsp = req->next_rq;
+ struct request_queue *q = req->q;
+ struct bsg_job *job;
+ int ret;
+
+ BUG_ON(req->special);
+
+ job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL);
+ if (!job)
+ return -ENOMEM;
+
+ req->special = job;
+ job->req = req;
+ if (q->bsg_job_size)
+ job->dd_data = (void *)&job[1];
+ job->request = req->cmd;
+ job->request_len = req->cmd_len;
+ job->reply = req->sense;
+ job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
+ * allocated */
+ if (req->bio) {
+ ret = bsg_map_buffer(&job->request_payload, req);
+ if (ret)
+ goto failjob_rls_job;
+ }
+ if (rsp && rsp->bio) {
+ ret = bsg_map_buffer(&job->reply_payload, rsp);
+ if (ret)
+ goto failjob_rls_rqst_payload;
+ }
+ job->dev = dev;
+ /* take a reference for the request */
+ get_device(job->dev);
+ return 0;
+
+failjob_rls_rqst_payload:
+ kfree(job->request_payload.sg_list);
+failjob_rls_job:
+ kfree(job);
+ return -ENOMEM;
+}
+
+/*
+ * bsg_goose_queue - restart queue in case it was stopped
+ * @q: request q to be restarted
+ */
+void bsg_goose_queue(struct request_queue *q)
+{
+ if (!q)
+ return;
+
+ blk_run_queue_async(q);
+}
+EXPORT_SYMBOL_GPL(bsg_goose_queue);
+
+/**
+ * bsg_request_fn - generic handler for bsg requests
+ * @q: request queue to manage
+ *
+ * On error the create_bsg_job function should return a -Exyz error value
+ * that will be set in req->errors.
+ *
+ * Drivers/subsys should pass this to the queue init function.
+ */
+void bsg_request_fn(struct request_queue *q)
+{
+ struct device *dev = q->queuedata;
+ struct request *req;
+ struct bsg_job *job;
+ int ret;
+
+ if (!get_device(dev))
+ return;
+
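+ /* Fetch requests one at a time, dropping the queue lock while building each job */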
+ while (1) {
+ req = blk_fetch_request(q);
+ if (!req)
+ break;
+ spin_unlock_irq(q->queue_lock);
+
+ ret = bsg_create_job(dev, req);
+ if (ret) {
+ req->errors = ret;
+ blk_end_request_all(req, ret);
+ spin_lock_irq(q->queue_lock);
+ continue;
+ }
+
+ job = req->special;
+ ret = q->bsg_job_fn(job);
+ spin_lock_irq(q->queue_lock);
+ if (ret)
+ break;
+ }
+
+ spin_unlock_irq(q->queue_lock);
+ put_device(dev);
+ spin_lock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(bsg_request_fn);
+
+/**
+ * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
+ * @dev: device to attach bsg device to
+ * @q: request queue setup by caller
+ * @name: device to give bsg device
+ * @job_fn: bsg job handler
+ * @dd_job_size: size of LLD data needed for each job
+ *
+ * The caller should have set up the request queue with bsg_request_fn
+ * as the request_fn.
+ */
+int bsg_setup_queue(struct device *dev, struct request_queue *q,
+ char *name, bsg_job_fn *job_fn, int dd_job_size)
+{
+ int ret;
+
+ q->queuedata = dev;
+ q->bsg_job_size = dd_job_size;
+ q->bsg_job_fn = job_fn;
+ queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
+ blk_queue_softirq_done(q, bsg_softirq_done);
+ blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
+
+ ret = bsg_register_queue(q, dev, name, NULL);
+ if (ret) {
+ printk(KERN_ERR "%s: bsg interface failed to "
+ "initialize - register queue\n", dev->kobj.name);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bsg_setup_queue);
+
+/**
+ * bsg_remove_queue - Deletes the bsg dev from the q
+ * @q: the request_queue that is to be torn down.
+ *
+ * Notes:
+ * Before unregistering the queue empty any requests that are blocked
+ */
+void bsg_remove_queue(struct request_queue *q)
+{
+ struct request *req; /* block request */
+ int counts; /* totals for request_list count and starved */
+
+ if (!q)
+ return;
+
+ /* Stop taking in new requests */
+ spin_lock_irq(q->queue_lock);
+ blk_stop_queue(q);
+
+ /* drain all requests in the queue */
+ while (1) {
+ /* need the lock to fetch a request;
+ * this may fetch the same request as the previous pass
+ */
+ req = blk_fetch_request(q);
+ /* save requests in use and starved */
+ counts = q->rq.count[0] + q->rq.count[1] +
+ q->rq.starved[0] + q->rq.starved[1];
+ spin_unlock_irq(q->queue_lock);
+ /* any requests still outstanding? */
+ if (counts == 0)
+ break;
+
+ /* This may be the same req as in the previous iteration;
+ * always call blk_end_request_all after a prefetch.
+ * It is not okay to not end the request because the
+ * prefetch started the request.
+ */
+ if (req) {
+ /* return -ENXIO to indicate that this queue is
+ * going away
+ */
+ req->errors = -ENXIO;
+ blk_end_request_all(req, -ENXIO);
+ }
+
+ msleep(200); /* allow bsg to possibly finish */
+ spin_lock_irq(q->queue_lock);
+ }
+ bsg_unregister_queue(q);
+}
+EXPORT_SYMBOL_GPL(bsg_remove_queue);
diff --git a/block/bsg.c b/block/bsg.c
new file mode 100644
index 00000000..ff64ae3b
--- /dev/null
+++ b/block/bsg.c
@@ -0,0 +1,1126 @@
+/*
+ * bsg.c - block layer implementation of the sg v4 interface
+ *
+ * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
+ * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License version 2. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/file.h>
+#include <linux/blkdev.h>
+#include <linux/poll.h>
+#include <linux/cdev.h>
+#include <linux/jiffies.h>
+#include <linux/percpu.h>
+#include <linux/uio.h>
+#include <linux/idr.h>
+#include <linux/bsg.h>
+#include <linux/slab.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/sg.h>
+
+#define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver"
+#define BSG_VERSION "0.4"
+
+struct bsg_device {
+ struct request_queue *queue;
+ spinlock_t lock;
+ struct list_head busy_list;
+ struct list_head done_list;
+ struct hlist_node dev_list;
+ atomic_t ref_count;
+ int queued_cmds;
+ int done_cmds;
+ wait_queue_head_t wq_done;
+ wait_queue_head_t wq_free;
+ char name[20];
+ int max_queue;
+ unsigned long flags;
+};
+
+enum {
+ BSG_F_BLOCK = 1,
+};
+
+#define BSG_DEFAULT_CMDS 64
+#define BSG_MAX_DEVS 32768
+
+#undef BSG_DEBUG
+
+#ifdef BSG_DEBUG
+#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
+#else
+#define dprintk(fmt, args...)
+#endif
+
+static DEFINE_MUTEX(bsg_mutex);
+static DEFINE_IDR(bsg_minor_idr);
+
+#define BSG_LIST_ARRAY_SIZE 8
+static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];
+
+static struct class *bsg_class;
+static int bsg_major;
+
+static struct kmem_cache *bsg_cmd_cachep;
+
+/*
+ * our internal command type
+ */
+struct bsg_command {
+ struct bsg_device *bd;
+ struct list_head list;
+ struct request *rq;
+ struct bio *bio;
+ struct bio *bidi_bio;
+ int err;
+ struct sg_io_v4 hdr;
+ char sense[SCSI_SENSE_BUFFERSIZE];
+};
+
+static void bsg_free_command(struct bsg_command *bc)
+{
+ struct bsg_device *bd = bc->bd;
+ unsigned long flags;
+
+ kmem_cache_free(bsg_cmd_cachep, bc);
+
+ spin_lock_irqsave(&bd->lock, flags);
+ bd->queued_cmds--;
+ spin_unlock_irqrestore(&bd->lock, flags);
+
+ wake_up(&bd->wq_free);
+}
+
+static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
+{
+ struct bsg_command *bc = ERR_PTR(-EINVAL);
+
+ spin_lock_irq(&bd->lock);
+
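+ /* Respect the per-device outstanding command limit (bd->max_queue) */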
+ if (bd->queued_cmds >= bd->max_queue)
+ goto out;
+
+ bd->queued_cmds++;
+ spin_unlock_irq(&bd->lock);
+
+ bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
+ if (unlikely(!bc)) {
+ spin_lock_irq(&bd->lock);
+ bd->queued_cmds--;
+ bc = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ bc->bd = bd;
+ INIT_LIST_HEAD(&bc->list);
+ dprintk("%s: returning free cmd %p\n", bd->name, bc);
+ return bc;
+out:
+ spin_unlock_irq(&bd->lock);
+ return bc;
+}
+
+static inline struct hlist_head *bsg_dev_idx_hash(int index)
+{
+ return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
+}
+
+static int bsg_io_schedule(struct bsg_device *bd)
+{
+ DEFINE_WAIT(wait);
+ int ret = 0;
+
+ spin_lock_irq(&bd->lock);
+
+ BUG_ON(bd->done_cmds > bd->queued_cmds);
+
+ /*
+ * -ENOSPC or -ENODATA? I'm going for -ENODATA, meaning "I have no
+ * work to do", even though we return -ENOSPC after this same test
+ * during bsg_write() -- there, it means our buffer can't have more
+ * bsg_commands added to it, thus has no space left.
+ */
+ if (bd->done_cmds == bd->queued_cmds) {
+ ret = -ENODATA;
+ goto unlock;
+ }
+
+ if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_irq(&bd->lock);
+ io_schedule();
+ finish_wait(&bd->wq_done, &wait);
+
+ return ret;
+unlock:
+ spin_unlock_irq(&bd->lock);
+ return ret;
+}
+
+static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
+ struct sg_io_v4 *hdr, struct bsg_device *bd,
+ fmode_t has_write_perm)
+{
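+ /* CDBs longer than BLK_MAX_CDB need a separately allocated command buffer */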
+ if (hdr->request_len > BLK_MAX_CDB) {
+ rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
+ if (!rq->cmd)
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
+ hdr->request_len))
+ return -EFAULT;
+
+ if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
+ if (blk_verify_command(rq->cmd, has_write_perm))
+ return -EPERM;
+ } else if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ /*
+ * fill in request structure
+ */
+ rq->cmd_len = hdr->request_len;
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+
+ rq->timeout = msecs_to_jiffies(hdr->timeout);
+ if (!rq->timeout)
+ rq->timeout = q->sg_timeout;
+ if (!rq->timeout)
+ rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+ if (rq->timeout < BLK_MIN_SG_TIMEOUT)
+ rq->timeout = BLK_MIN_SG_TIMEOUT;
+
+ return 0;
+}
+
+/*
+ * Check if sg_io_v4 from user is allowed and valid
+ */
+static int
+bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
+{
+ int ret = 0;
+
+ if (hdr->guard != 'Q')
+ return -EINVAL;
+
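+ /* Only SCSI commands and SCSI transport messages are accepted */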
+ switch (hdr->protocol) {
+ case BSG_PROTOCOL_SCSI:
+ switch (hdr->subprotocol) {
+ case BSG_SUB_PROTOCOL_SCSI_CMD:
+ case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ *rw = hdr->dout_xfer_len ? WRITE : READ;
+ return ret;
+}
+
+/*
+ * map sg_io_v4 to a request.
+ */
+static struct request *
+bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
+ u8 *sense)
+{
+ struct request_queue *q = bd->queue;
+ struct request *rq, *next_rq = NULL;
+ int ret, rw;
+ unsigned int dxfer_len;
+ void __user *dxferp = NULL;
+ struct bsg_class_device *bcd = &q->bsg_dev;
+
+ /* if the LLD has been removed then bsg_unregister_queue will
+ * eventually be called and the class_dev freed, so we can no
+ * longer use this request_queue. Return "no such address".
+ */
+ if (!bcd->class_dev)
+ return ERR_PTR(-ENXIO);
+
+ dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
+ hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
+ hdr->din_xfer_len);
+
+ ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+ * map scatter-gather elements separately and string them onto the request
+ */
+ rq = blk_get_request(q, rw, GFP_KERNEL);
+ if (!rq)
+ return ERR_PTR(-ENOMEM);
+ ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
+ if (ret)
+ goto out;
+
+ if (rw == WRITE && hdr->din_xfer_len) {
+ if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ next_rq = blk_get_request(q, READ, GFP_KERNEL);
+ if (!next_rq) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ rq->next_rq = next_rq;
+ next_rq->cmd_type = rq->cmd_type;
+
+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
+ ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
+ hdr->din_xfer_len, GFP_KERNEL);
+ if (ret)
+ goto out;
+ }
+
+ if (hdr->dout_xfer_len) {
+ dxfer_len = hdr->dout_xfer_len;
+ dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
+ } else if (hdr->din_xfer_len) {
+ dxfer_len = hdr->din_xfer_len;
+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
+ } else
+ dxfer_len = 0;
+
+ if (dxfer_len) {
+ ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
+ GFP_KERNEL);
+ if (ret)
+ goto out;
+ }
+
+ rq->sense = sense;
+ rq->sense_len = 0;
+
+ return rq;
+out:
+ if (rq->cmd != rq->__cmd)
+ kfree(rq->cmd);
+ blk_put_request(rq);
+ if (next_rq) {
+ blk_rq_unmap_user(next_rq->bio);
+ blk_put_request(next_rq);
+ }
+ return ERR_PTR(ret);
+}
+
+/*
+ * async completion call-back from the block layer, when scsi/ide/whatever
+ * calls end_that_request_last() on a request
+ */
+static void bsg_rq_end_io(struct request *rq, int uptodate)
+{
+ struct bsg_command *bc = rq->end_io_data;
+ struct bsg_device *bd = bc->bd;
+ unsigned long flags;
+
+ dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
+ bd->name, rq, bc, bc->bio, uptodate);
+
+ bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
+
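+ /* Move the command from the busy list to the done list and wake any reader */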
+ spin_lock_irqsave(&bd->lock, flags);
+ list_move_tail(&bc->list, &bd->done_list);
+ bd->done_cmds++;
+ spin_unlock_irqrestore(&bd->lock, flags);
+
+ wake_up(&bd->wq_done);
+}
+
+/*
+ * do final setup of a 'bc' and submit the matching 'rq' to the block
+ * layer for io
+ */
+static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
+ struct bsg_command *bc, struct request *rq)
+{
+ int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));
+
+ /*
+ * add bc command to busy queue and submit rq for io
+ */
+ bc->rq = rq;
+ bc->bio = rq->bio;
+ if (rq->next_rq)
+ bc->bidi_bio = rq->next_rq->bio;
+ bc->hdr.duration = jiffies;
+ spin_lock_irq(&bd->lock);
+ list_add_tail(&bc->list, &bd->busy_list);
+ spin_unlock_irq(&bd->lock);
+
+ dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);
+
+ rq->end_io_data = bc;
+ blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
+}
+
+static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
+{
+ struct bsg_command *bc = NULL;
+
+ spin_lock_irq(&bd->lock);
+ if (bd->done_cmds) {
+ bc = list_first_entry(&bd->done_list, struct bsg_command, list);
+ list_del(&bc->list);
+ bd->done_cmds--;
+ }
+ spin_unlock_irq(&bd->lock);
+
+ return bc;
+}
+
+/*
+ * Get a finished command from the done list
+ */
+static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
+{
+ struct bsg_command *bc;
+ int ret;
+
+ do {
+ bc = bsg_next_done_cmd(bd);
+ if (bc)
+ break;
+
+ if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
+ bc = ERR_PTR(-EAGAIN);
+ break;
+ }
+
+ ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
+ if (ret) {
+ bc = ERR_PTR(-ERESTARTSYS);
+ break;
+ }
+ } while (1);
+
+ dprintk("%s: returning done %p\n", bd->name, bc);
+
+ return bc;
+}
+
+static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
+ struct bio *bio, struct bio *bidi_bio)
+{
+ int ret = 0;
+
+ dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
+ /*
+ * fill in all the output members
+ */
+ hdr->device_status = rq->errors & 0xff;
+ hdr->transport_status = host_byte(rq->errors);
+ hdr->driver_status = driver_byte(rq->errors);
+ hdr->info = 0;
+ if (hdr->device_status || hdr->transport_status || hdr->driver_status)
+ hdr->info |= SG_INFO_CHECK;
+ hdr->response_len = 0;
+
+ if (rq->sense_len && hdr->response) {
+ int len = min_t(unsigned int, hdr->max_response_len,
+ rq->sense_len);
+
+ ret = copy_to_user((void __user *)(unsigned long)hdr->response,
+ rq->sense, len);
+ if (!ret)
+ hdr->response_len = len;
+ else
+ ret = -EFAULT;
+ }
+
+ if (rq->next_rq) {
+ hdr->dout_resid = rq->resid_len;
+ hdr->din_resid = rq->next_rq->resid_len;
+ blk_rq_unmap_user(bidi_bio);
+ blk_put_request(rq->next_rq);
+ } else if (rq_data_dir(rq) == READ)
+ hdr->din_resid = rq->resid_len;
+ else
+ hdr->dout_resid = rq->resid_len;
+
+ /*
+ * If the request generated a negative error number, return it
+	 * (provided we aren't already returning an error); if it's
+	 * just a protocol response (i.e. non-negative), that gets
+ * processed above.
+ */
+ if (!ret && rq->errors < 0)
+ ret = rq->errors;
+
+ blk_rq_unmap_user(bio);
+ if (rq->cmd != rq->__cmd)
+ kfree(rq->cmd);
+ blk_put_request(rq);
+
+ return ret;
+}
+
+static int bsg_complete_all_commands(struct bsg_device *bd)
+{
+ struct bsg_command *bc;
+ int ret, tret;
+
+ dprintk("%s: entered\n", bd->name);
+
+ /*
+ * wait for all commands to complete
+ */
+ ret = 0;
+ do {
+ ret = bsg_io_schedule(bd);
+ /*
+ * look for -ENODATA specifically -- we'll sometimes get
+ * -ERESTARTSYS when we've taken a signal, but we can't
+ * return until we're done freeing the queue, so ignore
+ * it. The signal will get handled when we're done freeing
+ * the bsg_device.
+ */
+ } while (ret != -ENODATA);
+
+ /*
+ * discard done commands
+ */
+ ret = 0;
+ do {
+ spin_lock_irq(&bd->lock);
+ if (!bd->queued_cmds) {
+ spin_unlock_irq(&bd->lock);
+ break;
+ }
+ spin_unlock_irq(&bd->lock);
+
+ bc = bsg_get_done_cmd(bd);
+ if (IS_ERR(bc))
+ break;
+
+ tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
+ bc->bidi_bio);
+ if (!ret)
+ ret = tret;
+
+ bsg_free_command(bc);
+ } while (1);
+
+ return ret;
+}
+
+static int
+__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
+ const struct iovec *iov, ssize_t *bytes_read)
+{
+ struct bsg_command *bc;
+ int nr_commands, ret;
+
+ if (count % sizeof(struct sg_io_v4))
+ return -EINVAL;
+
+ ret = 0;
+ nr_commands = count / sizeof(struct sg_io_v4);
+ while (nr_commands) {
+ bc = bsg_get_done_cmd(bd);
+ if (IS_ERR(bc)) {
+ ret = PTR_ERR(bc);
+ break;
+ }
+
+ /*
+ * this is the only case where we need to copy data back
+		 * after completing the request, so do that here;
+		 * bsg_complete_work() cannot do that for us
+ */
+ ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
+ bc->bidi_bio);
+
+ if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
+ ret = -EFAULT;
+
+ bsg_free_command(bc);
+
+ if (ret)
+ break;
+
+ buf += sizeof(struct sg_io_v4);
+ *bytes_read += sizeof(struct sg_io_v4);
+ nr_commands--;
+ }
+
+ return ret;
+}
+
+static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
+{
+ if (file->f_flags & O_NONBLOCK)
+ clear_bit(BSG_F_BLOCK, &bd->flags);
+ else
+ set_bit(BSG_F_BLOCK, &bd->flags);
+}
+
+/*
+ * Check if the error is a "real" error that we should return.
+ */
+static inline int err_block_err(int ret)
+{
+ if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
+ return 1;
+
+ return 0;
+}
+
+static ssize_t
+bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ struct bsg_device *bd = file->private_data;
+ int ret;
+ ssize_t bytes_read;
+
+ dprintk("%s: read %Zd bytes\n", bd->name, count);
+
+ bsg_set_block(bd, file);
+
+ bytes_read = 0;
+ ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
+ *ppos = bytes_read;
+
+ if (!bytes_read || err_block_err(ret))
+ bytes_read = ret;
+
+ return bytes_read;
+}
+
+static int __bsg_write(struct bsg_device *bd, const char __user *buf,
+ size_t count, ssize_t *bytes_written,
+ fmode_t has_write_perm)
+{
+ struct bsg_command *bc;
+ struct request *rq;
+ int ret, nr_commands;
+
+ if (count % sizeof(struct sg_io_v4))
+ return -EINVAL;
+
+ nr_commands = count / sizeof(struct sg_io_v4);
+ rq = NULL;
+ bc = NULL;
+ ret = 0;
+ while (nr_commands) {
+ struct request_queue *q = bd->queue;
+
+ bc = bsg_alloc_command(bd);
+ if (IS_ERR(bc)) {
+ ret = PTR_ERR(bc);
+ bc = NULL;
+ break;
+ }
+
+ if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ /*
+ * get a request, fill in the blanks, and add to request queue
+ */
+ rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ rq = NULL;
+ break;
+ }
+
+ bsg_add_command(bd, q, bc, rq);
+ bc = NULL;
+ rq = NULL;
+ nr_commands--;
+ buf += sizeof(struct sg_io_v4);
+ *bytes_written += sizeof(struct sg_io_v4);
+ }
+
+ if (bc)
+ bsg_free_command(bc);
+
+ return ret;
+}
+
+static ssize_t
+bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct bsg_device *bd = file->private_data;
+ ssize_t bytes_written;
+ int ret;
+
+ dprintk("%s: write %Zd bytes\n", bd->name, count);
+
+ bsg_set_block(bd, file);
+
+ bytes_written = 0;
+ ret = __bsg_write(bd, buf, count, &bytes_written,
+ file->f_mode & FMODE_WRITE);
+
+ *ppos = bytes_written;
+
+ /*
+ * return bytes written on non-fatal errors
+ */
+ if (!bytes_written || err_block_err(ret))
+ bytes_written = ret;
+
+ dprintk("%s: returning %Zd\n", bd->name, bytes_written);
+ return bytes_written;
+}
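+
+/*
+ * Summary of the bsg char device protocol implemented above: each write()
+ * of one or more struct sg_io_v4 headers queues the commands asynchronously
+ * (via bsg_add_command()), each read() reaps completed headers from the
+ * done list, and the SG_IO ioctl below issues a single command and waits
+ * for it to complete.
+ */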
+
+static struct bsg_device *bsg_alloc_device(void)
+{
+ struct bsg_device *bd;
+
+ bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
+ if (unlikely(!bd))
+ return NULL;
+
+ spin_lock_init(&bd->lock);
+
+ bd->max_queue = BSG_DEFAULT_CMDS;
+
+ INIT_LIST_HEAD(&bd->busy_list);
+ INIT_LIST_HEAD(&bd->done_list);
+ INIT_HLIST_NODE(&bd->dev_list);
+
+ init_waitqueue_head(&bd->wq_free);
+ init_waitqueue_head(&bd->wq_done);
+ return bd;
+}
+
+static void bsg_kref_release_function(struct kref *kref)
+{
+ struct bsg_class_device *bcd =
+ container_of(kref, struct bsg_class_device, ref);
+ struct device *parent = bcd->parent;
+
+ if (bcd->release)
+ bcd->release(bcd->parent);
+
+ put_device(parent);
+}
+
+static int bsg_put_device(struct bsg_device *bd)
+{
+ int ret = 0, do_free;
+ struct request_queue *q = bd->queue;
+
+ mutex_lock(&bsg_mutex);
+
+ do_free = atomic_dec_and_test(&bd->ref_count);
+ if (!do_free) {
+ mutex_unlock(&bsg_mutex);
+ goto out;
+ }
+
+ hlist_del(&bd->dev_list);
+ mutex_unlock(&bsg_mutex);
+
+ dprintk("%s: tearing down\n", bd->name);
+
+ /*
+ * close can always block
+ */
+ set_bit(BSG_F_BLOCK, &bd->flags);
+
+ /*
+	 * Error reporting here is best effort only; it's the responsibility
+	 * of the app to properly reap commands before close() if it wants
+	 * fool-proof error detection.
+ */
+ ret = bsg_complete_all_commands(bd);
+
+ kfree(bd);
+out:
+ kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
+ if (do_free)
+ blk_put_queue(q);
+ return ret;
+}
+
+static struct bsg_device *bsg_add_device(struct inode *inode,
+ struct request_queue *rq,
+ struct file *file)
+{
+ struct bsg_device *bd;
+#ifdef BSG_DEBUG
+ unsigned char buf[32];
+#endif
+ if (!blk_get_queue(rq))
+ return ERR_PTR(-ENXIO);
+
+ bd = bsg_alloc_device();
+ if (!bd) {
+ blk_put_queue(rq);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ bd->queue = rq;
+
+ bsg_set_block(bd, file);
+
+ atomic_set(&bd->ref_count, 1);
+ mutex_lock(&bsg_mutex);
+ hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
+
+ strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
+ dprintk("bound to <%s>, max queue %d\n",
+ format_dev_t(buf, inode->i_rdev), bd->max_queue);
+
+ mutex_unlock(&bsg_mutex);
+ return bd;
+}
+
+static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
+{
+ struct bsg_device *bd;
+ struct hlist_node *entry;
+
+ mutex_lock(&bsg_mutex);
+
+ hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
+ if (bd->queue == q) {
+ atomic_inc(&bd->ref_count);
+ goto found;
+ }
+ }
+ bd = NULL;
+found:
+ mutex_unlock(&bsg_mutex);
+ return bd;
+}
+
+static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
+{
+ struct bsg_device *bd;
+ struct bsg_class_device *bcd;
+
+ /*
+ * find the class device
+ */
+ mutex_lock(&bsg_mutex);
+ bcd = idr_find(&bsg_minor_idr, iminor(inode));
+ if (bcd)
+ kref_get(&bcd->ref);
+ mutex_unlock(&bsg_mutex);
+
+ if (!bcd)
+ return ERR_PTR(-ENODEV);
+
+ bd = __bsg_get_device(iminor(inode), bcd->queue);
+ if (bd)
+ return bd;
+
+ bd = bsg_add_device(inode, bcd->queue, file);
+ if (IS_ERR(bd))
+ kref_put(&bcd->ref, bsg_kref_release_function);
+
+ return bd;
+}
+
+static int bsg_open(struct inode *inode, struct file *file)
+{
+ struct bsg_device *bd;
+
+ bd = bsg_get_device(inode, file);
+
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
+
+ file->private_data = bd;
+ return 0;
+}
+
+static int bsg_release(struct inode *inode, struct file *file)
+{
+ struct bsg_device *bd = file->private_data;
+
+ file->private_data = NULL;
+ return bsg_put_device(bd);
+}
+
+static unsigned int bsg_poll(struct file *file, poll_table *wait)
+{
+ struct bsg_device *bd = file->private_data;
+ unsigned int mask = 0;
+
+ poll_wait(file, &bd->wq_done, wait);
+ poll_wait(file, &bd->wq_free, wait);
+
+ spin_lock_irq(&bd->lock);
+ if (!list_empty(&bd->done_list))
+ mask |= POLLIN | POLLRDNORM;
+ if (bd->queued_cmds < bd->max_queue)
+ mask |= POLLOUT;
+ spin_unlock_irq(&bd->lock);
+
+ return mask;
+}
+
+static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct bsg_device *bd = file->private_data;
+ int __user *uarg = (int __user *) arg;
+ int ret;
+
+ switch (cmd) {
+ /*
+ * our own ioctls
+ */
+ case SG_GET_COMMAND_Q:
+ return put_user(bd->max_queue, uarg);
+ case SG_SET_COMMAND_Q: {
+ int queue;
+
+ if (get_user(queue, uarg))
+ return -EFAULT;
+ if (queue < 1)
+ return -EINVAL;
+
+ spin_lock_irq(&bd->lock);
+ bd->max_queue = queue;
+ spin_unlock_irq(&bd->lock);
+ return 0;
+ }
+
+ /*
+ * SCSI/sg ioctls
+ */
+ case SG_GET_VERSION_NUM:
+ case SCSI_IOCTL_GET_IDLUN:
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ case SG_SET_TIMEOUT:
+ case SG_GET_TIMEOUT:
+ case SG_GET_RESERVED_SIZE:
+ case SG_SET_RESERVED_SIZE:
+ case SG_EMULATED_HOST:
+ case SCSI_IOCTL_SEND_COMMAND: {
+ void __user *uarg = (void __user *) arg;
+ return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
+ }
+ case SG_IO: {
+ struct request *rq;
+ struct bio *bio, *bidi_bio = NULL;
+ struct sg_io_v4 hdr;
+ int at_head;
+ u8 sense[SCSI_SENSE_BUFFERSIZE];
+
+ if (copy_from_user(&hdr, uarg, sizeof(hdr)))
+ return -EFAULT;
+
+ rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ bio = rq->bio;
+ if (rq->next_rq)
+ bidi_bio = rq->next_rq->bio;
+
+ at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
+ blk_execute_rq(bd->queue, NULL, rq, at_head);
+ ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
+
+ if (copy_to_user(uarg, &hdr, sizeof(hdr)))
+ return -EFAULT;
+
+ return ret;
+ }
+ /*
+ * block device ioctls
+ */
+ default:
+#if 0
+ return ioctl_by_bdev(bd->bdev, cmd, arg);
+#else
+ return -ENOTTY;
+#endif
+ }
+}
+
+static const struct file_operations bsg_fops = {
+ .read = bsg_read,
+ .write = bsg_write,
+ .poll = bsg_poll,
+ .open = bsg_open,
+ .release = bsg_release,
+ .unlocked_ioctl = bsg_ioctl,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void bsg_unregister_queue(struct request_queue *q)
+{
+ struct bsg_class_device *bcd = &q->bsg_dev;
+
+ if (!bcd->class_dev)
+ return;
+
+ mutex_lock(&bsg_mutex);
+ idr_remove(&bsg_minor_idr, bcd->minor);
+ if (q->kobj.sd)
+ sysfs_remove_link(&q->kobj, "bsg");
+ device_unregister(bcd->class_dev);
+ bcd->class_dev = NULL;
+ kref_put(&bcd->ref, bsg_kref_release_function);
+ mutex_unlock(&bsg_mutex);
+}
+EXPORT_SYMBOL_GPL(bsg_unregister_queue);
+
+int bsg_register_queue(struct request_queue *q, struct device *parent,
+ const char *name, void (*release)(struct device *))
+{
+ struct bsg_class_device *bcd;
+ dev_t dev;
+ int ret, minor;
+ struct device *class_dev = NULL;
+ const char *devname;
+
+ if (name)
+ devname = name;
+ else
+ devname = dev_name(parent);
+
+ /*
+ * we need a proper transport to send commands, not a stacked device
+ */
+ if (!q->request_fn)
+ return 0;
+
+ bcd = &q->bsg_dev;
+ memset(bcd, 0, sizeof(*bcd));
+
+ mutex_lock(&bsg_mutex);
+
+ ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
+ if (!ret) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
+ if (ret < 0)
+ goto unlock;
+
+ if (minor >= BSG_MAX_DEVS) {
+ printk(KERN_ERR "bsg: too many bsg devices\n");
+ ret = -EINVAL;
+ goto remove_idr;
+ }
+
+ bcd->minor = minor;
+ bcd->queue = q;
+ bcd->parent = get_device(parent);
+ bcd->release = release;
+ kref_init(&bcd->ref);
+ dev = MKDEV(bsg_major, bcd->minor);
+ class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
+ if (IS_ERR(class_dev)) {
+ ret = PTR_ERR(class_dev);
+ goto put_dev;
+ }
+ bcd->class_dev = class_dev;
+
+ if (q->kobj.sd) {
+ ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
+ if (ret)
+ goto unregister_class_dev;
+ }
+
+ mutex_unlock(&bsg_mutex);
+ return 0;
+
+unregister_class_dev:
+ device_unregister(class_dev);
+put_dev:
+ put_device(parent);
+remove_idr:
+ idr_remove(&bsg_minor_idr, minor);
+unlock:
+ mutex_unlock(&bsg_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bsg_register_queue);
+
+static struct cdev bsg_cdev;
+
+static char *bsg_devnode(struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
+}
+
+static int __init bsg_init(void)
+{
+ int ret, i;
+ dev_t devid;
+
+ bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
+ sizeof(struct bsg_command), 0, 0, NULL);
+ if (!bsg_cmd_cachep) {
+ printk(KERN_ERR "bsg: failed creating slab cache\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
+ INIT_HLIST_HEAD(&bsg_device_list[i]);
+
+ bsg_class = class_create(THIS_MODULE, "bsg");
+ if (IS_ERR(bsg_class)) {
+ ret = PTR_ERR(bsg_class);
+ goto destroy_kmemcache;
+ }
+ bsg_class->devnode = bsg_devnode;
+
+ ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
+ if (ret)
+ goto destroy_bsg_class;
+
+ bsg_major = MAJOR(devid);
+
+ cdev_init(&bsg_cdev, &bsg_fops);
+ ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
+ if (ret)
+ goto unregister_chrdev;
+
+ printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
+ " loaded (major %d)\n", bsg_major);
+ return 0;
+unregister_chrdev:
+ unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
+destroy_bsg_class:
+ class_destroy(bsg_class);
+destroy_kmemcache:
+ kmem_cache_destroy(bsg_cmd_cachep);
+ return ret;
+}
+
+MODULE_AUTHOR("Jens Axboe");
+MODULE_DESCRIPTION(BSG_DESCRIPTION);
+MODULE_LICENSE("GPL");
+
+device_initcall(bsg_init);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
new file mode 100644
index 00000000..3c38536b
--- /dev/null
+++ b/block/cfq-iosched.c
@@ -0,0 +1,3936 @@
+/*
+ * CFQ, or complete fairness queueing, disk scheduler.
+ *
+ * Based on ideas from a previously unfinished I/O scheduler
+ * (round-robin per-process disk scheduling) and from Andrea Arcangeli.
+ *
+ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/jiffies.h>
+#include <linux/rbtree.h>
+#include <linux/ioprio.h>
+#include <linux/blktrace_api.h>
+#include "blk.h"
+#include "cfq.h"
+
+/*
+ * tunables
+ */
+/* max queue in one round of service */
+static const int cfq_quantum = 8;
+static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
+/* maximum backwards seek, in KiB */
+static const int cfq_back_max = 16 * 1024;
+/* penalty of a backwards seek */
+static const int cfq_back_penalty = 2;
+static const int cfq_slice_sync = HZ / 10;
+static int cfq_slice_async = HZ / 25;
+static const int cfq_slice_async_rq = 2;
+static int cfq_slice_idle = HZ / 125;
+static int cfq_group_idle = HZ / 125;
+static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
+static const int cfq_hist_divisor = 4;
+
+/*
+ * offset from end of service tree
+ */
+#define CFQ_IDLE_DELAY (HZ / 5)
+
+/*
+ * below this threshold, we consider thinktime immediate
+ */
+#define CFQ_MIN_TT (2)
+
+#define CFQ_SLICE_SCALE (5)
+#define CFQ_HW_QUEUE_MIN (5)
+#define CFQ_SERVICE_SHIFT 12
+
+#define CFQQ_SEEK_THR (sector_t)(8 * 100)
+#define CFQQ_CLOSE_THR (sector_t)(8 * 1024)
+#define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
+#define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8)
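+/*
+ * i.e. a queue counts as seeky once more than 4 of its last 32 tracked
+ * requests were large seeks.
+ */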
+
+#define RQ_CIC(rq) icq_to_cic((rq)->elv.icq)
+#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elv.priv[0])
+#define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elv.priv[1])
+
+static struct kmem_cache *cfq_pool;
+
+#define CFQ_PRIO_LISTS IOPRIO_BE_NR
+#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
+#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
+
+#define sample_valid(samples) ((samples) > 80)
+#define rb_entry_cfqg(node) rb_entry((node), struct cfq_group, rb_node)
+
+struct cfq_ttime {
+ unsigned long last_end_request;
+
+ unsigned long ttime_total;
+ unsigned long ttime_samples;
+ unsigned long ttime_mean;
+};
+
+/*
+ * Most of our rbtree usage is for sorting with min extraction, so
+ * if we cache the leftmost node we don't have to walk down the tree
+ * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
+ * move this into the elevator for the rq sorting as well.
+ */
+struct cfq_rb_root {
+ struct rb_root rb;
+ struct rb_node *left;
+ unsigned count;
+ unsigned total_weight;
+ u64 min_vdisktime;
+ struct cfq_ttime ttime;
+};
+#define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT, \
+ .ttime = {.last_end_request = jiffies,},}
+
+/*
+ * Per process-grouping structure
+ */
+struct cfq_queue {
+ /* reference count */
+ int ref;
+ /* various state flags, see below */
+ unsigned int flags;
+ /* parent cfq_data */
+ struct cfq_data *cfqd;
+ /* service_tree member */
+ struct rb_node rb_node;
+ /* service_tree key */
+ unsigned long rb_key;
+ /* prio tree member */
+ struct rb_node p_node;
+ /* prio tree root we belong to, if any */
+ struct rb_root *p_root;
+ /* sorted list of pending requests */
+ struct rb_root sort_list;
+ /* if fifo isn't expired, next request to serve */
+ struct request *next_rq;
+ /* requests queued in sort_list */
+ int queued[2];
+ /* currently allocated requests */
+ int allocated[2];
+ /* fifo list of requests in sort_list */
+ struct list_head fifo;
+
+ /* time when queue got scheduled in to dispatch first request. */
+ unsigned long dispatch_start;
+ unsigned int allocated_slice;
+ unsigned int slice_dispatch;
+ /* time when first request from queue completed and slice started. */
+ unsigned long slice_start;
+ unsigned long slice_end;
+ long slice_resid;
+
+ /* pending priority requests */
+ int prio_pending;
+ /* number of requests that are on the dispatch list or inside driver */
+ int dispatched;
+
+ /* io prio of this group */
+ unsigned short ioprio, org_ioprio;
+ unsigned short ioprio_class;
+
+ pid_t pid;
+
+ u32 seek_history;
+ sector_t last_request_pos;
+
+ struct cfq_rb_root *service_tree;
+ struct cfq_queue *new_cfqq;
+ struct cfq_group *cfqg;
+ /* Number of sectors dispatched from queue in single dispatch round */
+ unsigned long nr_sectors;
+};
+
+/*
+ * First index in the service_trees.
+ * IDLE is handled separately, so it has negative index
+ */
+enum wl_prio_t {
+ BE_WORKLOAD = 0,
+ RT_WORKLOAD = 1,
+ IDLE_WORKLOAD = 2,
+ CFQ_PRIO_NR,
+};
+
+/*
+ * Second index in the service_trees.
+ */
+enum wl_type_t {
+ ASYNC_WORKLOAD = 0,
+ SYNC_NOIDLE_WORKLOAD = 1,
+ SYNC_WORKLOAD = 2
+};
+
+/* This is per cgroup per device grouping structure */
+struct cfq_group {
+ /* group service_tree member */
+ struct rb_node rb_node;
+
+ /* group service_tree key */
+ u64 vdisktime;
+ unsigned int weight;
+ unsigned int new_weight;
+ bool needs_update;
+
+ /* number of cfqq currently on this group */
+ int nr_cfqq;
+
+ /*
+	 * Per-group busy queues average, useful for the workload slice
+	 * calculation. We create the array for each prio class, but at run
+	 * time it is used only for the RT and BE classes; the slot for the
+	 * IDLE class remains unused. This is primarily done to avoid
+	 * confusion and a gcc warning.
+ */
+ unsigned int busy_queues_avg[CFQ_PRIO_NR];
+ /*
+ * rr lists of queues with requests. We maintain service trees for
+ * RT and BE classes. These trees are subdivided in subclasses
+ * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
+ * class there is no subclassification and all the cfq queues go on
+ * a single tree service_tree_idle.
+ * Counts are embedded in the cfq_rb_root
+ */
+ struct cfq_rb_root service_trees[2][3];
+ struct cfq_rb_root service_tree_idle;
+
+ unsigned long saved_workload_slice;
+ enum wl_type_t saved_workload;
+ enum wl_prio_t saved_serving_prio;
+ struct blkio_group blkg;
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ struct hlist_node cfqd_node;
+ int ref;
+#endif
+ /* number of requests that are on the dispatch list or inside driver */
+ int dispatched;
+ struct cfq_ttime ttime;
+};
+
+struct cfq_io_cq {
+ struct io_cq icq; /* must be the first member */
+ struct cfq_queue *cfqq[2];
+ struct cfq_ttime ttime;
+};
+
+/*
+ * Per block device queue structure
+ */
+struct cfq_data {
+ struct request_queue *queue;
+ /* Root service tree for cfq_groups */
+ struct cfq_rb_root grp_service_tree;
+ struct cfq_group root_group;
+
+ /*
+ * The priority currently being served
+ */
+ enum wl_prio_t serving_prio;
+ enum wl_type_t serving_type;
+ unsigned long workload_expires;
+ struct cfq_group *serving_group;
+
+ /*
+ * Each priority tree is sorted by next_request position. These
+ * trees are used when determining if two or more queues are
+ * interleaving requests (see cfq_close_cooperator).
+ */
+ struct rb_root prio_trees[CFQ_PRIO_LISTS];
+
+ unsigned int busy_queues;
+ unsigned int busy_sync_queues;
+
+ int rq_in_driver;
+ int rq_in_flight[2];
+
+ /*
+ * queue-depth detection
+ */
+ int rq_queued;
+ int hw_tag;
+ /*
+ * hw_tag can be
+ * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
+ * 1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
+ * 0 => no NCQ
+ */
+ int hw_tag_est_depth;
+ unsigned int hw_tag_samples;
+
+ /*
+ * idle window management
+ */
+ struct timer_list idle_slice_timer;
+ struct work_struct unplug_work;
+
+ struct cfq_queue *active_queue;
+ struct cfq_io_cq *active_cic;
+
+ /*
+ * async queue for each priority case
+ */
+ struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
+ struct cfq_queue *async_idle_cfqq;
+
+ sector_t last_position;
+
+ /*
+ * tunables, see top of file
+ */
+ unsigned int cfq_quantum;
+ unsigned int cfq_fifo_expire[2];
+ unsigned int cfq_back_penalty;
+ unsigned int cfq_back_max;
+ unsigned int cfq_slice[2];
+ unsigned int cfq_slice_async_rq;
+ unsigned int cfq_slice_idle;
+ unsigned int cfq_group_idle;
+ unsigned int cfq_latency;
+ unsigned int cfq_target_latency;
+
+ /*
+ * Fallback dummy cfqq for extreme OOM conditions
+ */
+ struct cfq_queue oom_cfqq;
+
+ unsigned long last_delayed_sync;
+
+ /* List of cfq groups being managed on this device*/
+ struct hlist_head cfqg_list;
+
+ /* Number of groups which are on blkcg->blkg_list */
+ unsigned int nr_blkcg_linked_grps;
+};
+
+static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
+
+static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
+ enum wl_prio_t prio,
+ enum wl_type_t type)
+{
+ if (!cfqg)
+ return NULL;
+
+ if (prio == IDLE_WORKLOAD)
+ return &cfqg->service_tree_idle;
+
+ return &cfqg->service_trees[prio][type];
+}
+
+enum cfqq_state_flags {
+ CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
+ CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
+ CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */
+ CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
+ CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
+ CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */
+ CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
+ CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
+ CFQ_CFQQ_FLAG_sync, /* synchronous queue */
+ CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
+	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
+ CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
+ CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
+};
+
+#define CFQ_CFQQ_FNS(name) \
+static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
+{ \
+ (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
+} \
+static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
+{ \
+ (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
+} \
+static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
+{ \
+ return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
+}
+
+CFQ_CFQQ_FNS(on_rr);
+CFQ_CFQQ_FNS(wait_request);
+CFQ_CFQQ_FNS(must_dispatch);
+CFQ_CFQQ_FNS(must_alloc_slice);
+CFQ_CFQQ_FNS(fifo_expire);
+CFQ_CFQQ_FNS(idle_window);
+CFQ_CFQQ_FNS(prio_changed);
+CFQ_CFQQ_FNS(slice_new);
+CFQ_CFQQ_FNS(sync);
+CFQ_CFQQ_FNS(coop);
+CFQ_CFQQ_FNS(split_coop);
+CFQ_CFQQ_FNS(deep);
+CFQ_CFQQ_FNS(wait_busy);
+#undef CFQ_CFQQ_FNS
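+
+/*
+ * For example, CFQ_CFQQ_FNS(sync) above expands to cfq_mark_cfqq_sync(),
+ * cfq_clear_cfqq_sync() and cfq_cfqq_sync(), which respectively set,
+ * clear and test the CFQ_CFQQ_FLAG_sync bit in cfqq->flags.
+ */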
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
+ blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
+ cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
+ blkg_path(&(cfqq)->cfqg->blkg), ##args)
+
+#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \
+ blk_add_trace_msg((cfqd)->queue, "%s " fmt, \
+ blkg_path(&(cfqg)->blkg), ##args) \
+
+#else
+#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
+ blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
+#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
+#endif
+#define cfq_log(cfqd, fmt, args...) \
+ blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
+
+/* Traverses through cfq group service trees */
+#define for_each_cfqg_st(cfqg, i, j, st) \
+ for (i = 0; i <= IDLE_WORKLOAD; i++) \
+ for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
+ : &cfqg->service_tree_idle; \
+ (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
+ (i == IDLE_WORKLOAD && j == 0); \
+ j++, st = i < IDLE_WORKLOAD ? \
+ &cfqg->service_trees[i][j]: NULL) \
+
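+/*
+ * for_each_cfqg_st() above thus visits seven trees per group: the
+ * {ASYNC, SYNC_NOIDLE, SYNC} trees for each of the BE and RT classes,
+ * plus the single service_tree_idle used for the IDLE class.
+ */
+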
+static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
+ struct cfq_ttime *ttime, bool group_idle)
+{
+ unsigned long slice;
+ if (!sample_valid(ttime->ttime_samples))
+ return false;
+ if (group_idle)
+ slice = cfqd->cfq_group_idle;
+ else
+ slice = cfqd->cfq_slice_idle;
+ return ttime->ttime_mean > slice;
+}
+
+static inline bool iops_mode(struct cfq_data *cfqd)
+{
+ /*
+	 * If we are not idling on queues and it is an NCQ drive, parallel
+	 * execution of requests is on, and measuring time is not possible
+	 * in most cases unless we drive shallower queue depths, which would
+	 * itself become a performance bottleneck. In such cases, switch to
+	 * providing fairness in terms of the number of IOs.
+ */
+ if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
+ return true;
+ else
+ return false;
+}
+
+static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
+{
+ if (cfq_class_idle(cfqq))
+ return IDLE_WORKLOAD;
+ if (cfq_class_rt(cfqq))
+ return RT_WORKLOAD;
+ return BE_WORKLOAD;
+}
+
+
+static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
+{
+ if (!cfq_cfqq_sync(cfqq))
+ return ASYNC_WORKLOAD;
+ if (!cfq_cfqq_idle_window(cfqq))
+ return SYNC_NOIDLE_WORKLOAD;
+ return SYNC_WORKLOAD;
+}
+
+static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
+ struct cfq_data *cfqd,
+ struct cfq_group *cfqg)
+{
+ if (wl == IDLE_WORKLOAD)
+ return cfqg->service_tree_idle.count;
+
+ return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
+ + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
+ + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
+}
+
+static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
+ struct cfq_group *cfqg)
+{
+ return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
+ + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
+}
+
+static void cfq_dispatch_insert(struct request_queue *, struct request *);
+static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
+ struct io_context *, gfp_t);
+
+static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
+{
+ /* cic->icq is the first member, %NULL will convert to %NULL */
+ return container_of(icq, struct cfq_io_cq, icq);
+}
+
+static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
+ struct io_context *ioc)
+{
+ if (ioc)
+ return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
+ return NULL;
+}
+
+static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
+{
+ return cic->cfqq[is_sync];
+}
+
+static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
+ bool is_sync)
+{
+ cic->cfqq[is_sync] = cfqq;
+}
+
+static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
+{
+ return cic->icq.q->elevator->elevator_data;
+}
+
+/*
+ * We regard a request as SYNC, if it's either a read or has the SYNC bit
+ * set (in which case it could also be direct WRITE).
+ */
+static inline bool cfq_bio_sync(struct bio *bio)
+{
+ return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
+}
+
+/*
+ * scheduler run of queue, if there are requests pending and no one in the
+ * driver that will restart queueing
+ */
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
+{
+ if (cfqd->busy_queues) {
+ cfq_log(cfqd, "schedule dispatch");
+ kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
+ }
+}
+
+/*
+ * Scale schedule slice based on io priority. Use the sync time slice only
+ * if a queue is marked sync and has sync io queued. A sync queue with async
+ * io only should not get the full sync slice length.
+ */
+static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
+ unsigned short prio)
+{
+ const int base_slice = cfqd->cfq_slice[sync];
+
+ WARN_ON(prio >= IOPRIO_BE_NR);
+
+ return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
+}
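+
+/*
+ * Worked example with the default tunables: the sync base slice is
+ * HZ / 10 (100 ms), so base_slice / CFQ_SLICE_SCALE is 20 ms and a
+ * prio 0 sync queue gets 100 + 4 * 20 = 180 ms, prio 4 gets the plain
+ * 100 ms, and prio 7 gets 100 - 3 * 20 = 40 ms.
+ */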
+
+static inline int
+cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
+}
+
+static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
+{
+ u64 d = delta << CFQ_SERVICE_SHIFT;
+
+ d = d * BLKIO_WEIGHT_DEFAULT;
+ do_div(d, cfqg->weight);
+ return d;
+}
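+
+/*
+ * In other words, a group at BLKIO_WEIGHT_DEFAULT is charged the plain
+ * (shifted) slice length as vdisktime, while a group with twice that
+ * weight is charged half as much for the same slice, so it advances
+ * through the service tree at half the rate and gets roughly twice the
+ * disk share.
+ */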
+
+static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
+{
+ s64 delta = (s64)(vdisktime - min_vdisktime);
+ if (delta > 0)
+ min_vdisktime = vdisktime;
+
+ return min_vdisktime;
+}
+
+static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
+{
+ s64 delta = (s64)(vdisktime - min_vdisktime);
+ if (delta < 0)
+ min_vdisktime = vdisktime;
+
+ return min_vdisktime;
+}
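+
+/*
+ * The signed (s64) subtraction in the two helpers above makes the
+ * comparison safe even if the u64 vdisktime counters eventually wrap.
+ */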
+
+static void update_min_vdisktime(struct cfq_rb_root *st)
+{
+ struct cfq_group *cfqg;
+
+ if (st->left) {
+ cfqg = rb_entry_cfqg(st->left);
+ st->min_vdisktime = max_vdisktime(st->min_vdisktime,
+ cfqg->vdisktime);
+ }
+}
+
+/*
+ * get averaged number of queues of RT/BE priority.
+ * The average is updated with a formula that gives more weight to higher
+ * numbers, so that it quickly follows sudden increases and decays slowly.
+ */
+
+static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
+ struct cfq_group *cfqg, bool rt)
+{
+ unsigned min_q, max_q;
+ unsigned mult = cfq_hist_divisor - 1;
+ unsigned round = cfq_hist_divisor / 2;
+ unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
+
+ min_q = min(cfqg->busy_queues_avg[rt], busy);
+ max_q = max(cfqg->busy_queues_avg[rt], busy);
+ cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
+ cfq_hist_divisor;
+ return cfqg->busy_queues_avg[rt];
+}
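+
+/*
+ * Worked example: with cfq_hist_divisor == 4 the update above is
+ * (3 * max_q + min_q + 2) / 4. If the average is 1 and busy jumps to 5,
+ * a single update already yields (3 * 5 + 1 + 2) / 4 == 4, while an
+ * average of 5 with busy dropping to 1 only falls to 4 in that same
+ * update: spikes are tracked quickly while the decay is slow.
+ */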
+
+static inline unsigned
+cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
+{
+ struct cfq_rb_root *st = &cfqd->grp_service_tree;
+
+ return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
+}
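+
+/*
+ * E.g. with the default 300 ms target latency, a group holding half of
+ * the total weight on the service tree is handed a 150 ms workload slice.
+ */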
+
+static inline unsigned
+cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
+ if (cfqd->cfq_latency) {
+ /*
+ * interested queues (we consider only the ones with the same
+ * priority class in the cfq group)
+ */
+ unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
+ cfq_class_rt(cfqq));
+ unsigned sync_slice = cfqd->cfq_slice[1];
+ unsigned expect_latency = sync_slice * iq;
+ unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
+
+ if (expect_latency > group_slice) {
+ unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
+ /* scale low_slice according to IO priority
+ * and sync vs async */
+ unsigned low_slice =
+ min(slice, base_low_slice * slice / sync_slice);
+ /* the adapted slice value is scaled to fit all iqs
+ * into the target latency */
+ slice = max(slice * group_slice / expect_latency,
+ low_slice);
+ }
+ }
+ return slice;
+}
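+
+/*
+ * Worked example, assuming default tunables: a prio 4 sync queue would
+ * normally get a 100 ms slice. If its class has six busy queues, the
+ * expected latency is 600 ms against a 300 ms group slice, so the slice
+ * is scaled down to max(100 * 300 / 600, low_slice) == 50 ms, with
+ * low_slice (16 ms here, from 2 * cfq_slice_idle) as the floor.
+ */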
+
+static inline void
+cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
+
+ cfqq->slice_start = jiffies;
+ cfqq->slice_end = jiffies + slice;
+ cfqq->allocated_slice = slice;
+ cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
+}
+
+/*
+ * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
+ * isn't valid until the first request from the dispatch is activated
+ * and the slice time set.
+ */
+static inline bool cfq_slice_used(struct cfq_queue *cfqq)
+{
+ if (cfq_cfqq_slice_new(cfqq))
+ return false;
+ if (time_before(jiffies, cfqq->slice_end))
+ return false;
+
+ return true;
+}
+
+/*
+ * Lifted from AS - choose which of rq1 and rq2 that is best served now.
+ * We choose the request that is closest to the head right now. Distance
+ * behind the head is penalized and only allowed to a certain extent.
+ */
+static struct request *
+cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
+{
+ sector_t s1, s2, d1 = 0, d2 = 0;
+ unsigned long back_max;
+#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */
+#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */
+ unsigned wrap = 0; /* bit mask: requests behind the disk head? */
+
+ if (rq1 == NULL || rq1 == rq2)
+ return rq2;
+ if (rq2 == NULL)
+ return rq1;
+
+ if (rq_is_sync(rq1) != rq_is_sync(rq2))
+ return rq_is_sync(rq1) ? rq1 : rq2;
+
+ if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
+ return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
+
+ s1 = blk_rq_pos(rq1);
+ s2 = blk_rq_pos(rq2);
+
+ /*
+ * by definition, 1KiB is 2 sectors
+ */
+ back_max = cfqd->cfq_back_max * 2;
+
+ /*
+ * Strict one way elevator _except_ in the case where we allow
+ * short backward seeks which are biased as twice the cost of a
+ * similar forward seek.
+ */
+ if (s1 >= last)
+ d1 = s1 - last;
+ else if (s1 + back_max >= last)
+ d1 = (last - s1) * cfqd->cfq_back_penalty;
+ else
+ wrap |= CFQ_RQ1_WRAP;
+
+ if (s2 >= last)
+ d2 = s2 - last;
+ else if (s2 + back_max >= last)
+ d2 = (last - s2) * cfqd->cfq_back_penalty;
+ else
+ wrap |= CFQ_RQ2_WRAP;
+
+ /* Found required data */
+
+ /*
+ * By doing switch() on the bit mask "wrap" we avoid having to
+ * check two variables for all permutations: --> faster!
+ */
+ switch (wrap) {
+ case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
+ if (d1 < d2)
+ return rq1;
+ else if (d2 < d1)
+ return rq2;
+ else {
+ if (s1 >= s2)
+ return rq1;
+ else
+ return rq2;
+ }
+
+ case CFQ_RQ2_WRAP:
+ return rq1;
+ case CFQ_RQ1_WRAP:
+ return rq2;
+ case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
+ default:
+ /*
+ * Since both rqs are wrapped,
+ * start with the one that's further behind head
+ * (--> only *one* back seek required),
+ * since back seek takes more time than forward.
+ */
+ if (s1 <= s2)
+ return rq1;
+ else
+ return rq2;
+ }
+}
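+
+/*
+ * With the default tunables: a request 1000 sectors ahead of the head
+ * has distance 1000, one 1000 sectors behind is weighted as 2000, and
+ * anything more than 32768 sectors (16 MiB) behind the head is treated
+ * as wrapped and only picked if the competing request is wrapped too.
+ */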
+
+/*
+ * The helpers below implement the leftmost-node cache addon for the rbtrees.
+ */
+static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
+{
+ /* Service tree is empty */
+ if (!root->count)
+ return NULL;
+
+ if (!root->left)
+ root->left = rb_first(&root->rb);
+
+ if (root->left)
+ return rb_entry(root->left, struct cfq_queue, rb_node);
+
+ return NULL;
+}
+
+static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
+{
+ if (!root->left)
+ root->left = rb_first(&root->rb);
+
+ if (root->left)
+ return rb_entry_cfqg(root->left);
+
+ return NULL;
+}
+
+static void rb_erase_init(struct rb_node *n, struct rb_root *root)
+{
+ rb_erase(n, root);
+ RB_CLEAR_NODE(n);
+}
+
+static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
+{
+ if (root->left == n)
+ root->left = NULL;
+ rb_erase_init(n, &root->rb);
+ --root->count;
+}
+
+/*
+ * would be nice to take fifo expire time into account as well
+ */
+static struct request *
+cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ struct request *last)
+{
+ struct rb_node *rbnext = rb_next(&last->rb_node);
+ struct rb_node *rbprev = rb_prev(&last->rb_node);
+ struct request *next = NULL, *prev = NULL;
+
+ BUG_ON(RB_EMPTY_NODE(&last->rb_node));
+
+ if (rbprev)
+ prev = rb_entry_rq(rbprev);
+
+ if (rbnext)
+ next = rb_entry_rq(rbnext);
+ else {
+ rbnext = rb_first(&cfqq->sort_list);
+ if (rbnext && rbnext != &last->rb_node)
+ next = rb_entry_rq(rbnext);
+ }
+
+ return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
+}
+
+static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
+ struct cfq_queue *cfqq)
+{
+ /*
+ * just an approximation, should be ok.
+ */
+ return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
+ cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
+}
+
+static inline s64
+cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
+{
+ return cfqg->vdisktime - st->min_vdisktime;
+}
+
+static void
+__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
+{
+ struct rb_node **node = &st->rb.rb_node;
+ struct rb_node *parent = NULL;
+ struct cfq_group *__cfqg;
+ s64 key = cfqg_key(st, cfqg);
+ int left = 1;
+
+ while (*node != NULL) {
+ parent = *node;
+ __cfqg = rb_entry_cfqg(parent);
+
+ if (key < cfqg_key(st, __cfqg))
+ node = &parent->rb_left;
+ else {
+ node = &parent->rb_right;
+ left = 0;
+ }
+ }
+
+ if (left)
+ st->left = &cfqg->rb_node;
+
+ rb_link_node(&cfqg->rb_node, parent, node);
+ rb_insert_color(&cfqg->rb_node, &st->rb);
+}
+
+static void
+cfq_update_group_weight(struct cfq_group *cfqg)
+{
+ BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
+ if (cfqg->needs_update) {
+ cfqg->weight = cfqg->new_weight;
+ cfqg->needs_update = false;
+ }
+}
+
+static void
+cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
+{
+ BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
+
+ cfq_update_group_weight(cfqg);
+ __cfq_group_service_tree_add(st, cfqg);
+ st->total_weight += cfqg->weight;
+}
+
+static void
+cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
+{
+ struct cfq_rb_root *st = &cfqd->grp_service_tree;
+ struct cfq_group *__cfqg;
+ struct rb_node *n;
+
+ cfqg->nr_cfqq++;
+ if (!RB_EMPTY_NODE(&cfqg->rb_node))
+ return;
+
+ /*
+	 * Currently put the group at the end. Later, implement something
+	 * so that groups get a smaller vdisktime based on their weights, so
+	 * that a group does not lose everything if it was not continuously
+	 * backlogged.
+ */
+ n = rb_last(&st->rb);
+ if (n) {
+ __cfqg = rb_entry_cfqg(n);
+ cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
+ } else
+ cfqg->vdisktime = st->min_vdisktime;
+ cfq_group_service_tree_add(st, cfqg);
+}
+
+static void
+cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
+{
+ st->total_weight -= cfqg->weight;
+ if (!RB_EMPTY_NODE(&cfqg->rb_node))
+ cfq_rb_erase(&cfqg->rb_node, st);
+}
+
+static void
+cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
+{
+ struct cfq_rb_root *st = &cfqd->grp_service_tree;
+
+ BUG_ON(cfqg->nr_cfqq < 1);
+ cfqg->nr_cfqq--;
+
+ /* If there are other cfq queues under this group, don't delete it */
+ if (cfqg->nr_cfqq)
+ return;
+
+ cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
+ cfq_group_service_tree_del(st, cfqg);
+ cfqg->saved_workload_slice = 0;
+ cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
+}
+
+static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
+ unsigned int *unaccounted_time)
+{
+ unsigned int slice_used;
+
+ /*
+ * Queue got expired before even a single request completed or
+ * got expired immediately after first request completion.
+ */
+ if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
+ /*
+ * Also charge the seek time incurred to the group, otherwise
+		 * if there are multiple queues in the group, each can dispatch
+		 * a single request on seeky media and cause lots of seek time
+		 * and the group will never know it.
+ */
+ slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
+ 1);
+ } else {
+ slice_used = jiffies - cfqq->slice_start;
+ if (slice_used > cfqq->allocated_slice) {
+ *unaccounted_time = slice_used - cfqq->allocated_slice;
+ slice_used = cfqq->allocated_slice;
+ }
+ if (time_after(cfqq->slice_start, cfqq->dispatch_start))
+ *unaccounted_time += cfqq->slice_start -
+ cfqq->dispatch_start;
+ }
+
+ return slice_used;
+}
+
+static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
+ struct cfq_queue *cfqq)
+{
+ struct cfq_rb_root *st = &cfqd->grp_service_tree;
+ unsigned int used_sl, charge, unaccounted_sl = 0;
+ int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
+ - cfqg->service_tree_idle.count;
+
+ BUG_ON(nr_sync < 0);
+ used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
+
+ if (iops_mode(cfqd))
+ charge = cfqq->slice_dispatch;
+ else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
+ charge = cfqq->allocated_slice;
+
+ /* Can't update vdisktime while group is on service tree */
+ cfq_group_service_tree_del(st, cfqg);
+ cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
+ /* If a new weight was requested, update now, off tree */
+ cfq_group_service_tree_add(st, cfqg);
+
+ /* This group is being expired. Save the context */
+ if (time_after(cfqd->workload_expires, jiffies)) {
+ cfqg->saved_workload_slice = cfqd->workload_expires
+ - jiffies;
+ cfqg->saved_workload = cfqd->serving_type;
+ cfqg->saved_serving_prio = cfqd->serving_prio;
+ } else
+ cfqg->saved_workload_slice = 0;
+
+ cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
+ st->min_vdisktime);
+ cfq_log_cfqq(cfqq->cfqd, cfqq,
+ "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
+ used_sl, cfqq->slice_dispatch, charge,
+ iops_mode(cfqd), cfqq->nr_sectors);
+ cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
+ unaccounted_sl);
+ cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
+}
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
+{
+ if (blkg)
+ return container_of(blkg, struct cfq_group, blkg);
+ return NULL;
+}
+
+static void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
+ unsigned int weight)
+{
+ struct cfq_group *cfqg = cfqg_of_blkg(blkg);
+ cfqg->new_weight = weight;
+ cfqg->needs_update = true;
+}
+
+static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
+ struct cfq_group *cfqg, struct blkio_cgroup *blkcg)
+{
+ struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
+ unsigned int major, minor;
+
+ /*
+ * Add group onto cgroup list. It might happen that bdi->dev is
+ * not initialized yet. Initialize this new group without major
+ * and minor info and this info will be filled in once a new thread
+ * comes for IO.
+ */
+ if (bdi->dev) {
+ sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
+ cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
+ (void *)cfqd, MKDEV(major, minor));
+ } else
+ cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
+ (void *)cfqd, 0);
+
+ cfqd->nr_blkcg_linked_grps++;
+ cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
+
+ /* Add group on cfqd list */
+ hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
+}
+
+/*
+ * Should be called from sleepable context. No request queue lock as per
+ * cpu stats are allocated dynamically and alloc_percpu needs to be called
+ * from sleepable context.
+ */
+static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
+{
+ struct cfq_group *cfqg = NULL;
+ int i, j, ret;
+ struct cfq_rb_root *st;
+
+ cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
+ if (!cfqg)
+ return NULL;
+
+ for_each_cfqg_st(cfqg, i, j, st)
+ *st = CFQ_RB_ROOT;
+ RB_CLEAR_NODE(&cfqg->rb_node);
+
+ cfqg->ttime.last_end_request = jiffies;
+
+ /*
+	 * Take the initial reference that will be released on destroy.
+	 * This can be thought of as a joint reference by cgroup and
+ * elevator which will be dropped by either elevator exit
+ * or cgroup deletion path depending on who is exiting first.
+ */
+ cfqg->ref = 1;
+
+ ret = blkio_alloc_blkg_stats(&cfqg->blkg);
+ if (ret) {
+ kfree(cfqg);
+ return NULL;
+ }
+
+ return cfqg;
+}
+
+static struct cfq_group *
+cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
+{
+ struct cfq_group *cfqg = NULL;
+ void *key = cfqd;
+ struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
+ unsigned int major, minor;
+
+ /*
+ * This is the common case when there are no blkio cgroups.
+ * Avoid lookup in this case
+ */
+ if (blkcg == &blkio_root_cgroup)
+ cfqg = &cfqd->root_group;
+ else
+ cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
+
+ if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
+ sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
+ cfqg->blkg.dev = MKDEV(major, minor);
+ }
+
+ return cfqg;
+}
+
+/*
+ * Search for the cfq group current task belongs to. request_queue lock must
+ * be held.
+ */
+static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
+{
+ struct blkio_cgroup *blkcg;
+ struct cfq_group *cfqg = NULL, *__cfqg = NULL;
+ struct request_queue *q = cfqd->queue;
+
+ rcu_read_lock();
+ blkcg = task_blkio_cgroup(current);
+ cfqg = cfq_find_cfqg(cfqd, blkcg);
+ if (cfqg) {
+ rcu_read_unlock();
+ return cfqg;
+ }
+
+ /*
+	 * Need to allocate a group. Allocating a group also needs allocation
+	 * of per-cpu stats, which in turn takes a mutex and can block. Hence
+	 * we need to drop the rcu lock and queue_lock before we call alloc.
+ *
+ * Not taking any queue reference here and assuming that queue is
+ * around by the time we return. CFQ queue allocation code does
+ * the same. It might be racy though.
+ */
+
+ rcu_read_unlock();
+ spin_unlock_irq(q->queue_lock);
+
+ cfqg = cfq_alloc_cfqg(cfqd);
+
+ spin_lock_irq(q->queue_lock);
+
+ rcu_read_lock();
+ blkcg = task_blkio_cgroup(current);
+
+ /*
+ * If some other thread already allocated the group while we were
+ * not holding queue lock, free up the group
+ */
+ __cfqg = cfq_find_cfqg(cfqd, blkcg);
+
+ if (__cfqg) {
+ kfree(cfqg);
+ rcu_read_unlock();
+ return __cfqg;
+ }
+
+ if (!cfqg)
+ cfqg = &cfqd->root_group;
+
+ cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
+ rcu_read_unlock();
+ return cfqg;
+}
+
+static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
+{
+ cfqg->ref++;
+ return cfqg;
+}
+
+static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
+{
+ /* Currently, all async queues are mapped to root group */
+ if (!cfq_cfqq_sync(cfqq))
+ cfqg = &cfqq->cfqd->root_group;
+
+ cfqq->cfqg = cfqg;
+ /* cfqq reference on cfqg */
+ cfqq->cfqg->ref++;
+}
+
+static void cfq_put_cfqg(struct cfq_group *cfqg)
+{
+ struct cfq_rb_root *st;
+ int i, j;
+
+ BUG_ON(cfqg->ref <= 0);
+ cfqg->ref--;
+ if (cfqg->ref)
+ return;
+ for_each_cfqg_st(cfqg, i, j, st)
+ BUG_ON(!RB_EMPTY_ROOT(&st->rb));
+ free_percpu(cfqg->blkg.stats_cpu);
+ kfree(cfqg);
+}
+
+static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
+{
+ /* Something wrong if we are trying to remove same group twice */
+ BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
+
+ hlist_del_init(&cfqg->cfqd_node);
+
+ BUG_ON(cfqd->nr_blkcg_linked_grps <= 0);
+ cfqd->nr_blkcg_linked_grps--;
+
+ /*
+ * Put the reference taken at the time of creation so that when all
+ * queues are gone, group can be destroyed.
+ */
+ cfq_put_cfqg(cfqg);
+}
+
+static void cfq_release_cfq_groups(struct cfq_data *cfqd)
+{
+ struct hlist_node *pos, *n;
+ struct cfq_group *cfqg;
+
+ hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
+ /*
+ * If cgroup removal path got to blk_group first and removed
+ * it from cgroup list, then it will take care of destroying
+ * cfqg also.
+ */
+ if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
+ cfq_destroy_cfqg(cfqd, cfqg);
+ }
+}
+
+/*
+ * Blk cgroup controller notification saying that the blkio_group object is
+ * being delinked as the associated cgroup object is going away. That also
+ * means that no new IO will come in this group. So get rid of this group as
+ * soon as any pending IO in the group is finished.
+ *
+ * This function is called under rcu_read_lock(). key is the rcu protected
+ * pointer. That means "key" is a valid cfq_data pointer as long as we hold
+ * the rcu read lock.
+ *
+ * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
+ * it should not be NULL: even if the elevator was exiting, the cgroup
+ * deletion path got to it first.
+ */
+static void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
+{
+ unsigned long flags;
+ struct cfq_data *cfqd = key;
+
+ spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+ cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
+ spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+}
+
+#else /* GROUP_IOSCHED */
+static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
+{
+ return &cfqd->root_group;
+}
+
+static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
+{
+ return cfqg;
+}
+
+static inline void
+cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
+ cfqq->cfqg = cfqg;
+}
+
+static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
+static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
+
+#endif /* GROUP_IOSCHED */
+
+/*
+ * The cfqd->service_trees holds all pending cfq_queue's that have
+ * requests waiting to be processed. It is sorted in the order that
+ * we will service the queues.
+ */
+static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ bool add_front)
+{
+ struct rb_node **p, *parent;
+ struct cfq_queue *__cfqq;
+ unsigned long rb_key;
+ struct cfq_rb_root *service_tree;
+ int left;
+ int new_cfqq = 1;
+
+ service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
+ cfqq_type(cfqq));
+ if (cfq_class_idle(cfqq)) {
+ rb_key = CFQ_IDLE_DELAY;
+ parent = rb_last(&service_tree->rb);
+ if (parent && parent != &cfqq->rb_node) {
+ __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
+ rb_key += __cfqq->rb_key;
+ } else
+ rb_key += jiffies;
+ } else if (!add_front) {
+ /*
+ * Get our rb key offset. Subtract any residual slice
+ * value carried from last service. A negative resid
+ * count indicates slice overrun, and this should position
+ * the next service time further away in the tree.
+ */
+ rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
+ rb_key -= cfqq->slice_resid;
+ cfqq->slice_resid = 0;
+ } else {
+ rb_key = -HZ;
+ __cfqq = cfq_rb_first(service_tree);
+ rb_key += __cfqq ? __cfqq->rb_key : jiffies;
+ }
+
+ if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
+ new_cfqq = 0;
+ /*
+ * same position, nothing more to do
+ */
+ if (rb_key == cfqq->rb_key &&
+ cfqq->service_tree == service_tree)
+ return;
+
+ cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
+ cfqq->service_tree = NULL;
+ }
+
+ left = 1;
+ parent = NULL;
+ cfqq->service_tree = service_tree;
+ p = &service_tree->rb.rb_node;
+ while (*p) {
+ struct rb_node **n;
+
+ parent = *p;
+ __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
+
+ /*
+ * sort by key, that represents service time.
+ */
+ if (time_before(rb_key, __cfqq->rb_key))
+ n = &(*p)->rb_left;
+ else {
+ n = &(*p)->rb_right;
+ left = 0;
+ }
+
+ p = n;
+ }
+
+ if (left)
+ service_tree->left = &cfqq->rb_node;
+
+ cfqq->rb_key = rb_key;
+ rb_link_node(&cfqq->rb_node, parent, p);
+ rb_insert_color(&cfqq->rb_node, &service_tree->rb);
+ service_tree->count++;
+ if (add_front || !new_cfqq)
+ return;
+ cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
+}
+
+static struct cfq_queue *
+cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
+ sector_t sector, struct rb_node **ret_parent,
+ struct rb_node ***rb_link)
+{
+ struct rb_node **p, *parent;
+ struct cfq_queue *cfqq = NULL;
+
+ parent = NULL;
+ p = &root->rb_node;
+ while (*p) {
+ struct rb_node **n;
+
+ parent = *p;
+ cfqq = rb_entry(parent, struct cfq_queue, p_node);
+
+ /*
+ * Sort strictly based on sector. Smallest to the left,
+ * largest to the right.
+ */
+ if (sector > blk_rq_pos(cfqq->next_rq))
+ n = &(*p)->rb_right;
+ else if (sector < blk_rq_pos(cfqq->next_rq))
+ n = &(*p)->rb_left;
+ else
+ break;
+ p = n;
+ cfqq = NULL;
+ }
+
+ *ret_parent = parent;
+ if (rb_link)
+ *rb_link = p;
+ return cfqq;
+}
+
+static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ struct rb_node **p, *parent;
+ struct cfq_queue *__cfqq;
+
+ if (cfqq->p_root) {
+ rb_erase(&cfqq->p_node, cfqq->p_root);
+ cfqq->p_root = NULL;
+ }
+
+ if (cfq_class_idle(cfqq))
+ return;
+ if (!cfqq->next_rq)
+ return;
+
+ cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
+ __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
+ blk_rq_pos(cfqq->next_rq), &parent, &p);
+ if (!__cfqq) {
+ rb_link_node(&cfqq->p_node, parent, p);
+ rb_insert_color(&cfqq->p_node, cfqq->p_root);
+ } else
+ cfqq->p_root = NULL;
+}
+
+/*
+ * Update cfqq's position in the service tree.
+ */
+static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ /*
+ * Resorting requires the cfqq to be on the RR list already.
+ */
+ if (cfq_cfqq_on_rr(cfqq)) {
+ cfq_service_tree_add(cfqd, cfqq, 0);
+ cfq_prio_tree_add(cfqd, cfqq);
+ }
+}
+
+/*
+ * add to busy list of queues for service, trying to be fair in ordering
+ * the pending list according to last request service
+ */
+static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
+ BUG_ON(cfq_cfqq_on_rr(cfqq));
+ cfq_mark_cfqq_on_rr(cfqq);
+ cfqd->busy_queues++;
+ if (cfq_cfqq_sync(cfqq))
+ cfqd->busy_sync_queues++;
+
+ cfq_resort_rr_list(cfqd, cfqq);
+}
+
+/*
+ * Called when the cfqq no longer has requests pending, remove it from
+ * the service tree.
+ */
+static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
+ BUG_ON(!cfq_cfqq_on_rr(cfqq));
+ cfq_clear_cfqq_on_rr(cfqq);
+
+ if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
+ cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
+ cfqq->service_tree = NULL;
+ }
+ if (cfqq->p_root) {
+ rb_erase(&cfqq->p_node, cfqq->p_root);
+ cfqq->p_root = NULL;
+ }
+
+ cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
+ BUG_ON(!cfqd->busy_queues);
+ cfqd->busy_queues--;
+ if (cfq_cfqq_sync(cfqq))
+ cfqd->busy_sync_queues--;
+}
+
+/*
+ * rb tree support functions
+ */
+static void cfq_del_rq_rb(struct request *rq)
+{
+ struct cfq_queue *cfqq = RQ_CFQQ(rq);
+ const int sync = rq_is_sync(rq);
+
+ BUG_ON(!cfqq->queued[sync]);
+ cfqq->queued[sync]--;
+
+ elv_rb_del(&cfqq->sort_list, rq);
+
+ if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
+ /*
+ * Queue will be deleted from service tree when we actually
+ * expire it later. Right now just remove it from prio tree
+ * as it is empty.
+ */
+ if (cfqq->p_root) {
+ rb_erase(&cfqq->p_node, cfqq->p_root);
+ cfqq->p_root = NULL;
+ }
+ }
+}
+
+static void cfq_add_rq_rb(struct request *rq)
+{
+ struct cfq_queue *cfqq = RQ_CFQQ(rq);
+ struct cfq_data *cfqd = cfqq->cfqd;
+ struct request *prev;
+
+ cfqq->queued[rq_is_sync(rq)]++;
+
+ elv_rb_add(&cfqq->sort_list, rq);
+
+ if (!cfq_cfqq_on_rr(cfqq))
+ cfq_add_cfqq_rr(cfqd, cfqq);
+
+ /*
+ * check if this request is a better next-serve candidate
+ */
+ prev = cfqq->next_rq;
+ cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
+
+ /*
+ * adjust priority tree position, if ->next_rq changes
+ */
+ if (prev != cfqq->next_rq)
+ cfq_prio_tree_add(cfqd, cfqq);
+
+ BUG_ON(!cfqq->next_rq);
+}
+
+static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
+{
+ elv_rb_del(&cfqq->sort_list, rq);
+ cfqq->queued[rq_is_sync(rq)]--;
+ cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
+ rq_data_dir(rq), rq_is_sync(rq));
+ cfq_add_rq_rb(rq);
+ cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+ &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
+ rq_is_sync(rq));
+}
+
+static struct request *
+cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
+{
+ struct task_struct *tsk = current;
+ struct cfq_io_cq *cic;
+ struct cfq_queue *cfqq;
+
+ cic = cfq_cic_lookup(cfqd, tsk->io_context);
+ if (!cic)
+ return NULL;
+
+ cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
+ if (cfqq) {
+ sector_t sector = bio->bi_sector + bio_sectors(bio);
+
+ return elv_rb_find(&cfqq->sort_list, sector);
+ }
+
+ return NULL;
+}
+
+static void cfq_activate_request(struct request_queue *q, struct request *rq)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+
+ cfqd->rq_in_driver++;
+ cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
+ cfqd->rq_in_driver);
+
+ cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
+}
+
+static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+
+ WARN_ON(!cfqd->rq_in_driver);
+ cfqd->rq_in_driver--;
+ cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
+ cfqd->rq_in_driver);
+}
+
+static void cfq_remove_request(struct request *rq)
+{
+ struct cfq_queue *cfqq = RQ_CFQQ(rq);
+
+ if (cfqq->next_rq == rq)
+ cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
+
+ list_del_init(&rq->queuelist);
+ cfq_del_rq_rb(rq);
+
+ cfqq->cfqd->rq_queued--;
+ cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
+ rq_data_dir(rq), rq_is_sync(rq));
+ if (rq->cmd_flags & REQ_PRIO) {
+ WARN_ON(!cfqq->prio_pending);
+ cfqq->prio_pending--;
+ }
+}
+
+static int cfq_merge(struct request_queue *q, struct request **req,
+ struct bio *bio)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct request *__rq;
+
+ __rq = cfq_find_rq_fmerge(cfqd, bio);
+ if (__rq && elv_rq_merge_ok(__rq, bio)) {
+ *req = __rq;
+ return ELEVATOR_FRONT_MERGE;
+ }
+
+ return ELEVATOR_NO_MERGE;
+}
+
+static void cfq_merged_request(struct request_queue *q, struct request *req,
+ int type)
+{
+ if (type == ELEVATOR_FRONT_MERGE) {
+ struct cfq_queue *cfqq = RQ_CFQQ(req);
+
+ cfq_reposition_rq_rb(cfqq, req);
+ }
+}
+
+static void cfq_bio_merged(struct request_queue *q, struct request *req,
+ struct bio *bio)
+{
+ cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
+ bio_data_dir(bio), cfq_bio_sync(bio));
+}
+
+static void
+cfq_merged_requests(struct request_queue *q, struct request *rq,
+ struct request *next)
+{
+ struct cfq_queue *cfqq = RQ_CFQQ(rq);
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+
+ /*
+ * reposition in fifo if next is older than rq
+ */
+ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
+ time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
+ list_move(&rq->queuelist, &next->queuelist);
+ rq_set_fifo_time(rq, rq_fifo_time(next));
+ }
+
+ if (cfqq->next_rq == next)
+ cfqq->next_rq = rq;
+ cfq_remove_request(next);
+ cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
+ rq_data_dir(next), rq_is_sync(next));
+
+ cfqq = RQ_CFQQ(next);
+ /*
+ * all requests of this queue are merged into other queues, delete it
+ * from the service tree. If it's the active_queue,
+ * cfq_dispatch_requests() will choose to expire it or do idle
+ */
+ if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
+ cfqq != cfqd->active_queue)
+ cfq_del_cfqq_rr(cfqd, cfqq);
+}
+
+static int cfq_allow_merge(struct request_queue *q, struct request *rq,
+ struct bio *bio)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct cfq_io_cq *cic;
+ struct cfq_queue *cfqq;
+
+ /*
+ * Disallow merge of a sync bio into an async request.
+ */
+ if (cfq_bio_sync(bio) && !rq_is_sync(rq))
+ return false;
+
+ /*
+ * Lookup the cfqq that this bio will be queued with and allow
+ * merge only if rq is queued there.
+ */
+ cic = cfq_cic_lookup(cfqd, current->io_context);
+ if (!cic)
+ return false;
+
+ cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
+ return cfqq == RQ_CFQQ(rq);
+}
+
+static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ del_timer(&cfqd->idle_slice_timer);
+ cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
+}
+
+static void __cfq_set_active_queue(struct cfq_data *cfqd,
+ struct cfq_queue *cfqq)
+{
+ if (cfqq) {
+ cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
+ cfqd->serving_prio, cfqd->serving_type);
+ cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
+ cfqq->slice_start = 0;
+ cfqq->dispatch_start = jiffies;
+ cfqq->allocated_slice = 0;
+ cfqq->slice_end = 0;
+ cfqq->slice_dispatch = 0;
+ cfqq->nr_sectors = 0;
+
+ cfq_clear_cfqq_wait_request(cfqq);
+ cfq_clear_cfqq_must_dispatch(cfqq);
+ cfq_clear_cfqq_must_alloc_slice(cfqq);
+ cfq_clear_cfqq_fifo_expire(cfqq);
+ cfq_mark_cfqq_slice_new(cfqq);
+
+ cfq_del_timer(cfqd, cfqq);
+ }
+
+ cfqd->active_queue = cfqq;
+}
+
+/*
+ * current cfqq expired its slice (or was too idle), select new one
+ */
+static void
+__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ bool timed_out)
+{
+ cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
+
+ if (cfq_cfqq_wait_request(cfqq))
+ cfq_del_timer(cfqd, cfqq);
+
+ cfq_clear_cfqq_wait_request(cfqq);
+ cfq_clear_cfqq_wait_busy(cfqq);
+
+ /*
+ * If this cfqq is shared between multiple processes, check to
+ * make sure that those processes are still issuing I/Os within
+ * the mean seek distance. If not, it may be time to break the
+ * queues apart again.
+ */
+ if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
+ cfq_mark_cfqq_split_coop(cfqq);
+
+ /*
+ * store what was left of this slice, if the queue idled/timed out
+ */
+ if (timed_out) {
+ if (cfq_cfqq_slice_new(cfqq))
+ cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
+ else
+ cfqq->slice_resid = cfqq->slice_end - jiffies;
+ cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
+ }
+
+ cfq_group_served(cfqd, cfqq->cfqg, cfqq);
+
+ if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
+ cfq_del_cfqq_rr(cfqd, cfqq);
+
+ cfq_resort_rr_list(cfqd, cfqq);
+
+ if (cfqq == cfqd->active_queue)
+ cfqd->active_queue = NULL;
+
+ if (cfqd->active_cic) {
+ put_io_context(cfqd->active_cic->icq.ioc);
+ cfqd->active_cic = NULL;
+ }
+}
+
+static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
+{
+ struct cfq_queue *cfqq = cfqd->active_queue;
+
+ if (cfqq)
+ __cfq_slice_expired(cfqd, cfqq, timed_out);
+}
+
+/*
+ * Get next queue for service. Unless we have a queue preemption,
+ * we'll simply select the first cfqq in the service tree.
+ */
+static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
+{
+ struct cfq_rb_root *service_tree =
+ service_tree_for(cfqd->serving_group, cfqd->serving_prio,
+ cfqd->serving_type);
+
+ if (!cfqd->rq_queued)
+ return NULL;
+
+ /* There is nothing to dispatch */
+ if (!service_tree)
+ return NULL;
+ if (RB_EMPTY_ROOT(&service_tree->rb))
+ return NULL;
+ return cfq_rb_first(service_tree);
+}
+
+static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
+{
+ struct cfq_group *cfqg;
+ struct cfq_queue *cfqq;
+ int i, j;
+ struct cfq_rb_root *st;
+
+ if (!cfqd->rq_queued)
+ return NULL;
+
+ cfqg = cfq_get_next_cfqg(cfqd);
+ if (!cfqg)
+ return NULL;
+
+ for_each_cfqg_st(cfqg, i, j, st)
+ if ((cfqq = cfq_rb_first(st)) != NULL)
+ return cfqq;
+ return NULL;
+}
+
+/*
+ * Get and set a new active queue for service.
+ */
+static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
+ struct cfq_queue *cfqq)
+{
+ if (!cfqq)
+ cfqq = cfq_get_next_queue(cfqd);
+
+ __cfq_set_active_queue(cfqd, cfqq);
+ return cfqq;
+}
+
+static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
+ struct request *rq)
+{
+ if (blk_rq_pos(rq) >= cfqd->last_position)
+ return blk_rq_pos(rq) - cfqd->last_position;
+ else
+ return cfqd->last_position - blk_rq_pos(rq);
+}
+
+static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ struct request *rq)
+{
+ return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
+}
+
+static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
+ struct cfq_queue *cur_cfqq)
+{
+ struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
+ struct rb_node *parent, *node;
+ struct cfq_queue *__cfqq;
+ sector_t sector = cfqd->last_position;
+
+ if (RB_EMPTY_ROOT(root))
+ return NULL;
+
+ /*
+ * First, if we find a request starting at the end of the last
+ * request, choose it.
+ */
+ __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
+ if (__cfqq)
+ return __cfqq;
+
+ /*
+ * If the exact sector wasn't found, the parent of the NULL leaf
+ * will contain the closest sector.
+ */
+ __cfqq = rb_entry(parent, struct cfq_queue, p_node);
+ if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
+ return __cfqq;
+
+ if (blk_rq_pos(__cfqq->next_rq) < sector)
+ node = rb_next(&__cfqq->p_node);
+ else
+ node = rb_prev(&__cfqq->p_node);
+ if (!node)
+ return NULL;
+
+ __cfqq = rb_entry(node, struct cfq_queue, p_node);
+ if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
+ return __cfqq;
+
+ return NULL;
+}
+
+/*
+ * cfqd - obvious
+ * cur_cfqq - passed in so that we don't decide that the current queue is
+ * closely cooperating with itself.
+ *
+ * So, basically we're assuming that cur_cfqq has dispatched at least
+ * one request, and that cfqd->last_position reflects a position on the disk
+ * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
+ * assumption.
+ */
+static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
+ struct cfq_queue *cur_cfqq)
+{
+ struct cfq_queue *cfqq;
+
+ if (cfq_class_idle(cur_cfqq))
+ return NULL;
+ if (!cfq_cfqq_sync(cur_cfqq))
+ return NULL;
+ if (CFQQ_SEEKY(cur_cfqq))
+ return NULL;
+
+ /*
+ * Don't search priority tree if it's the only queue in the group.
+ */
+ if (cur_cfqq->cfqg->nr_cfqq == 1)
+ return NULL;
+
+ /*
+ * We should notice if some of the queues are cooperating, e.g.
+ * working closely on the same area of the disk. In that case,
+ * we can group them together and not waste time idling.
+ */
+ cfqq = cfqq_close(cfqd, cur_cfqq);
+ if (!cfqq)
+ return NULL;
+
+ /* If new queue belongs to different cfq_group, don't choose it */
+ if (cur_cfqq->cfqg != cfqq->cfqg)
+ return NULL;
+
+ /*
+ * It only makes sense to merge sync queues.
+ */
+ if (!cfq_cfqq_sync(cfqq))
+ return NULL;
+ if (CFQQ_SEEKY(cfqq))
+ return NULL;
+
+ /*
+ * Do not merge queues of different priority classes
+ */
+ if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
+ return NULL;
+
+ return cfqq;
+}
+
+/*
+ * Determine whether we should enforce idle window for this queue.
+ */
+
+static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ enum wl_prio_t prio = cfqq_prio(cfqq);
+ struct cfq_rb_root *service_tree = cfqq->service_tree;
+
+ BUG_ON(!service_tree);
+ BUG_ON(!service_tree->count);
+
+ if (!cfqd->cfq_slice_idle)
+ return false;
+
+ /* We never do for idle class queues. */
+ if (prio == IDLE_WORKLOAD)
+ return false;
+
+ /* We do for queues that were marked with idle window flag. */
+ if (cfq_cfqq_idle_window(cfqq) &&
+ !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
+ return true;
+
+ /*
+ * Otherwise, we do only if they are the last ones
+ * in their service tree.
+ */
+ if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
+ !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
+ return true;
+ cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
+ service_tree->count);
+ return false;
+}
+
+static void cfq_arm_slice_timer(struct cfq_data *cfqd)
+{
+ struct cfq_queue *cfqq = cfqd->active_queue;
+ struct cfq_io_cq *cic;
+ unsigned long sl, group_idle = 0;
+
+ /*
+ * SSD device without seek penalty, disable idling. But only do so
+ * for devices that support queuing, otherwise we still have a problem
+ * with sync vs async workloads.
+ */
+ if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
+ return;
+
+ WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
+ WARN_ON(cfq_cfqq_slice_new(cfqq));
+
+ /*
+ * idle is disabled, either manually or by past process history
+ */
+ if (!cfq_should_idle(cfqd, cfqq)) {
+ /* no queue idling. Check for group idling */
+ if (cfqd->cfq_group_idle)
+ group_idle = cfqd->cfq_group_idle;
+ else
+ return;
+ }
+
+ /*
+ * still active requests from this queue, don't idle
+ */
+ if (cfqq->dispatched)
+ return;
+
+ /*
+ * task has exited, don't wait
+ */
+ cic = cfqd->active_cic;
+ if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks))
+ return;
+
+ /*
+ * If our average think time is larger than the remaining time
+ * slice, then don't idle. This avoids overrunning the allotted
+ * time slice.
+ */
+ if (sample_valid(cic->ttime.ttime_samples) &&
+ (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
+ cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
+ cic->ttime.ttime_mean);
+ return;
+ }
+
+ /* There are other queues in the group, don't do group idle */
+ if (group_idle && cfqq->cfqg->nr_cfqq > 1)
+ return;
+
+ cfq_mark_cfqq_wait_request(cfqq);
+
+ if (group_idle)
+ sl = cfqd->cfq_group_idle;
+ else
+ sl = cfqd->cfq_slice_idle;
+
+ mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
+ cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
+ cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
+ group_idle ? 1 : 0);
+}
+
+/*
+ * Move request from internal lists to the request queue dispatch list.
+ */
+static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct cfq_queue *cfqq = RQ_CFQQ(rq);
+
+ cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
+
+ cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
+ cfq_remove_request(rq);
+ cfqq->dispatched++;
+ (RQ_CFQG(rq))->dispatched++;
+ elv_dispatch_sort(q, rq);
+
+ cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
+ cfqq->nr_sectors += blk_rq_sectors(rq);
+ cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
+ rq_data_dir(rq), rq_is_sync(rq));
+}
+
+/*
+ * return expired entry, or NULL to just start from scratch in rbtree
+ */
+static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
+{
+ struct request *rq = NULL;
+
+ if (cfq_cfqq_fifo_expire(cfqq))
+ return NULL;
+
+ cfq_mark_cfqq_fifo_expire(cfqq);
+
+ if (list_empty(&cfqq->fifo))
+ return NULL;
+
+ rq = rq_entry_fifo(cfqq->fifo.next);
+ if (time_before(jiffies, rq_fifo_time(rq)))
+ rq = NULL;
+
+ cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
+ return rq;
+}
+
+static inline int
+cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ const int base_rq = cfqd->cfq_slice_async_rq;
+
+ WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
+
+ return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
+}
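+
+/*
+ * For illustration, assuming the default cfq_slice_async_rq of 2: a
+ * best-effort queue at ioprio 4 may dispatch up to 2 * 2 * (8 - 4) = 16
+ * requests in one slice before being expired, while an ioprio 0 queue
+ * gets 2 * 2 * 8 = 32, so higher-priority (lower ioprio) queues get
+ * proportionally deeper dispatch rounds.
+ */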
+
+/*
+ * Must be called with the queue_lock held.
+ */
+static int cfqq_process_refs(struct cfq_queue *cfqq)
+{
+ int process_refs, io_refs;
+
+ io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
+ process_refs = cfqq->ref - io_refs;
+ BUG_ON(process_refs < 0);
+ return process_refs;
+}
+
+static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
+{
+ int process_refs, new_process_refs;
+ struct cfq_queue *__cfqq;
+
+ /*
+ * If there are no process references on the new_cfqq, then it is
+ * unsafe to follow the ->new_cfqq chain as other cfqq's in the
+ * chain may have dropped their last reference (not just their
+ * last process reference).
+ */
+ if (!cfqq_process_refs(new_cfqq))
+ return;
+
+ /* Avoid a circular list and skip interim queue merges */
+ while ((__cfqq = new_cfqq->new_cfqq)) {
+ if (__cfqq == cfqq)
+ return;
+ new_cfqq = __cfqq;
+ }
+
+ process_refs = cfqq_process_refs(cfqq);
+ new_process_refs = cfqq_process_refs(new_cfqq);
+ /*
+ * If the process for the cfqq has gone away, there is no
+ * sense in merging the queues.
+ */
+ if (process_refs == 0 || new_process_refs == 0)
+ return;
+
+ /*
+ * Merge in the direction of the lesser amount of work.
+ */
+ if (new_process_refs >= process_refs) {
+ cfqq->new_cfqq = new_cfqq;
+ new_cfqq->ref += process_refs;
+ } else {
+ new_cfqq->new_cfqq = cfqq;
+ cfqq->ref += new_process_refs;
+ }
+}
+
+static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
+ struct cfq_group *cfqg, enum wl_prio_t prio)
+{
+ struct cfq_queue *queue;
+ int i;
+ bool key_valid = false;
+ unsigned long lowest_key = 0;
+ enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
+
+ for (i = 0; i <= SYNC_WORKLOAD; ++i) {
+ /* select the one with lowest rb_key */
+ queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
+ if (queue &&
+ (!key_valid || time_before(queue->rb_key, lowest_key))) {
+ lowest_key = queue->rb_key;
+ cur_best = i;
+ key_valid = true;
+ }
+ }
+
+ return cur_best;
+}
+
+static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
+{
+ unsigned slice;
+ unsigned count;
+ struct cfq_rb_root *st;
+ unsigned group_slice;
+ enum wl_prio_t original_prio = cfqd->serving_prio;
+
+ /* Choose next priority. RT > BE > IDLE */
+ if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
+ cfqd->serving_prio = RT_WORKLOAD;
+ else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
+ cfqd->serving_prio = BE_WORKLOAD;
+ else {
+ cfqd->serving_prio = IDLE_WORKLOAD;
+ cfqd->workload_expires = jiffies + 1;
+ return;
+ }
+
+ if (original_prio != cfqd->serving_prio)
+ goto new_workload;
+
+ /*
+ * For RT and BE, we have to choose also the type
+ * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
+ * expiration time
+ */
+ st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
+ count = st->count;
+
+ /*
+ * check workload expiration, and that we still have other queues ready
+ */
+ if (count && !time_after(jiffies, cfqd->workload_expires))
+ return;
+
+new_workload:
+ /* otherwise select new workload type */
+ cfqd->serving_type =
+ cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
+ st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
+ count = st->count;
+
+ /*
+ * the workload slice is computed as a fraction of target latency
+ * proportional to the number of queues in that workload, over
+ * all the queues in the same priority class
+ */
+ group_slice = cfq_group_slice(cfqd, cfqg);
+
+ slice = group_slice * count /
+ max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
+ cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
+
+ if (cfqd->serving_type == ASYNC_WORKLOAD) {
+ unsigned int tmp;
+
+ /*
+ * Async queues are currently system wide. Just taking the
+ * proportion of queues within the same group will lead to a
+ * higher async ratio system wide, as the root group generally
+ * has a higher weight. A more accurate approach would be to
+ * calculate a system wide async/sync ratio.
+ */
+ tmp = cfqd->cfq_target_latency *
+ cfqg_busy_async_queues(cfqd, cfqg);
+ tmp = tmp/cfqd->busy_queues;
+ slice = min_t(unsigned, slice, tmp);
+
+ /* async workload slice is scaled down according to
+ * the sync/async slice ratio. */
+ slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
+ } else
+ /* sync workload slice is at least 2 * cfq_slice_idle */
+ slice = max(slice, 2 * cfqd->cfq_slice_idle);
+
+ slice = max_t(unsigned, slice, CFQ_MIN_TT);
+ cfq_log(cfqd, "workload slice:%d", slice);
+ cfqd->workload_expires = jiffies + slice;
+}
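+
+/*
+ * Rough illustration of the slice arithmetic above, assuming the usual
+ * defaults (300ms target latency, 8ms slice_idle) and a single group
+ * whose group_slice is therefore the full target latency: if the chosen
+ * sync tree holds 2 of the group's 6 busy best-effort queues, the
+ * workload slice comes out at 300 * 2 / 6 = 100ms, well above the sync
+ * floor of 2 * 8 = 16ms and the CFQ_MIN_TT minimum.
+ */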
+
+static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
+{
+ struct cfq_rb_root *st = &cfqd->grp_service_tree;
+ struct cfq_group *cfqg;
+
+ if (RB_EMPTY_ROOT(&st->rb))
+ return NULL;
+ cfqg = cfq_rb_first_group(st);
+ update_min_vdisktime(st);
+ return cfqg;
+}
+
+static void cfq_choose_cfqg(struct cfq_data *cfqd)
+{
+ struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
+
+ cfqd->serving_group = cfqg;
+
+ /* Restore the workload type data */
+ if (cfqg->saved_workload_slice) {
+ cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
+ cfqd->serving_type = cfqg->saved_workload;
+ cfqd->serving_prio = cfqg->saved_serving_prio;
+ } else
+ cfqd->workload_expires = jiffies - 1;
+
+ choose_service_tree(cfqd, cfqg);
+}
+
+/*
+ * Select a queue for service. If we have a current active queue,
+ * check whether to continue servicing it, or retrieve and set a new one.
+ */
+static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
+{
+ struct cfq_queue *cfqq, *new_cfqq = NULL;
+
+ cfqq = cfqd->active_queue;
+ if (!cfqq)
+ goto new_queue;
+
+ if (!cfqd->rq_queued)
+ return NULL;
+
+ /*
+ * We were waiting for group to get backlogged. Expire the queue
+ */
+ if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
+ goto expire;
+
+ /*
+ * The active queue has run out of time, expire it and select new.
+ */
+ if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
+ /*
+ * If slice had not expired at the completion of last request
+ * we might not have turned on wait_busy flag. Don't expire
+ * the queue yet. Allow the group to get backlogged.
+ *
+ * The very fact that we have used up the slice means we have
+ * been idling all along on this queue and it should be ok to
+ * wait for this request to complete.
+ */
+ if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
+ && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
+ cfqq = NULL;
+ goto keep_queue;
+ } else
+ goto check_group_idle;
+ }
+
+ /*
+ * The active queue has requests and isn't expired, allow it to
+ * dispatch.
+ */
+ if (!RB_EMPTY_ROOT(&cfqq->sort_list))
+ goto keep_queue;
+
+ /*
+ * If another queue has a request waiting within our mean seek
+ * distance, let it run. The expire code will check for close
+ * cooperators and put the close queue at the front of the service
+ * tree. If possible, merge the expiring queue with the new cfqq.
+ */
+ new_cfqq = cfq_close_cooperator(cfqd, cfqq);
+ if (new_cfqq) {
+ if (!cfqq->new_cfqq)
+ cfq_setup_merge(cfqq, new_cfqq);
+ goto expire;
+ }
+
+ /*
+ * No requests pending. If the active queue still has requests in
+ * flight or is idling for a new request, allow either of these
+ * conditions to happen (or time out) before selecting a new queue.
+ */
+ if (timer_pending(&cfqd->idle_slice_timer)) {
+ cfqq = NULL;
+ goto keep_queue;
+ }
+
+ /*
+ * This is a deep seek queue, but the device is much faster than
+ * the queue can deliver; don't idle.
+ */
+ if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
+ (cfq_cfqq_slice_new(cfqq) ||
+ (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
+ cfq_clear_cfqq_deep(cfqq);
+ cfq_clear_cfqq_idle_window(cfqq);
+ }
+
+ if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
+ cfqq = NULL;
+ goto keep_queue;
+ }
+
+ /*
+ * If group idle is enabled and there are requests dispatched from
+ * this group, wait for requests to complete.
+ */
+check_group_idle:
+ if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
+ cfqq->cfqg->dispatched &&
+ !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
+ cfqq = NULL;
+ goto keep_queue;
+ }
+
+expire:
+ cfq_slice_expired(cfqd, 0);
+new_queue:
+ /*
+ * Current queue expired. Check if we have to switch to a new
+ * service tree
+ */
+ if (!new_cfqq)
+ cfq_choose_cfqg(cfqd);
+
+ cfqq = cfq_set_active_queue(cfqd, new_cfqq);
+keep_queue:
+ return cfqq;
+}
+
+static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
+{
+ int dispatched = 0;
+
+ while (cfqq->next_rq) {
+ cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
+ dispatched++;
+ }
+
+ BUG_ON(!list_empty(&cfqq->fifo));
+
+ /* By default cfqq is not expired if it is empty. Do it explicitly */
+ __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
+ return dispatched;
+}
+
+/*
+ * Drain our current requests. Used for barriers and when switching
+ * io schedulers on-the-fly.
+ */
+static int cfq_forced_dispatch(struct cfq_data *cfqd)
+{
+ struct cfq_queue *cfqq;
+ int dispatched = 0;
+
+ /* Expire the timeslice of the current active queue first */
+ cfq_slice_expired(cfqd, 0);
+ while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
+ __cfq_set_active_queue(cfqd, cfqq);
+ dispatched += __cfq_forced_dispatch_cfqq(cfqq);
+ }
+
+ BUG_ON(cfqd->busy_queues);
+
+ cfq_log(cfqd, "forced_dispatch=%d", dispatched);
+ return dispatched;
+}
+
+static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
+ struct cfq_queue *cfqq)
+{
+ /* the queue hasn't finished any request, can't estimate */
+ if (cfq_cfqq_slice_new(cfqq))
+ return true;
+ if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
+ cfqq->slice_end))
+ return true;
+
+ return false;
+}
+
+static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ unsigned int max_dispatch;
+
+ /*
+ * Drain async requests before we start sync IO
+ */
+ if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
+ return false;
+
+ /*
+ * If this is an async queue and we have sync IO in flight, let it wait
+ */
+ if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
+ return false;
+
+ max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
+ if (cfq_class_idle(cfqq))
+ max_dispatch = 1;
+
+ /*
+ * Does this cfqq already have too much IO in flight?
+ */
+ if (cfqq->dispatched >= max_dispatch) {
+ bool promote_sync = false;
+ /*
+ * idle queue must always only have a single IO in flight
+ */
+ if (cfq_class_idle(cfqq))
+ return false;
+
+ /*
+ * If there is only one sync queue, we can ignore the async
+ * queues here and give the sync queue no dispatch limit. The
+ * reason is that a sync queue can preempt an async queue, so
+ * limiting the sync queue doesn't make sense. This is useful
+ * for the aiostress test.
+ */
+ if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
+ promote_sync = true;
+
+ /*
+ * We have other queues, don't allow more IO from this one
+ */
+ if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
+ !promote_sync)
+ return false;
+
+ /*
+ * Sole queue user, no limit
+ */
+ if (cfqd->busy_queues == 1 || promote_sync)
+ max_dispatch = -1;
+ else
+ /*
+ * Normally we start throttling cfqq when cfq_quantum/2
+ * requests have been dispatched. But we can drive
+ * deeper queue depths at the beginning of the slice,
+ * subject to the upper limit of cfq_quantum.
+ */
+ max_dispatch = cfqd->cfq_quantum;
+ }
+
+ /*
+ * Async queues must wait a bit before being allowed dispatch.
+ * We also ramp up the dispatch depth gradually for async IO,
+ * based on the last sync IO we serviced
+ */
+ if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
+ unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
+ unsigned int depth;
+
+ depth = last_sync / cfqd->cfq_slice[1];
+ if (!depth && !cfqq->dispatched)
+ depth = 1;
+ if (depth < max_dispatch)
+ max_dispatch = depth;
+ }
+
+ /*
+ * If we're below the current max, allow a dispatch
+ */
+ return cfqq->dispatched < max_dispatch;
+}
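+
+/*
+ * Illustration of the async ramp-up above, assuming the default
+ * cfq_slice_sync of 100ms and cfq_latency enabled: the allowed async
+ * depth grows by one for every 100ms that have passed since the last
+ * delayed sync completion (with a floor of one request so the queue is
+ * never starved completely), and is always capped by the max_dispatch
+ * computed earlier.
+ */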
+
+/*
+ * Dispatch a request from cfqq, moving them to the request queue
+ * dispatch list.
+ */
+static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ struct request *rq;
+
+ BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+
+ if (!cfq_may_dispatch(cfqd, cfqq))
+ return false;
+
+ /*
+ * follow expired path, else get first next available
+ */
+ rq = cfq_check_fifo(cfqq);
+ if (!rq)
+ rq = cfqq->next_rq;
+
+ /*
+ * insert request into driver dispatch list
+ */
+ cfq_dispatch_insert(cfqd->queue, rq);
+
+ if (!cfqd->active_cic) {
+ struct cfq_io_cq *cic = RQ_CIC(rq);
+
+ atomic_long_inc(&cic->icq.ioc->refcount);
+ cfqd->active_cic = cic;
+ }
+
+ return true;
+}
+
+/*
+ * Find the cfqq that we need to service and move a request from that to the
+ * dispatch list
+ */
+static int cfq_dispatch_requests(struct request_queue *q, int force)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct cfq_queue *cfqq;
+
+ if (!cfqd->busy_queues)
+ return 0;
+
+ if (unlikely(force))
+ return cfq_forced_dispatch(cfqd);
+
+ cfqq = cfq_select_queue(cfqd);
+ if (!cfqq)
+ return 0;
+
+ /*
+ * Dispatch a request from this cfqq, if it is allowed
+ */
+ if (!cfq_dispatch_request(cfqd, cfqq))
+ return 0;
+
+ cfqq->slice_dispatch++;
+ cfq_clear_cfqq_must_dispatch(cfqq);
+
+ /*
+ * expire an async queue immediately if it has used up its slice. idle
+ * queue always expire after 1 dispatch round.
+ */
+ if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
+ cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
+ cfq_class_idle(cfqq))) {
+ cfqq->slice_end = jiffies + 1;
+ cfq_slice_expired(cfqd, 0);
+ }
+
+ cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
+ return 1;
+}
+
+/*
+ * task holds one reference to the queue, dropped when task exits. each rq
+ * in-flight on this queue also holds a reference, dropped when rq is freed.
+ *
+ * Each cfq queue took a reference on the parent group. Drop it now.
+ * queue lock must be held here.
+ */
+static void cfq_put_queue(struct cfq_queue *cfqq)
+{
+ struct cfq_data *cfqd = cfqq->cfqd;
+ struct cfq_group *cfqg;
+
+ BUG_ON(cfqq->ref <= 0);
+
+ cfqq->ref--;
+ if (cfqq->ref)
+ return;
+
+ cfq_log_cfqq(cfqd, cfqq, "put_queue");
+ BUG_ON(rb_first(&cfqq->sort_list));
+ BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
+ cfqg = cfqq->cfqg;
+
+ if (unlikely(cfqd->active_queue == cfqq)) {
+ __cfq_slice_expired(cfqd, cfqq, 0);
+ cfq_schedule_dispatch(cfqd);
+ }
+
+ BUG_ON(cfq_cfqq_on_rr(cfqq));
+ kmem_cache_free(cfq_pool, cfqq);
+ cfq_put_cfqg(cfqg);
+}
+
+static void cfq_put_cooperator(struct cfq_queue *cfqq)
+{
+ struct cfq_queue *__cfqq, *next;
+
+ /*
+ * If this queue was scheduled to merge with another queue, be
+ * sure to drop the reference taken on that queue (and others in
+ * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
+ */
+ __cfqq = cfqq->new_cfqq;
+ while (__cfqq) {
+ if (__cfqq == cfqq) {
+ WARN(1, "cfqq->new_cfqq loop detected\n");
+ break;
+ }
+ next = __cfqq->new_cfqq;
+ cfq_put_queue(__cfqq);
+ __cfqq = next;
+ }
+}
+
+static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ if (unlikely(cfqq == cfqd->active_queue)) {
+ __cfq_slice_expired(cfqd, cfqq, 0);
+ cfq_schedule_dispatch(cfqd);
+ }
+
+ cfq_put_cooperator(cfqq);
+
+ cfq_put_queue(cfqq);
+}
+
+static void cfq_init_icq(struct io_cq *icq)
+{
+ struct cfq_io_cq *cic = icq_to_cic(icq);
+
+ cic->ttime.last_end_request = jiffies;
+}
+
+static void cfq_exit_icq(struct io_cq *icq)
+{
+ struct cfq_io_cq *cic = icq_to_cic(icq);
+ struct cfq_data *cfqd = cic_to_cfqd(cic);
+
+ if (cic->cfqq[BLK_RW_ASYNC]) {
+ cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
+ cic->cfqq[BLK_RW_ASYNC] = NULL;
+ }
+
+ if (cic->cfqq[BLK_RW_SYNC]) {
+ cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
+ cic->cfqq[BLK_RW_SYNC] = NULL;
+ }
+}
+
+static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
+{
+ struct task_struct *tsk = current;
+ int ioprio_class;
+
+ if (!cfq_cfqq_prio_changed(cfqq))
+ return;
+
+ ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
+ switch (ioprio_class) {
+ default:
+ printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
+ case IOPRIO_CLASS_NONE:
+ /*
+ * no prio set, inherit CPU scheduling settings
+ */
+ cfqq->ioprio = task_nice_ioprio(tsk);
+ cfqq->ioprio_class = task_nice_ioclass(tsk);
+ break;
+ case IOPRIO_CLASS_RT:
+ cfqq->ioprio = task_ioprio(ioc);
+ cfqq->ioprio_class = IOPRIO_CLASS_RT;
+ break;
+ case IOPRIO_CLASS_BE:
+ cfqq->ioprio = task_ioprio(ioc);
+ cfqq->ioprio_class = IOPRIO_CLASS_BE;
+ break;
+ case IOPRIO_CLASS_IDLE:
+ cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
+ cfqq->ioprio = 7;
+ cfq_clear_cfqq_idle_window(cfqq);
+ break;
+ }
+
+ /*
+ * keep track of original prio settings in case we have to temporarily
+ * elevate the priority of this queue
+ */
+ cfqq->org_ioprio = cfqq->ioprio;
+ cfq_clear_cfqq_prio_changed(cfqq);
+}
+
+static void changed_ioprio(struct cfq_io_cq *cic)
+{
+ struct cfq_data *cfqd = cic_to_cfqd(cic);
+ struct cfq_queue *cfqq;
+
+ if (unlikely(!cfqd))
+ return;
+
+ cfqq = cic->cfqq[BLK_RW_ASYNC];
+ if (cfqq) {
+ struct cfq_queue *new_cfqq;
+ new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc,
+ GFP_ATOMIC);
+ if (new_cfqq) {
+ cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
+ cfq_put_queue(cfqq);
+ }
+ }
+
+ cfqq = cic->cfqq[BLK_RW_SYNC];
+ if (cfqq)
+ cfq_mark_cfqq_prio_changed(cfqq);
+}
+
+static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ pid_t pid, bool is_sync)
+{
+ RB_CLEAR_NODE(&cfqq->rb_node);
+ RB_CLEAR_NODE(&cfqq->p_node);
+ INIT_LIST_HEAD(&cfqq->fifo);
+
+ cfqq->ref = 0;
+ cfqq->cfqd = cfqd;
+
+ cfq_mark_cfqq_prio_changed(cfqq);
+
+ if (is_sync) {
+ if (!cfq_class_idle(cfqq))
+ cfq_mark_cfqq_idle_window(cfqq);
+ cfq_mark_cfqq_sync(cfqq);
+ }
+ cfqq->pid = pid;
+}
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+static void changed_cgroup(struct cfq_io_cq *cic)
+{
+ struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
+ struct cfq_data *cfqd = cic_to_cfqd(cic);
+ struct request_queue *q;
+
+ if (unlikely(!cfqd))
+ return;
+
+ q = cfqd->queue;
+
+ if (sync_cfqq) {
+ /*
+ * Drop reference to sync queue. A new sync queue will be
+ * assigned in new group upon arrival of a fresh request.
+ */
+ cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
+ cic_set_cfqq(cic, NULL, 1);
+ cfq_put_queue(sync_cfqq);
+ }
+}
+#endif /* CONFIG_CFQ_GROUP_IOSCHED */
+
+static struct cfq_queue *
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
+ struct io_context *ioc, gfp_t gfp_mask)
+{
+ struct cfq_queue *cfqq, *new_cfqq = NULL;
+ struct cfq_io_cq *cic;
+ struct cfq_group *cfqg;
+
+retry:
+ cfqg = cfq_get_cfqg(cfqd);
+ cic = cfq_cic_lookup(cfqd, ioc);
+ /* cic always exists here */
+ cfqq = cic_to_cfqq(cic, is_sync);
+
+ /*
+ * Always try a new alloc if we fell back to the OOM cfqq
+ * originally, since it should just be a temporary situation.
+ */
+ if (!cfqq || cfqq == &cfqd->oom_cfqq) {
+ cfqq = NULL;
+ if (new_cfqq) {
+ cfqq = new_cfqq;
+ new_cfqq = NULL;
+ } else if (gfp_mask & __GFP_WAIT) {
+ spin_unlock_irq(cfqd->queue->queue_lock);
+ new_cfqq = kmem_cache_alloc_node(cfq_pool,
+ gfp_mask | __GFP_ZERO,
+ cfqd->queue->node);
+ spin_lock_irq(cfqd->queue->queue_lock);
+ if (new_cfqq)
+ goto retry;
+ } else {
+ cfqq = kmem_cache_alloc_node(cfq_pool,
+ gfp_mask | __GFP_ZERO,
+ cfqd->queue->node);
+ }
+
+ if (cfqq) {
+ cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
+ cfq_init_prio_data(cfqq, ioc);
+ cfq_link_cfqq_cfqg(cfqq, cfqg);
+ cfq_log_cfqq(cfqd, cfqq, "alloced");
+ } else
+ cfqq = &cfqd->oom_cfqq;
+ }
+
+ if (new_cfqq)
+ kmem_cache_free(cfq_pool, new_cfqq);
+
+ return cfqq;
+}
+
+static struct cfq_queue **
+cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
+{
+ switch (ioprio_class) {
+ case IOPRIO_CLASS_RT:
+ return &cfqd->async_cfqq[0][ioprio];
+ case IOPRIO_CLASS_BE:
+ return &cfqd->async_cfqq[1][ioprio];
+ case IOPRIO_CLASS_IDLE:
+ return &cfqd->async_idle_cfqq;
+ default:
+ BUG();
+ }
+}
+
+static struct cfq_queue *
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
+ gfp_t gfp_mask)
+{
+ const int ioprio = task_ioprio(ioc);
+ const int ioprio_class = task_ioprio_class(ioc);
+ struct cfq_queue **async_cfqq = NULL;
+ struct cfq_queue *cfqq = NULL;
+
+ if (!is_sync) {
+ async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
+ cfqq = *async_cfqq;
+ }
+
+ if (!cfqq)
+ cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
+
+ /*
+ * pin the queue now that it's allocated, scheduler exit will prune it
+ */
+ if (!is_sync && !(*async_cfqq)) {
+ cfqq->ref++;
+ *async_cfqq = cfqq;
+ }
+
+ cfqq->ref++;
+ return cfqq;
+}
+
+static void
+__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
+{
+ unsigned long elapsed = jiffies - ttime->last_end_request;
+ elapsed = min(elapsed, 2UL * slice_idle);
+
+ ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
+ ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
+ ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
+}
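+
+/*
+ * The update above is a fixed-point exponentially weighted average with a
+ * 7/8 decay: ttime_samples converges towards 256 (acting as the "1.0"
+ * weight) and ttime_total towards 256 * elapsed, so ttime_mean tracks the
+ * recent average think time; the +128 merely rounds the division. With a
+ * steady elapsed of 8 jiffies, for example, the mean settles at 8.
+ */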
+
+static void
+cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ struct cfq_io_cq *cic)
+{
+ if (cfq_cfqq_sync(cfqq)) {
+ __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
+ __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
+ cfqd->cfq_slice_idle);
+ }
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
+#endif
+}
+
+static void
+cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ struct request *rq)
+{
+ sector_t sdist = 0;
+ sector_t n_sec = blk_rq_sectors(rq);
+ if (cfqq->last_request_pos) {
+ if (cfqq->last_request_pos < blk_rq_pos(rq))
+ sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
+ else
+ sdist = cfqq->last_request_pos - blk_rq_pos(rq);
+ }
+
+ cfqq->seek_history <<= 1;
+ if (blk_queue_nonrot(cfqd->queue))
+ cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
+ else
+ cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
+}
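+
+/*
+ * seek_history above behaves as a shift register of recent requests: each
+ * request shifts in one bit recording whether it looked seeky (distance
+ * above CFQQ_SEEK_THR) or, on non-rotational devices, whether it was small
+ * (below CFQQ_SECT_THR_NONROT). The CFQQ_SEEKY() test defined earlier in
+ * this file classifies the queue from this recent history.
+ */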
+
+/*
+ * Disable idle window if the process thinks too long or seeks so much that
+ * it doesn't matter
+ */
+static void
+cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ struct cfq_io_cq *cic)
+{
+ int old_idle, enable_idle;
+
+ /*
+ * Don't idle for async or idle io prio class
+ */
+ if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
+ return;
+
+ enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
+
+ if (cfqq->queued[0] + cfqq->queued[1] >= 4)
+ cfq_mark_cfqq_deep(cfqq);
+
+ if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
+ enable_idle = 0;
+ else if (!atomic_read(&cic->icq.ioc->nr_tasks) ||
+ !cfqd->cfq_slice_idle ||
+ (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
+ enable_idle = 0;
+ else if (sample_valid(cic->ttime.ttime_samples)) {
+ if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
+ enable_idle = 0;
+ else
+ enable_idle = 1;
+ }
+
+ if (old_idle != enable_idle) {
+ cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
+ if (enable_idle)
+ cfq_mark_cfqq_idle_window(cfqq);
+ else
+ cfq_clear_cfqq_idle_window(cfqq);
+ }
+}
+
+/*
+ * Check if new_cfqq should preempt the currently active queue. Returns false
+ * for no (or if we aren't sure); returning true will cause a preempt.
+ */
+static bool
+cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
+ struct request *rq)
+{
+ struct cfq_queue *cfqq;
+
+ cfqq = cfqd->active_queue;
+ if (!cfqq)
+ return false;
+
+ if (cfq_class_idle(new_cfqq))
+ return false;
+
+ if (cfq_class_idle(cfqq))
+ return true;
+
+ /*
+ * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
+ */
+ if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
+ return false;
+
+ /*
+ * if the new request is sync, but the currently running queue is
+ * not, let the sync request have priority.
+ */
+ if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
+ return true;
+
+ if (new_cfqq->cfqg != cfqq->cfqg)
+ return false;
+
+ if (cfq_slice_used(cfqq))
+ return true;
+
+ /* Allow preemption only if we are idling on sync-noidle tree */
+ if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
+ cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
+ new_cfqq->service_tree->count == 2 &&
+ RB_EMPTY_ROOT(&cfqq->sort_list))
+ return true;
+
+ /*
+ * So both queues are sync. Let the new request get disk time if
+ * it's a metadata request and the current queue is doing regular IO.
+ */
+ if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
+ return true;
+
+ /*
+ * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
+ */
+ if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
+ return true;
+
+ /* An idle queue should not be idle now for some reason */
+ if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
+ return true;
+
+ if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
+ return false;
+
+ /*
+ * if this request is as good as one we would expect from the
+ * current cfqq, let it preempt
+ */
+ if (cfq_rq_close(cfqd, cfqq, rq))
+ return true;
+
+ return false;
+}
+
+/*
+ * cfqq preempts the active queue. if we allowed preempt with no slice left,
+ * let it have half of its nominal slice.
+ */
+static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
+
+ cfq_log_cfqq(cfqd, cfqq, "preempt");
+ cfq_slice_expired(cfqd, 1);
+
+ /*
+ * The workload type has changed; don't save the slice, otherwise
+ * the preemption won't take effect.
+ */
+ if (old_type != cfqq_type(cfqq))
+ cfqq->cfqg->saved_workload_slice = 0;
+
+ /*
+ * Put the new queue at the front of the current list,
+ * so we know that it will be selected next.
+ */
+ BUG_ON(!cfq_cfqq_on_rr(cfqq));
+
+ cfq_service_tree_add(cfqd, cfqq, 1);
+
+ cfqq->slice_end = 0;
+ cfq_mark_cfqq_slice_new(cfqq);
+}
+
+/*
+ * Called when a new fs request (rq) is added (to cfqq). Check if there's
+ * something we should do about it
+ */
+static void
+cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ struct request *rq)
+{
+ struct cfq_io_cq *cic = RQ_CIC(rq);
+
+ cfqd->rq_queued++;
+ if (rq->cmd_flags & REQ_PRIO)
+ cfqq->prio_pending++;
+
+ cfq_update_io_thinktime(cfqd, cfqq, cic);
+ cfq_update_io_seektime(cfqd, cfqq, rq);
+ cfq_update_idle_window(cfqd, cfqq, cic);
+
+ cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
+
+ if (cfqq == cfqd->active_queue) {
+ /*
+ * Remember that we saw a request from this process, but
+ * don't start queuing just yet. Otherwise we risk seeing lots
+ * of tiny requests, because we disrupt the normal plugging
+ * and merging. If the request is already larger than a single
+ * page, let it rip immediately. For that case we assume that
+ * merging is already done. Ditto for a busy system that
+ * has other work pending, don't risk delaying until the
+ * idle timer unplug to continue working.
+ */
+ if (cfq_cfqq_wait_request(cfqq)) {
+ if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
+ cfqd->busy_queues > 1) {
+ cfq_del_timer(cfqd, cfqq);
+ cfq_clear_cfqq_wait_request(cfqq);
+ __blk_run_queue(cfqd->queue);
+ } else {
+ cfq_blkiocg_update_idle_time_stats(
+ &cfqq->cfqg->blkg);
+ cfq_mark_cfqq_must_dispatch(cfqq);
+ }
+ }
+ } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
+ /*
+ * not the active queue - expire the current slice if it is
+ * idle and has exceeded its mean thinktime, or this new queue
+ * has some old slice time left and is of higher priority, or
+ * this new queue is RT and the current one is BE
+ */
+ cfq_preempt_queue(cfqd, cfqq);
+ __blk_run_queue(cfqd->queue);
+ }
+}
+
+static void cfq_insert_request(struct request_queue *q, struct request *rq)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct cfq_queue *cfqq = RQ_CFQQ(rq);
+
+ cfq_log_cfqq(cfqd, cfqq, "insert_request");
+ cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc);
+
+ rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
+ list_add_tail(&rq->queuelist, &cfqq->fifo);
+ cfq_add_rq_rb(rq);
+ cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+ &cfqd->serving_group->blkg, rq_data_dir(rq),
+ rq_is_sync(rq));
+ cfq_rq_enqueued(cfqd, cfqq, rq);
+}
+
+/*
+ * Update hw_tag based on peak queue depth over 50 samples under
+ * sufficient load.
+ */
+static void cfq_update_hw_tag(struct cfq_data *cfqd)
+{
+ struct cfq_queue *cfqq = cfqd->active_queue;
+
+ if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
+ cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
+
+ if (cfqd->hw_tag == 1)
+ return;
+
+ if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
+ cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
+ return;
+
+ /*
+ * If the active queue doesn't have enough requests and can idle, cfq
+ * might not dispatch sufficient requests to the hardware. Don't zero
+ * hw_tag in this case.
+ */
+ if (cfqq && cfq_cfqq_idle_window(cfqq) &&
+ cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
+ CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
+ return;
+
+ if (cfqd->hw_tag_samples++ < 50)
+ return;
+
+ if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
+ cfqd->hw_tag = 1;
+ else
+ cfqd->hw_tag = 0;
+}
+
+static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ struct cfq_io_cq *cic = cfqd->active_cic;
+
+ /* If the queue already has requests, don't wait */
+ if (!RB_EMPTY_ROOT(&cfqq->sort_list))
+ return false;
+
+ /* If there are other queues in the group, don't wait */
+ if (cfqq->cfqg->nr_cfqq > 1)
+ return false;
+
+ /* the only queue in the group, but think time is big */
+ if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
+ return false;
+
+ if (cfq_slice_used(cfqq))
+ return true;
+
+ /* if slice left is less than think time, wait busy */
+ if (cic && sample_valid(cic->ttime.ttime_samples)
+ && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
+ return true;
+
+ /*
+ * If the think time is less than a jiffy, then ttime_mean=0 and the
+ * check above will not be true. It might happen that the slice has not
+ * expired yet but will expire soon (4-5 ns) during select_queue(). To
+ * cover the case where the think time is less than a jiffy, mark the
+ * queue wait busy if only 1 jiffy is left in the slice.
+ */
+ if (cfqq->slice_end - jiffies == 1)
+ return true;
+
+ return false;
+}
+
+static void cfq_completed_request(struct request_queue *q, struct request *rq)
+{
+ struct cfq_queue *cfqq = RQ_CFQQ(rq);
+ struct cfq_data *cfqd = cfqq->cfqd;
+ const int sync = rq_is_sync(rq);
+ unsigned long now;
+
+ now = jiffies;
+ cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
+ !!(rq->cmd_flags & REQ_NOIDLE));
+
+ cfq_update_hw_tag(cfqd);
+
+ WARN_ON(!cfqd->rq_in_driver);
+ WARN_ON(!cfqq->dispatched);
+ cfqd->rq_in_driver--;
+ cfqq->dispatched--;
+ (RQ_CFQG(rq))->dispatched--;
+ cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
+ rq_start_time_ns(rq), rq_io_start_time_ns(rq),
+ rq_data_dir(rq), rq_is_sync(rq));
+
+ cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
+
+ if (sync) {
+ struct cfq_rb_root *service_tree;
+
+ RQ_CIC(rq)->ttime.last_end_request = now;
+
+ if (cfq_cfqq_on_rr(cfqq))
+ service_tree = cfqq->service_tree;
+ else
+ service_tree = service_tree_for(cfqq->cfqg,
+ cfqq_prio(cfqq), cfqq_type(cfqq));
+ service_tree->ttime.last_end_request = now;
+ if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
+ cfqd->last_delayed_sync = now;
+ }
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ cfqq->cfqg->ttime.last_end_request = now;
+#endif
+
+ /*
+ * If this is the active queue, check if it needs to be expired,
+ * or if we want to idle in case it has no pending requests.
+ */
+ if (cfqd->active_queue == cfqq) {
+ const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
+
+ if (cfq_cfqq_slice_new(cfqq)) {
+ cfq_set_prio_slice(cfqd, cfqq);
+ cfq_clear_cfqq_slice_new(cfqq);
+ }
+
+ /*
+ * Should we wait for next request to come in before we expire
+ * the queue.
+ */
+ if (cfq_should_wait_busy(cfqd, cfqq)) {
+ unsigned long extend_sl = cfqd->cfq_slice_idle;
+ if (!cfqd->cfq_slice_idle)
+ extend_sl = cfqd->cfq_group_idle;
+ cfqq->slice_end = jiffies + extend_sl;
+ cfq_mark_cfqq_wait_busy(cfqq);
+ cfq_log_cfqq(cfqd, cfqq, "will busy wait");
+ }
+
+ /*
+ * Idling is not enabled on:
+ * - expired queues
+ * - idle-priority queues
+ * - async queues
+ * - queues with still some requests queued
+ * - when there is a close cooperator
+ */
+ if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
+ cfq_slice_expired(cfqd, 1);
+ else if (sync && cfqq_empty &&
+ !cfq_close_cooperator(cfqd, cfqq)) {
+ cfq_arm_slice_timer(cfqd);
+ }
+ }
+
+ if (!cfqd->rq_in_driver)
+ cfq_schedule_dispatch(cfqd);
+}
+
+static inline int __cfq_may_queue(struct cfq_queue *cfqq)
+{
+ if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
+ cfq_mark_cfqq_must_alloc_slice(cfqq);
+ return ELV_MQUEUE_MUST;
+ }
+
+ return ELV_MQUEUE_MAY;
+}
+
+static int cfq_may_queue(struct request_queue *q, int rw)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct task_struct *tsk = current;
+ struct cfq_io_cq *cic;
+ struct cfq_queue *cfqq;
+
+ /*
+ * don't force setup of a queue from here, as a call to may_queue
+ * does not necessarily imply that a request actually will be queued.
+ * so just lookup a possibly existing queue, or return 'may queue'
+ * if that fails
+ */
+ cic = cfq_cic_lookup(cfqd, tsk->io_context);
+ if (!cic)
+ return ELV_MQUEUE_MAY;
+
+ cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
+ if (cfqq) {
+ cfq_init_prio_data(cfqq, cic->icq.ioc);
+
+ return __cfq_may_queue(cfqq);
+ }
+
+ return ELV_MQUEUE_MAY;
+}
+
+/*
+ * queue lock held here
+ */
+static void cfq_put_request(struct request *rq)
+{
+ struct cfq_queue *cfqq = RQ_CFQQ(rq);
+
+ if (cfqq) {
+ const int rw = rq_data_dir(rq);
+
+ BUG_ON(!cfqq->allocated[rw]);
+ cfqq->allocated[rw]--;
+
+ /* Put down rq reference on cfqg */
+ cfq_put_cfqg(RQ_CFQG(rq));
+ rq->elv.priv[0] = NULL;
+ rq->elv.priv[1] = NULL;
+
+ cfq_put_queue(cfqq);
+ }
+}
+
+static struct cfq_queue *
+cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
+ struct cfq_queue *cfqq)
+{
+ cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
+ cic_set_cfqq(cic, cfqq->new_cfqq, 1);
+ cfq_mark_cfqq_coop(cfqq->new_cfqq);
+ cfq_put_queue(cfqq);
+ return cic_to_cfqq(cic, 1);
+}
+
+/*
+ * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
+ * was the last process referring to said cfqq.
+ */
+static struct cfq_queue *
+split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
+{
+ if (cfqq_process_refs(cfqq) == 1) {
+ cfqq->pid = current->pid;
+ cfq_clear_cfqq_coop(cfqq);
+ cfq_clear_cfqq_split_coop(cfqq);
+ return cfqq;
+ }
+
+ cic_set_cfqq(cic, NULL, 1);
+
+ cfq_put_cooperator(cfqq);
+
+ cfq_put_queue(cfqq);
+ return NULL;
+}
+/*
+ * Allocate cfq data structures associated with this request.
+ */
+static int
+cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
+ const int rw = rq_data_dir(rq);
+ const bool is_sync = rq_is_sync(rq);
+ struct cfq_queue *cfqq;
+ unsigned int changed;
+
+ might_sleep_if(gfp_mask & __GFP_WAIT);
+
+ spin_lock_irq(q->queue_lock);
+
+ /* handle changed notifications */
+ changed = icq_get_changed(&cic->icq);
+ if (unlikely(changed & ICQ_IOPRIO_CHANGED))
+ changed_ioprio(cic);
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ if (unlikely(changed & ICQ_CGROUP_CHANGED))
+ changed_cgroup(cic);
+#endif
+
+new_queue:
+ cfqq = cic_to_cfqq(cic, is_sync);
+ if (!cfqq || cfqq == &cfqd->oom_cfqq) {
+ cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, gfp_mask);
+ cic_set_cfqq(cic, cfqq, is_sync);
+ } else {
+ /*
+ * If the queue was seeky for too long, break it apart.
+ */
+ if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
+ cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
+ cfqq = split_cfqq(cic, cfqq);
+ if (!cfqq)
+ goto new_queue;
+ }
+
+ /*
+ * Check to see if this queue is scheduled to merge with
+ * another, closely cooperating queue. The merging of
+ * queues happens here as it must be done in process context.
+ * The reference on new_cfqq was taken in merge_cfqqs.
+ */
+ if (cfqq->new_cfqq)
+ cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
+ }
+
+ cfqq->allocated[rw]++;
+
+ cfqq->ref++;
+ rq->elv.priv[0] = cfqq;
+ rq->elv.priv[1] = cfq_ref_get_cfqg(cfqq->cfqg);
+ spin_unlock_irq(q->queue_lock);
+ return 0;
+}
+
+static void cfq_kick_queue(struct work_struct *work)
+{
+ struct cfq_data *cfqd =
+ container_of(work, struct cfq_data, unplug_work);
+ struct request_queue *q = cfqd->queue;
+
+ spin_lock_irq(q->queue_lock);
+ __blk_run_queue(cfqd->queue);
+ spin_unlock_irq(q->queue_lock);
+}
+
+/*
+ * Timer running if the active_queue is currently idling inside its time slice
+ */
+static void cfq_idle_slice_timer(unsigned long data)
+{
+ struct cfq_data *cfqd = (struct cfq_data *) data;
+ struct cfq_queue *cfqq;
+ unsigned long flags;
+ int timed_out = 1;
+
+ cfq_log(cfqd, "idle timer fired");
+
+ spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+
+ cfqq = cfqd->active_queue;
+ if (cfqq) {
+ timed_out = 0;
+
+ /*
+ * We saw a request before the queue expired, let it through
+ */
+ if (cfq_cfqq_must_dispatch(cfqq))
+ goto out_kick;
+
+ /*
+ * expired
+ */
+ if (cfq_slice_used(cfqq))
+ goto expire;
+
+ /*
+ * only expire and reinvoke request handler, if there are
+ * other queues with pending requests
+ */
+ if (!cfqd->busy_queues)
+ goto out_cont;
+
+ /*
+ * not expired and it has a request pending, let it dispatch
+ */
+ if (!RB_EMPTY_ROOT(&cfqq->sort_list))
+ goto out_kick;
+
+ /*
+ * Queue depth flag is reset only when the idle didn't succeed
+ */
+ cfq_clear_cfqq_deep(cfqq);
+ }
+expire:
+ cfq_slice_expired(cfqd, timed_out);
+out_kick:
+ cfq_schedule_dispatch(cfqd);
+out_cont:
+ spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+}
+
+static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
+{
+ del_timer_sync(&cfqd->idle_slice_timer);
+ cancel_work_sync(&cfqd->unplug_work);
+}
+
+static void cfq_put_async_queues(struct cfq_data *cfqd)
+{
+ int i;
+
+ for (i = 0; i < IOPRIO_BE_NR; i++) {
+ if (cfqd->async_cfqq[0][i])
+ cfq_put_queue(cfqd->async_cfqq[0][i]);
+ if (cfqd->async_cfqq[1][i])
+ cfq_put_queue(cfqd->async_cfqq[1][i]);
+ }
+
+ if (cfqd->async_idle_cfqq)
+ cfq_put_queue(cfqd->async_idle_cfqq);
+}
+
+static void cfq_exit_queue(struct elevator_queue *e)
+{
+ struct cfq_data *cfqd = e->elevator_data;
+ struct request_queue *q = cfqd->queue;
+ bool wait = false;
+
+ cfq_shutdown_timer_wq(cfqd);
+
+ spin_lock_irq(q->queue_lock);
+
+ if (cfqd->active_queue)
+ __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
+
+ cfq_put_async_queues(cfqd);
+ cfq_release_cfq_groups(cfqd);
+
+ /*
+ * If there are groups which we could not unlink from blkcg list,
+ * wait for a rcu period for them to be freed.
+ */
+ if (cfqd->nr_blkcg_linked_grps)
+ wait = true;
+
+ spin_unlock_irq(q->queue_lock);
+
+ cfq_shutdown_timer_wq(cfqd);
+
+ /*
+ * Wait for cfqg->blkg->key accessors to exit their grace periods.
+ * Do this wait only if there are other unlinked groups out
+ * there. This can happen if cgroup deletion path claimed the
+ * responsibility of cleaning up a group before queue cleanup code
+ * get to the group.
+ *
+ * Do not call synchronize_rcu() unconditionally as there are drivers
+ * which create/delete request queue hundreds of times during scan/boot
+ * and synchronize_rcu() can take significant time and slow down boot.
+ */
+ if (wait)
+ synchronize_rcu();
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ /* Free up per cpu stats for root group */
+ free_percpu(cfqd->root_group.blkg.stats_cpu);
+#endif
+ kfree(cfqd);
+}
+
+static void *cfq_init_queue(struct request_queue *q)
+{
+ struct cfq_data *cfqd;
+ int i, j;
+ struct cfq_group *cfqg;
+ struct cfq_rb_root *st;
+
+ cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
+ if (!cfqd)
+ return NULL;
+
+ /* Init root service tree */
+ cfqd->grp_service_tree = CFQ_RB_ROOT;
+
+ /* Init root group */
+ cfqg = &cfqd->root_group;
+ for_each_cfqg_st(cfqg, i, j, st)
+ *st = CFQ_RB_ROOT;
+ RB_CLEAR_NODE(&cfqg->rb_node);
+
+ /* Give preference to root group over other groups */
+ cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ /*
+ * Set root group reference to 2. One reference will be dropped when
+ * all groups on cfqd->cfqg_list are being deleted during queue exit.
+ * Other reference will remain there as we don't want to delete this
+ * group as it is statically allocated and gets destroyed when
+ * throtl_data goes away.
+ */
+ cfqg->ref = 2;
+
+ if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
+ kfree(cfqg);
+ kfree(cfqd);
+ return NULL;
+ }
+
+ rcu_read_lock();
+
+ cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
+ (void *)cfqd, 0);
+ rcu_read_unlock();
+ cfqd->nr_blkcg_linked_grps++;
+
+ /* Add group on cfqd->cfqg_list */
+ hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
+#endif
+ /*
+ * Not strictly needed (since RB_ROOT just clears the node and we
+ * zeroed cfqd on alloc), but better be safe in case someone decides
+ * to add magic to the rb code
+ */
+ for (i = 0; i < CFQ_PRIO_LISTS; i++)
+ cfqd->prio_trees[i] = RB_ROOT;
+
+ /*
+ * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
+ * Grab a permanent reference to it, so that the normal code flow
+ * will not attempt to free it.
+ */
+ cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
+ cfqd->oom_cfqq.ref++;
+ cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
+
+ cfqd->queue = q;
+
+ init_timer(&cfqd->idle_slice_timer);
+ cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
+ cfqd->idle_slice_timer.data = (unsigned long) cfqd;
+
+ INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
+
+ cfqd->cfq_quantum = cfq_quantum;
+ cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
+ cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
+ cfqd->cfq_back_max = cfq_back_max;
+ cfqd->cfq_back_penalty = cfq_back_penalty;
+ cfqd->cfq_slice[0] = cfq_slice_async;
+ cfqd->cfq_slice[1] = cfq_slice_sync;
+ cfqd->cfq_target_latency = cfq_target_latency;
+ cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
+ cfqd->cfq_slice_idle = cfq_slice_idle;
+ cfqd->cfq_group_idle = cfq_group_idle;
+ cfqd->cfq_latency = 1;
+ cfqd->hw_tag = -1;
+ /*
+ * we optimistically start by assuming sync ops weren't delayed in the
+ * last second, in order to have a larger depth for async operations.
+ */
+ cfqd->last_delayed_sync = jiffies - HZ;
+ return cfqd;
+}
+
+/*
+ * sysfs parts below -->
+ */
+static ssize_t
+cfq_var_show(unsigned int var, char *page)
+{
+ return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+cfq_var_store(unsigned int *var, const char *page, size_t count)
+{
+ char *p = (char *) page;
+
+ *var = simple_strtoul(p, &p, 10);
+ return count;
+}
+
+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, char *page) \
+{ \
+ struct cfq_data *cfqd = e->elevator_data; \
+ unsigned int __data = __VAR; \
+ if (__CONV) \
+ __data = jiffies_to_msecs(__data); \
+ return cfq_var_show(__data, (page)); \
+}
+SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
+SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
+SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
+SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
+SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
+SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
+SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
+SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
+SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
+SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
+SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
+SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
+#undef SHOW_FUNCTION
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
+{ \
+ struct cfq_data *cfqd = e->elevator_data; \
+ unsigned int __data; \
+ int ret = cfq_var_store(&__data, (page), count); \
+ if (__data < (MIN)) \
+ __data = (MIN); \
+ else if (__data > (MAX)) \
+ __data = (MAX); \
+ if (__CONV) \
+ *(__PTR) = msecs_to_jiffies(__data); \
+ else \
+ *(__PTR) = __data; \
+ return ret; \
+}
+STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
+ UINT_MAX, 1);
+STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
+ UINT_MAX, 1);
+STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
+STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
+ UINT_MAX, 0);
+STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
+STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
+STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
+ UINT_MAX, 0);
+STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
+STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
+#undef STORE_FUNCTION
+
+#define CFQ_ATTR(name) \
+ __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
+
+static struct elv_fs_entry cfq_attrs[] = {
+ CFQ_ATTR(quantum),
+ CFQ_ATTR(fifo_expire_sync),
+ CFQ_ATTR(fifo_expire_async),
+ CFQ_ATTR(back_seek_max),
+ CFQ_ATTR(back_seek_penalty),
+ CFQ_ATTR(slice_sync),
+ CFQ_ATTR(slice_async),
+ CFQ_ATTR(slice_async_rq),
+ CFQ_ATTR(slice_idle),
+ CFQ_ATTR(group_idle),
+ CFQ_ATTR(low_latency),
+ CFQ_ATTR(target_latency),
+ __ATTR_NULL
+};
+
+static struct elevator_type iosched_cfq = {
+ .ops = {
+ .elevator_merge_fn = cfq_merge,
+ .elevator_merged_fn = cfq_merged_request,
+ .elevator_merge_req_fn = cfq_merged_requests,
+ .elevator_allow_merge_fn = cfq_allow_merge,
+ .elevator_bio_merged_fn = cfq_bio_merged,
+ .elevator_dispatch_fn = cfq_dispatch_requests,
+ .elevator_add_req_fn = cfq_insert_request,
+ .elevator_activate_req_fn = cfq_activate_request,
+ .elevator_deactivate_req_fn = cfq_deactivate_request,
+ .elevator_completed_req_fn = cfq_completed_request,
+ .elevator_former_req_fn = elv_rb_former_request,
+ .elevator_latter_req_fn = elv_rb_latter_request,
+ .elevator_init_icq_fn = cfq_init_icq,
+ .elevator_exit_icq_fn = cfq_exit_icq,
+ .elevator_set_req_fn = cfq_set_request,
+ .elevator_put_req_fn = cfq_put_request,
+ .elevator_may_queue_fn = cfq_may_queue,
+ .elevator_init_fn = cfq_init_queue,
+ .elevator_exit_fn = cfq_exit_queue,
+ },
+ .icq_size = sizeof(struct cfq_io_cq),
+ .icq_align = __alignof__(struct cfq_io_cq),
+ .elevator_attrs = cfq_attrs,
+ .elevator_name = "cfq",
+ .elevator_owner = THIS_MODULE,
+};
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+static struct blkio_policy_type blkio_policy_cfq = {
+ .ops = {
+ .blkio_unlink_group_fn = cfq_unlink_blkio_group,
+ .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
+ },
+ .plid = BLKIO_POLICY_PROP,
+};
+#else
+static struct blkio_policy_type blkio_policy_cfq;
+#endif
+
+static int __init cfq_init(void)
+{
+ int ret;
+
+ /*
+ * could be 0 on HZ < 1000 setups
+ */
+ if (!cfq_slice_async)
+ cfq_slice_async = 1;
+ if (!cfq_slice_idle)
+ cfq_slice_idle = 1;
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ if (!cfq_group_idle)
+ cfq_group_idle = 1;
+#else
+ cfq_group_idle = 0;
+#endif
+ cfq_pool = KMEM_CACHE(cfq_queue, 0);
+ if (!cfq_pool)
+ return -ENOMEM;
+
+ ret = elv_register(&iosched_cfq);
+ if (ret) {
+ kmem_cache_destroy(cfq_pool);
+ return ret;
+ }
+
+ blkio_policy_register(&blkio_policy_cfq);
+
+ return 0;
+}
+
+static void __exit cfq_exit(void)
+{
+ blkio_policy_unregister(&blkio_policy_cfq);
+ elv_unregister(&iosched_cfq);
+ kmem_cache_destroy(cfq_pool);
+}
+
+module_init(cfq_init);
+module_exit(cfq_exit);
+
+MODULE_AUTHOR("Jens Axboe");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
diff --git a/block/cfq.h b/block/cfq.h
new file mode 100644
index 00000000..2a155927
--- /dev/null
+++ b/block/cfq.h
@@ -0,0 +1,115 @@
+#ifndef _CFQ_H
+#define _CFQ_H
+#include "blk-cgroup.h"
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
+ struct blkio_group *curr_blkg, bool direction, bool sync)
+{
+ blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+ unsigned long dequeue)
+{
+ blkiocg_update_dequeue_stats(blkg, dequeue);
+}
+
+static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
+ unsigned long time, unsigned long unaccounted_time)
+{
+ blkiocg_update_timeslice_used(blkg, time, unaccounted_time);
+}
+
+static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
+{
+ blkiocg_set_start_empty_time(blkg);
+}
+
+static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+ bool direction, bool sync)
+{
+ blkiocg_update_io_remove_stats(blkg, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
+ bool direction, bool sync)
+{
+ blkiocg_update_io_merged_stats(blkg, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
+{
+ blkiocg_update_idle_time_stats(blkg);
+}
+
+static inline void
+cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
+{
+ blkiocg_update_avg_queue_size_stats(blkg);
+}
+
+static inline void
+cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
+{
+ blkiocg_update_set_idle_time_stats(blkg);
+}
+
+static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+ uint64_t bytes, bool direction, bool sync)
+{
+ blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
+ uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
+{
+ blkiocg_update_completion_stats(blkg, start_time, io_start_time,
+ direction, sync);
+}
+
+static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
+ struct blkio_group *blkg, void *key, dev_t dev) {
+ blkiocg_add_blkio_group(blkcg, blkg, key, dev, BLKIO_POLICY_PROP);
+}
+
+static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
+{
+ return blkiocg_del_blkio_group(blkg);
+}
+
+#else /* CFQ_GROUP_IOSCHED */
+static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
+ struct blkio_group *curr_blkg, bool direction, bool sync) {}
+
+static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+ unsigned long dequeue) {}
+
+static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
+ unsigned long time, unsigned long unaccounted_time) {}
+static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
+static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+ bool direction, bool sync) {}
+static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
+ bool direction, bool sync) {}
+static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
+{
+}
+static inline void
+cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {}
+
+static inline void
+cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {}
+
+static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+ uint64_t bytes, bool direction, bool sync) {}
+static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
+ uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {}
+
+static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
+ struct blkio_group *blkg, void *key, dev_t dev) {}
+static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
+{
+ return 0;
+}
+
+#endif /* CFQ_GROUP_IOSCHED */
+#endif
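
The header above follows a common kernel pattern: with CONFIG_CFQ_GROUP_IOSCHED disabled, every cfq_blkiocg_* hook collapses to an empty static inline, so the call sites in cfq-iosched.c stay free of #ifdefs and the calls compile away entirely. A generic, self-contained sketch of the same idea (FEATURE_STATS is a made-up configuration knob, not something from this patch):

#include <stdio.h>

#ifdef FEATURE_STATS
static inline void account_io(unsigned long bytes)
{
        printf("accounted %lu bytes\n", bytes); /* real work when the feature is on */
}
#else
static inline void account_io(unsigned long bytes)
{
        /* empty stub: the compiler removes the call entirely */
}
#endif

int main(void)
{
        account_io(4096);       /* no #ifdef needed at the call site */
        return 0;
}
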
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
new file mode 100644
index 00000000..7c668c8a
--- /dev/null
+++ b/block/compat_ioctl.c
@@ -0,0 +1,756 @@
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/blktrace_api.h>
+#include <linux/cdrom.h>
+#include <linux/compat.h>
+#include <linux/elevator.h>
+#include <linux/fd.h>
+#include <linux/hdreg.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+static int compat_put_ushort(unsigned long arg, unsigned short val)
+{
+ return put_user(val, (unsigned short __user *)compat_ptr(arg));
+}
+
+static int compat_put_int(unsigned long arg, int val)
+{
+ return put_user(val, (compat_int_t __user *)compat_ptr(arg));
+}
+
+static int compat_put_uint(unsigned long arg, unsigned int val)
+{
+ return put_user(val, (compat_uint_t __user *)compat_ptr(arg));
+}
+
+static int compat_put_long(unsigned long arg, long val)
+{
+ return put_user(val, (compat_long_t __user *)compat_ptr(arg));
+}
+
+static int compat_put_ulong(unsigned long arg, compat_ulong_t val)
+{
+ return put_user(val, (compat_ulong_t __user *)compat_ptr(arg));
+}
+
+static int compat_put_u64(unsigned long arg, u64 val)
+{
+ return put_user(val, (compat_u64 __user *)compat_ptr(arg));
+}
+
+struct compat_hd_geometry {
+ unsigned char heads;
+ unsigned char sectors;
+ unsigned short cylinders;
+ u32 start;
+};
+
+static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev,
+ struct compat_hd_geometry __user *ugeo)
+{
+ struct hd_geometry geo;
+ int ret;
+
+ if (!ugeo)
+ return -EINVAL;
+ if (!disk->fops->getgeo)
+ return -ENOTTY;
+
+ /*
+ * We need to set the startsect first; the driver may
+ * want to override it.
+ */
+ geo.start = get_start_sect(bdev);
+ ret = disk->fops->getgeo(bdev, &geo);
+ if (ret)
+ return ret;
+
+ ret = copy_to_user(ugeo, &geo, 4);
+ ret |= __put_user(geo.start, &ugeo->start);
+ if (ret)
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static int compat_hdio_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ mm_segment_t old_fs = get_fs();
+ unsigned long kval;
+ unsigned int __user *uvp;
+ int error;
+
+ set_fs(KERNEL_DS);
+ error = __blkdev_driver_ioctl(bdev, mode,
+ cmd, (unsigned long)(&kval));
+ set_fs(old_fs);
+
+ if (error == 0) {
+ uvp = compat_ptr(arg);
+ if (put_user(kval, uvp))
+ error = -EFAULT;
+ }
+ return error;
+}
+
+struct compat_cdrom_read_audio {
+ union cdrom_addr addr;
+ u8 addr_format;
+ compat_int_t nframes;
+ compat_caddr_t buf;
+};
+
+struct compat_cdrom_generic_command {
+ unsigned char cmd[CDROM_PACKET_SIZE];
+ compat_caddr_t buffer;
+ compat_uint_t buflen;
+ compat_int_t stat;
+ compat_caddr_t sense;
+ unsigned char data_direction;
+ compat_int_t quiet;
+ compat_int_t timeout;
+ compat_caddr_t reserved[1];
+};
+
+static int compat_cdrom_read_audio(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct cdrom_read_audio __user *cdread_audio;
+ struct compat_cdrom_read_audio __user *cdread_audio32;
+ __u32 data;
+ void __user *datap;
+
+ cdread_audio = compat_alloc_user_space(sizeof(*cdread_audio));
+ cdread_audio32 = compat_ptr(arg);
+
+ if (copy_in_user(&cdread_audio->addr,
+ &cdread_audio32->addr,
+ (sizeof(*cdread_audio32) -
+ sizeof(compat_caddr_t))))
+ return -EFAULT;
+
+ if (get_user(data, &cdread_audio32->buf))
+ return -EFAULT;
+ datap = compat_ptr(data);
+ if (put_user(datap, &cdread_audio->buf))
+ return -EFAULT;
+
+ return __blkdev_driver_ioctl(bdev, mode, cmd,
+ (unsigned long)cdread_audio);
+}
+
+static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct cdrom_generic_command __user *cgc;
+ struct compat_cdrom_generic_command __user *cgc32;
+ u32 data;
+ unsigned char dir;
+ int itmp;
+
+ cgc = compat_alloc_user_space(sizeof(*cgc));
+ cgc32 = compat_ptr(arg);
+
+ if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
+ get_user(data, &cgc32->buffer) ||
+ put_user(compat_ptr(data), &cgc->buffer) ||
+ copy_in_user(&cgc->buflen, &cgc32->buflen,
+ (sizeof(unsigned int) + sizeof(int))) ||
+ get_user(data, &cgc32->sense) ||
+ put_user(compat_ptr(data), &cgc->sense) ||
+ get_user(dir, &cgc32->data_direction) ||
+ put_user(dir, &cgc->data_direction) ||
+ get_user(itmp, &cgc32->quiet) ||
+ put_user(itmp, &cgc->quiet) ||
+ get_user(itmp, &cgc32->timeout) ||
+ put_user(itmp, &cgc->timeout) ||
+ get_user(data, &cgc32->reserved[0]) ||
+ put_user(compat_ptr(data), &cgc->reserved[0]))
+ return -EFAULT;
+
+ return __blkdev_driver_ioctl(bdev, mode, cmd, (unsigned long)cgc);
+}
+
+struct compat_blkpg_ioctl_arg {
+ compat_int_t op;
+ compat_int_t flags;
+ compat_int_t datalen;
+ compat_caddr_t data;
+};
+
+static int compat_blkpg_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, struct compat_blkpg_ioctl_arg __user *ua32)
+{
+ struct blkpg_ioctl_arg __user *a = compat_alloc_user_space(sizeof(*a));
+ compat_caddr_t udata;
+ compat_int_t n;
+ int err;
+
+ err = get_user(n, &ua32->op);
+ err |= put_user(n, &a->op);
+ err |= get_user(n, &ua32->flags);
+ err |= put_user(n, &a->flags);
+ err |= get_user(n, &ua32->datalen);
+ err |= put_user(n, &a->datalen);
+ err |= get_user(udata, &ua32->data);
+ err |= put_user(compat_ptr(udata), &a->data);
+ if (err)
+ return err;
+
+ return blkdev_ioctl(bdev, mode, cmd, (unsigned long)a);
+}
+
+#define BLKBSZGET_32 _IOR(0x12, 112, int)
+#define BLKBSZSET_32 _IOW(0x12, 113, int)
+#define BLKGETSIZE64_32 _IOR(0x12, 114, int)
+
+struct compat_floppy_drive_params {
+ char cmos;
+ compat_ulong_t max_dtr;
+ compat_ulong_t hlt;
+ compat_ulong_t hut;
+ compat_ulong_t srt;
+ compat_ulong_t spinup;
+ compat_ulong_t spindown;
+ unsigned char spindown_offset;
+ unsigned char select_delay;
+ unsigned char rps;
+ unsigned char tracks;
+ compat_ulong_t timeout;
+ unsigned char interleave_sect;
+ struct floppy_max_errors max_errors;
+ char flags;
+ char read_track;
+ short autodetect[8];
+ compat_int_t checkfreq;
+ compat_int_t native_format;
+};
+
+struct compat_floppy_drive_struct {
+ signed char flags;
+ compat_ulong_t spinup_date;
+ compat_ulong_t select_date;
+ compat_ulong_t first_read_date;
+ short probed_format;
+ short track;
+ short maxblock;
+ short maxtrack;
+ compat_int_t generation;
+ compat_int_t keep_data;
+ compat_int_t fd_ref;
+ compat_int_t fd_device;
+ compat_int_t last_checked;
+ compat_caddr_t dmabuf;
+ compat_int_t bufblocks;
+};
+
+struct compat_floppy_fdc_state {
+ compat_int_t spec1;
+ compat_int_t spec2;
+ compat_int_t dtr;
+ unsigned char version;
+ unsigned char dor;
+ compat_ulong_t address;
+ unsigned int rawcmd:2;
+ unsigned int reset:1;
+ unsigned int need_configure:1;
+ unsigned int perp_mode:2;
+ unsigned int has_fifo:1;
+ unsigned int driver_version;
+ unsigned char track[4];
+};
+
+struct compat_floppy_write_errors {
+ unsigned int write_errors;
+ compat_ulong_t first_error_sector;
+ compat_int_t first_error_generation;
+ compat_ulong_t last_error_sector;
+ compat_int_t last_error_generation;
+ compat_uint_t badness;
+};
+
+#define FDSETPRM32 _IOW(2, 0x42, struct compat_floppy_struct)
+#define FDDEFPRM32 _IOW(2, 0x43, struct compat_floppy_struct)
+#define FDSETDRVPRM32 _IOW(2, 0x90, struct compat_floppy_drive_params)
+#define FDGETDRVPRM32 _IOR(2, 0x11, struct compat_floppy_drive_params)
+#define FDGETDRVSTAT32 _IOR(2, 0x12, struct compat_floppy_drive_struct)
+#define FDPOLLDRVSTAT32 _IOR(2, 0x13, struct compat_floppy_drive_struct)
+#define FDGETFDCSTAT32 _IOR(2, 0x15, struct compat_floppy_fdc_state)
+#define FDWERRORGET32 _IOR(2, 0x17, struct compat_floppy_write_errors)
+
+static struct {
+ unsigned int cmd32;
+ unsigned int cmd;
+} fd_ioctl_trans_table[] = {
+ { FDSETPRM32, FDSETPRM },
+ { FDDEFPRM32, FDDEFPRM },
+ { FDGETPRM32, FDGETPRM },
+ { FDSETDRVPRM32, FDSETDRVPRM },
+ { FDGETDRVPRM32, FDGETDRVPRM },
+ { FDGETDRVSTAT32, FDGETDRVSTAT },
+ { FDPOLLDRVSTAT32, FDPOLLDRVSTAT },
+ { FDGETFDCSTAT32, FDGETFDCSTAT },
+ { FDWERRORGET32, FDWERRORGET }
+};
+
+#define NR_FD_IOCTL_TRANS ARRAY_SIZE(fd_ioctl_trans_table)
+
+static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ mm_segment_t old_fs = get_fs();
+ void *karg = NULL;
+ unsigned int kcmd = 0;
+ int i, err;
+
+ for (i = 0; i < NR_FD_IOCTL_TRANS; i++)
+ if (cmd == fd_ioctl_trans_table[i].cmd32) {
+ kcmd = fd_ioctl_trans_table[i].cmd;
+ break;
+ }
+ if (!kcmd)
+ return -EINVAL;
+
+ switch (cmd) {
+ case FDSETPRM32:
+ case FDDEFPRM32:
+ case FDGETPRM32:
+ {
+ compat_uptr_t name;
+ struct compat_floppy_struct __user *uf;
+ struct floppy_struct *f;
+
+ uf = compat_ptr(arg);
+ f = karg = kmalloc(sizeof(struct floppy_struct), GFP_KERNEL);
+ if (!karg)
+ return -ENOMEM;
+ if (cmd == FDGETPRM32)
+ break;
+ err = __get_user(f->size, &uf->size);
+ err |= __get_user(f->sect, &uf->sect);
+ err |= __get_user(f->head, &uf->head);
+ err |= __get_user(f->track, &uf->track);
+ err |= __get_user(f->stretch, &uf->stretch);
+ err |= __get_user(f->gap, &uf->gap);
+ err |= __get_user(f->rate, &uf->rate);
+ err |= __get_user(f->spec1, &uf->spec1);
+ err |= __get_user(f->fmt_gap, &uf->fmt_gap);
+ err |= __get_user(name, &uf->name);
+ f->name = compat_ptr(name);
+ if (err) {
+ err = -EFAULT;
+ goto out;
+ }
+ break;
+ }
+ case FDSETDRVPRM32:
+ case FDGETDRVPRM32:
+ {
+ struct compat_floppy_drive_params __user *uf;
+ struct floppy_drive_params *f;
+
+ uf = compat_ptr(arg);
+ f = karg = kmalloc(sizeof(struct floppy_drive_params), GFP_KERNEL);
+ if (!karg)
+ return -ENOMEM;
+ if (cmd == FDGETDRVPRM32)
+ break;
+ err = __get_user(f->cmos, &uf->cmos);
+ err |= __get_user(f->max_dtr, &uf->max_dtr);
+ err |= __get_user(f->hlt, &uf->hlt);
+ err |= __get_user(f->hut, &uf->hut);
+ err |= __get_user(f->srt, &uf->srt);
+ err |= __get_user(f->spinup, &uf->spinup);
+ err |= __get_user(f->spindown, &uf->spindown);
+ err |= __get_user(f->spindown_offset, &uf->spindown_offset);
+ err |= __get_user(f->select_delay, &uf->select_delay);
+ err |= __get_user(f->rps, &uf->rps);
+ err |= __get_user(f->tracks, &uf->tracks);
+ err |= __get_user(f->timeout, &uf->timeout);
+ err |= __get_user(f->interleave_sect, &uf->interleave_sect);
+ err |= __copy_from_user(&f->max_errors, &uf->max_errors, sizeof(f->max_errors));
+ err |= __get_user(f->flags, &uf->flags);
+ err |= __get_user(f->read_track, &uf->read_track);
+ err |= __copy_from_user(f->autodetect, uf->autodetect, sizeof(f->autodetect));
+ err |= __get_user(f->checkfreq, &uf->checkfreq);
+ err |= __get_user(f->native_format, &uf->native_format);
+ if (err) {
+ err = -EFAULT;
+ goto out;
+ }
+ break;
+ }
+ case FDGETDRVSTAT32:
+ case FDPOLLDRVSTAT32:
+ karg = kmalloc(sizeof(struct floppy_drive_struct), GFP_KERNEL);
+ if (!karg)
+ return -ENOMEM;
+ break;
+ case FDGETFDCSTAT32:
+ karg = kmalloc(sizeof(struct floppy_fdc_state), GFP_KERNEL);
+ if (!karg)
+ return -ENOMEM;
+ break;
+ case FDWERRORGET32:
+ karg = kmalloc(sizeof(struct floppy_write_errors), GFP_KERNEL);
+ if (!karg)
+ return -ENOMEM;
+ break;
+ default:
+ return -EINVAL;
+ }
+ set_fs(KERNEL_DS);
+ err = __blkdev_driver_ioctl(bdev, mode, kcmd, (unsigned long)karg);
+ set_fs(old_fs);
+ if (err)
+ goto out;
+ switch (cmd) {
+ case FDGETPRM32:
+ {
+ struct floppy_struct *f = karg;
+ struct compat_floppy_struct __user *uf = compat_ptr(arg);
+
+ err = __put_user(f->size, &uf->size);
+ err |= __put_user(f->sect, &uf->sect);
+ err |= __put_user(f->head, &uf->head);
+ err |= __put_user(f->track, &uf->track);
+ err |= __put_user(f->stretch, &uf->stretch);
+ err |= __put_user(f->gap, &uf->gap);
+ err |= __put_user(f->rate, &uf->rate);
+ err |= __put_user(f->spec1, &uf->spec1);
+ err |= __put_user(f->fmt_gap, &uf->fmt_gap);
+ err |= __put_user((u64)f->name, (compat_caddr_t __user *)&uf->name);
+ break;
+ }
+ case FDGETDRVPRM32:
+ {
+ struct compat_floppy_drive_params __user *uf;
+ struct floppy_drive_params *f = karg;
+
+ uf = compat_ptr(arg);
+ err = __put_user(f->cmos, &uf->cmos);
+ err |= __put_user(f->max_dtr, &uf->max_dtr);
+ err |= __put_user(f->hlt, &uf->hlt);
+ err |= __put_user(f->hut, &uf->hut);
+ err |= __put_user(f->srt, &uf->srt);
+ err |= __put_user(f->spinup, &uf->spinup);
+ err |= __put_user(f->spindown, &uf->spindown);
+ err |= __put_user(f->spindown_offset, &uf->spindown_offset);
+ err |= __put_user(f->select_delay, &uf->select_delay);
+ err |= __put_user(f->rps, &uf->rps);
+ err |= __put_user(f->tracks, &uf->tracks);
+ err |= __put_user(f->timeout, &uf->timeout);
+ err |= __put_user(f->interleave_sect, &uf->interleave_sect);
+ err |= __copy_to_user(&uf->max_errors, &f->max_errors, sizeof(f->max_errors));
+ err |= __put_user(f->flags, &uf->flags);
+ err |= __put_user(f->read_track, &uf->read_track);
+ err |= __copy_to_user(uf->autodetect, f->autodetect, sizeof(f->autodetect));
+ err |= __put_user(f->checkfreq, &uf->checkfreq);
+ err |= __put_user(f->native_format, &uf->native_format);
+ break;
+ }
+ case FDGETDRVSTAT32:
+ case FDPOLLDRVSTAT32:
+ {
+ struct compat_floppy_drive_struct __user *uf;
+ struct floppy_drive_struct *f = karg;
+
+ uf = compat_ptr(arg);
+ err = __put_user(f->flags, &uf->flags);
+ err |= __put_user(f->spinup_date, &uf->spinup_date);
+ err |= __put_user(f->select_date, &uf->select_date);
+ err |= __put_user(f->first_read_date, &uf->first_read_date);
+ err |= __put_user(f->probed_format, &uf->probed_format);
+ err |= __put_user(f->track, &uf->track);
+ err |= __put_user(f->maxblock, &uf->maxblock);
+ err |= __put_user(f->maxtrack, &uf->maxtrack);
+ err |= __put_user(f->generation, &uf->generation);
+ err |= __put_user(f->keep_data, &uf->keep_data);
+ err |= __put_user(f->fd_ref, &uf->fd_ref);
+ err |= __put_user(f->fd_device, &uf->fd_device);
+ err |= __put_user(f->last_checked, &uf->last_checked);
+ err |= __put_user((u64)f->dmabuf, &uf->dmabuf);
+ err |= __put_user((u64)f->bufblocks, &uf->bufblocks);
+ break;
+ }
+ case FDGETFDCSTAT32:
+ {
+ struct compat_floppy_fdc_state __user *uf;
+ struct floppy_fdc_state *f = karg;
+
+ uf = compat_ptr(arg);
+ err = __put_user(f->spec1, &uf->spec1);
+ err |= __put_user(f->spec2, &uf->spec2);
+ err |= __put_user(f->dtr, &uf->dtr);
+ err |= __put_user(f->version, &uf->version);
+ err |= __put_user(f->dor, &uf->dor);
+ err |= __put_user(f->address, &uf->address);
+ err |= __copy_to_user((char __user *)&uf->address + sizeof(uf->address),
+ (char *)&f->address + sizeof(f->address), sizeof(int));
+ err |= __put_user(f->driver_version, &uf->driver_version);
+ err |= __copy_to_user(uf->track, f->track, sizeof(f->track));
+ break;
+ }
+ case FDWERRORGET32:
+ {
+ struct compat_floppy_write_errors __user *uf;
+ struct floppy_write_errors *f = karg;
+
+ uf = compat_ptr(arg);
+ err = __put_user(f->write_errors, &uf->write_errors);
+ err |= __put_user(f->first_error_sector, &uf->first_error_sector);
+ err |= __put_user(f->first_error_generation, &uf->first_error_generation);
+ err |= __put_user(f->last_error_sector, &uf->last_error_sector);
+ err |= __put_user(f->last_error_generation, &uf->last_error_generation);
+ err |= __put_user(f->badness, &uf->badness);
+ break;
+ }
+ default:
+ break;
+ }
+ if (err)
+ err = -EFAULT;
+
+out:
+ kfree(karg);
+ return err;
+}
+
+static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case HDIO_GET_UNMASKINTR:
+ case HDIO_GET_MULTCOUNT:
+ case HDIO_GET_KEEPSETTINGS:
+ case HDIO_GET_32BIT:
+ case HDIO_GET_NOWERR:
+ case HDIO_GET_DMA:
+ case HDIO_GET_NICE:
+ case HDIO_GET_WCACHE:
+ case HDIO_GET_ACOUSTIC:
+ case HDIO_GET_ADDRESS:
+ case HDIO_GET_BUSSTATE:
+ return compat_hdio_ioctl(bdev, mode, cmd, arg);
+ case FDSETPRM32:
+ case FDDEFPRM32:
+ case FDGETPRM32:
+ case FDSETDRVPRM32:
+ case FDGETDRVPRM32:
+ case FDGETDRVSTAT32:
+ case FDPOLLDRVSTAT32:
+ case FDGETFDCSTAT32:
+ case FDWERRORGET32:
+ return compat_fd_ioctl(bdev, mode, cmd, arg);
+ case CDROMREADAUDIO:
+ return compat_cdrom_read_audio(bdev, mode, cmd, arg);
+ case CDROM_SEND_PACKET:
+ return compat_cdrom_generic_command(bdev, mode, cmd, arg);
+
+ /*
+ * No handler is required for the ones below; we just need to
+ * convert arg to a 64-bit pointer.
+ */
+ case BLKSECTSET:
+ /*
+ * 0x03 -- HD/IDE ioctl's used by hdparm and friends.
+ * Some need translations, these do not.
+ */
+ case HDIO_GET_IDENTITY:
+ case HDIO_DRIVE_TASK:
+ case HDIO_DRIVE_CMD:
+ /* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */
+ case 0x330:
+ /* 0x02 -- Floppy ioctls */
+ case FDMSGON:
+ case FDMSGOFF:
+ case FDSETEMSGTRESH:
+ case FDFLUSH:
+ case FDWERRORCLR:
+ case FDSETMAXERRS:
+ case FDGETMAXERRS:
+ case FDGETDRVTYP:
+ case FDEJECT:
+ case FDCLRPRM:
+ case FDFMTBEG:
+ case FDFMTEND:
+ case FDRESET:
+ case FDTWADDLE:
+ case FDFMTTRK:
+ case FDRAWCMD:
+ /* CDROM stuff */
+ case CDROMPAUSE:
+ case CDROMRESUME:
+ case CDROMPLAYMSF:
+ case CDROMPLAYTRKIND:
+ case CDROMREADTOCHDR:
+ case CDROMREADTOCENTRY:
+ case CDROMSTOP:
+ case CDROMSTART:
+ case CDROMEJECT:
+ case CDROMVOLCTRL:
+ case CDROMSUBCHNL:
+ case CDROMMULTISESSION:
+ case CDROM_GET_MCN:
+ case CDROMRESET:
+ case CDROMVOLREAD:
+ case CDROMSEEK:
+ case CDROMPLAYBLK:
+ case CDROMCLOSETRAY:
+ case CDROM_DISC_STATUS:
+ case CDROM_CHANGER_NSLOTS:
+ case CDROM_GET_CAPABILITY:
+ /* Ignore cdrom.h about these next 5 ioctls: they absolutely do
+ * not take a struct cdrom_read; instead they take a struct cdrom_msf,
+ * which is compatible.
+ */
+ case CDROMREADMODE2:
+ case CDROMREADMODE1:
+ case CDROMREADRAW:
+ case CDROMREADCOOKED:
+ case CDROMREADALL:
+ /* DVD ioctls */
+ case DVD_READ_STRUCT:
+ case DVD_WRITE_STRUCT:
+ case DVD_AUTH:
+ arg = (unsigned long)compat_ptr(arg);
+ /* These interpret arg as an unsigned long, not as a pointer,
+ * so we must not do compat_ptr() conversion. */
+ case HDIO_SET_MULTCOUNT:
+ case HDIO_SET_UNMASKINTR:
+ case HDIO_SET_KEEPSETTINGS:
+ case HDIO_SET_32BIT:
+ case HDIO_SET_NOWERR:
+ case HDIO_SET_DMA:
+ case HDIO_SET_PIO_MODE:
+ case HDIO_SET_NICE:
+ case HDIO_SET_WCACHE:
+ case HDIO_SET_ACOUSTIC:
+ case HDIO_SET_BUSSTATE:
+ case HDIO_SET_ADDRESS:
+ case CDROMEJECT_SW:
+ case CDROM_SET_OPTIONS:
+ case CDROM_CLEAR_OPTIONS:
+ case CDROM_SELECT_SPEED:
+ case CDROM_SELECT_DISC:
+ case CDROM_MEDIA_CHANGED:
+ case CDROM_DRIVE_STATUS:
+ case CDROM_LOCKDOOR:
+ case CDROM_DEBUG:
+ break;
+ default:
+ /* unknown ioctl number */
+ return -ENOIOCTLCMD;
+ }
+
+ return __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+}
+
+/* Most of the generic ioctls are handled in the normal fallback path.
+ This assumes the blkdev's low level compat_ioctl always returns
+ ENOIOCTLCMD for unknown ioctls. */
+long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+ int ret = -ENOIOCTLCMD;
+ struct inode *inode = file->f_mapping->host;
+ struct block_device *bdev = inode->i_bdev;
+ struct gendisk *disk = bdev->bd_disk;
+ fmode_t mode = file->f_mode;
+ struct backing_dev_info *bdi;
+ loff_t size;
+
+ /*
+ * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
+ * to update it before every ioctl.
+ */
+ if (file->f_flags & O_NDELAY)
+ mode |= FMODE_NDELAY;
+ else
+ mode &= ~FMODE_NDELAY;
+
+ switch (cmd) {
+ case HDIO_GETGEO:
+ return compat_hdio_getgeo(disk, bdev, compat_ptr(arg));
+ case BLKPBSZGET:
+ return compat_put_uint(arg, bdev_physical_block_size(bdev));
+ case BLKIOMIN:
+ return compat_put_uint(arg, bdev_io_min(bdev));
+ case BLKIOOPT:
+ return compat_put_uint(arg, bdev_io_opt(bdev));
+ case BLKALIGNOFF:
+ return compat_put_int(arg, bdev_alignment_offset(bdev));
+ case BLKDISCARDZEROES:
+ return compat_put_uint(arg, bdev_discard_zeroes_data(bdev));
+ case BLKFLSBUF:
+ case BLKROSET:
+ case BLKDISCARD:
+ case BLKSECDISCARD:
+ /*
+ * the ones below are implemented in blkdev_locked_ioctl,
+ * but we call blkdev_ioctl, which gets the lock for us
+ */
+ case BLKRRPART:
+ return blkdev_ioctl(bdev, mode, cmd,
+ (unsigned long)compat_ptr(arg));
+ case BLKBSZSET_32:
+ return blkdev_ioctl(bdev, mode, BLKBSZSET,
+ (unsigned long)compat_ptr(arg));
+ case BLKPG:
+ return compat_blkpg_ioctl(bdev, mode, cmd, compat_ptr(arg));
+ case BLKRAGET:
+ case BLKFRAGET:
+ if (!arg)
+ return -EINVAL;
+ bdi = blk_get_backing_dev_info(bdev);
+ if (bdi == NULL)
+ return -ENOTTY;
+ return compat_put_long(arg,
+ (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+ case BLKROGET: /* compatible */
+ return compat_put_int(arg, bdev_read_only(bdev) != 0);
+ case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
+ return compat_put_int(arg, block_size(bdev));
+ case BLKSSZGET: /* get block device hardware sector size */
+ return compat_put_int(arg, bdev_logical_block_size(bdev));
+ case BLKSECTGET:
+ return compat_put_ushort(arg,
+ queue_max_sectors(bdev_get_queue(bdev)));
+ case BLKROTATIONAL:
+ return compat_put_ushort(arg,
+ !blk_queue_nonrot(bdev_get_queue(bdev)));
+ case BLKRASET: /* compatible, but no compat_ptr (!) */
+ case BLKFRASET:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ bdi = blk_get_backing_dev_info(bdev);
+ if (bdi == NULL)
+ return -ENOTTY;
+ bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+ return 0;
+ case BLKGETSIZE:
+ size = i_size_read(bdev->bd_inode);
+ if ((size >> 9) > ~0UL)
+ return -EFBIG;
+ return compat_put_ulong(arg, size >> 9);
+
+ case BLKGETSIZE64_32:
+ return compat_put_u64(arg, i_size_read(bdev->bd_inode));
+
+ case BLKTRACESETUP32:
+ case BLKTRACESTART: /* compatible */
+ case BLKTRACESTOP: /* compatible */
+ case BLKTRACETEARDOWN: /* compatible */
+ ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
+ return ret;
+ default:
+ if (disk->fops->compat_ioctl)
+ ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
+ if (ret == -ENOIOCTLCMD)
+ ret = compat_blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ return ret;
+ }
+}
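
compat_blkdev_ioctl() above is the 32-bit entry point: it answers size and geometry queries directly, translates the floppy and CD structures, and otherwise falls back to the driver or to compat_blkdev_driver_ioctl(). A minimal sketch of a caller exercising two of the commands routed through it (assuming /dev/sda exists and is readable by the caller; on a 64-bit kernel, a 32-bit build of this program goes through the paths shown above):

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>           /* BLKGETSIZE64, BLKSSZGET */

int main(void)
{
        uint64_t bytes;
        int lbs;
        int fd = open("/dev/sda", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/sda");
                return 1;
        }
        /* device size in bytes; a 32-bit caller is handled as BLKGETSIZE64_32 */
        if (ioctl(fd, BLKGETSIZE64, &bytes) == 0)
                printf("size: %llu bytes\n", (unsigned long long)bytes);
        /* logical block size, one of the "compatible" cases above */
        if (ioctl(fd, BLKSSZGET, &lbs) == 0)
                printf("logical block size: %d\n", lbs);
        close(fd);
        return 0;
}
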
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
new file mode 100644
index 00000000..7bf12d79
--- /dev/null
+++ b/block/deadline-iosched.c
@@ -0,0 +1,464 @@
+/*
+ * Deadline i/o scheduler.
+ *
+ * Copyright (C) 2002 Jens Axboe <axboe@kernel.dk>
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/rbtree.h>
+
+/*
+ * See Documentation/block/deadline-iosched.txt
+ */
+static const int read_expire = HZ / 2; /* max time before a read is submitted. */
+static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
+static const int writes_starved = 2; /* max times reads can starve a write */
+static const int fifo_batch = 16; /* # of sequential requests treated as one
+ by the above parameters. For throughput. */
+
+struct deadline_data {
+ /*
+ * run time data
+ */
+
+ /*
+ * requests (deadline_rq s) are present on both sort_list and fifo_list
+ */
+ struct rb_root sort_list[2];
+ struct list_head fifo_list[2];
+
+ /*
+ * next request in sort order; read, write, or both may be NULL
+ */
+ struct request *next_rq[2];
+ unsigned int batching; /* number of sequential requests made */
+ sector_t last_sector; /* head position */
+ unsigned int starved; /* times reads have starved writes */
+
+ /*
+ * settings that change how the i/o scheduler behaves
+ */
+ int fifo_expire[2];
+ int fifo_batch;
+ int writes_starved;
+ int front_merges;
+};
+
+static void deadline_move_request(struct deadline_data *, struct request *);
+
+static inline struct rb_root *
+deadline_rb_root(struct deadline_data *dd, struct request *rq)
+{
+ return &dd->sort_list[rq_data_dir(rq)];
+}
+
+/*
+ * get the request after `rq' in sector-sorted order
+ */
+static inline struct request *
+deadline_latter_request(struct request *rq)
+{
+ struct rb_node *node = rb_next(&rq->rb_node);
+
+ if (node)
+ return rb_entry_rq(node);
+
+ return NULL;
+}
+
+static void
+deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
+{
+ struct rb_root *root = deadline_rb_root(dd, rq);
+
+ elv_rb_add(root, rq);
+}
+
+static inline void
+deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
+{
+ const int data_dir = rq_data_dir(rq);
+
+ if (dd->next_rq[data_dir] == rq)
+ dd->next_rq[data_dir] = deadline_latter_request(rq);
+
+ elv_rb_del(deadline_rb_root(dd, rq), rq);
+}
+
+/*
+ * add rq to rbtree and fifo
+ */
+static void
+deadline_add_request(struct request_queue *q, struct request *rq)
+{
+ struct deadline_data *dd = q->elevator->elevator_data;
+ const int data_dir = rq_data_dir(rq);
+
+ deadline_add_rq_rb(dd, rq);
+
+ /*
+ * set expire time and add to fifo list
+ */
+ rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
+ list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
+}
+
+/*
+ * remove rq from rbtree and fifo.
+ */
+static void deadline_remove_request(struct request_queue *q, struct request *rq)
+{
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ rq_fifo_clear(rq);
+ deadline_del_rq_rb(dd, rq);
+}
+
+static int
+deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
+{
+ struct deadline_data *dd = q->elevator->elevator_data;
+ struct request *__rq;
+ int ret;
+
+ /*
+ * check for front merge
+ */
+ if (dd->front_merges) {
+ sector_t sector = bio->bi_sector + bio_sectors(bio);
+
+ __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
+ if (__rq) {
+ BUG_ON(sector != blk_rq_pos(__rq));
+
+ if (elv_rq_merge_ok(__rq, bio)) {
+ ret = ELEVATOR_FRONT_MERGE;
+ goto out;
+ }
+ }
+ }
+
+ return ELEVATOR_NO_MERGE;
+out:
+ *req = __rq;
+ return ret;
+}
+
+static void deadline_merged_request(struct request_queue *q,
+ struct request *req, int type)
+{
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ /*
+ * if the merge was a front merge, we need to reposition request
+ */
+ if (type == ELEVATOR_FRONT_MERGE) {
+ elv_rb_del(deadline_rb_root(dd, req), req);
+ deadline_add_rq_rb(dd, req);
+ }
+}
+
+static void
+deadline_merged_requests(struct request_queue *q, struct request *req,
+ struct request *next)
+{
+ /*
+ * if next expires before rq, assign its expire time to rq
+ * and move into next position (next will be deleted) in fifo
+ */
+ if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
+ if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
+ list_move(&req->queuelist, &next->queuelist);
+ rq_set_fifo_time(req, rq_fifo_time(next));
+ }
+ }
+
+ /*
+ * kill knowledge of next, this one is a goner
+ */
+ deadline_remove_request(q, next);
+}
+
+/*
+ * move request from sort list to dispatch queue.
+ */
+static inline void
+deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
+{
+ struct request_queue *q = rq->q;
+
+ deadline_remove_request(q, rq);
+ elv_dispatch_add_tail(q, rq);
+}
+
+/*
+ * move an entry to dispatch queue
+ */
+static void
+deadline_move_request(struct deadline_data *dd, struct request *rq)
+{
+ const int data_dir = rq_data_dir(rq);
+
+ dd->next_rq[READ] = NULL;
+ dd->next_rq[WRITE] = NULL;
+ dd->next_rq[data_dir] = deadline_latter_request(rq);
+
+ dd->last_sector = rq_end_sector(rq);
+
+ /*
+ * take it off the sort and fifo list, move
+ * to dispatch queue
+ */
+ deadline_move_to_dispatch(dd, rq);
+}
+
+/*
+ * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
+ * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
+ */
+static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
+{
+ struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);
+
+ /*
+ * rq is expired!
+ */
+ if (time_after(jiffies, rq_fifo_time(rq)))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * deadline_dispatch_requests selects the best request according to
+ * read/write expire, fifo_batch, etc
+ */
+static int deadline_dispatch_requests(struct request_queue *q, int force)
+{
+ struct deadline_data *dd = q->elevator->elevator_data;
+ const int reads = !list_empty(&dd->fifo_list[READ]);
+ const int writes = !list_empty(&dd->fifo_list[WRITE]);
+ struct request *rq;
+ int data_dir;
+
+ /*
+ * batches are currently reads XOR writes
+ */
+ if (dd->next_rq[WRITE])
+ rq = dd->next_rq[WRITE];
+ else
+ rq = dd->next_rq[READ];
+
+ if (rq && dd->batching < dd->fifo_batch)
+ /* we have a next request and are still entitled to batch */
+ goto dispatch_request;
+
+ /*
+ * at this point we are not running a batch. select the appropriate
+ * data direction (read / write)
+ */
+
+ if (reads) {
+ BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));
+
+ if (writes && (dd->starved++ >= dd->writes_starved))
+ goto dispatch_writes;
+
+ data_dir = READ;
+
+ goto dispatch_find_request;
+ }
+
+ /*
+ * either there are no reads, or writes have been starved
+ */
+
+ if (writes) {
+dispatch_writes:
+ BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));
+
+ dd->starved = 0;
+
+ data_dir = WRITE;
+
+ goto dispatch_find_request;
+ }
+
+ return 0;
+
+dispatch_find_request:
+ /*
+ * we are not running a batch; find the best request for the selected data_dir
+ */
+ if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
+ /*
+ * A deadline has expired, the last request was in the other
+ * direction, or we have run out of higher-sectored requests.
+ * Start again from the request with the earliest expiry time.
+ */
+ rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
+ } else {
+ /*
+ * The last req was the same dir and we have a next request in
+ * sort order. No expired requests so continue on from here.
+ */
+ rq = dd->next_rq[data_dir];
+ }
+
+ dd->batching = 0;
+
+dispatch_request:
+ /*
+ * rq is the selected appropriate request.
+ */
+ dd->batching++;
+ deadline_move_request(dd, rq);
+
+ return 1;
+}
+
+static void deadline_exit_queue(struct elevator_queue *e)
+{
+ struct deadline_data *dd = e->elevator_data;
+
+ BUG_ON(!list_empty(&dd->fifo_list[READ]));
+ BUG_ON(!list_empty(&dd->fifo_list[WRITE]));
+
+ kfree(dd);
+}
+
+/*
+ * initialize elevator private data (deadline_data).
+ */
+static void *deadline_init_queue(struct request_queue *q)
+{
+ struct deadline_data *dd;
+
+ dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
+ if (!dd)
+ return NULL;
+
+ INIT_LIST_HEAD(&dd->fifo_list[READ]);
+ INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
+ dd->sort_list[READ] = RB_ROOT;
+ dd->sort_list[WRITE] = RB_ROOT;
+ dd->fifo_expire[READ] = read_expire;
+ dd->fifo_expire[WRITE] = write_expire;
+ dd->writes_starved = writes_starved;
+ dd->front_merges = 1;
+ dd->fifo_batch = fifo_batch;
+ return dd;
+}
+
+/*
+ * sysfs parts below
+ */
+
+static ssize_t
+deadline_var_show(int var, char *page)
+{
+ return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+deadline_var_store(int *var, const char *page, size_t count)
+{
+ char *p = (char *) page;
+
+ *var = simple_strtol(p, &p, 10);
+ return count;
+}
+
+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, char *page) \
+{ \
+ struct deadline_data *dd = e->elevator_data; \
+ int __data = __VAR; \
+ if (__CONV) \
+ __data = jiffies_to_msecs(__data); \
+ return deadline_var_show(__data, (page)); \
+}
+SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
+SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
+SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
+SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
+SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
+#undef SHOW_FUNCTION
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
+{ \
+ struct deadline_data *dd = e->elevator_data; \
+ int __data; \
+ int ret = deadline_var_store(&__data, (page), count); \
+ if (__data < (MIN)) \
+ __data = (MIN); \
+ else if (__data > (MAX)) \
+ __data = (MAX); \
+ if (__CONV) \
+ *(__PTR) = msecs_to_jiffies(__data); \
+ else \
+ *(__PTR) = __data; \
+ return ret; \
+}
+STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
+STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
+STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
+STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
+STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
+#undef STORE_FUNCTION
+
+#define DD_ATTR(name) \
+ __ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
+ deadline_##name##_store)
+
+static struct elv_fs_entry deadline_attrs[] = {
+ DD_ATTR(read_expire),
+ DD_ATTR(write_expire),
+ DD_ATTR(writes_starved),
+ DD_ATTR(front_merges),
+ DD_ATTR(fifo_batch),
+ __ATTR_NULL
+};
+
+static struct elevator_type iosched_deadline = {
+ .ops = {
+ .elevator_merge_fn = deadline_merge,
+ .elevator_merged_fn = deadline_merged_request,
+ .elevator_merge_req_fn = deadline_merged_requests,
+ .elevator_dispatch_fn = deadline_dispatch_requests,
+ .elevator_add_req_fn = deadline_add_request,
+ .elevator_former_req_fn = elv_rb_former_request,
+ .elevator_latter_req_fn = elv_rb_latter_request,
+ .elevator_init_fn = deadline_init_queue,
+ .elevator_exit_fn = deadline_exit_queue,
+ },
+
+ .elevator_attrs = deadline_attrs,
+ .elevator_name = "deadline",
+ .elevator_owner = THIS_MODULE,
+};
+
+static int __init deadline_init(void)
+{
+ return elv_register(&iosched_deadline);
+}
+
+static void __exit deadline_exit(void)
+{
+ elv_unregister(&iosched_deadline);
+}
+
+module_init(deadline_init);
+module_exit(deadline_exit);
+
+MODULE_AUTHOR("Jens Axboe");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("deadline IO scheduler");
diff --git a/block/elevator.c b/block/elevator.c
new file mode 100644
index 00000000..f016855a
--- /dev/null
+++ b/block/elevator.c
@@ -0,0 +1,1051 @@
+/*
+ * Block device elevator/IO-scheduler.
+ *
+ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ *
+ * 30042000 Jens Axboe <axboe@kernel.dk> :
+ *
+ * Split the elevator a bit so that it is possible to choose a different
+ * one or even write a new "plug in". There are three pieces:
+ * - elevator_fn, inserts a new request in the queue list
+ * - elevator_merge_fn, decides whether a new buffer can be merged with
+ * an existing request
+ * - elevator_dequeue_fn, called when a request is taken off the active list
+ *
+ * 20082000 Dave Jones <davej@suse.de> :
+ * Removed tests for max-bomb-segments, which was breaking elvtune
+ * when run without -bN
+ *
+ * Jens:
+ * - Rework again to work with bio instead of buffer_heads
+ * - loose bi_dev comparisons, partition handling is right now
+ * - completely modularize elevator setup and teardown
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/blktrace_api.h>
+#include <linux/hash.h>
+#include <linux/uaccess.h>
+
+#include <trace/events/block.h>
+
+#include "blk.h"
+
+static DEFINE_SPINLOCK(elv_list_lock);
+static LIST_HEAD(elv_list);
+
+/*
+ * Merge hash stuff.
+ */
+static const int elv_hash_shift = 6;
+#define ELV_HASH_BLOCK(sec) ((sec) >> 3)
+#define ELV_HASH_FN(sec) \
+ (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
+#define ELV_HASH_ENTRIES (1 << elv_hash_shift)
+#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
+
+/*
+ * Query io scheduler to see if the current process issuing bio may be
+ * merged with rq.
+ */
+static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
+{
+ struct request_queue *q = rq->q;
+ struct elevator_queue *e = q->elevator;
+
+ if (e->type->ops.elevator_allow_merge_fn)
+ return e->type->ops.elevator_allow_merge_fn(q, rq, bio);
+
+ return 1;
+}
+
+/*
+ * can we safely merge with this request?
+ */
+bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
+{
+ if (!blk_rq_merge_ok(rq, bio))
+ return 0;
+
+ if (!elv_iosched_allow_merge(rq, bio))
+ return 0;
+
+ return 1;
+}
+EXPORT_SYMBOL(elv_rq_merge_ok);
+
+static struct elevator_type *elevator_find(const char *name)
+{
+ struct elevator_type *e;
+
+ list_for_each_entry(e, &elv_list, list) {
+ if (!strcmp(e->elevator_name, name))
+ return e;
+ }
+
+ return NULL;
+}
+
+static void elevator_put(struct elevator_type *e)
+{
+ module_put(e->elevator_owner);
+}
+
+static struct elevator_type *elevator_get(const char *name)
+{
+ struct elevator_type *e;
+
+ spin_lock(&elv_list_lock);
+
+ e = elevator_find(name);
+ if (!e) {
+ spin_unlock(&elv_list_lock);
+ request_module("%s-iosched", name);
+ spin_lock(&elv_list_lock);
+ e = elevator_find(name);
+ }
+
+ if (e && !try_module_get(e->elevator_owner))
+ e = NULL;
+
+ spin_unlock(&elv_list_lock);
+
+ return e;
+}
+
+static int elevator_init_queue(struct request_queue *q,
+ struct elevator_queue *eq)
+{
+ eq->elevator_data = eq->type->ops.elevator_init_fn(q);
+ if (eq->elevator_data)
+ return 0;
+ return -ENOMEM;
+}
+
+static char chosen_elevator[ELV_NAME_MAX];
+
+static int __init elevator_setup(char *str)
+{
+ /*
+ * Be backwards-compatible with previous kernels, so users
+ * won't get the wrong elevator.
+ */
+ strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
+ return 1;
+}
+
+__setup("elevator=", elevator_setup);
+
+static struct kobj_type elv_ktype;
+
+static struct elevator_queue *elevator_alloc(struct request_queue *q,
+ struct elevator_type *e)
+{
+ struct elevator_queue *eq;
+ int i;
+
+ eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
+ if (unlikely(!eq))
+ goto err;
+
+ eq->type = e;
+ kobject_init(&eq->kobj, &elv_ktype);
+ mutex_init(&eq->sysfs_lock);
+
+ eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
+ GFP_KERNEL, q->node);
+ if (!eq->hash)
+ goto err;
+
+ for (i = 0; i < ELV_HASH_ENTRIES; i++)
+ INIT_HLIST_HEAD(&eq->hash[i]);
+
+ return eq;
+err:
+ kfree(eq);
+ elevator_put(e);
+ return NULL;
+}
+
+static void elevator_release(struct kobject *kobj)
+{
+ struct elevator_queue *e;
+
+ e = container_of(kobj, struct elevator_queue, kobj);
+ elevator_put(e->type);
+ kfree(e->hash);
+ kfree(e);
+}
+
+int elevator_init(struct request_queue *q, char *name)
+{
+ struct elevator_type *e = NULL;
+ struct elevator_queue *eq;
+ int err;
+
+ if (unlikely(q->elevator))
+ return 0;
+
+ INIT_LIST_HEAD(&q->queue_head);
+ q->last_merge = NULL;
+ q->end_sector = 0;
+ q->boundary_rq = NULL;
+
+ if (name) {
+ e = elevator_get(name);
+ if (!e)
+ return -EINVAL;
+ }
+
+ if (!e && *chosen_elevator) {
+ e = elevator_get(chosen_elevator);
+ if (!e)
+ printk(KERN_ERR "I/O scheduler %s not found\n",
+ chosen_elevator);
+ }
+
+ if (!e) {
+ e = elevator_get(CONFIG_DEFAULT_IOSCHED);
+ if (!e) {
+ printk(KERN_ERR
+ "Default I/O scheduler not found. " \
+ "Using noop.\n");
+ e = elevator_get("noop");
+ }
+ }
+
+ eq = elevator_alloc(q, e);
+ if (!eq)
+ return -ENOMEM;
+
+ err = elevator_init_queue(q, eq);
+ if (err) {
+ kobject_put(&eq->kobj);
+ return err;
+ }
+
+ q->elevator = eq;
+ return 0;
+}
+EXPORT_SYMBOL(elevator_init);
+
+void elevator_exit(struct elevator_queue *e)
+{
+ mutex_lock(&e->sysfs_lock);
+ if (e->type->ops.elevator_exit_fn)
+ e->type->ops.elevator_exit_fn(e);
+ mutex_unlock(&e->sysfs_lock);
+
+ kobject_put(&e->kobj);
+}
+EXPORT_SYMBOL(elevator_exit);
+
+static inline void __elv_rqhash_del(struct request *rq)
+{
+ hlist_del_init(&rq->hash);
+}
+
+static void elv_rqhash_del(struct request_queue *q, struct request *rq)
+{
+ if (ELV_ON_HASH(rq))
+ __elv_rqhash_del(rq);
+}
+
+static void elv_rqhash_add(struct request_queue *q, struct request *rq)
+{
+ struct elevator_queue *e = q->elevator;
+
+ BUG_ON(ELV_ON_HASH(rq));
+ hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
+}
+
+static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
+{
+ __elv_rqhash_del(rq);
+ elv_rqhash_add(q, rq);
+}
+
+static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
+{
+ struct elevator_queue *e = q->elevator;
+ struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
+ struct hlist_node *entry, *next;
+ struct request *rq;
+
+ hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
+ BUG_ON(!ELV_ON_HASH(rq));
+
+ if (unlikely(!rq_mergeable(rq))) {
+ __elv_rqhash_del(rq);
+ continue;
+ }
+
+ if (rq_hash_key(rq) == offset)
+ return rq;
+ }
+
+ return NULL;
+}
+
+/*
+ * RB-tree support functions for inserting/lookup/removal of requests
+ * in a sorted RB tree.
+ */
+void elv_rb_add(struct rb_root *root, struct request *rq)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct request *__rq;
+
+ while (*p) {
+ parent = *p;
+ __rq = rb_entry(parent, struct request, rb_node);
+
+ if (blk_rq_pos(rq) < blk_rq_pos(__rq))
+ p = &(*p)->rb_left;
+ else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&rq->rb_node, parent, p);
+ rb_insert_color(&rq->rb_node, root);
+}
+EXPORT_SYMBOL(elv_rb_add);
+
+void elv_rb_del(struct rb_root *root, struct request *rq)
+{
+ BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
+ rb_erase(&rq->rb_node, root);
+ RB_CLEAR_NODE(&rq->rb_node);
+}
+EXPORT_SYMBOL(elv_rb_del);
+
+struct request *elv_rb_find(struct rb_root *root, sector_t sector)
+{
+ struct rb_node *n = root->rb_node;
+ struct request *rq;
+
+ while (n) {
+ rq = rb_entry(n, struct request, rb_node);
+
+ if (sector < blk_rq_pos(rq))
+ n = n->rb_left;
+ else if (sector > blk_rq_pos(rq))
+ n = n->rb_right;
+ else
+ return rq;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(elv_rb_find);
+
+/*
+ * Insert rq into the dispatch queue of q. Queue lock must be held on
+ * entry. rq is sorted into the dispatch queue. To be used by
+ * specific elevators.
+ */
+void elv_dispatch_sort(struct request_queue *q, struct request *rq)
+{
+ sector_t boundary;
+ struct list_head *entry;
+ int stop_flags;
+
+ if (q->last_merge == rq)
+ q->last_merge = NULL;
+
+ elv_rqhash_del(q, rq);
+
+ q->nr_sorted--;
+
+ boundary = q->end_sector;
+ stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
+ list_for_each_prev(entry, &q->queue_head) {
+ struct request *pos = list_entry_rq(entry);
+
+ if ((rq->cmd_flags & REQ_DISCARD) !=
+ (pos->cmd_flags & REQ_DISCARD))
+ break;
+ if (rq_data_dir(rq) != rq_data_dir(pos))
+ break;
+ if (pos->cmd_flags & stop_flags)
+ break;
+ if (blk_rq_pos(rq) >= boundary) {
+ if (blk_rq_pos(pos) < boundary)
+ continue;
+ } else {
+ if (blk_rq_pos(pos) >= boundary)
+ break;
+ }
+ if (blk_rq_pos(rq) >= blk_rq_pos(pos))
+ break;
+ }
+
+ list_add(&rq->queuelist, entry);
+}
+EXPORT_SYMBOL(elv_dispatch_sort);
+
+/*
+ * Insert rq into dispatch queue of q. Queue lock must be held on
+ * entry. rq is added to the back of the dispatch queue. To be used by
+ * specific elevators.
+ */
+void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
+{
+ if (q->last_merge == rq)
+ q->last_merge = NULL;
+
+ elv_rqhash_del(q, rq);
+
+ q->nr_sorted--;
+
+ q->end_sector = rq_end_sector(rq);
+ q->boundary_rq = rq;
+ list_add_tail(&rq->queuelist, &q->queue_head);
+}
+EXPORT_SYMBOL(elv_dispatch_add_tail);
+
+int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
+{
+ struct elevator_queue *e = q->elevator;
+ struct request *__rq;
+ int ret;
+
+ /*
+ * Levels of merges:
+ * nomerges: No merges at all attempted
+ * noxmerges: Only simple one-hit cache try
+ * merges: All merge tries attempted
+ */
+ if (blk_queue_nomerges(q))
+ return ELEVATOR_NO_MERGE;
+
+ /*
+ * First try one-hit cache.
+ */
+ if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
+ ret = blk_try_merge(q->last_merge, bio);
+ if (ret != ELEVATOR_NO_MERGE) {
+ *req = q->last_merge;
+ return ret;
+ }
+ }
+
+ if (blk_queue_noxmerges(q))
+ return ELEVATOR_NO_MERGE;
+
+ /*
+ * See if our hash lookup can find a potential backmerge.
+ */
+ __rq = elv_rqhash_find(q, bio->bi_sector);
+ if (__rq && elv_rq_merge_ok(__rq, bio)) {
+ *req = __rq;
+ return ELEVATOR_BACK_MERGE;
+ }
+
+ if (e->type->ops.elevator_merge_fn)
+ return e->type->ops.elevator_merge_fn(q, req, bio);
+
+ return ELEVATOR_NO_MERGE;
+}
+
+/*
+ * Attempt to do an insertion back merge. Only check for the case where
+ * we can append 'rq' to an existing request, so we can throw 'rq' away
+ * afterwards.
+ *
+ * Returns true if we merged, false otherwise
+ */
+static bool elv_attempt_insert_merge(struct request_queue *q,
+ struct request *rq)
+{
+ struct request *__rq;
+
+ if (blk_queue_nomerges(q))
+ return false;
+
+ /*
+ * First try one-hit cache.
+ */
+ if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
+ return true;
+
+ if (blk_queue_noxmerges(q))
+ return false;
+
+ /*
+ * See if our hash lookup can find a potential backmerge.
+ */
+ __rq = elv_rqhash_find(q, blk_rq_pos(rq));
+ if (__rq && blk_attempt_req_merge(q, __rq, rq))
+ return true;
+
+ return false;
+}
+
+void elv_merged_request(struct request_queue *q, struct request *rq, int type)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->type->ops.elevator_merged_fn)
+ e->type->ops.elevator_merged_fn(q, rq, type);
+
+ if (type == ELEVATOR_BACK_MERGE)
+ elv_rqhash_reposition(q, rq);
+
+ q->last_merge = rq;
+}
+
+void elv_merge_requests(struct request_queue *q, struct request *rq,
+ struct request *next)
+{
+ struct elevator_queue *e = q->elevator;
+ const int next_sorted = next->cmd_flags & REQ_SORTED;
+
+ if (next_sorted && e->type->ops.elevator_merge_req_fn)
+ e->type->ops.elevator_merge_req_fn(q, rq, next);
+
+ elv_rqhash_reposition(q, rq);
+
+ if (next_sorted) {
+ elv_rqhash_del(q, next);
+ q->nr_sorted--;
+ }
+
+ q->last_merge = rq;
+}
+
+void elv_bio_merged(struct request_queue *q, struct request *rq,
+ struct bio *bio)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->type->ops.elevator_bio_merged_fn)
+ e->type->ops.elevator_bio_merged_fn(q, rq, bio);
+}
+
+void elv_requeue_request(struct request_queue *q, struct request *rq)
+{
+ /*
+ * it already went through dequeue, so we need to decrement the
+ * in_flight count again
+ */
+ if (blk_account_rq(rq)) {
+ q->in_flight[rq_is_sync(rq)]--;
+ if (rq->cmd_flags & REQ_SORTED)
+ elv_deactivate_rq(q, rq);
+ }
+
+ rq->cmd_flags &= ~REQ_STARTED;
+
+ __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
+}
+
+void elv_drain_elevator(struct request_queue *q)
+{
+ static int printed;
+
+ lockdep_assert_held(q->queue_lock);
+
+ while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
+ ;
+ if (q->nr_sorted && printed++ < 10) {
+ printk(KERN_ERR "%s: forced dispatching is broken "
+ "(nr_sorted=%u), please report this\n",
+ q->elevator->type->elevator_name, q->nr_sorted);
+ }
+}
+
+void elv_quiesce_start(struct request_queue *q)
+{
+ if (!q->elevator)
+ return;
+
+ spin_lock_irq(q->queue_lock);
+ queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
+ spin_unlock_irq(q->queue_lock);
+
+ blk_drain_queue(q, false);
+}
+
+void elv_quiesce_end(struct request_queue *q)
+{
+ spin_lock_irq(q->queue_lock);
+ queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+ spin_unlock_irq(q->queue_lock);
+}
+
+void __elv_add_request(struct request_queue *q, struct request *rq, int where)
+{
+ trace_block_rq_insert(q, rq);
+
+ rq->q = q;
+
+ if (rq->cmd_flags & REQ_SOFTBARRIER) {
+ /* barriers are scheduling boundary, update end_sector */
+ if (rq->cmd_type == REQ_TYPE_FS ||
+ (rq->cmd_flags & REQ_DISCARD)) {
+ q->end_sector = rq_end_sector(rq);
+ q->boundary_rq = rq;
+ }
+ } else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
+ (where == ELEVATOR_INSERT_SORT ||
+ where == ELEVATOR_INSERT_SORT_MERGE))
+ where = ELEVATOR_INSERT_BACK;
+
+ switch (where) {
+ case ELEVATOR_INSERT_REQUEUE:
+ case ELEVATOR_INSERT_FRONT:
+ rq->cmd_flags |= REQ_SOFTBARRIER;
+ list_add(&rq->queuelist, &q->queue_head);
+ break;
+
+ case ELEVATOR_INSERT_BACK:
+ rq->cmd_flags |= REQ_SOFTBARRIER;
+ elv_drain_elevator(q);
+ list_add_tail(&rq->queuelist, &q->queue_head);
+ /*
+ * We kick the queue here for the following reasons.
+ * - The elevator might have returned NULL previously
+ * to delay requests and returned them now. As the
+ * queue wasn't empty before this request, ll_rw_blk
+	 *   won't run the queue on return, resulting in a hang.
+ * - Usually, back inserted requests won't be merged
+ * with anything. There's no point in delaying queue
+ * processing.
+ */
+ __blk_run_queue(q);
+ break;
+
+ case ELEVATOR_INSERT_SORT_MERGE:
+ /*
+ * If we succeed in merging this request with one in the
+ * queue already, we are done - rq has now been freed,
+ * so no need to do anything further.
+ */
+ if (elv_attempt_insert_merge(q, rq))
+ break;
+ case ELEVATOR_INSERT_SORT:
+ BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
+ !(rq->cmd_flags & REQ_DISCARD));
+ rq->cmd_flags |= REQ_SORTED;
+ q->nr_sorted++;
+ if (rq_mergeable(rq)) {
+ elv_rqhash_add(q, rq);
+ if (!q->last_merge)
+ q->last_merge = rq;
+ }
+
+ /*
+ * Some ioscheds (cfq) run q->request_fn directly, so
+ * rq cannot be accessed after calling
+ * elevator_add_req_fn.
+ */
+ q->elevator->type->ops.elevator_add_req_fn(q, rq);
+ break;
+
+ case ELEVATOR_INSERT_FLUSH:
+ rq->cmd_flags |= REQ_SOFTBARRIER;
+ blk_insert_flush(rq);
+ break;
+ default:
+ printk(KERN_ERR "%s: bad insertion point %d\n",
+ __func__, where);
+ BUG();
+ }
+}
+EXPORT_SYMBOL(__elv_add_request);
+
+void elv_add_request(struct request_queue *q, struct request *rq, int where)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __elv_add_request(q, rq, where);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(elv_add_request);
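+/*
+ * Usage note (added for illustration): __elv_add_request() expects the
+ * caller to already hold q->queue_lock, while elv_add_request() takes and
+ * releases the lock itself. A sketch, assuming 'q' and 'rq' are valid:
+ *
+ *	spin_lock_irqsave(q->queue_lock, flags);
+ *	__elv_add_request(q, rq, ELEVATOR_INSERT_BACK);
+ *	spin_unlock_irqrestore(q->queue_lock, flags);
+ *
+ * or, from unlocked context, simply:
+ *
+ *	elv_add_request(q, rq, ELEVATOR_INSERT_BACK);
+ */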
+
+struct request *elv_latter_request(struct request_queue *q, struct request *rq)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->type->ops.elevator_latter_req_fn)
+ return e->type->ops.elevator_latter_req_fn(q, rq);
+ return NULL;
+}
+
+struct request *elv_former_request(struct request_queue *q, struct request *rq)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->type->ops.elevator_former_req_fn)
+ return e->type->ops.elevator_former_req_fn(q, rq);
+ return NULL;
+}
+
+int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->type->ops.elevator_set_req_fn)
+ return e->type->ops.elevator_set_req_fn(q, rq, gfp_mask);
+ return 0;
+}
+
+void elv_put_request(struct request_queue *q, struct request *rq)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->type->ops.elevator_put_req_fn)
+ e->type->ops.elevator_put_req_fn(rq);
+}
+
+int elv_may_queue(struct request_queue *q, int rw)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->type->ops.elevator_may_queue_fn)
+ return e->type->ops.elevator_may_queue_fn(q, rw);
+
+ return ELV_MQUEUE_MAY;
+}
+
+void elv_abort_queue(struct request_queue *q)
+{
+ struct request *rq;
+
+ blk_abort_flushes(q);
+
+ while (!list_empty(&q->queue_head)) {
+ rq = list_entry_rq(q->queue_head.next);
+ rq->cmd_flags |= REQ_QUIET;
+ trace_block_rq_abort(q, rq);
+ /*
+ * Mark this request as started so we don't trigger
+ * any debug logic in the end I/O path.
+ */
+ blk_start_request(rq);
+ __blk_end_request_all(rq, -EIO);
+ }
+}
+EXPORT_SYMBOL(elv_abort_queue);
+
+void elv_completed_request(struct request_queue *q, struct request *rq)
+{
+ struct elevator_queue *e = q->elevator;
+
+ /*
+	 * the request has been released by the driver, so its I/O must be done
+ */
+ if (blk_account_rq(rq)) {
+ q->in_flight[rq_is_sync(rq)]--;
+ if ((rq->cmd_flags & REQ_SORTED) &&
+ e->type->ops.elevator_completed_req_fn)
+ e->type->ops.elevator_completed_req_fn(q, rq);
+ }
+}
+
+#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
+
+static ssize_t
+elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+ struct elv_fs_entry *entry = to_elv(attr);
+ struct elevator_queue *e;
+ ssize_t error;
+
+ if (!entry->show)
+ return -EIO;
+
+ e = container_of(kobj, struct elevator_queue, kobj);
+ mutex_lock(&e->sysfs_lock);
+ error = e->type ? entry->show(e, page) : -ENOENT;
+ mutex_unlock(&e->sysfs_lock);
+ return error;
+}
+
+static ssize_t
+elv_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *page, size_t length)
+{
+ struct elv_fs_entry *entry = to_elv(attr);
+ struct elevator_queue *e;
+ ssize_t error;
+
+ if (!entry->store)
+ return -EIO;
+
+ e = container_of(kobj, struct elevator_queue, kobj);
+ mutex_lock(&e->sysfs_lock);
+ error = e->type ? entry->store(e, page, length) : -ENOENT;
+ mutex_unlock(&e->sysfs_lock);
+ return error;
+}
+
+static const struct sysfs_ops elv_sysfs_ops = {
+ .show = elv_attr_show,
+ .store = elv_attr_store,
+};
+
+static struct kobj_type elv_ktype = {
+ .sysfs_ops = &elv_sysfs_ops,
+ .release = elevator_release,
+};
+
+int __elv_register_queue(struct request_queue *q, struct elevator_queue *e)
+{
+ int error;
+
+ error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
+ if (!error) {
+ struct elv_fs_entry *attr = e->type->elevator_attrs;
+ if (attr) {
+ while (attr->attr.name) {
+ if (sysfs_create_file(&e->kobj, &attr->attr))
+ break;
+ attr++;
+ }
+ }
+ kobject_uevent(&e->kobj, KOBJ_ADD);
+ e->registered = 1;
+ }
+ return error;
+}
+
+int elv_register_queue(struct request_queue *q)
+{
+ return __elv_register_queue(q, q->elevator);
+}
+EXPORT_SYMBOL(elv_register_queue);
+
+void elv_unregister_queue(struct request_queue *q)
+{
+ if (q) {
+ struct elevator_queue *e = q->elevator;
+
+ kobject_uevent(&e->kobj, KOBJ_REMOVE);
+ kobject_del(&e->kobj);
+ e->registered = 0;
+ }
+}
+EXPORT_SYMBOL(elv_unregister_queue);
+
+int elv_register(struct elevator_type *e)
+{
+ char *def = "";
+
+ /* create icq_cache if requested */
+ if (e->icq_size) {
+ if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
+ WARN_ON(e->icq_align < __alignof__(struct io_cq)))
+ return -EINVAL;
+
+ snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
+ "%s_io_cq", e->elevator_name);
+ e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
+ e->icq_align, 0, NULL);
+ if (!e->icq_cache)
+ return -ENOMEM;
+ }
+
+ /* register, don't allow duplicate names */
+ spin_lock(&elv_list_lock);
+ if (elevator_find(e->elevator_name)) {
+ spin_unlock(&elv_list_lock);
+ if (e->icq_cache)
+ kmem_cache_destroy(e->icq_cache);
+ return -EBUSY;
+ }
+ list_add_tail(&e->list, &elv_list);
+ spin_unlock(&elv_list_lock);
+
+ /* print pretty message */
+ if (!strcmp(e->elevator_name, chosen_elevator) ||
+ (!*chosen_elevator &&
+ !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
+ def = " (default)";
+
+ printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
+ def);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(elv_register);
+
+void elv_unregister(struct elevator_type *e)
+{
+ /* unregister */
+ spin_lock(&elv_list_lock);
+ list_del_init(&e->list);
+ spin_unlock(&elv_list_lock);
+
+ /*
+ * Destroy icq_cache if it exists. icq's are RCU managed. Make
+ * sure all RCU operations are complete before proceeding.
+ */
+ if (e->icq_cache) {
+ rcu_barrier();
+ kmem_cache_destroy(e->icq_cache);
+ e->icq_cache = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(elv_unregister);
+
+/*
+ * Switch to new_e io scheduler. Be careful not to introduce deadlocks -
+ * we don't free the old io scheduler before we have allocated what we
+ * need for the new one. This way we have a chance of going back to the
+ * old one if the new one fails init for some reason.
+ */
+static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
+{
+ struct elevator_queue *old_elevator, *e;
+ int err;
+
+ /* allocate new elevator */
+ e = elevator_alloc(q, new_e);
+ if (!e)
+ return -ENOMEM;
+
+ err = elevator_init_queue(q, e);
+ if (err) {
+ kobject_put(&e->kobj);
+ return err;
+ }
+
+ /* turn on BYPASS and drain all requests w/ elevator private data */
+ elv_quiesce_start(q);
+
+ /* unregister old queue, register new one and kill old elevator */
+ if (q->elevator->registered) {
+ elv_unregister_queue(q);
+ err = __elv_register_queue(q, e);
+ if (err)
+ goto fail_register;
+ }
+
+ /* done, clear io_cq's, switch elevators and turn off BYPASS */
+ spin_lock_irq(q->queue_lock);
+ ioc_clear_queue(q);
+ old_elevator = q->elevator;
+ q->elevator = e;
+ spin_unlock_irq(q->queue_lock);
+
+ elevator_exit(old_elevator);
+ elv_quiesce_end(q);
+
+ blk_add_trace_msg(q, "elv switch: %s", e->type->elevator_name);
+
+ return 0;
+
+fail_register:
+ /*
+ * switch failed, exit the new io scheduler and reattach the old
+ * one again (along with re-adding the sysfs dir)
+ */
+ elevator_exit(e);
+ elv_register_queue(q);
+ elv_quiesce_end(q);
+
+ return err;
+}
+
+/*
+ * Switch this queue to the given IO scheduler.
+ */
+int elevator_change(struct request_queue *q, const char *name)
+{
+ char elevator_name[ELV_NAME_MAX];
+ struct elevator_type *e;
+
+ if (!q->elevator)
+ return -ENXIO;
+
+ strlcpy(elevator_name, name, sizeof(elevator_name));
+ e = elevator_get(strstrip(elevator_name));
+ if (!e) {
+ printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
+ return -EINVAL;
+ }
+
+ if (!strcmp(elevator_name, q->elevator->type->elevator_name)) {
+ elevator_put(e);
+ return 0;
+ }
+
+ return elevator_switch(q, e);
+}
+EXPORT_SYMBOL(elevator_change);
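+/*
+ * Illustrative only: an in-kernel caller can request a switch with e.g.
+ * elevator_change(q, "deadline"). From userspace the same path is reached
+ * through elv_iosched_store() below, i.e. by writing a scheduler name to
+ * /sys/block/<dev>/queue/scheduler.
+ */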
+
+ssize_t elv_iosched_store(struct request_queue *q, const char *name,
+ size_t count)
+{
+ int ret;
+
+ if (!q->elevator)
+ return count;
+
+ ret = elevator_change(q, name);
+ if (!ret)
+ return count;
+
+ printk(KERN_ERR "elevator: switch to %s failed\n", name);
+ return ret;
+}
+
+ssize_t elv_iosched_show(struct request_queue *q, char *name)
+{
+ struct elevator_queue *e = q->elevator;
+ struct elevator_type *elv;
+ struct elevator_type *__e;
+ int len = 0;
+
+ if (!q->elevator || !blk_queue_stackable(q))
+ return sprintf(name, "none\n");
+
+ elv = e->type;
+
+ spin_lock(&elv_list_lock);
+ list_for_each_entry(__e, &elv_list, list) {
+ if (!strcmp(elv->elevator_name, __e->elevator_name))
+ len += sprintf(name+len, "[%s] ", elv->elevator_name);
+ else
+ len += sprintf(name+len, "%s ", __e->elevator_name);
+ }
+ spin_unlock(&elv_list_lock);
+
+ len += sprintf(len+name, "\n");
+ return len;
+}
+
+struct request *elv_rb_former_request(struct request_queue *q,
+ struct request *rq)
+{
+ struct rb_node *rbprev = rb_prev(&rq->rb_node);
+
+ if (rbprev)
+ return rb_entry_rq(rbprev);
+
+ return NULL;
+}
+EXPORT_SYMBOL(elv_rb_former_request);
+
+struct request *elv_rb_latter_request(struct request_queue *q,
+ struct request *rq)
+{
+ struct rb_node *rbnext = rb_next(&rq->rb_node);
+
+ if (rbnext)
+ return rb_entry_rq(rbnext);
+
+ return NULL;
+}
+EXPORT_SYMBOL(elv_rb_latter_request);
diff --git a/block/genhd.c b/block/genhd.c
new file mode 100644
index 00000000..53c3f7e2
--- /dev/null
+++ b/block/genhd.c
@@ -0,0 +1,1824 @@
+/*
+ * gendisk handling
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/kdev_t.h>
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/kmod.h>
+#include <linux/kobj_map.h>
+#include <linux/mutex.h>
+#include <linux/idr.h>
+#include <linux/log2.h>
+
+#include "blk.h"
+
+static DEFINE_MUTEX(block_class_lock);
+struct kobject *block_depr;
+
+/* for extended dynamic devt allocation, currently only one major is used */
+#define MAX_EXT_DEVT (1 << MINORBITS)
+
+/* For extended devt allocation. ext_devt_mutex prevents lookup
+ * results from going away underneath its user.
+ */
+static DEFINE_MUTEX(ext_devt_mutex);
+static DEFINE_IDR(ext_devt_idr);
+
+static struct device_type disk_type;
+
+static void disk_alloc_events(struct gendisk *disk);
+static void disk_add_events(struct gendisk *disk);
+static void disk_del_events(struct gendisk *disk);
+static void disk_release_events(struct gendisk *disk);
+
+/**
+ * disk_get_part - get partition
+ * @disk: disk to look the partition up in
+ * @partno: partition number
+ *
+ * Look for partition @partno from @disk. If found, increment
+ * reference count and return it.
+ *
+ * CONTEXT:
+ * Don't care.
+ *
+ * RETURNS:
+ * Pointer to the found partition on success, NULL if not found.
+ */
+struct hd_struct *disk_get_part(struct gendisk *disk, int partno)
+{
+ struct hd_struct *part = NULL;
+ struct disk_part_tbl *ptbl;
+
+ if (unlikely(partno < 0))
+ return NULL;
+
+ rcu_read_lock();
+
+ ptbl = rcu_dereference(disk->part_tbl);
+ if (likely(partno < ptbl->len)) {
+ part = rcu_dereference(ptbl->part[partno]);
+ if (part)
+ get_device(part_to_dev(part));
+ }
+
+ rcu_read_unlock();
+
+ return part;
+}
+EXPORT_SYMBOL_GPL(disk_get_part);
+
+/**
+ * disk_part_iter_init - initialize partition iterator
+ * @piter: iterator to initialize
+ * @disk: disk to iterate over
+ * @flags: DISK_PITER_* flags
+ *
+ * Initialize @piter so that it iterates over partitions of @disk.
+ *
+ * CONTEXT:
+ * Don't care.
+ */
+void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk,
+ unsigned int flags)
+{
+ struct disk_part_tbl *ptbl;
+
+ rcu_read_lock();
+ ptbl = rcu_dereference(disk->part_tbl);
+
+ piter->disk = disk;
+ piter->part = NULL;
+
+ if (flags & DISK_PITER_REVERSE)
+ piter->idx = ptbl->len - 1;
+ else if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0))
+ piter->idx = 0;
+ else
+ piter->idx = 1;
+
+ piter->flags = flags;
+
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(disk_part_iter_init);
+
+/**
+ * disk_part_iter_next - proceed iterator to the next partition and return it
+ * @piter: iterator of interest
+ *
+ * Proceed @piter to the next partition and return it.
+ *
+ * CONTEXT:
+ * Don't care.
+ */
+struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
+{
+ struct disk_part_tbl *ptbl;
+ int inc, end;
+
+ /* put the last partition */
+ disk_put_part(piter->part);
+ piter->part = NULL;
+
+ /* get part_tbl */
+ rcu_read_lock();
+ ptbl = rcu_dereference(piter->disk->part_tbl);
+
+ /* determine iteration parameters */
+ if (piter->flags & DISK_PITER_REVERSE) {
+ inc = -1;
+ if (piter->flags & (DISK_PITER_INCL_PART0 |
+ DISK_PITER_INCL_EMPTY_PART0))
+ end = -1;
+ else
+ end = 0;
+ } else {
+ inc = 1;
+ end = ptbl->len;
+ }
+
+ /* iterate to the next partition */
+ for (; piter->idx != end; piter->idx += inc) {
+ struct hd_struct *part;
+
+ part = rcu_dereference(ptbl->part[piter->idx]);
+ if (!part)
+ continue;
+ if (!part->nr_sects &&
+ !(piter->flags & DISK_PITER_INCL_EMPTY) &&
+ !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
+ piter->idx == 0))
+ continue;
+
+ get_device(part_to_dev(part));
+ piter->part = part;
+ piter->idx += inc;
+ break;
+ }
+
+ rcu_read_unlock();
+
+ return piter->part;
+}
+EXPORT_SYMBOL_GPL(disk_part_iter_next);
+
+/**
+ * disk_part_iter_exit - finish up partition iteration
+ * @piter: iter of interest
+ *
+ * Called when iteration is over. Cleans up @piter.
+ *
+ * CONTEXT:
+ * Don't care.
+ */
+void disk_part_iter_exit(struct disk_part_iter *piter)
+{
+ disk_put_part(piter->part);
+ piter->part = NULL;
+}
+EXPORT_SYMBOL_GPL(disk_part_iter_exit);
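+/*
+ * Illustrative example (mirrors how register_disk() uses the iterator
+ * later in this file): walk every non-empty partition of a disk, part0
+ * excluded, printing its number.
+ *
+ *	struct disk_part_iter piter;
+ *	struct hd_struct *part;
+ *
+ *	disk_part_iter_init(&piter, disk, 0);
+ *	while ((part = disk_part_iter_next(&piter)))
+ *		pr_info("%s: partition %d\n", disk->disk_name, part->partno);
+ *	disk_part_iter_exit(&piter);
+ */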
+
+static inline int sector_in_part(struct hd_struct *part, sector_t sector)
+{
+ return part->start_sect <= sector &&
+ sector < part->start_sect + part->nr_sects;
+}
+
+/**
+ * disk_map_sector_rcu - map sector to partition
+ * @disk: gendisk of interest
+ * @sector: sector to map
+ *
+ * Find out which partition @sector maps to on @disk. This is
+ * primarily used for stats accounting.
+ *
+ * CONTEXT:
+ * RCU read locked. The returned partition pointer is valid only
+ * while preemption is disabled.
+ *
+ * RETURNS:
+ * Found partition on success, part0 is returned if no partition matches
+ */
+struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
+{
+ struct disk_part_tbl *ptbl;
+ struct hd_struct *part;
+ int i;
+
+ ptbl = rcu_dereference(disk->part_tbl);
+
+ part = rcu_dereference(ptbl->last_lookup);
+ if (part && sector_in_part(part, sector))
+ return part;
+
+ for (i = 1; i < ptbl->len; i++) {
+ part = rcu_dereference(ptbl->part[i]);
+
+ if (part && sector_in_part(part, sector)) {
+ rcu_assign_pointer(ptbl->last_lookup, part);
+ return part;
+ }
+ }
+ return &disk->part0;
+}
+EXPORT_SYMBOL_GPL(disk_map_sector_rcu);
+
+/*
+ * Can be deleted altogether. Later.
+ *
+ */
+static struct blk_major_name {
+ struct blk_major_name *next;
+ int major;
+ char name[16];
+} *major_names[BLKDEV_MAJOR_HASH_SIZE];
+
+/* index in the above - for now: assume no multimajor ranges */
+static inline int major_to_index(unsigned major)
+{
+ return major % BLKDEV_MAJOR_HASH_SIZE;
+}
+
+#ifdef CONFIG_PROC_FS
+void blkdev_show(struct seq_file *seqf, off_t offset)
+{
+ struct blk_major_name *dp;
+
+ if (offset < BLKDEV_MAJOR_HASH_SIZE) {
+ mutex_lock(&block_class_lock);
+ for (dp = major_names[offset]; dp; dp = dp->next)
+ seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
+ mutex_unlock(&block_class_lock);
+ }
+}
+#endif /* CONFIG_PROC_FS */
+
+/**
+ * register_blkdev - register a new block device
+ *
+ * @major: the requested major device number [1..255]. If @major=0, try to
+ * allocate any unused major number.
+ * @name: the name of the new block device as a zero terminated string
+ *
+ * The @name must be unique within the system.
+ *
+ * The return value depends on the @major input parameter.
+ * - if a major device number was requested in range [1..255] then the
+ * function returns zero on success, or a negative error code
+ * - if any unused major number was requested with @major=0 parameter
+ * then the return value is the allocated major number in range
+ * [1..255] or a negative error code otherwise
+ */
+int register_blkdev(unsigned int major, const char *name)
+{
+ struct blk_major_name **n, *p;
+ int index, ret = 0;
+
+ mutex_lock(&block_class_lock);
+
+ /* temporary */
+ if (major == 0) {
+ for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) {
+ if (major_names[index] == NULL)
+ break;
+ }
+
+ if (index == 0) {
+ printk("register_blkdev: failed to get major for %s\n",
+ name);
+ ret = -EBUSY;
+ goto out;
+ }
+ major = index;
+ ret = major;
+ }
+
+ p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
+ if (p == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ p->major = major;
+ strlcpy(p->name, name, sizeof(p->name));
+ p->next = NULL;
+ index = major_to_index(major);
+
+ for (n = &major_names[index]; *n; n = &(*n)->next) {
+ if ((*n)->major == major)
+ break;
+ }
+ if (!*n)
+ *n = p;
+ else
+ ret = -EBUSY;
+
+ if (ret < 0) {
+ printk("register_blkdev: cannot get major %d for %s\n",
+ major, name);
+ kfree(p);
+ }
+out:
+ mutex_unlock(&block_class_lock);
+ return ret;
+}
+
+EXPORT_SYMBOL(register_blkdev);
+
+void unregister_blkdev(unsigned int major, const char *name)
+{
+ struct blk_major_name **n;
+ struct blk_major_name *p = NULL;
+ int index = major_to_index(major);
+
+ mutex_lock(&block_class_lock);
+ for (n = &major_names[index]; *n; n = &(*n)->next)
+ if ((*n)->major == major)
+ break;
+ if (!*n || strcmp((*n)->name, name)) {
+ WARN_ON(1);
+ } else {
+ p = *n;
+ *n = p->next;
+ }
+ mutex_unlock(&block_class_lock);
+ kfree(p);
+}
+
+EXPORT_SYMBOL(unregister_blkdev);
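+/*
+ * Illustrative usage (not from the original source): a driver either
+ * claims a fixed major or passes 0 to get a dynamically allocated one.
+ * "mydriver" is a placeholder name used only for this example.
+ *
+ *	int major = register_blkdev(0, "mydriver");
+ *	if (major < 0)
+ *		return major;
+ *	...
+ *	unregister_blkdev(major, "mydriver");
+ */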
+
+static struct kobj_map *bdev_map;
+
+/**
+ * blk_mangle_minor - scatter minor numbers apart
+ * @minor: minor number to mangle
+ *
+ * Scatter consecutively allocated @minor numbers apart if MANGLE_DEVT
+ * is enabled. Mangling twice gives the original value.
+ *
+ * RETURNS:
+ * Mangled value.
+ *
+ * CONTEXT:
+ * Don't care.
+ */
+static int blk_mangle_minor(int minor)
+{
+#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT
+ int i;
+
+ for (i = 0; i < MINORBITS / 2; i++) {
+ int low = minor & (1 << i);
+ int high = minor & (1 << (MINORBITS - 1 - i));
+ int distance = MINORBITS - 1 - 2 * i;
+
+ minor ^= low | high; /* clear both bits */
+ low <<= distance; /* swap the positions */
+ high >>= distance;
+ minor |= low | high; /* and set */
+ }
+#endif
+ return minor;
+}
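+/*
+ * Worked example (added for clarity): the loop above mirrors the low
+ * MINORBITS bits of @minor, swapping bit i with bit (MINORBITS - 1 - i).
+ * With MINORBITS == 20, minor 1 (only bit 0 set) becomes 1 << 19 = 524288,
+ * and mangling 524288 again yields 1 - the transform is its own inverse.
+ */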
+
+/**
+ * blk_alloc_devt - allocate a dev_t for a partition
+ * @part: partition to allocate dev_t for
+ * @devt: out parameter for resulting dev_t
+ *
+ * Allocate a dev_t for a block device.
+ *
+ * RETURNS:
+ * 0 on success, allocated dev_t is returned in *@devt. -errno on
+ * failure.
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
+{
+ struct gendisk *disk = part_to_disk(part);
+ int idx, rc;
+
+ /* in consecutive minor range? */
+ if (part->partno < disk->minors) {
+ *devt = MKDEV(disk->major, disk->first_minor + part->partno);
+ return 0;
+ }
+
+ /* allocate ext devt */
+ do {
+ if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
+ return -ENOMEM;
+ rc = idr_get_new(&ext_devt_idr, part, &idx);
+ } while (rc == -EAGAIN);
+
+ if (rc)
+ return rc;
+
+ if (idx > MAX_EXT_DEVT) {
+ idr_remove(&ext_devt_idr, idx);
+ return -EBUSY;
+ }
+
+ *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
+ return 0;
+}
+
+/**
+ * blk_free_devt - free a dev_t
+ * @devt: dev_t to free
+ *
+ * Free @devt which was allocated using blk_alloc_devt().
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void blk_free_devt(dev_t devt)
+{
+ might_sleep();
+
+ if (devt == MKDEV(0, 0))
+ return;
+
+ if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
+ mutex_lock(&ext_devt_mutex);
+ idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
+ mutex_unlock(&ext_devt_mutex);
+ }
+}
+
+static char *bdevt_str(dev_t devt, char *buf)
+{
+ if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
+ char tbuf[BDEVT_SIZE];
+ snprintf(tbuf, BDEVT_SIZE, "%02x%02x", MAJOR(devt), MINOR(devt));
+ snprintf(buf, BDEVT_SIZE, "%-9s", tbuf);
+ } else
+ snprintf(buf, BDEVT_SIZE, "%03x:%05x", MAJOR(devt), MINOR(devt));
+
+ return buf;
+}
+
+/*
+ * Register device numbers dev..(dev+range-1)
+ * range must be nonzero
+ * The hash chain is sorted on range, so that subranges can override.
+ */
+void blk_register_region(dev_t devt, unsigned long range, struct module *module,
+ struct kobject *(*probe)(dev_t, int *, void *),
+ int (*lock)(dev_t, void *), void *data)
+{
+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
+}
+
+EXPORT_SYMBOL(blk_register_region);
+
+void blk_unregister_region(dev_t devt, unsigned long range)
+{
+ kobj_unmap(bdev_map, devt, range);
+}
+
+EXPORT_SYMBOL(blk_unregister_region);
+
+static struct kobject *exact_match(dev_t devt, int *partno, void *data)
+{
+ struct gendisk *p = data;
+
+ return &disk_to_dev(p)->kobj;
+}
+
+static int exact_lock(dev_t devt, void *data)
+{
+ struct gendisk *p = data;
+
+ if (!get_disk(p))
+ return -1;
+ return 0;
+}
+
+static void register_disk(struct gendisk *disk)
+{
+ struct device *ddev = disk_to_dev(disk);
+ struct block_device *bdev;
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+ int err;
+
+ ddev->parent = disk->driverfs_dev;
+
+ dev_set_name(ddev, disk->disk_name);
+
+	/* delay uevents until we have scanned the partition table */
+ dev_set_uevent_suppress(ddev, 1);
+
+ if (device_add(ddev))
+ return;
+ if (!sysfs_deprecated) {
+ err = sysfs_create_link(block_depr, &ddev->kobj,
+ kobject_name(&ddev->kobj));
+ if (err) {
+ device_del(ddev);
+ return;
+ }
+ }
+ disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
+ disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
+
+ /* No minors to use for partitions */
+ if (!disk_part_scan_enabled(disk))
+ goto exit;
+
+ /* No such device (e.g., media were just removed) */
+ if (!get_capacity(disk))
+ goto exit;
+
+ bdev = bdget_disk(disk, 0);
+ if (!bdev)
+ goto exit;
+
+ bdev->bd_invalidated = 1;
+ err = blkdev_get(bdev, FMODE_READ, NULL);
+ if (err < 0)
+ goto exit;
+ blkdev_put(bdev, FMODE_READ);
+
+exit:
+ /* announce disk after possible partitions are created */
+ dev_set_uevent_suppress(ddev, 0);
+ kobject_uevent(&ddev->kobj, KOBJ_ADD);
+
+ /* announce possible partitions */
+ disk_part_iter_init(&piter, disk, 0);
+ while ((part = disk_part_iter_next(&piter)))
+ kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD);
+ disk_part_iter_exit(&piter);
+}
+
+/**
+ * add_disk - add partitioning information to kernel list
+ * @disk: per-device partitioning information
+ *
+ * This function registers the partitioning information in @disk
+ * with the kernel.
+ *
+ * FIXME: error handling
+ */
+void add_disk(struct gendisk *disk)
+{
+ struct backing_dev_info *bdi;
+ dev_t devt;
+ int retval;
+
+ /* minors == 0 indicates to use ext devt from part0 and should
+ * be accompanied with EXT_DEVT flag. Make sure all
+ * parameters make sense.
+ */
+ WARN_ON(disk->minors && !(disk->major || disk->first_minor));
+ WARN_ON(!disk->minors && !(disk->flags & GENHD_FL_EXT_DEVT));
+
+ disk->flags |= GENHD_FL_UP;
+
+ retval = blk_alloc_devt(&disk->part0, &devt);
+ if (retval) {
+ WARN_ON(1);
+ return;
+ }
+ disk_to_dev(disk)->devt = devt;
+
+ /* ->major and ->first_minor aren't supposed to be
+ * dereferenced from here on, but set them just in case.
+ */
+ disk->major = MAJOR(devt);
+ disk->first_minor = MINOR(devt);
+
+ disk_alloc_events(disk);
+
+ /* Register BDI before referencing it from bdev */
+ bdi = &disk->queue->backing_dev_info;
+ bdi_register_dev(bdi, disk_devt(disk));
+
+ blk_register_region(disk_devt(disk), disk->minors, NULL,
+ exact_match, exact_lock, disk);
+ register_disk(disk);
+ blk_register_queue(disk);
+
+ /*
+ * Take an extra ref on queue which will be put on disk_release()
+ * so that it sticks around as long as @disk is there.
+ */
+ WARN_ON_ONCE(!blk_get_queue(disk->queue));
+
+ retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
+ "bdi");
+ WARN_ON(retval);
+
+ disk_add_events(disk);
+}
+EXPORT_SYMBOL(add_disk);
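+/*
+ * Typical call sequence (a sketch, not taken from this file): a block
+ * driver fills in a gendisk before calling add_disk(). 'my_fops',
+ * 'my_queue' and 'nr_sectors' are placeholders for the driver's own
+ * block_device_operations, request_queue and capacity.
+ *
+ *	disk = alloc_disk(16);
+ *	disk->major = major;
+ *	disk->first_minor = 0;
+ *	disk->fops = &my_fops;
+ *	disk->queue = my_queue;
+ *	sprintf(disk->disk_name, "myblk0");
+ *	set_capacity(disk, nr_sectors);
+ *	add_disk(disk);
+ */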
+
+void del_gendisk(struct gendisk *disk)
+{
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+
+ disk_del_events(disk);
+
+ /* invalidate stuff */
+ disk_part_iter_init(&piter, disk,
+ DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
+ while ((part = disk_part_iter_next(&piter))) {
+ invalidate_partition(disk, part->partno);
+ delete_partition(disk, part->partno);
+ }
+ disk_part_iter_exit(&piter);
+
+ invalidate_partition(disk, 0);
+ blk_free_devt(disk_to_dev(disk)->devt);
+ set_capacity(disk, 0);
+ disk->flags &= ~GENHD_FL_UP;
+
+ sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
+ bdi_unregister(&disk->queue->backing_dev_info);
+ blk_unregister_queue(disk);
+ blk_unregister_region(disk_devt(disk), disk->minors);
+
+ part_stat_set_all(&disk->part0, 0);
+ disk->part0.stamp = 0;
+
+ kobject_put(disk->part0.holder_dir);
+ kobject_put(disk->slave_dir);
+ disk->driverfs_dev = NULL;
+ if (!sysfs_deprecated)
+ sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
+ device_del(disk_to_dev(disk));
+}
+EXPORT_SYMBOL(del_gendisk);
+
+/**
+ * get_gendisk - get partitioning information for a given device
+ * @devt: device to get partitioning information for
+ * @partno: returned partition index
+ *
+ * This function gets the structure containing partitioning
+ * information for the given device @devt.
+ */
+struct gendisk *get_gendisk(dev_t devt, int *partno)
+{
+ struct gendisk *disk = NULL;
+
+ if (MAJOR(devt) != BLOCK_EXT_MAJOR) {
+ struct kobject *kobj;
+
+ kobj = kobj_lookup(bdev_map, devt, partno);
+ if (kobj)
+ disk = dev_to_disk(kobj_to_dev(kobj));
+ } else {
+ struct hd_struct *part;
+
+ mutex_lock(&ext_devt_mutex);
+ part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
+ if (part && get_disk(part_to_disk(part))) {
+ *partno = part->partno;
+ disk = part_to_disk(part);
+ }
+ mutex_unlock(&ext_devt_mutex);
+ }
+
+ return disk;
+}
+EXPORT_SYMBOL(get_gendisk);
+
+/**
+ * bdget_disk - do bdget() by gendisk and partition number
+ * @disk: gendisk of interest
+ * @partno: partition number
+ *
+ * Find partition @partno from @disk, do bdget() on it.
+ *
+ * CONTEXT:
+ * Don't care.
+ *
+ * RETURNS:
+ * Resulting block_device on success, NULL on failure.
+ */
+struct block_device *bdget_disk(struct gendisk *disk, int partno)
+{
+ struct hd_struct *part;
+ struct block_device *bdev = NULL;
+
+ part = disk_get_part(disk, partno);
+ if (part)
+ bdev = bdget(part_devt(part));
+ disk_put_part(part);
+
+ return bdev;
+}
+EXPORT_SYMBOL(bdget_disk);
+
+/*
+ * print a full list of all partitions - intended for places where the root
+ * filesystem can't be mounted and thus to give the victim some idea of what
+ * went wrong
+ */
+void __init printk_all_partitions(void)
+{
+ struct class_dev_iter iter;
+ struct device *dev;
+
+ class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
+ while ((dev = class_dev_iter_next(&iter))) {
+ struct gendisk *disk = dev_to_disk(dev);
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+ char name_buf[BDEVNAME_SIZE];
+ char devt_buf[BDEVT_SIZE];
+ char uuid_buf[PARTITION_META_INFO_UUIDLTH * 2 + 5];
+
+ /*
+ * Don't show empty devices or things that have been
+ * suppressed
+ */
+ if (get_capacity(disk) == 0 ||
+ (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
+ continue;
+
+ /*
+ * Note, unlike /proc/partitions, I am showing the
+ * numbers in hex - the same format as the root=
+ * option takes.
+ */
+ disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
+ while ((part = disk_part_iter_next(&piter))) {
+ bool is_part0 = part == &disk->part0;
+
+ uuid_buf[0] = '\0';
+ if (part->info)
+ snprintf(uuid_buf, sizeof(uuid_buf), "%pU",
+ part->info->uuid);
+
+ printk("%s%s %10llu %s %s", is_part0 ? "" : " ",
+ bdevt_str(part_devt(part), devt_buf),
+ (unsigned long long)part->nr_sects >> 1,
+ disk_name(disk, part->partno, name_buf),
+ uuid_buf);
+ if (is_part0) {
+ if (disk->driverfs_dev != NULL &&
+ disk->driverfs_dev->driver != NULL)
+ printk(" driver: %s\n",
+ disk->driverfs_dev->driver->name);
+ else
+ printk(" (driver?)\n");
+ } else
+ printk("\n");
+ }
+ disk_part_iter_exit(&piter);
+ }
+ class_dev_iter_exit(&iter);
+}
+
+#ifdef CONFIG_PROC_FS
+/* iterator */
+static void *disk_seqf_start(struct seq_file *seqf, loff_t *pos)
+{
+ loff_t skip = *pos;
+ struct class_dev_iter *iter;
+ struct device *dev;
+
+ iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return ERR_PTR(-ENOMEM);
+
+ seqf->private = iter;
+ class_dev_iter_init(iter, &block_class, NULL, &disk_type);
+ do {
+ dev = class_dev_iter_next(iter);
+ if (!dev)
+ return NULL;
+ } while (skip--);
+
+ return dev_to_disk(dev);
+}
+
+static void *disk_seqf_next(struct seq_file *seqf, void *v, loff_t *pos)
+{
+ struct device *dev;
+
+ (*pos)++;
+ dev = class_dev_iter_next(seqf->private);
+ if (dev)
+ return dev_to_disk(dev);
+
+ return NULL;
+}
+
+static void disk_seqf_stop(struct seq_file *seqf, void *v)
+{
+ struct class_dev_iter *iter = seqf->private;
+
+ /* stop is called even after start failed :-( */
+ if (iter) {
+ class_dev_iter_exit(iter);
+ kfree(iter);
+ }
+}
+
+static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
+{
+ static void *p;
+
+ p = disk_seqf_start(seqf, pos);
+ if (!IS_ERR_OR_NULL(p) && !*pos)
+ seq_puts(seqf, "major minor #blocks name\n\n");
+ return p;
+}
+
+static int show_partition(struct seq_file *seqf, void *v)
+{
+ struct gendisk *sgp = v;
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+ char buf[BDEVNAME_SIZE];
+
+	/* Don't show non-partitionable removable devices or empty devices */
+ if (!get_capacity(sgp) || (!disk_max_parts(sgp) &&
+ (sgp->flags & GENHD_FL_REMOVABLE)))
+ return 0;
+ if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
+ return 0;
+
+ /* show the full disk and all non-0 size partitions of it */
+ disk_part_iter_init(&piter, sgp, DISK_PITER_INCL_PART0);
+ while ((part = disk_part_iter_next(&piter)))
+ seq_printf(seqf, "%4d %7d %10llu %s\n",
+ MAJOR(part_devt(part)), MINOR(part_devt(part)),
+ (unsigned long long)part->nr_sects >> 1,
+ disk_name(sgp, part->partno, buf));
+ disk_part_iter_exit(&piter);
+
+ return 0;
+}
+
+static const struct seq_operations partitions_op = {
+ .start = show_partition_start,
+ .next = disk_seqf_next,
+ .stop = disk_seqf_stop,
+ .show = show_partition
+};
+
+static int partitions_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &partitions_op);
+}
+
+static const struct file_operations proc_partitions_operations = {
+ .open = partitions_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+#endif
+
+
+static struct kobject *base_probe(dev_t devt, int *partno, void *data)
+{
+ if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
+ /* Make old-style 2.4 aliases work */
+ request_module("block-major-%d", MAJOR(devt));
+ return NULL;
+}
+
+static int __init genhd_device_init(void)
+{
+ int error;
+
+ block_class.dev_kobj = sysfs_dev_block_kobj;
+ error = class_register(&block_class);
+ if (unlikely(error))
+ return error;
+ bdev_map = kobj_map_init(base_probe, &block_class_lock);
+ blk_dev_init();
+
+ register_blkdev(BLOCK_EXT_MAJOR, "blkext");
+
+ /* create top-level block dir */
+ if (!sysfs_deprecated)
+ block_depr = kobject_create_and_add("block", NULL);
+ return 0;
+}
+
+subsys_initcall(genhd_device_init);
+
+static ssize_t disk_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ return sprintf(buf, "%d\n", disk->minors);
+}
+
+static ssize_t disk_ext_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ return sprintf(buf, "%d\n", disk_max_parts(disk));
+}
+
+static ssize_t disk_removable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ return sprintf(buf, "%d\n",
+ (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
+}
+
+static ssize_t disk_ro_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ return sprintf(buf, "%d\n", get_disk_ro(disk) ? 1 : 0);
+}
+
+static ssize_t disk_capability_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ return sprintf(buf, "%x\n", disk->flags);
+}
+
+static ssize_t disk_alignment_offset_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue));
+}
+
+static ssize_t disk_discard_alignment_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
+}
+
+static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
+static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL);
+static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
+static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL);
+static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
+static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
+static DEVICE_ATTR(discard_alignment, S_IRUGO, disk_discard_alignment_show,
+ NULL);
+static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
+static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
+static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+static struct device_attribute dev_attr_fail =
+ __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
+#endif
+#ifdef CONFIG_FAIL_IO_TIMEOUT
+static struct device_attribute dev_attr_fail_timeout =
+ __ATTR(io-timeout-fail, S_IRUGO|S_IWUSR, part_timeout_show,
+ part_timeout_store);
+#endif
+
+static struct attribute *disk_attrs[] = {
+ &dev_attr_range.attr,
+ &dev_attr_ext_range.attr,
+ &dev_attr_removable.attr,
+ &dev_attr_ro.attr,
+ &dev_attr_size.attr,
+ &dev_attr_alignment_offset.attr,
+ &dev_attr_discard_alignment.attr,
+ &dev_attr_capability.attr,
+ &dev_attr_stat.attr,
+ &dev_attr_inflight.attr,
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+ &dev_attr_fail.attr,
+#endif
+#ifdef CONFIG_FAIL_IO_TIMEOUT
+ &dev_attr_fail_timeout.attr,
+#endif
+ NULL
+};
+
+static struct attribute_group disk_attr_group = {
+ .attrs = disk_attrs,
+};
+
+static const struct attribute_group *disk_attr_groups[] = {
+ &disk_attr_group,
+ NULL
+};
+
+/**
+ * disk_replace_part_tbl - replace disk->part_tbl in RCU-safe way
+ * @disk: disk to replace part_tbl for
+ * @new_ptbl: new part_tbl to install
+ *
+ * Replace disk->part_tbl with @new_ptbl in RCU-safe way. The
+ * original ptbl is freed using RCU callback.
+ *
+ * LOCKING:
+ * Matching bd_mutex locked.
+ */
+static void disk_replace_part_tbl(struct gendisk *disk,
+ struct disk_part_tbl *new_ptbl)
+{
+ struct disk_part_tbl *old_ptbl = disk->part_tbl;
+
+ rcu_assign_pointer(disk->part_tbl, new_ptbl);
+
+ if (old_ptbl) {
+ rcu_assign_pointer(old_ptbl->last_lookup, NULL);
+ kfree_rcu(old_ptbl, rcu_head);
+ }
+}
+
+/**
+ * disk_expand_part_tbl - expand disk->part_tbl
+ * @disk: disk to expand part_tbl for
+ * @partno: expand such that this partno can fit in
+ *
+ * Expand disk->part_tbl such that @partno can fit in. disk->part_tbl
+ * uses RCU to allow unlocked dereferencing for stats and other stuff.
+ *
+ * LOCKING:
+ * Matching bd_mutex locked, might sleep.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int disk_expand_part_tbl(struct gendisk *disk, int partno)
+{
+ struct disk_part_tbl *old_ptbl = disk->part_tbl;
+ struct disk_part_tbl *new_ptbl;
+ int len = old_ptbl ? old_ptbl->len : 0;
+ int target = partno + 1;
+ size_t size;
+ int i;
+
+ /* disk_max_parts() is zero during initialization, ignore if so */
+ if (disk_max_parts(disk) && target > disk_max_parts(disk))
+ return -EINVAL;
+
+ if (target <= len)
+ return 0;
+
+ size = sizeof(*new_ptbl) + target * sizeof(new_ptbl->part[0]);
+ new_ptbl = kzalloc_node(size, GFP_KERNEL, disk->node_id);
+ if (!new_ptbl)
+ return -ENOMEM;
+
+ new_ptbl->len = target;
+
+ for (i = 0; i < len; i++)
+ rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]);
+
+ disk_replace_part_tbl(disk, new_ptbl);
+ return 0;
+}
+
+static void disk_release(struct device *dev)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ disk_release_events(disk);
+ kfree(disk->random);
+ disk_replace_part_tbl(disk, NULL);
+ free_part_stats(&disk->part0);
+ free_part_info(&disk->part0);
+ if (disk->queue)
+ blk_put_queue(disk->queue);
+ kfree(disk);
+}
+
+static int disk_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+ int cnt = 0;
+
+ disk_part_iter_init(&piter, disk, 0);
+ while((part = disk_part_iter_next(&piter)))
+ cnt++;
+ disk_part_iter_exit(&piter);
+ add_uevent_var(env, "NPARTS=%u", cnt);
+ return 0;
+}
+
+struct class block_class = {
+ .name = "block",
+};
+
+static char *block_devnode(struct device *dev, umode_t *mode)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ if (disk->devnode)
+ return disk->devnode(disk, mode);
+ return NULL;
+}
+
+static struct device_type disk_type = {
+ .name = "disk",
+ .groups = disk_attr_groups,
+ .release = disk_release,
+ .devnode = block_devnode,
+ .uevent = disk_uevent,
+};
+
+#ifdef CONFIG_PROC_FS
+/*
+ * aggregate disk stat collector. Uses the same stats that the sysfs
+ * entries do, above, but makes them available through one seq_file.
+ *
+ * The output looks suspiciously like /proc/partitions with a bunch of
+ * extra fields.
+ */
+static int diskstats_show(struct seq_file *seqf, void *v)
+{
+ struct gendisk *gp = v;
+ struct disk_part_iter piter;
+ struct hd_struct *hd;
+ char buf[BDEVNAME_SIZE];
+ int cpu;
+
+ /*
+ if (&disk_to_dev(gp)->kobj.entry == block_class.devices.next)
+ seq_puts(seqf, "major minor name"
+ " rio rmerge rsect ruse wio wmerge "
+ "wsect wuse running use aveq"
+ "\n\n");
+ */
+
+ disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0);
+ while ((hd = disk_part_iter_next(&piter))) {
+ cpu = part_stat_lock();
+ part_round_stats(cpu, hd);
+ part_stat_unlock();
+ seq_printf(seqf, "%4d %7d %s %lu %lu %lu "
+ "%u %lu %lu %lu %u %u %u %u\n",
+ MAJOR(part_devt(hd)), MINOR(part_devt(hd)),
+ disk_name(gp, hd->partno, buf),
+ part_stat_read(hd, ios[READ]),
+ part_stat_read(hd, merges[READ]),
+ part_stat_read(hd, sectors[READ]),
+ jiffies_to_msecs(part_stat_read(hd, ticks[READ])),
+ part_stat_read(hd, ios[WRITE]),
+ part_stat_read(hd, merges[WRITE]),
+ part_stat_read(hd, sectors[WRITE]),
+ jiffies_to_msecs(part_stat_read(hd, ticks[WRITE])),
+ part_in_flight(hd),
+ jiffies_to_msecs(part_stat_read(hd, io_ticks)),
+ jiffies_to_msecs(part_stat_read(hd, time_in_queue))
+ );
+ }
+ disk_part_iter_exit(&piter);
+
+ return 0;
+}
+
+static const struct seq_operations diskstats_op = {
+ .start = disk_seqf_start,
+ .next = disk_seqf_next,
+ .stop = disk_seqf_stop,
+ .show = diskstats_show
+};
+
+static int diskstats_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &diskstats_op);
+}
+
+static const struct file_operations proc_diskstats_operations = {
+ .open = diskstats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int __init proc_genhd_init(void)
+{
+ proc_create("diskstats", 0, NULL, &proc_diskstats_operations);
+ proc_create("partitions", 0, NULL, &proc_partitions_operations);
+ return 0;
+}
+module_init(proc_genhd_init);
+#endif /* CONFIG_PROC_FS */
+
+dev_t blk_lookup_devt(const char *name, int partno)
+{
+ dev_t devt = MKDEV(0, 0);
+ struct class_dev_iter iter;
+ struct device *dev;
+
+ class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
+ while ((dev = class_dev_iter_next(&iter))) {
+ struct gendisk *disk = dev_to_disk(dev);
+ struct hd_struct *part;
+
+ if (strcmp(dev_name(dev), name))
+ continue;
+
+ if (partno < disk->minors) {
+ /* We need to return the right devno, even
+ * if the partition doesn't exist yet.
+ */
+ devt = MKDEV(MAJOR(dev->devt),
+ MINOR(dev->devt) + partno);
+ break;
+ }
+ part = disk_get_part(disk, partno);
+ if (part) {
+ devt = part_devt(part);
+ disk_put_part(part);
+ break;
+ }
+ disk_put_part(part);
+ }
+ class_dev_iter_exit(&iter);
+ return devt;
+}
+EXPORT_SYMBOL(blk_lookup_devt);
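+/*
+ * Example (illustrative): blk_lookup_devt("sda", 1) returns the dev_t of
+ * sda1 if the disk is registered, or MKDEV(0, 0) otherwise. This is the
+ * lookup used when resolving textual device names such as root= at boot.
+ */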
+
+struct gendisk *alloc_disk(int minors)
+{
+ return alloc_disk_node(minors, -1);
+}
+EXPORT_SYMBOL(alloc_disk);
+
+struct gendisk *alloc_disk_node(int minors, int node_id)
+{
+ struct gendisk *disk;
+
+ disk = kmalloc_node(sizeof(struct gendisk),
+ GFP_KERNEL | __GFP_ZERO, node_id);
+ if (disk) {
+ if (!init_part_stats(&disk->part0)) {
+ kfree(disk);
+ return NULL;
+ }
+ disk->node_id = node_id;
+ if (disk_expand_part_tbl(disk, 0)) {
+ free_part_stats(&disk->part0);
+ kfree(disk);
+ return NULL;
+ }
+ disk->part_tbl->part[0] = &disk->part0;
+
+ hd_ref_init(&disk->part0);
+
+ disk->minors = minors;
+ rand_initialize_disk(disk);
+ disk_to_dev(disk)->class = &block_class;
+ disk_to_dev(disk)->type = &disk_type;
+ device_initialize(disk_to_dev(disk));
+ }
+ return disk;
+}
+EXPORT_SYMBOL(alloc_disk_node);
+
+struct kobject *get_disk(struct gendisk *disk)
+{
+ struct module *owner;
+ struct kobject *kobj;
+
+ if (!disk->fops)
+ return NULL;
+ owner = disk->fops->owner;
+ if (owner && !try_module_get(owner))
+ return NULL;
+ kobj = kobject_get(&disk_to_dev(disk)->kobj);
+ if (kobj == NULL) {
+ module_put(owner);
+ return NULL;
+ }
+ return kobj;
+
+}
+
+EXPORT_SYMBOL(get_disk);
+
+void put_disk(struct gendisk *disk)
+{
+ if (disk)
+ kobject_put(&disk_to_dev(disk)->kobj);
+}
+
+EXPORT_SYMBOL(put_disk);
+
+static void set_disk_ro_uevent(struct gendisk *gd, int ro)
+{
+ char event[] = "DISK_RO=1";
+ char *envp[] = { event, NULL };
+
+ if (!ro)
+ event[8] = '0';
+ kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp);
+}
+
+void set_device_ro(struct block_device *bdev, int flag)
+{
+ bdev->bd_part->policy = flag;
+}
+
+EXPORT_SYMBOL(set_device_ro);
+
+void set_disk_ro(struct gendisk *disk, int flag)
+{
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+
+ if (disk->part0.policy != flag) {
+ set_disk_ro_uevent(disk, flag);
+ disk->part0.policy = flag;
+ }
+
+ disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
+ while ((part = disk_part_iter_next(&piter)))
+ part->policy = flag;
+ disk_part_iter_exit(&piter);
+}
+
+EXPORT_SYMBOL(set_disk_ro);
+
+int bdev_read_only(struct block_device *bdev)
+{
+ if (!bdev)
+ return 0;
+ return bdev->bd_part->policy;
+}
+
+EXPORT_SYMBOL(bdev_read_only);
+
+int invalidate_partition(struct gendisk *disk, int partno)
+{
+ int res = 0;
+ struct block_device *bdev = bdget_disk(disk, partno);
+ if (bdev) {
+ fsync_bdev(bdev);
+ res = __invalidate_device(bdev, true);
+ bdput(bdev);
+ }
+ return res;
+}
+
+EXPORT_SYMBOL(invalidate_partition);
+
+/*
+ * Disk events - monitor disk events like media change and eject request.
+ */
+struct disk_events {
+ struct list_head node; /* all disk_event's */
+ struct gendisk *disk; /* the associated disk */
+ spinlock_t lock;
+
+ struct mutex block_mutex; /* protects blocking */
+ int block; /* event blocking depth */
+ unsigned int pending; /* events already sent out */
+ unsigned int clearing; /* events being cleared */
+
+ long poll_msecs; /* interval, -1 for default */
+ struct delayed_work dwork;
+};
+
+static const char *disk_events_strs[] = {
+ [ilog2(DISK_EVENT_MEDIA_CHANGE)] = "media_change",
+ [ilog2(DISK_EVENT_EJECT_REQUEST)] = "eject_request",
+};
+
+static char *disk_uevents[] = {
+ [ilog2(DISK_EVENT_MEDIA_CHANGE)] = "DISK_MEDIA_CHANGE=1",
+ [ilog2(DISK_EVENT_EJECT_REQUEST)] = "DISK_EJECT_REQUEST=1",
+};
+
+/* list of all disk_events */
+static DEFINE_MUTEX(disk_events_mutex);
+static LIST_HEAD(disk_events);
+
+/* disable in-kernel polling by default */
+static unsigned long disk_events_dfl_poll_msecs = 0;
+
+static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
+{
+ struct disk_events *ev = disk->ev;
+ long intv_msecs = 0;
+
+ /*
+ * If device-specific poll interval is set, always use it. If
+ * the default is being used, poll iff there are events which
+ * can't be monitored asynchronously.
+ */
+ if (ev->poll_msecs >= 0)
+ intv_msecs = ev->poll_msecs;
+ else if (disk->events & ~disk->async_events)
+ intv_msecs = disk_events_dfl_poll_msecs;
+
+ return msecs_to_jiffies(intv_msecs);
+}
+
+/**
+ * disk_block_events - block and flush disk event checking
+ * @disk: disk to block events for
+ *
+ * On return from this function, it is guaranteed that event checking
+ * isn't in progress and won't happen until unblocked by
+ * disk_unblock_events(). Events blocking is counted and the actual
+ * unblocking happens after the matching number of unblocks are done.
+ *
+ * Note that this intentionally does not block event checking from
+ * disk_clear_events().
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void disk_block_events(struct gendisk *disk)
+{
+ struct disk_events *ev = disk->ev;
+ unsigned long flags;
+ bool cancel;
+
+ if (!ev)
+ return;
+
+ /*
+ * Outer mutex ensures that the first blocker completes canceling
+ * the event work before further blockers are allowed to finish.
+ */
+ mutex_lock(&ev->block_mutex);
+
+ spin_lock_irqsave(&ev->lock, flags);
+ cancel = !ev->block++;
+ spin_unlock_irqrestore(&ev->lock, flags);
+
+ if (cancel)
+ cancel_delayed_work_sync(&disk->ev->dwork);
+
+ mutex_unlock(&ev->block_mutex);
+}
+
+static void __disk_unblock_events(struct gendisk *disk, bool check_now)
+{
+ struct disk_events *ev = disk->ev;
+ unsigned long intv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ev->lock, flags);
+
+ if (WARN_ON_ONCE(ev->block <= 0))
+ goto out_unlock;
+
+ if (--ev->block)
+ goto out_unlock;
+
+ /*
+	 * Not exactly a latency-critical operation; set poll timer
+ * slack to 25% and kick event check.
+ */
+ intv = disk_events_poll_jiffies(disk);
+ set_timer_slack(&ev->dwork.timer, intv / 4);
+ if (check_now)
+ queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+ else if (intv)
+ queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+out_unlock:
+ spin_unlock_irqrestore(&ev->lock, flags);
+}
+
+/**
+ * disk_unblock_events - unblock disk event checking
+ * @disk: disk to unblock events for
+ *
+ * Undo disk_block_events(). When the block count reaches zero, it
+ * starts events polling if configured.
+ *
+ * CONTEXT:
+ * Don't care. Safe to call from irq context.
+ */
+void disk_unblock_events(struct gendisk *disk)
+{
+ if (disk->ev)
+ __disk_unblock_events(disk, false);
+}
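+/*
+ * Illustrative pairing (mirrors disk_clear_events() below): callers that
+ * must keep the event work from running bracket the critical section with
+ * a block/unblock pair.
+ *
+ *	disk_block_events(disk);
+ *	... poke the device without racing the event work ...
+ *	disk_unblock_events(disk);
+ */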
+
+/**
+ * disk_flush_events - schedule immediate event checking and flushing
+ * @disk: disk to check and flush events for
+ * @mask: events to flush
+ *
+ * Schedule immediate event checking on @disk if not blocked. Events in
+ * @mask are scheduled to be cleared from the driver. Note that this
+ * doesn't clear the events from @disk->ev.
+ *
+ * CONTEXT:
+ * If @mask is non-zero must be called with bdev->bd_mutex held.
+ */
+void disk_flush_events(struct gendisk *disk, unsigned int mask)
+{
+ struct disk_events *ev = disk->ev;
+
+ if (!ev)
+ return;
+
+ spin_lock_irq(&ev->lock);
+ ev->clearing |= mask;
+ if (!ev->block) {
+ cancel_delayed_work(&ev->dwork);
+ queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+ }
+ spin_unlock_irq(&ev->lock);
+}
+
+/**
+ * disk_clear_events - synchronously check, clear and return pending events
+ * @disk: disk to fetch and clear events from
+ * @mask: mask of events to be fetched and cleared
+ *
+ * Disk events are synchronously checked and pending events in @mask
+ * are cleared and returned. This ignores the block count.
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
+{
+ const struct block_device_operations *bdops = disk->fops;
+ struct disk_events *ev = disk->ev;
+ unsigned int pending;
+
+ if (!ev) {
+ /* for drivers still using the old ->media_changed method */
+ if ((mask & DISK_EVENT_MEDIA_CHANGE) &&
+ bdops->media_changed && bdops->media_changed(disk))
+ return DISK_EVENT_MEDIA_CHANGE;
+ return 0;
+ }
+
+ /* tell the workfn about the events being cleared */
+ spin_lock_irq(&ev->lock);
+ ev->clearing |= mask;
+ spin_unlock_irq(&ev->lock);
+
+	/* unconditionally schedule event check and wait for it to finish */
+ disk_block_events(disk);
+ queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+ flush_delayed_work(&ev->dwork);
+ __disk_unblock_events(disk, false);
+
+ /* then, fetch and clear pending events */
+ spin_lock_irq(&ev->lock);
+ WARN_ON_ONCE(ev->clearing & mask); /* cleared by workfn */
+ pending = ev->pending & mask;
+ ev->pending &= ~mask;
+ spin_unlock_irq(&ev->lock);
+
+ return pending;
+}
+
+static void disk_events_workfn(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct disk_events *ev = container_of(dwork, struct disk_events, dwork);
+ struct gendisk *disk = ev->disk;
+ char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
+ unsigned int clearing = ev->clearing;
+ unsigned int events;
+ unsigned long intv;
+ int nr_events = 0, i;
+
+ /* check events */
+ events = disk->fops->check_events(disk, clearing);
+
+ /* accumulate pending events and schedule next poll if necessary */
+ spin_lock_irq(&ev->lock);
+
+ events &= ~ev->pending;
+ ev->pending |= events;
+ ev->clearing &= ~clearing;
+
+ intv = disk_events_poll_jiffies(disk);
+ if (!ev->block && intv)
+ queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+
+ spin_unlock_irq(&ev->lock);
+
+ /*
+ * Tell userland about new events. Only the events listed in
+ * @disk->events are reported. Unlisted events are processed the
+ * same internally but never get reported to userland.
+ */
+ for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
+ if (events & disk->events & (1 << i))
+ envp[nr_events++] = disk_uevents[i];
+
+ if (nr_events)
+ kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
+}
+
+/*
+ * A disk events enabled device has the following sysfs nodes under
+ * its /sys/block/X/ directory.
+ *
+ * events : list of all supported events
+ * events_async : list of events which can be detected w/o polling
+ * events_poll_msecs : polling interval, 0: disable, -1: system default
+ */
+static ssize_t __disk_events_show(unsigned int events, char *buf)
+{
+ const char *delim = "";
+ ssize_t pos = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(disk_events_strs); i++)
+ if (events & (1 << i)) {
+ pos += sprintf(buf + pos, "%s%s",
+ delim, disk_events_strs[i]);
+ delim = " ";
+ }
+ if (pos)
+ pos += sprintf(buf + pos, "\n");
+ return pos;
+}
+
+static ssize_t disk_events_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ return __disk_events_show(disk->events, buf);
+}
+
+static ssize_t disk_events_async_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ return __disk_events_show(disk->async_events, buf);
+}
+
+static ssize_t disk_events_poll_msecs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ return sprintf(buf, "%ld\n", disk->ev->poll_msecs);
+}
+
+static ssize_t disk_events_poll_msecs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ long intv;
+
+ if (!count || !sscanf(buf, "%ld", &intv))
+ return -EINVAL;
+
+ if (intv < 0 && intv != -1)
+ return -EINVAL;
+
+ disk_block_events(disk);
+ disk->ev->poll_msecs = intv;
+ __disk_unblock_events(disk, true);
+
+ return count;
+}
+
+static const DEVICE_ATTR(events, S_IRUGO, disk_events_show, NULL);
+static const DEVICE_ATTR(events_async, S_IRUGO, disk_events_async_show, NULL);
+static const DEVICE_ATTR(events_poll_msecs, S_IRUGO|S_IWUSR,
+ disk_events_poll_msecs_show,
+ disk_events_poll_msecs_store);
+
+static const struct attribute *disk_events_attrs[] = {
+ &dev_attr_events.attr,
+ &dev_attr_events_async.attr,
+ &dev_attr_events_poll_msecs.attr,
+ NULL,
+};
+
+/*
+ * The default polling interval can be specified by the kernel
+ * parameter block.events_dfl_poll_msecs which defaults to 0
+ * (disable). This can also be modified runtime by writing to
+ * /sys/module/block/events_dfl_poll_msecs.
+ */
+static int disk_events_set_dfl_poll_msecs(const char *val,
+ const struct kernel_param *kp)
+{
+ struct disk_events *ev;
+ int ret;
+
+ ret = param_set_ulong(val, kp);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&disk_events_mutex);
+
+ list_for_each_entry(ev, &disk_events, node)
+ disk_flush_events(ev->disk, 0);
+
+ mutex_unlock(&disk_events_mutex);
+
+ return 0;
+}
+
+static const struct kernel_param_ops disk_events_dfl_poll_msecs_param_ops = {
+ .set = disk_events_set_dfl_poll_msecs,
+ .get = param_get_ulong,
+};
+
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "block."
+
+module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
+ &disk_events_dfl_poll_msecs, 0644);
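+/*
+ * Example (illustrative): in-kernel polling every two seconds can be
+ * requested at boot with "block.events_dfl_poll_msecs=2000" on the kernel
+ * command line, or later by writing 2000 to the parameter file that
+ * module_param_cb() typically exposes under /sys/module/block/parameters/.
+ */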
+
+/*
+ * disk_{alloc|add|del|release}_events - initialize and destroy disk_events.
+ */
+static void disk_alloc_events(struct gendisk *disk)
+{
+ struct disk_events *ev;
+
+ if (!disk->fops->check_events)
+ return;
+
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev) {
+ pr_warn("%s: failed to initialize events\n", disk->disk_name);
+ return;
+ }
+
+ INIT_LIST_HEAD(&ev->node);
+ ev->disk = disk;
+ spin_lock_init(&ev->lock);
+ mutex_init(&ev->block_mutex);
+ ev->block = 1;
+ ev->poll_msecs = -1;
+ INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
+
+ disk->ev = ev;
+}
+
+static void disk_add_events(struct gendisk *disk)
+{
+ if (!disk->ev)
+ return;
+
+ /* FIXME: error handling */
+ if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0)
+ pr_warn("%s: failed to create sysfs files for events\n",
+ disk->disk_name);
+
+ mutex_lock(&disk_events_mutex);
+ list_add_tail(&disk->ev->node, &disk_events);
+ mutex_unlock(&disk_events_mutex);
+
+ /*
+ * Block count is initialized to 1 and the following initial
+ * unblock kicks it into action.
+ */
+ __disk_unblock_events(disk, true);
+}
+
+static void disk_del_events(struct gendisk *disk)
+{
+ if (!disk->ev)
+ return;
+
+ disk_block_events(disk);
+
+ mutex_lock(&disk_events_mutex);
+ list_del_init(&disk->ev->node);
+ mutex_unlock(&disk_events_mutex);
+
+ sysfs_remove_files(&disk_to_dev(disk)->kobj, disk_events_attrs);
+}
+
+static void disk_release_events(struct gendisk *disk)
+{
+ /* the block count should be 1 from disk_del_events() */
+ WARN_ON_ONCE(disk->ev && disk->ev->block != 1);
+ kfree(disk->ev);
+}
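
Illustrative sketch, not part of the patch: the events plumbing above only engages for disks whose block_device_operations provide a check_events callback (disk_alloc_events() bails out otherwise), and the driver advertises which events are worth polling for in disk->events. A minimal driver-side sketch; all mydrv_* names are invented for illustration and the media-change test is a stub.

#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/module.h>

/* Stand-in for the driver's real hardware query (assumption, not real code). */
static bool mydrv_media_changed(void *priv)
{
	return false;
}

static unsigned int mydrv_check_events(struct gendisk *disk,
				       unsigned int clearing)
{
	/* Report a media change since the last check, if any. */
	return mydrv_media_changed(disk->private_data) ?
		DISK_EVENT_MEDIA_CHANGE : 0;
}

static const struct block_device_operations mydrv_fops = {
	.owner		= THIS_MODULE,
	.check_events	= mydrv_check_events,
};

static void mydrv_setup_disk(struct gendisk *disk)
{
	disk->fops = &mydrv_fops;
	/* Events the block core may poll for (cf. the workfn above). */
	disk->events = DISK_EVENT_MEDIA_CHANGE;
}
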
diff --git a/block/ioctl.c b/block/ioctl.c
new file mode 100644
index 00000000..ba15b2db
--- /dev/null
+++ b/block/ioctl.c
@@ -0,0 +1,351 @@
+#include <linux/capability.h>
+#include <linux/blkdev.h>
+#include <linux/export.h>
+#include <linux/gfp.h>
+#include <linux/blkpg.h>
+#include <linux/hdreg.h>
+#include <linux/backing-dev.h>
+#include <linux/fs.h>
+#include <linux/blktrace_api.h>
+#include <asm/uaccess.h>
+
+static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
+{
+ struct block_device *bdevp;
+ struct gendisk *disk;
+ struct hd_struct *part;
+ struct blkpg_ioctl_arg a;
+ struct blkpg_partition p;
+ struct disk_part_iter piter;
+ long long start, length;
+ int partno;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
+ return -EFAULT;
+ if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
+ return -EFAULT;
+ disk = bdev->bd_disk;
+ if (bdev != bdev->bd_contains)
+ return -EINVAL;
+ partno = p.pno;
+ if (partno <= 0)
+ return -EINVAL;
+ switch (a.op) {
+ case BLKPG_ADD_PARTITION:
+ start = p.start >> 9;
+ length = p.length >> 9;
+ /* check for fit in a hd_struct */
+ if (sizeof(sector_t) == sizeof(long) &&
+ sizeof(long long) > sizeof(long)) {
+ long pstart = start, plength = length;
+ if (pstart != start || plength != length
+ || pstart < 0 || plength < 0)
+ return -EINVAL;
+ }
+
+ mutex_lock(&bdev->bd_mutex);
+
+ /* overlap? */
+ disk_part_iter_init(&piter, disk,
+ DISK_PITER_INCL_EMPTY);
+ while ((part = disk_part_iter_next(&piter))) {
+ if (!(start + length <= part->start_sect ||
+ start >= part->start_sect + part->nr_sects)) {
+ disk_part_iter_exit(&piter);
+ mutex_unlock(&bdev->bd_mutex);
+ return -EBUSY;
+ }
+ }
+ disk_part_iter_exit(&piter);
+
+ /* all seems OK */
+ part = add_partition(disk, partno, start, length,
+ ADDPART_FLAG_NONE, NULL);
+ mutex_unlock(&bdev->bd_mutex);
+ return IS_ERR(part) ? PTR_ERR(part) : 0;
+ case BLKPG_DEL_PARTITION:
+ part = disk_get_part(disk, partno);
+ if (!part)
+ return -ENXIO;
+
+ bdevp = bdget(part_devt(part));
+ disk_put_part(part);
+ if (!bdevp)
+ return -ENOMEM;
+
+ mutex_lock(&bdevp->bd_mutex);
+ if (bdevp->bd_openers) {
+ mutex_unlock(&bdevp->bd_mutex);
+ bdput(bdevp);
+ return -EBUSY;
+ }
+ /* all seems OK */
+ fsync_bdev(bdevp);
+ invalidate_bdev(bdevp);
+
+ mutex_lock_nested(&bdev->bd_mutex, 1);
+ delete_partition(disk, partno);
+ mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdevp->bd_mutex);
+ bdput(bdevp);
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int blkdev_reread_part(struct block_device *bdev)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ int res;
+
+ if (!disk_part_scan_enabled(disk) || bdev != bdev->bd_contains)
+ return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (!mutex_trylock(&bdev->bd_mutex))
+ return -EBUSY;
+ res = rescan_partitions(disk, bdev);
+ mutex_unlock(&bdev->bd_mutex);
+ return res;
+}
+
+static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
+ uint64_t len, int secure)
+{
+ unsigned long flags = 0;
+
+ if (start & 511)
+ return -EINVAL;
+ if (len & 511)
+ return -EINVAL;
+ start >>= 9;
+ len >>= 9;
+
+ if (start + len > (i_size_read(bdev->bd_inode) >> 9))
+ return -EINVAL;
+ if (secure)
+ flags |= BLKDEV_DISCARD_SECURE;
+ return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags);
+}
+
+static int put_ushort(unsigned long arg, unsigned short val)
+{
+ return put_user(val, (unsigned short __user *)arg);
+}
+
+static int put_int(unsigned long arg, int val)
+{
+ return put_user(val, (int __user *)arg);
+}
+
+static int put_uint(unsigned long arg, unsigned int val)
+{
+ return put_user(val, (unsigned int __user *)arg);
+}
+
+static int put_long(unsigned long arg, long val)
+{
+ return put_user(val, (long __user *)arg);
+}
+
+static int put_ulong(unsigned long arg, unsigned long val)
+{
+ return put_user(val, (unsigned long __user *)arg);
+}
+
+static int put_u64(unsigned long arg, u64 val)
+{
+ return put_user(val, (u64 __user *)arg);
+}
+
+int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned cmd, unsigned long arg)
+{
+ struct gendisk *disk = bdev->bd_disk;
+
+ if (disk->fops->ioctl)
+ return disk->fops->ioctl(bdev, mode, cmd, arg);
+
+ return -ENOTTY;
+}
+/*
+ * For the record: _GPL here is only because somebody decided to slap it
+ * on the previous export. Sheer idiocy, since it wasn't copyrightable
+ * at all and could be open-coded without any exports by anybody who cares.
+ */
+EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl);
+
+/*
+ * Is it an unrecognized ioctl? The correct returns are either
+ * ENOTTY (final) or ENOIOCTLCMD ("I don't know this one, try a
+ * fallback"). ENOIOCTLCMD gets turned into ENOTTY by the ioctl
+ * code before returning.
+ *
+ * Confused drivers sometimes return EINVAL, which is wrong. It
+ * means "I understood the ioctl command, but the parameters to
+ * it were wrong".
+ *
+ * We should aim to just fix the broken drivers, the EINVAL case
+ * should go away.
+ */
+static inline int is_unrecognized_ioctl(int ret)
+{
+ return ret == -EINVAL ||
+ ret == -ENOTTY ||
+ ret == -ENOIOCTLCMD;
+}
+
+/*
+ * always keep this in sync with compat_blkdev_ioctl()
+ */
+int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
+ unsigned long arg)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ struct backing_dev_info *bdi;
+ loff_t size;
+ int ret, n;
+
+ switch(cmd) {
+ case BLKFLSBUF:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ if (!is_unrecognized_ioctl(ret))
+ return ret;
+
+ fsync_bdev(bdev);
+ invalidate_bdev(bdev);
+ return 0;
+
+ case BLKROSET:
+ ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ if (!is_unrecognized_ioctl(ret))
+ return ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (get_user(n, (int __user *)(arg)))
+ return -EFAULT;
+ set_device_ro(bdev, n);
+ return 0;
+
+ case BLKDISCARD:
+ case BLKSECDISCARD: {
+ uint64_t range[2];
+
+ if (!(mode & FMODE_WRITE))
+ return -EBADF;
+
+ if (copy_from_user(range, (void __user *)arg, sizeof(range)))
+ return -EFAULT;
+
+ return blk_ioctl_discard(bdev, range[0], range[1],
+ cmd == BLKSECDISCARD);
+ }
+
+ case HDIO_GETGEO: {
+ struct hd_geometry geo;
+
+ if (!arg)
+ return -EINVAL;
+ if (!disk->fops->getgeo)
+ return -ENOTTY;
+
+ /*
+ * We need to set the startsect first, the driver may
+ * want to override it.
+ */
+ memset(&geo, 0, sizeof(geo));
+ geo.start = get_start_sect(bdev);
+ ret = disk->fops->getgeo(bdev, &geo);
+ if (ret)
+ return ret;
+ if (copy_to_user((struct hd_geometry __user *)arg, &geo,
+ sizeof(geo)))
+ return -EFAULT;
+ return 0;
+ }
+ case BLKRAGET:
+ case BLKFRAGET:
+ if (!arg)
+ return -EINVAL;
+ bdi = blk_get_backing_dev_info(bdev);
+ if (bdi == NULL)
+ return -ENOTTY;
+ return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+ case BLKROGET:
+ return put_int(arg, bdev_read_only(bdev) != 0);
+ case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
+ return put_int(arg, block_size(bdev));
+ case BLKSSZGET: /* get block device logical block size */
+ return put_int(arg, bdev_logical_block_size(bdev));
+ case BLKPBSZGET: /* get block device physical block size */
+ return put_uint(arg, bdev_physical_block_size(bdev));
+ case BLKIOMIN:
+ return put_uint(arg, bdev_io_min(bdev));
+ case BLKIOOPT:
+ return put_uint(arg, bdev_io_opt(bdev));
+ case BLKALIGNOFF:
+ return put_int(arg, bdev_alignment_offset(bdev));
+ case BLKDISCARDZEROES:
+ return put_uint(arg, bdev_discard_zeroes_data(bdev));
+ case BLKSECTGET:
+ return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
+ case BLKROTATIONAL:
+ return put_ushort(arg, !blk_queue_nonrot(bdev_get_queue(bdev)));
+ case BLKRASET:
+ case BLKFRASET:
+ if(!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ bdi = blk_get_backing_dev_info(bdev);
+ if (bdi == NULL)
+ return -ENOTTY;
+ bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+ return 0;
+ case BLKBSZSET:
+ /* set the logical block size */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (!arg)
+ return -EINVAL;
+ if (get_user(n, (int __user *) arg))
+ return -EFAULT;
+ if (!(mode & FMODE_EXCL)) {
+ bdgrab(bdev);
+ if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
+ return -EBUSY;
+ }
+ ret = set_blocksize(bdev, n);
+ if (!(mode & FMODE_EXCL))
+ blkdev_put(bdev, mode | FMODE_EXCL);
+ return ret;
+ case BLKPG:
+ ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg);
+ break;
+ case BLKRRPART:
+ ret = blkdev_reread_part(bdev);
+ break;
+ case BLKGETSIZE:
+ size = i_size_read(bdev->bd_inode);
+ if ((size >> 9) > ~0UL)
+ return -EFBIG;
+ return put_ulong(arg, size >> 9);
+ case BLKGETSIZE64:
+ return put_u64(arg, i_size_read(bdev->bd_inode));
+ case BLKTRACESTART:
+ case BLKTRACESTOP:
+ case BLKTRACESETUP:
+ case BLKTRACETEARDOWN:
+ ret = blk_trace_ioctl(bdev, cmd, (char __user *) arg);
+ break;
+ default:
+ ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(blkdev_ioctl);
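
Illustrative sketch, not part of the patch: blkdev_ioctl() above is what services ioctl(2) on a block device node, so the size and block-size queries it handles can be exercised directly from userspace. The device path /dev/sda below is an example only.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* BLKGETSIZE64, BLKSSZGET, BLKPBSZGET */

int main(void)
{
	uint64_t bytes;
	int logical;
	unsigned int physical;
	int fd = open("/dev/sda", O_RDONLY);	/* example device only */

	if (fd < 0)
		return 1;
	if (ioctl(fd, BLKGETSIZE64, &bytes) ||
	    ioctl(fd, BLKSSZGET, &logical) ||
	    ioctl(fd, BLKPBSZGET, &physical)) {
		close(fd);
		return 1;
	}
	printf("%llu bytes, %d-byte logical, %u-byte physical blocks\n",
	       (unsigned long long)bytes, logical, physical);
	close(fd);
	return 0;
}
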
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
new file mode 100644
index 00000000..413a0b1d
--- /dev/null
+++ b/block/noop-iosched.c
@@ -0,0 +1,111 @@
+/*
+ * elevator noop
+ */
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+struct noop_data {
+ struct list_head queue;
+};
+
+static void noop_merged_requests(struct request_queue *q, struct request *rq,
+ struct request *next)
+{
+ list_del_init(&next->queuelist);
+}
+
+static int noop_dispatch(struct request_queue *q, int force)
+{
+ struct noop_data *nd = q->elevator->elevator_data;
+
+ if (!list_empty(&nd->queue)) {
+ struct request *rq;
+ rq = list_entry(nd->queue.next, struct request, queuelist);
+ list_del_init(&rq->queuelist);
+ elv_dispatch_sort(q, rq);
+ return 1;
+ }
+ return 0;
+}
+
+static void noop_add_request(struct request_queue *q, struct request *rq)
+{
+ struct noop_data *nd = q->elevator->elevator_data;
+
+ list_add_tail(&rq->queuelist, &nd->queue);
+}
+
+static struct request *
+noop_former_request(struct request_queue *q, struct request *rq)
+{
+ struct noop_data *nd = q->elevator->elevator_data;
+
+ if (rq->queuelist.prev == &nd->queue)
+ return NULL;
+ return list_entry(rq->queuelist.prev, struct request, queuelist);
+}
+
+static struct request *
+noop_latter_request(struct request_queue *q, struct request *rq)
+{
+ struct noop_data *nd = q->elevator->elevator_data;
+
+ if (rq->queuelist.next == &nd->queue)
+ return NULL;
+ return list_entry(rq->queuelist.next, struct request, queuelist);
+}
+
+static void *noop_init_queue(struct request_queue *q)
+{
+ struct noop_data *nd;
+
+ nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
+ if (!nd)
+ return NULL;
+ INIT_LIST_HEAD(&nd->queue);
+ return nd;
+}
+
+static void noop_exit_queue(struct elevator_queue *e)
+{
+ struct noop_data *nd = e->elevator_data;
+
+ BUG_ON(!list_empty(&nd->queue));
+ kfree(nd);
+}
+
+static struct elevator_type elevator_noop = {
+ .ops = {
+ .elevator_merge_req_fn = noop_merged_requests,
+ .elevator_dispatch_fn = noop_dispatch,
+ .elevator_add_req_fn = noop_add_request,
+ .elevator_former_req_fn = noop_former_request,
+ .elevator_latter_req_fn = noop_latter_request,
+ .elevator_init_fn = noop_init_queue,
+ .elevator_exit_fn = noop_exit_queue,
+ },
+ .elevator_name = "noop",
+ .elevator_owner = THIS_MODULE,
+};
+
+static int __init noop_init(void)
+{
+ return elv_register(&elevator_noop);
+}
+
+static void __exit noop_exit(void)
+{
+ elv_unregister(&elevator_noop);
+}
+
+module_init(noop_init);
+module_exit(noop_exit);
+
+
+MODULE_AUTHOR("Jens Axboe");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("No-op IO scheduler");
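
Illustrative sketch, not part of the patch: elv_register() above only makes "noop" available; the scheduler a queue actually uses is selected per device through sysfs. A minimal userspace sketch, assuming a hypothetical disk named sda and root privileges.

#include <stdio.h>

int main(void)
{
	/* Switch sda's queue to the elevator_name registered above. */
	FILE *f = fopen("/sys/block/sda/queue/scheduler", "w");

	if (!f)
		return 1;
	fputs("noop\n", f);
	return fclose(f) ? 1 : 0;
}
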
diff --git a/block/partition-generic.c b/block/partition-generic.c
new file mode 100644
index 00000000..803d1513
--- /dev/null
+++ b/block/partition-generic.c
@@ -0,0 +1,580 @@
+/*
+ * Code extracted from drivers/block/genhd.c
+ * Copyright (C) 1991-1998 Linus Torvalds
+ * Re-organised Feb 1998 Russell King
+ *
+ * We now have independent partition support from the
+ * block drivers, which allows all the partition code to
+ * be grouped in one location, and it to be mostly self
+ * contained.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/kmod.h>
+#include <linux/ctype.h>
+#include <linux/genhd.h>
+#include <linux/blktrace_api.h>
+
+#include "partitions/check.h"
+
+#ifdef CONFIG_BLK_DEV_MD
+extern void md_autodetect_dev(dev_t dev);
+#endif
+
+/*
+ * disk_name() is used by partition check code and the genhd driver.
+ * It formats the devicename of the indicated disk into
+ * the supplied buffer (of size at least 32), and returns
+ * a pointer to that same buffer (for convenience).
+ */
+
+char *disk_name(struct gendisk *hd, int partno, char *buf)
+{
+ if (!partno)
+ snprintf(buf, BDEVNAME_SIZE, "%s", hd->disk_name);
+ else if (isdigit(hd->disk_name[strlen(hd->disk_name)-1]))
+ snprintf(buf, BDEVNAME_SIZE, "%sp%d", hd->disk_name, partno);
+ else
+ snprintf(buf, BDEVNAME_SIZE, "%s%d", hd->disk_name, partno);
+
+ return buf;
+}
+
+const char *bdevname(struct block_device *bdev, char *buf)
+{
+ return disk_name(bdev->bd_disk, bdev->bd_part->partno, buf);
+}
+
+EXPORT_SYMBOL(bdevname);
+
+/*
+ * There's very little reason to use this, you should really
+ * have a struct block_device just about everywhere and use
+ * bdevname() instead.
+ */
+const char *__bdevname(dev_t dev, char *buffer)
+{
+ scnprintf(buffer, BDEVNAME_SIZE, "unknown-block(%u,%u)",
+ MAJOR(dev), MINOR(dev));
+ return buffer;
+}
+
+EXPORT_SYMBOL(__bdevname);
+
+static ssize_t part_partition_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hd_struct *p = dev_to_part(dev);
+
+ return sprintf(buf, "%d\n", p->partno);
+}
+
+static ssize_t part_start_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hd_struct *p = dev_to_part(dev);
+
+ return sprintf(buf, "%llu\n",(unsigned long long)p->start_sect);
+}
+
+ssize_t part_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hd_struct *p = dev_to_part(dev);
+ return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects);
+}
+
+static ssize_t part_ro_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hd_struct *p = dev_to_part(dev);
+ return sprintf(buf, "%d\n", p->policy ? 1 : 0);
+}
+
+static ssize_t part_alignment_offset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hd_struct *p = dev_to_part(dev);
+ return sprintf(buf, "%llu\n", (unsigned long long)p->alignment_offset);
+}
+
+static ssize_t part_discard_alignment_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hd_struct *p = dev_to_part(dev);
+ return sprintf(buf, "%u\n", p->discard_alignment);
+}
+
+ssize_t part_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hd_struct *p = dev_to_part(dev);
+ int cpu;
+
+ cpu = part_stat_lock();
+ part_round_stats(cpu, p);
+ part_stat_unlock();
+ return sprintf(buf,
+ "%8lu %8lu %8llu %8u "
+ "%8lu %8lu %8llu %8u "
+ "%8u %8u %8u"
+ "\n",
+ part_stat_read(p, ios[READ]),
+ part_stat_read(p, merges[READ]),
+ (unsigned long long)part_stat_read(p, sectors[READ]),
+ jiffies_to_msecs(part_stat_read(p, ticks[READ])),
+ part_stat_read(p, ios[WRITE]),
+ part_stat_read(p, merges[WRITE]),
+ (unsigned long long)part_stat_read(p, sectors[WRITE]),
+ jiffies_to_msecs(part_stat_read(p, ticks[WRITE])),
+ part_in_flight(p),
+ jiffies_to_msecs(part_stat_read(p, io_ticks)),
+ jiffies_to_msecs(part_stat_read(p, time_in_queue)));
+}
+
+ssize_t part_inflight_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hd_struct *p = dev_to_part(dev);
+
+ return sprintf(buf, "%8u %8u\n", atomic_read(&p->in_flight[0]),
+ atomic_read(&p->in_flight[1]));
+}
+
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+ssize_t part_fail_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hd_struct *p = dev_to_part(dev);
+
+ return sprintf(buf, "%d\n", p->make_it_fail);
+}
+
+ssize_t part_fail_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hd_struct *p = dev_to_part(dev);
+ int i;
+
+ if (count > 0 && sscanf(buf, "%d", &i) > 0)
+ p->make_it_fail = (i == 0) ? 0 : 1;
+
+ return count;
+}
+#endif
+
+static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL);
+static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL);
+static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
+static DEVICE_ATTR(ro, S_IRUGO, part_ro_show, NULL);
+static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL);
+static DEVICE_ATTR(discard_alignment, S_IRUGO, part_discard_alignment_show,
+ NULL);
+static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
+static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+static struct device_attribute dev_attr_fail =
+ __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
+#endif
+
+static struct attribute *part_attrs[] = {
+ &dev_attr_partition.attr,
+ &dev_attr_start.attr,
+ &dev_attr_size.attr,
+ &dev_attr_ro.attr,
+ &dev_attr_alignment_offset.attr,
+ &dev_attr_discard_alignment.attr,
+ &dev_attr_stat.attr,
+ &dev_attr_inflight.attr,
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+ &dev_attr_fail.attr,
+#endif
+ NULL
+};
+
+static struct attribute_group part_attr_group = {
+ .attrs = part_attrs,
+};
+
+static const struct attribute_group *part_attr_groups[] = {
+ &part_attr_group,
+#ifdef CONFIG_BLK_DEV_IO_TRACE
+ &blk_trace_attr_group,
+#endif
+ NULL
+};
+
+static void part_release(struct device *dev)
+{
+ struct hd_struct *p = dev_to_part(dev);
+ free_part_stats(p);
+ free_part_info(p);
+ kfree(p);
+}
+
+static int part_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct hd_struct *part = dev_to_part(dev);
+
+ add_uevent_var(env, "PARTN=%u", part->partno);
+ if (part->info && part->info->volname[0])
+ add_uevent_var(env, "PARTNAME=%s", part->info->volname);
+ return 0;
+}
+
+struct device_type part_type = {
+ .name = "partition",
+ .groups = part_attr_groups,
+ .release = part_release,
+ .uevent = part_uevent,
+};
+
+static void delete_partition_rcu_cb(struct rcu_head *head)
+{
+ struct hd_struct *part = container_of(head, struct hd_struct, rcu_head);
+
+ part->start_sect = 0;
+ part->nr_sects = 0;
+ part_stat_set_all(part, 0);
+ put_device(part_to_dev(part));
+}
+
+void __delete_partition(struct hd_struct *part)
+{
+ call_rcu(&part->rcu_head, delete_partition_rcu_cb);
+}
+
+void delete_partition(struct gendisk *disk, int partno)
+{
+ struct disk_part_tbl *ptbl = disk->part_tbl;
+ struct hd_struct *part;
+
+ if (partno >= ptbl->len)
+ return;
+
+ part = ptbl->part[partno];
+ if (!part)
+ return;
+
+ blk_free_devt(part_devt(part));
+ rcu_assign_pointer(ptbl->part[partno], NULL);
+ rcu_assign_pointer(ptbl->last_lookup, NULL);
+ kobject_put(part->holder_dir);
+ device_del(part_to_dev(part));
+
+ hd_struct_put(part);
+}
+
+static ssize_t whole_disk_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return 0;
+}
+static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH,
+ whole_disk_show, NULL);
+
+struct hd_struct *add_partition(struct gendisk *disk, int partno,
+ sector_t start, sector_t len, int flags,
+ struct partition_meta_info *info)
+{
+ struct hd_struct *p;
+ dev_t devt = MKDEV(0, 0);
+ struct device *ddev = disk_to_dev(disk);
+ struct device *pdev;
+ struct disk_part_tbl *ptbl;
+ const char *dname;
+ int err;
+
+ err = disk_expand_part_tbl(disk, partno);
+ if (err)
+ return ERR_PTR(err);
+ ptbl = disk->part_tbl;
+
+ if (ptbl->part[partno])
+ return ERR_PTR(-EBUSY);
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ if (!init_part_stats(p)) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+ pdev = part_to_dev(p);
+
+ p->start_sect = start;
+ p->alignment_offset =
+ queue_limit_alignment_offset(&disk->queue->limits, start);
+ p->discard_alignment =
+ queue_limit_discard_alignment(&disk->queue->limits, start);
+ p->nr_sects = len;
+ p->partno = partno;
+ p->policy = get_disk_ro(disk);
+
+ if (info) {
+ struct partition_meta_info *pinfo = alloc_part_info(disk);
+ if (!pinfo) {
+ err = -ENOMEM;
+ goto out_free_stats;
+ }
+ memcpy(pinfo, info, sizeof(*info));
+ p->info = pinfo;
+ }
+
+ dname = dev_name(ddev);
+ if (isdigit(dname[strlen(dname) - 1]))
+ dev_set_name(pdev, "%sp%d", dname, partno);
+ else
+ dev_set_name(pdev, "%s%d", dname, partno);
+
+ device_initialize(pdev);
+ pdev->class = &block_class;
+ pdev->type = &part_type;
+ pdev->parent = ddev;
+
+ err = blk_alloc_devt(p, &devt);
+ if (err)
+ goto out_free_info;
+ pdev->devt = devt;
+
+ /* delay uevent until 'holders' subdir is created */
+ dev_set_uevent_suppress(pdev, 1);
+ err = device_add(pdev);
+ if (err)
+ goto out_put;
+
+ err = -ENOMEM;
+ p->holder_dir = kobject_create_and_add("holders", &pdev->kobj);
+ if (!p->holder_dir)
+ goto out_del;
+
+ dev_set_uevent_suppress(pdev, 0);
+ if (flags & ADDPART_FLAG_WHOLEDISK) {
+ err = device_create_file(pdev, &dev_attr_whole_disk);
+ if (err)
+ goto out_del;
+ }
+
+ /* everything is up and running, commence */
+ rcu_assign_pointer(ptbl->part[partno], p);
+
+ /* suppress uevent if the disk suppresses it */
+ if (!dev_get_uevent_suppress(ddev))
+ kobject_uevent(&pdev->kobj, KOBJ_ADD);
+
+ hd_ref_init(p);
+ return p;
+
+out_free_info:
+ free_part_info(p);
+out_free_stats:
+ free_part_stats(p);
+out_free:
+ kfree(p);
+ return ERR_PTR(err);
+out_del:
+ kobject_put(p->holder_dir);
+ device_del(pdev);
+out_put:
+ put_device(pdev);
+ blk_free_devt(devt);
+ return ERR_PTR(err);
+}
+
+static bool disk_unlock_native_capacity(struct gendisk *disk)
+{
+ const struct block_device_operations *bdops = disk->fops;
+
+ if (bdops->unlock_native_capacity &&
+ !(disk->flags & GENHD_FL_NATIVE_CAPACITY)) {
+ printk(KERN_CONT "enabling native capacity\n");
+ bdops->unlock_native_capacity(disk);
+ disk->flags |= GENHD_FL_NATIVE_CAPACITY;
+ return true;
+ } else {
+ printk(KERN_CONT "truncated\n");
+ return false;
+ }
+}
+
+static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
+{
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+ int res;
+
+ if (bdev->bd_part_count)
+ return -EBUSY;
+ res = invalidate_partition(disk, 0);
+ if (res)
+ return res;
+
+ disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
+ while ((part = disk_part_iter_next(&piter)))
+ delete_partition(disk, part->partno);
+ disk_part_iter_exit(&piter);
+
+ return 0;
+}
+
+int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
+{
+ struct parsed_partitions *state = NULL;
+ struct hd_struct *part;
+ int p, highest, res;
+rescan:
+ if (state && !IS_ERR(state)) {
+ kfree(state);
+ state = NULL;
+ }
+
+ res = drop_partitions(disk, bdev);
+ if (res)
+ return res;
+
+ if (disk->fops->revalidate_disk)
+ disk->fops->revalidate_disk(disk);
+ check_disk_size_change(disk, bdev);
+ bdev->bd_invalidated = 0;
+ if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
+ return 0;
+ if (IS_ERR(state)) {
+ /*
+ * I/O error reading the partition table. If any
+ * partition code tried to read beyond EOD, retry
+ * after unlocking native capacity.
+ */
+ if (PTR_ERR(state) == -ENOSPC) {
+ printk(KERN_WARNING "%s: partition table beyond EOD, ",
+ disk->disk_name);
+ if (disk_unlock_native_capacity(disk))
+ goto rescan;
+ }
+ return -EIO;
+ }
+ /*
+ * If any partition code tried to read beyond EOD, try
+ * unlocking native capacity even if partition table is
+ * successfully read as we could be missing some partitions.
+ */
+ if (state->access_beyond_eod) {
+ printk(KERN_WARNING
+ "%s: partition table partially beyond EOD, ",
+ disk->disk_name);
+ if (disk_unlock_native_capacity(disk))
+ goto rescan;
+ }
+
+ /* tell userspace that the media / partition table may have changed */
+ kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+
+ /* Detect the highest partition number and preallocate
+ * disk->part_tbl. This is an optimization and not strictly
+ * necessary.
+ */
+ for (p = 1, highest = 0; p < state->limit; p++)
+ if (state->parts[p].size)
+ highest = p;
+
+ disk_expand_part_tbl(disk, highest);
+
+ /* add partitions */
+ for (p = 1; p < state->limit; p++) {
+ sector_t size, from;
+ struct partition_meta_info *info = NULL;
+
+ size = state->parts[p].size;
+ if (!size)
+ continue;
+
+ from = state->parts[p].from;
+ if (from >= get_capacity(disk)) {
+ printk(KERN_WARNING
+ "%s: p%d start %llu is beyond EOD, ",
+ disk->disk_name, p, (unsigned long long) from);
+ if (disk_unlock_native_capacity(disk))
+ goto rescan;
+ continue;
+ }
+
+ if (from + size > get_capacity(disk)) {
+ printk(KERN_WARNING
+ "%s: p%d size %llu extends beyond EOD, ",
+ disk->disk_name, p, (unsigned long long) size);
+
+ if (disk_unlock_native_capacity(disk)) {
+ /* free state and restart */
+ goto rescan;
+ } else {
+ /*
+ * we cannot ignore partitions of broken tables
+ * created by, for example, camera firmware, but
+ * we limit them to the end of the disk to avoid
+ * creating invalid block devices
+ */
+ size = get_capacity(disk) - from;
+ }
+ }
+
+ if (state->parts[p].has_info)
+ info = &state->parts[p].info;
+ part = add_partition(disk, p, from, size,
+ state->parts[p].flags, info);
+ if (IS_ERR(part)) {
+ printk(KERN_ERR " %s: p%d could not be added: %ld\n",
+ disk->disk_name, p, -PTR_ERR(part));
+ continue;
+ }
+#ifdef CONFIG_BLK_DEV_MD
+ if (state->parts[p].flags & ADDPART_FLAG_RAID)
+ md_autodetect_dev(part_to_dev(part)->devt);
+#endif
+ }
+ kfree(state);
+ return 0;
+}
+
+int invalidate_partitions(struct gendisk *disk, struct block_device *bdev)
+{
+ int res;
+
+ if (!bdev->bd_invalidated)
+ return 0;
+
+ res = drop_partitions(disk, bdev);
+ if (res)
+ return res;
+
+ set_capacity(disk, 0);
+ check_disk_size_change(disk, bdev);
+ bdev->bd_invalidated = 0;
+ /* tell userspace that the media / partition table may have changed */
+ kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+
+ return 0;
+}
+
+unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
+{
+ struct address_space *mapping = bdev->bd_inode->i_mapping;
+ struct page *page;
+
+ page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
+ NULL);
+ if (!IS_ERR(page)) {
+ if (PageError(page))
+ goto fail;
+ p->v = page;
+ return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_CACHE_SHIFT - 9)) - 1)) << 9);
+fail:
+ page_cache_release(page);
+ }
+ p->v = NULL;
+ return NULL;
+}
+
+EXPORT_SYMBOL(read_dev_sector);
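
Illustrative sketch, not part of the patch: read_dev_sector() hands back a pointer into the page cache and keeps the page pinned through the Sector cookie until put_dev_sector() is called; the parsers under block/partitions/ use it via read_part_sector(). A hedged in-kernel sketch of the calling convention; the function name is invented, and the 0x55/0xaa check is the standard MBR signature at offset 510.

static bool example_has_mbr_signature(struct block_device *bdev)
{
	Sector sect;
	unsigned char *data;
	bool ret;

	/* Map 512-byte sector 0; NULL means an I/O error. */
	data = read_dev_sector(bdev, 0, &sect);
	if (!data)
		return false;

	ret = data[510] == 0x55 && data[511] == 0xaa;
	put_dev_sector(sect);		/* drop the pinned page */
	return ret;
}
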
diff --git a/block/partitions/Kconfig b/block/partitions/Kconfig
new file mode 100644
index 00000000..cb5f0a3f
--- /dev/null
+++ b/block/partitions/Kconfig
@@ -0,0 +1,251 @@
+#
+# Partition configuration
+#
+config PARTITION_ADVANCED
+ bool "Advanced partition selection"
+ help
+ Say Y here if you would like to use hard disks under Linux which
+ were partitioned under an operating system running on a different
+ architecture than your Linux system.
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about foreign partitioning schemes.
+
+ If unsure, say N.
+
+config ACORN_PARTITION
+ bool "Acorn partition support" if PARTITION_ADVANCED
+ default y if ARCH_ACORN
+ help
+ Support hard disks partitioned under Acorn operating systems.
+
+config ACORN_PARTITION_CUMANA
+ bool "Cumana partition support" if PARTITION_ADVANCED
+ default y if ARCH_ACORN
+ depends on ACORN_PARTITION
+ help
+ Say Y here if you would like to use hard disks under Linux which
+ were partitioned using the Cumana interface on Acorn machines.
+
+config ACORN_PARTITION_EESOX
+ bool "EESOX partition support" if PARTITION_ADVANCED
+ default y if ARCH_ACORN
+ depends on ACORN_PARTITION
+
+config ACORN_PARTITION_ICS
+ bool "ICS partition support" if PARTITION_ADVANCED
+ default y if ARCH_ACORN
+ depends on ACORN_PARTITION
+ help
+ Say Y here if you would like to use hard disks under Linux which
+ were partitioned using the ICS interface on Acorn machines.
+
+config ACORN_PARTITION_ADFS
+ bool "Native filecore partition support" if PARTITION_ADVANCED
+ default y if ARCH_ACORN
+ depends on ACORN_PARTITION
+ help
+ The Acorn Disc Filing System is the standard file system of the
+ RiscOS operating system which runs on Acorn's ARM-based Risc PC
+ systems and the Acorn Archimedes range of machines. If you say
+ `Y' here, Linux will support disk partitions created under ADFS.
+
+config ACORN_PARTITION_POWERTEC
+ bool "PowerTec partition support" if PARTITION_ADVANCED
+ default y if ARCH_ACORN
+ depends on ACORN_PARTITION
+ help
+ Support reading partition tables created on Acorn machines using
+ the PowerTec SCSI drive.
+
+config ACORN_PARTITION_RISCIX
+ bool "RISCiX partition support" if PARTITION_ADVANCED
+ default y if ARCH_ACORN
+ depends on ACORN_PARTITION
+ help
+ Once upon a time, there was a native Unix port for the Acorn series
+ of machines called RISCiX. If you say 'Y' here, Linux will be able
+ to read disks partitioned under RISCiX.
+
+config OSF_PARTITION
+ bool "Alpha OSF partition support" if PARTITION_ADVANCED
+ default y if ALPHA
+ help
+ Say Y here if you would like to use hard disks under Linux which
+ were partitioned on an Alpha machine.
+
+config AMIGA_PARTITION
+ bool "Amiga partition table support" if PARTITION_ADVANCED
+ default y if (AMIGA || AFFS_FS=y)
+ help
+ Say Y here if you would like to use hard disks under Linux which
+ were partitioned under AmigaOS.
+
+config ATARI_PARTITION
+ bool "Atari partition table support" if PARTITION_ADVANCED
+ default y if ATARI
+ help
+ Say Y here if you would like to use hard disks under Linux which
+ were partitioned under the Atari OS.
+
+config IBM_PARTITION
+ bool "IBM disk label and partition support"
+ depends on PARTITION_ADVANCED && S390
+ help
+ Say Y here if you would like to be able to read the hard disk
+ partition table format used by IBM DASD disks operating under CMS.
+ Otherwise, say N.
+
+config MAC_PARTITION
+ bool "Macintosh partition map support" if PARTITION_ADVANCED
+ default y if (MAC || PPC_PMAC)
+ help
+ Say Y here if you would like to use hard disks under Linux which
+ were partitioned on a Macintosh.
+
+config MSDOS_PARTITION
+ bool "PC BIOS (MSDOS partition tables) support" if PARTITION_ADVANCED
+ default y
+ help
+ Say Y here.
+
+config BSD_DISKLABEL
+ bool "BSD disklabel (FreeBSD partition tables) support"
+ depends on PARTITION_ADVANCED && MSDOS_PARTITION
+ help
+ FreeBSD uses its own hard disk partition scheme on your PC. It
+ requires only one entry in the primary partition table of your disk
+ and manages it similarly to DOS extended partitions, putting in its
+ first sector a new partition table in BSD disklabel format. Saying Y
+ here allows you to read these disklabels and further mount FreeBSD
+ partitions from within Linux if you have also said Y to "UFS
+ file system support", above. If you don't know what all this is
+ about, say N.
+
+config MINIX_SUBPARTITION
+ bool "Minix subpartition support"
+ depends on PARTITION_ADVANCED && MSDOS_PARTITION
+ help
+ Minix 2.0.0/2.0.2 subpartition table support for Linux.
+ Say Y here if you want to mount and use Minix 2.0.0/2.0.2
+ subpartitions.
+
+config SOLARIS_X86_PARTITION
+ bool "Solaris (x86) partition table support"
+ depends on PARTITION_ADVANCED && MSDOS_PARTITION
+ help
+ Like most systems, Solaris x86 uses its own hard disk partition
+ table format, incompatible with all others. Saying Y here allows you
+ to read these partition tables and further mount Solaris x86
+ partitions from within Linux if you have also said Y to "UFS
+ file system support", above.
+
+config UNIXWARE_DISKLABEL
+ bool "Unixware slices support"
+ depends on PARTITION_ADVANCED && MSDOS_PARTITION
+ ---help---
+ Like some systems, UnixWare uses its own slice table inside a
+ partition (VTOC - Virtual Table of Contents). Its format is
+ incompatible with all other OSes. Saying Y here allows you to read
+ VTOC and further mount UnixWare partitions read-only from within
+ Linux if you have also said Y to "UFS file system support" or
+ "System V and Coherent file system support", above.
+
+ This is mainly used to carry data from a UnixWare box to your
+ Linux box via a removable medium like magneto-optical, ZIP or
+ removable IDE drives. Note, however, that a good portable way to
+ transport files and directories between unixes (and even other
+ operating systems) is given by the tar program ("man tar" or
+ preferably "info tar").
+
+ If you don't know what all this is about, say N.
+
+config LDM_PARTITION
+ bool "Windows Logical Disk Manager (Dynamic Disk) support"
+ depends on PARTITION_ADVANCED
+ ---help---
+ Say Y here if you would like to use hard disks under Linux which
+ were partitioned using Windows 2000's/XP's or Vista's Logical Disk
+ Manager. They are also known as "Dynamic Disks".
+
+ Note this driver only supports Dynamic Disks with a protective MBR
+ label, i.e. a DOS partition table. It does not yet support GPT-labelled
+ Dynamic Disks, such as those created by Vista.
+
+ Windows 2000 introduced the concept of Dynamic Disks to get around
+ the limitations of the PC's partitioning scheme. The Logical Disk
+ Manager allows the user to repartition a disk and create spanned,
+ mirrored, striped or RAID volumes, all without the need for
+ rebooting.
+
+ Normal partitions are now called Basic Disks under Windows 2000, XP,
+ and Vista.
+
+ For a fuller description read <file:Documentation/ldm.txt>.
+
+ If unsure, say N.
+
+config LDM_DEBUG
+ bool "Windows LDM extra logging"
+ depends on LDM_PARTITION
+ help
+ Say Y here if you would like LDM to log verbosely. This could be
+ helpful if the driver doesn't work as expected and you'd like to
+ report a bug.
+
+ If unsure, say N.
+
+config SGI_PARTITION
+ bool "SGI partition support" if PARTITION_ADVANCED
+ default y if DEFAULT_SGI_PARTITION
+ help
+ Say Y here if you would like to be able to read the hard disk
+ partition table format used by SGI machines.
+
+config ULTRIX_PARTITION
+ bool "Ultrix partition table support" if PARTITION_ADVANCED
+ default y if MACH_DECSTATION
+ help
+ Say Y here if you would like to be able to read the hard disk
+ partition table format used by DEC (now Compaq) Ultrix machines.
+ Otherwise, say N.
+
+config SUN_PARTITION
+ bool "Sun partition tables support" if PARTITION_ADVANCED
+ default y if (SPARC || SUN3 || SUN3X)
+ ---help---
+ Like most systems, SunOS uses its own hard disk partition table
+ format, incompatible with all others. Saying Y here allows you to
+ read these partition tables and further mount SunOS partitions from
+ within Linux if you have also said Y to "UFS file system support",
+ above. This is mainly used to carry data from a SPARC under SunOS to
+ your Linux box via a removable medium like magneto-optical or ZIP
+ drives; note however that a good portable way to transport files and
+ directories between unixes (and even other operating systems) is
+ given by the tar program ("man tar" or preferably "info tar"). If
+ you don't know what all this is about, say N.
+
+config KARMA_PARTITION
+ bool "Karma Partition support"
+ depends on PARTITION_ADVANCED
+ help
+ Say Y here if you would like to mount the Rio Karma MP3 player, as it
+ uses a proprietary partition table.
+
+config EFI_PARTITION
+ bool "EFI GUID Partition support"
+ depends on PARTITION_ADVANCED
+ select CRC32
+ help
+ Say Y here if you would like to use hard disks under Linux which
+ were partitioned using EFI GPT.
+
+config SYSV68_PARTITION
+ bool "SYSV68 partition table support" if PARTITION_ADVANCED
+ default y if VME
+ help
+ Say Y here if you would like to be able to read the hard disk
+ partition table format used by Motorola Delta machines (using
+ sysv68).
+ Otherwise, say N.
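
Illustrative note, not part of the patch: the options above are purely build-time selection. PC-style kernels get MSDOS_PARTITION by default, while foreign formats sit behind PARTITION_ADVANCED. A typical .config fragment that also enables GPT parsing might look like this (example only, not from this tree):

CONFIG_PARTITION_ADVANCED=y
CONFIG_MSDOS_PARTITION=y
CONFIG_EFI_PARTITION=y
# CONFIG_LDM_PARTITION is not set
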
diff --git a/block/partitions/Makefile b/block/partitions/Makefile
new file mode 100644
index 00000000..03af8eac
--- /dev/null
+++ b/block/partitions/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the linux kernel.
+#
+
+obj-$(CONFIG_BLOCK) := check.o
+
+obj-$(CONFIG_ACORN_PARTITION) += acorn.o
+obj-$(CONFIG_AMIGA_PARTITION) += amiga.o
+obj-$(CONFIG_ATARI_PARTITION) += atari.o
+obj-$(CONFIG_MAC_PARTITION) += mac.o
+obj-$(CONFIG_LDM_PARTITION) += ldm.o
+obj-$(CONFIG_MSDOS_PARTITION) += msdos.o
+obj-$(CONFIG_OSF_PARTITION) += osf.o
+obj-$(CONFIG_SGI_PARTITION) += sgi.o
+obj-$(CONFIG_SUN_PARTITION) += sun.o
+obj-$(CONFIG_ULTRIX_PARTITION) += ultrix.o
+obj-$(CONFIG_IBM_PARTITION) += ibm.o
+obj-$(CONFIG_EFI_PARTITION) += efi.o
+obj-$(CONFIG_KARMA_PARTITION) += karma.o
+obj-$(CONFIG_SYSV68_PARTITION) += sysv68.o
diff --git a/block/partitions/acorn.c b/block/partitions/acorn.c
new file mode 100644
index 00000000..fbeb6973
--- /dev/null
+++ b/block/partitions/acorn.c
@@ -0,0 +1,556 @@
+/*
+ * linux/fs/partitions/acorn.c
+ *
+ * Copyright (c) 1996-2000 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Scan ADFS partitions on hard disk drives. Unfortunately, there
+ * isn't a standard for partitioning drives on Acorn machines, so
+ * every single manufacturer of SCSI and IDE cards created their own
+ * method.
+ */
+#include <linux/buffer_head.h>
+#include <linux/adfs_fs.h>
+
+#include "check.h"
+#include "acorn.h"
+
+/*
+ * Partition types. (Oh for reusability)
+ */
+#define PARTITION_RISCIX_MFM 1
+#define PARTITION_RISCIX_SCSI 2
+#define PARTITION_LINUX 9
+
+#if defined(CONFIG_ACORN_PARTITION_CUMANA) || \
+ defined(CONFIG_ACORN_PARTITION_ADFS)
+static struct adfs_discrecord *
+adfs_partition(struct parsed_partitions *state, char *name, char *data,
+ unsigned long first_sector, int slot)
+{
+ struct adfs_discrecord *dr;
+ unsigned int nr_sects;
+
+ if (adfs_checkbblk(data))
+ return NULL;
+
+ dr = (struct adfs_discrecord *)(data + 0x1c0);
+
+ if (dr->disc_size == 0 && dr->disc_size_high == 0)
+ return NULL;
+
+ nr_sects = (le32_to_cpu(dr->disc_size_high) << 23) |
+ (le32_to_cpu(dr->disc_size) >> 9);
+
+ if (name) {
+ strlcat(state->pp_buf, " [", PAGE_SIZE);
+ strlcat(state->pp_buf, name, PAGE_SIZE);
+ strlcat(state->pp_buf, "]", PAGE_SIZE);
+ }
+ put_partition(state, slot, first_sector, nr_sects);
+ return dr;
+}
+#endif
+
+#ifdef CONFIG_ACORN_PARTITION_RISCIX
+
+struct riscix_part {
+ __le32 start;
+ __le32 length;
+ __le32 one;
+ char name[16];
+};
+
+struct riscix_record {
+ __le32 magic;
+#define RISCIX_MAGIC cpu_to_le32(0x4a657320)
+ __le32 date;
+ struct riscix_part part[8];
+};
+
+#if defined(CONFIG_ACORN_PARTITION_CUMANA) || \
+ defined(CONFIG_ACORN_PARTITION_ADFS)
+static int riscix_partition(struct parsed_partitions *state,
+ unsigned long first_sect, int slot,
+ unsigned long nr_sects)
+{
+ Sector sect;
+ struct riscix_record *rr;
+
+ rr = read_part_sector(state, first_sect, &sect);
+ if (!rr)
+ return -1;
+
+ strlcat(state->pp_buf, " [RISCiX]", PAGE_SIZE);
+
+
+ if (rr->magic == RISCIX_MAGIC) {
+ unsigned long size = nr_sects > 2 ? 2 : nr_sects;
+ int part;
+
+ strlcat(state->pp_buf, " <", PAGE_SIZE);
+
+ put_partition(state, slot++, first_sect, size);
+ for (part = 0; part < 8; part++) {
+ if (rr->part[part].one &&
+ memcmp(rr->part[part].name, "All\0", 4)) {
+ put_partition(state, slot++,
+ le32_to_cpu(rr->part[part].start),
+ le32_to_cpu(rr->part[part].length));
+ strlcat(state->pp_buf, "(", PAGE_SIZE);
+ strlcat(state->pp_buf, rr->part[part].name, PAGE_SIZE);
+ strlcat(state->pp_buf, ")", PAGE_SIZE);
+ }
+ }
+
+ strlcat(state->pp_buf, " >\n", PAGE_SIZE);
+ } else {
+ put_partition(state, slot++, first_sect, nr_sects);
+ }
+
+ put_dev_sector(sect);
+ return slot;
+}
+#endif
+#endif
+
+#define LINUX_NATIVE_MAGIC 0xdeafa1de
+#define LINUX_SWAP_MAGIC 0xdeafab1e
+
+struct linux_part {
+ __le32 magic;
+ __le32 start_sect;
+ __le32 nr_sects;
+};
+
+#if defined(CONFIG_ACORN_PARTITION_CUMANA) || \
+ defined(CONFIG_ACORN_PARTITION_ADFS)
+static int linux_partition(struct parsed_partitions *state,
+ unsigned long first_sect, int slot,
+ unsigned long nr_sects)
+{
+ Sector sect;
+ struct linux_part *linuxp;
+ unsigned long size = nr_sects > 2 ? 2 : nr_sects;
+
+ strlcat(state->pp_buf, " [Linux]", PAGE_SIZE);
+
+ put_partition(state, slot++, first_sect, size);
+
+ linuxp = read_part_sector(state, first_sect, &sect);
+ if (!linuxp)
+ return -1;
+
+ strlcat(state->pp_buf, " <", PAGE_SIZE);
+ while (linuxp->magic == cpu_to_le32(LINUX_NATIVE_MAGIC) ||
+ linuxp->magic == cpu_to_le32(LINUX_SWAP_MAGIC)) {
+ if (slot == state->limit)
+ break;
+ put_partition(state, slot++, first_sect +
+ le32_to_cpu(linuxp->start_sect),
+ le32_to_cpu(linuxp->nr_sects));
+ linuxp++;
+ }
+ strlcat(state->pp_buf, " >", PAGE_SIZE);
+
+ put_dev_sector(sect);
+ return slot;
+}
+#endif
+
+#ifdef CONFIG_ACORN_PARTITION_CUMANA
+int adfspart_check_CUMANA(struct parsed_partitions *state)
+{
+ unsigned long first_sector = 0;
+ unsigned int start_blk = 0;
+ Sector sect;
+ unsigned char *data;
+ char *name = "CUMANA/ADFS";
+ int first = 1;
+ int slot = 1;
+
+ /*
+ * Try Cumana style partitions - sector 6 contains ADFS boot block
+ * with pointer to next 'drive'.
+ *
+ * There are unknowns in this code - is the 'cylinder number' of the
+ * next partition relative to the start of this one - I'm assuming
+ * it is.
+ *
+ * Also, which ID did Cumana use?
+ *
+ * This is totally unfinished, and will require more work to get it
+ * going. Hence it is totally untested.
+ */
+ do {
+ struct adfs_discrecord *dr;
+ unsigned int nr_sects;
+
+ data = read_part_sector(state, start_blk * 2 + 6, &sect);
+ if (!data)
+ return -1;
+
+ if (slot == state->limit)
+ break;
+
+ dr = adfs_partition(state, name, data, first_sector, slot++);
+ if (!dr)
+ break;
+
+ name = NULL;
+
+ nr_sects = (data[0x1fd] + (data[0x1fe] << 8)) *
+ (dr->heads + (dr->lowsector & 0x40 ? 1 : 0)) *
+ dr->secspertrack;
+
+ if (!nr_sects)
+ break;
+
+ first = 0;
+ first_sector += nr_sects;
+ start_blk += nr_sects >> (BLOCK_SIZE_BITS - 9);
+ nr_sects = 0; /* hmm - should be partition size */
+
+ switch (data[0x1fc] & 15) {
+ case 0: /* No partition / ADFS? */
+ break;
+
+#ifdef CONFIG_ACORN_PARTITION_RISCIX
+ case PARTITION_RISCIX_SCSI:
+ /* RISCiX - we don't know how to find the next one. */
+ slot = riscix_partition(state, first_sector, slot,
+ nr_sects);
+ break;
+#endif
+
+ case PARTITION_LINUX:
+ slot = linux_partition(state, first_sector, slot,
+ nr_sects);
+ break;
+ }
+ put_dev_sector(sect);
+ if (slot == -1)
+ return -1;
+ } while (1);
+ put_dev_sector(sect);
+ return first ? 0 : 1;
+}
+#endif
+
+#ifdef CONFIG_ACORN_PARTITION_ADFS
+/*
+ * Purpose: allocate ADFS partitions.
+ *
+ * Params : hd - pointer to gendisk structure to store partition info.
+ * dev - device number to access.
+ *
+ * Returns: -1 on error, 0 for no ADFS boot sector, 1 for ok.
+ *
+ * Alloc : hda = whole drive
+ * hda1 = ADFS partition on first drive.
+ * hda2 = non-ADFS partition.
+ */
+int adfspart_check_ADFS(struct parsed_partitions *state)
+{
+ unsigned long start_sect, nr_sects, sectscyl, heads;
+ Sector sect;
+ unsigned char *data;
+ struct adfs_discrecord *dr;
+ unsigned char id;
+ int slot = 1;
+
+ data = read_part_sector(state, 6, &sect);
+ if (!data)
+ return -1;
+
+ dr = adfs_partition(state, "ADFS", data, 0, slot++);
+ if (!dr) {
+ put_dev_sector(sect);
+ return 0;
+ }
+
+ heads = dr->heads + ((dr->lowsector >> 6) & 1);
+ sectscyl = dr->secspertrack * heads;
+ start_sect = ((data[0x1fe] << 8) + data[0x1fd]) * sectscyl;
+ id = data[0x1fc] & 15;
+ put_dev_sector(sect);
+
+ /*
+ * Work out start of non-adfs partition.
+ */
+ nr_sects = (state->bdev->bd_inode->i_size >> 9) - start_sect;
+
+ if (start_sect) {
+ switch (id) {
+#ifdef CONFIG_ACORN_PARTITION_RISCIX
+ case PARTITION_RISCIX_SCSI:
+ case PARTITION_RISCIX_MFM:
+ slot = riscix_partition(state, start_sect, slot,
+ nr_sects);
+ break;
+#endif
+
+ case PARTITION_LINUX:
+ slot = linux_partition(state, start_sect, slot,
+ nr_sects);
+ break;
+ }
+ }
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+ return 1;
+}
+#endif
+
+#ifdef CONFIG_ACORN_PARTITION_ICS
+
+struct ics_part {
+ __le32 start;
+ __le32 size;
+};
+
+static int adfspart_check_ICSLinux(struct parsed_partitions *state,
+ unsigned long block)
+{
+ Sector sect;
+ unsigned char *data = read_part_sector(state, block, &sect);
+ int result = 0;
+
+ if (data) {
+ if (memcmp(data, "LinuxPart", 9) == 0)
+ result = 1;
+ put_dev_sector(sect);
+ }
+
+ return result;
+}
+
+/*
+ * Check for a valid ICS partition using the checksum.
+ */
+static inline int valid_ics_sector(const unsigned char *data)
+{
+ unsigned long sum;
+ int i;
+
+ for (i = 0, sum = 0x50617274; i < 508; i++)
+ sum += data[i];
+
+ sum -= le32_to_cpu(*(__le32 *)(&data[508]));
+
+ return sum == 0;
+}
+
+/*
+ * Purpose: allocate ICS partitions.
+ * Params : hd - pointer to gendisk structure to store partition info.
+ * dev - device number to access.
+ * Returns: -1 on error, 0 for no ICS table, 1 for partitions ok.
+ * Alloc : hda = whole drive
+ * hda1 = ADFS partition 0 on first drive.
+ * hda2 = ADFS partition 1 on first drive.
+ * ..etc..
+ */
+int adfspart_check_ICS(struct parsed_partitions *state)
+{
+ const unsigned char *data;
+ const struct ics_part *p;
+ int slot;
+ Sector sect;
+
+ /*
+ * Try ICS style partitions - sector 0 contains partition info.
+ */
+ data = read_part_sector(state, 0, &sect);
+ if (!data)
+ return -1;
+
+ if (!valid_ics_sector(data)) {
+ put_dev_sector(sect);
+ return 0;
+ }
+
+ strlcat(state->pp_buf, " [ICS]", PAGE_SIZE);
+
+ for (slot = 1, p = (const struct ics_part *)data; p->size; p++) {
+ u32 start = le32_to_cpu(p->start);
+ s32 size = le32_to_cpu(p->size); /* yes, it's signed. */
+
+ if (slot == state->limit)
+ break;
+
+ /*
+ * Negative sizes tell the RISC OS ICS driver to ignore
+ * this partition - in effect it says that this does not
+ * contain an ADFS filesystem.
+ */
+ if (size < 0) {
+ size = -size;
+
+ /*
+ * Our own extension - We use the first sector
+ * of the partition to identify what type this
+ * partition is. We must not make this visible
+ * to the filesystem.
+ */
+ if (size > 1 && adfspart_check_ICSLinux(state, start)) {
+ start += 1;
+ size -= 1;
+ }
+ }
+
+ if (size)
+ put_partition(state, slot++, start, size);
+ }
+
+ put_dev_sector(sect);
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+ return 1;
+}
+#endif
+
+#ifdef CONFIG_ACORN_PARTITION_POWERTEC
+struct ptec_part {
+ __le32 unused1;
+ __le32 unused2;
+ __le32 start;
+ __le32 size;
+ __le32 unused5;
+ char type[8];
+};
+
+static inline int valid_ptec_sector(const unsigned char *data)
+{
+ unsigned char checksum = 0x2a;
+ int i;
+
+ /*
+ * If it looks like a PC/BIOS partition, then it
+ * probably isn't PowerTec.
+ */
+ if (data[510] == 0x55 && data[511] == 0xaa)
+ return 0;
+
+ for (i = 0; i < 511; i++)
+ checksum += data[i];
+
+ return checksum == data[511];
+}
+
+/*
+ * Purpose: allocate PowerTec partitions.
+ * Params : hd - pointer to gendisk structure to store partition info.
+ * dev - device number to access.
+ * Returns: -1 on error, 0 for no PowerTec table, 1 for partitions ok.
+ * Alloc : hda = whole drive
+ * hda1 = ADFS partition 0 on first drive.
+ * hda2 = ADFS partition 1 on first drive.
+ * ..etc..
+ */
+int adfspart_check_POWERTEC(struct parsed_partitions *state)
+{
+ Sector sect;
+ const unsigned char *data;
+ const struct ptec_part *p;
+ int slot = 1;
+ int i;
+
+ data = read_part_sector(state, 0, &sect);
+ if (!data)
+ return -1;
+
+ if (!valid_ptec_sector(data)) {
+ put_dev_sector(sect);
+ return 0;
+ }
+
+ strlcat(state->pp_buf, " [POWERTEC]", PAGE_SIZE);
+
+ for (i = 0, p = (const struct ptec_part *)data; i < 12; i++, p++) {
+ u32 start = le32_to_cpu(p->start);
+ u32 size = le32_to_cpu(p->size);
+
+ if (size)
+ put_partition(state, slot++, start, size);
+ }
+
+ put_dev_sector(sect);
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+ return 1;
+}
+#endif
+
+#ifdef CONFIG_ACORN_PARTITION_EESOX
+struct eesox_part {
+ char magic[6];
+ char name[10];
+ __le32 start;
+ __le32 unused6;
+ __le32 unused7;
+ __le32 unused8;
+};
+
+/*
+ * Guess who created this format?
+ */
+static const char eesox_name[] = {
+ 'N', 'e', 'i', 'l', ' ',
+ 'C', 'r', 'i', 't', 'c', 'h', 'e', 'l', 'l', ' ', ' '
+};
+
+/*
+ * EESOX SCSI partition format.
+ *
+ * This is a goddamned awful partition format. We don't seem to store
+ * the size of the partition in this table, only the start addresses.
+ *
+ * There are two possibilities where the size comes from:
+ * 1. The individual ADFS boot block entries that are placed on the disk.
+ * 2. The start address of the next entry.
+ */
+int adfspart_check_EESOX(struct parsed_partitions *state)
+{
+ Sector sect;
+ const unsigned char *data;
+ unsigned char buffer[256];
+ struct eesox_part *p;
+ sector_t start = 0;
+ int i, slot = 1;
+
+ data = read_part_sector(state, 7, &sect);
+ if (!data)
+ return -1;
+
+ /*
+ * "Decrypt" the partition table. God knows why...
+ */
+ for (i = 0; i < 256; i++)
+ buffer[i] = data[i] ^ eesox_name[i & 15];
+
+ put_dev_sector(sect);
+
+ for (i = 0, p = (struct eesox_part *)buffer; i < 8; i++, p++) {
+ sector_t next;
+
+ if (memcmp(p->magic, "Eesox", 6))
+ break;
+
+ next = le32_to_cpu(p->start);
+ if (i)
+ put_partition(state, slot++, start, next - start);
+ start = next;
+ }
+
+ if (i != 0) {
+ sector_t size;
+
+ size = get_capacity(state->bdev->bd_disk);
+ put_partition(state, slot++, start, size - start);
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+ }
+
+ return i ? 1 : 0;
+}
+#endif
diff --git a/block/partitions/acorn.h b/block/partitions/acorn.h
new file mode 100644
index 00000000..ede82852
--- /dev/null
+++ b/block/partitions/acorn.h
@@ -0,0 +1,14 @@
+/*
+ * linux/fs/partitions/acorn.h
+ *
+ * Copyright (C) 1996-2001 Russell King.
+ *
+ * I _hate_ this partitioning mess - why can't we have one defined
+ * format, and everyone stick to it?
+ */
+
+int adfspart_check_CUMANA(struct parsed_partitions *state);
+int adfspart_check_ADFS(struct parsed_partitions *state);
+int adfspart_check_ICS(struct parsed_partitions *state);
+int adfspart_check_POWERTEC(struct parsed_partitions *state);
+int adfspart_check_EESOX(struct parsed_partitions *state);
diff --git a/block/partitions/amiga.c b/block/partitions/amiga.c
new file mode 100644
index 00000000..70cbf44a
--- /dev/null
+++ b/block/partitions/amiga.c
@@ -0,0 +1,139 @@
+/*
+ * fs/partitions/amiga.c
+ *
+ * Code extracted from drivers/block/genhd.c
+ *
+ * Copyright (C) 1991-1998 Linus Torvalds
+ * Re-organised Feb 1998 Russell King
+ */
+
+#include <linux/types.h>
+#include <linux/affs_hardblocks.h>
+
+#include "check.h"
+#include "amiga.h"
+
+static __inline__ u32
+checksum_block(__be32 *m, int size)
+{
+ u32 sum = 0;
+
+ while (size--)
+ sum += be32_to_cpu(*m++);
+ return sum;
+}
+
+int amiga_partition(struct parsed_partitions *state)
+{
+ Sector sect;
+ unsigned char *data;
+ struct RigidDiskBlock *rdb;
+ struct PartitionBlock *pb;
+ int start_sect, nr_sects, blk, part, res = 0;
+ int blksize = 1; /* Multiplier for disk block size */
+ int slot = 1;
+ char b[BDEVNAME_SIZE];
+
+ for (blk = 0; ; blk++, put_dev_sector(sect)) {
+ if (blk == RDB_ALLOCATION_LIMIT)
+ goto rdb_done;
+ data = read_part_sector(state, blk, &sect);
+ if (!data) {
+ if (warn_no_part)
+ printk("Dev %s: unable to read RDB block %d\n",
+ bdevname(state->bdev, b), blk);
+ res = -1;
+ goto rdb_done;
+ }
+ if (*(__be32 *)data != cpu_to_be32(IDNAME_RIGIDDISK))
+ continue;
+
+ rdb = (struct RigidDiskBlock *)data;
+ if (checksum_block((__be32 *)data, be32_to_cpu(rdb->rdb_SummedLongs) & 0x7F) == 0)
+ break;
+ /* Try again with 0xdc..0xdf zeroed, Windows might have
+ * trashed it.
+ */
+ *(__be32 *)(data+0xdc) = 0;
+ if (checksum_block((__be32 *)data,
+ be32_to_cpu(rdb->rdb_SummedLongs) & 0x7F)==0) {
+ printk("Warning: Trashed word at 0xd0 in block %d "
+ "ignored in checksum calculation\n",blk);
+ break;
+ }
+
+ printk("Dev %s: RDB in block %d has bad checksum\n",
+ bdevname(state->bdev, b), blk);
+ }
+
+ /* blksize is blocks per 512 byte standard block */
+ blksize = be32_to_cpu( rdb->rdb_BlockBytes ) / 512;
+
+ {
+ char tmp[7 + 10 + 1 + 1];
+
+ /* Be more informative */
+ snprintf(tmp, sizeof(tmp), " RDSK (%d)", blksize * 512);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ }
+ blk = be32_to_cpu(rdb->rdb_PartitionList);
+ put_dev_sector(sect);
+ for (part = 1; blk>0 && part<=16; part++, put_dev_sector(sect)) {
+ blk *= blksize; /* Read in terms the partition table understands */
+ data = read_part_sector(state, blk, &sect);
+ if (!data) {
+ if (warn_no_part)
+ printk("Dev %s: unable to read partition block %d\n",
+ bdevname(state->bdev, b), blk);
+ res = -1;
+ goto rdb_done;
+ }
+ pb = (struct PartitionBlock *)data;
+ blk = be32_to_cpu(pb->pb_Next);
+ if (pb->pb_ID != cpu_to_be32(IDNAME_PARTITION))
+ continue;
+ if (checksum_block((__be32 *)pb, be32_to_cpu(pb->pb_SummedLongs) & 0x7F) != 0 )
+ continue;
+
+ /* Tell Kernel about it */
+
+ nr_sects = (be32_to_cpu(pb->pb_Environment[10]) + 1 -
+ be32_to_cpu(pb->pb_Environment[9])) *
+ be32_to_cpu(pb->pb_Environment[3]) *
+ be32_to_cpu(pb->pb_Environment[5]) *
+ blksize;
+ if (!nr_sects)
+ continue;
+ start_sect = be32_to_cpu(pb->pb_Environment[9]) *
+ be32_to_cpu(pb->pb_Environment[3]) *
+ be32_to_cpu(pb->pb_Environment[5]) *
+ blksize;
+ put_partition(state,slot++,start_sect,nr_sects);
+ {
+ /* Be even more informative to aid mounting */
+ char dostype[4];
+ char tmp[42];
+
+ __be32 *dt = (__be32 *)dostype;
+ *dt = pb->pb_Environment[16];
+ if (dostype[3] < ' ')
+ snprintf(tmp, sizeof(tmp), " (%c%c%c^%c)",
+ dostype[0], dostype[1],
+ dostype[2], dostype[3] + '@' );
+ else
+ snprintf(tmp, sizeof(tmp), " (%c%c%c%c)",
+ dostype[0], dostype[1],
+ dostype[2], dostype[3]);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ snprintf(tmp, sizeof(tmp), "(res %d spb %d)",
+ be32_to_cpu(pb->pb_Environment[6]),
+ be32_to_cpu(pb->pb_Environment[4]));
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ }
+ res = 1;
+ }
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+
+rdb_done:
+ return res;
+}
diff --git a/block/partitions/amiga.h b/block/partitions/amiga.h
new file mode 100644
index 00000000..d094585c
--- /dev/null
+++ b/block/partitions/amiga.h
@@ -0,0 +1,6 @@
+/*
+ * fs/partitions/amiga.h
+ */
+
+int amiga_partition(struct parsed_partitions *state);
+
diff --git a/block/partitions/atari.c b/block/partitions/atari.c
new file mode 100644
index 00000000..9875b05e
--- /dev/null
+++ b/block/partitions/atari.c
@@ -0,0 +1,149 @@
+/*
+ * fs/partitions/atari.c
+ *
+ * Code extracted from drivers/block/genhd.c
+ *
+ * Copyright (C) 1991-1998 Linus Torvalds
+ * Re-organised Feb 1998 Russell King
+ */
+
+#include <linux/ctype.h>
+#include "check.h"
+#include "atari.h"
+
+/* ++guenther: this should be settable by the user ("make config")?.
+ */
+#define ICD_PARTS
+
+/* check if a partition entry looks valid -- Atari format is assumed if at
+ least one of the primary entries is ok this way */
+#define VALID_PARTITION(pi,hdsiz) \
+ (((pi)->flg & 1) && \
+ isalnum((pi)->id[0]) && isalnum((pi)->id[1]) && isalnum((pi)->id[2]) && \
+ be32_to_cpu((pi)->st) <= (hdsiz) && \
+ be32_to_cpu((pi)->st) + be32_to_cpu((pi)->siz) <= (hdsiz))
+
+static inline int OK_id(char *s)
+{
+ return memcmp (s, "GEM", 3) == 0 || memcmp (s, "BGM", 3) == 0 ||
+ memcmp (s, "LNX", 3) == 0 || memcmp (s, "SWP", 3) == 0 ||
+ memcmp (s, "RAW", 3) == 0 ;
+}
+
+int atari_partition(struct parsed_partitions *state)
+{
+ Sector sect;
+ struct rootsector *rs;
+ struct partition_info *pi;
+ u32 extensect;
+ u32 hd_size;
+ int slot;
+#ifdef ICD_PARTS
+ int part_fmt = 0; /* 0:unknown, 1:AHDI, 2:ICD/Supra */
+#endif
+
+ rs = read_part_sector(state, 0, &sect);
+ if (!rs)
+ return -1;
+
+ /* Verify this is an Atari rootsector: */
+ hd_size = state->bdev->bd_inode->i_size >> 9;
+ if (!VALID_PARTITION(&rs->part[0], hd_size) &&
+ !VALID_PARTITION(&rs->part[1], hd_size) &&
+ !VALID_PARTITION(&rs->part[2], hd_size) &&
+ !VALID_PARTITION(&rs->part[3], hd_size)) {
+ /*
+ * if there's no valid primary partition, assume that no Atari
+ * format partition table (there's no reliable magic or the like
+ * :-()
+ */
+ put_dev_sector(sect);
+ return 0;
+ }
+
+ pi = &rs->part[0];
+ strlcat(state->pp_buf, " AHDI", PAGE_SIZE);
+ for (slot = 1; pi < &rs->part[4] && slot < state->limit; slot++, pi++) {
+ struct rootsector *xrs;
+ Sector sect2;
+ ulong partsect;
+
+ if ( !(pi->flg & 1) )
+ continue;
+ /* active partition */
+ if (memcmp (pi->id, "XGM", 3) != 0) {
+ /* we don't care about other id's */
+ put_partition (state, slot, be32_to_cpu(pi->st),
+ be32_to_cpu(pi->siz));
+ continue;
+ }
+ /* extension partition */
+#ifdef ICD_PARTS
+ part_fmt = 1;
+#endif
+ strlcat(state->pp_buf, " XGM<", PAGE_SIZE);
+ partsect = extensect = be32_to_cpu(pi->st);
+ while (1) {
+ xrs = read_part_sector(state, partsect, &sect2);
+ if (!xrs) {
+ printk (" block %ld read failed\n", partsect);
+ put_dev_sector(sect);
+ return -1;
+ }
+
+ /* ++roman: sanity check: bit 0 of flg field must be set */
+ if (!(xrs->part[0].flg & 1)) {
+ printk( "\nFirst sub-partition in extended partition is not valid!\n" );
+ put_dev_sector(sect2);
+ break;
+ }
+
+ put_partition(state, slot,
+ partsect + be32_to_cpu(xrs->part[0].st),
+ be32_to_cpu(xrs->part[0].siz));
+
+ if (!(xrs->part[1].flg & 1)) {
+ /* end of linked partition list */
+ put_dev_sector(sect2);
+ break;
+ }
+ if (memcmp( xrs->part[1].id, "XGM", 3 ) != 0) {
+ printk("\nID of extended partition is not XGM!\n");
+ put_dev_sector(sect2);
+ break;
+ }
+
+ partsect = be32_to_cpu(xrs->part[1].st) + extensect;
+ put_dev_sector(sect2);
+ if (++slot == state->limit) {
+ printk( "\nMaximum number of partitions reached!\n" );
+ break;
+ }
+ }
+ strlcat(state->pp_buf, " >", PAGE_SIZE);
+ }
+#ifdef ICD_PARTS
+ if ( part_fmt!=1 ) { /* no extended partitions -> test ICD-format */
+ pi = &rs->icdpart[0];
+ /* sanity check: no ICD format if first partition invalid */
+ if (OK_id(pi->id)) {
+ strlcat(state->pp_buf, " ICD<", PAGE_SIZE);
+ for (; pi < &rs->icdpart[8] && slot < state->limit; slot++, pi++) {
+ /* accept only GEM,BGM,RAW,LNX,SWP partitions */
+ if (!((pi->flg & 1) && OK_id(pi->id)))
+ continue;
+ part_fmt = 2;
+ put_partition (state, slot,
+ be32_to_cpu(pi->st),
+ be32_to_cpu(pi->siz));
+ }
+ strlcat(state->pp_buf, " >", PAGE_SIZE);
+ }
+ }
+#endif
+ put_dev_sector(sect);
+
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+
+ return 1;
+}
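
The XGM loop above mixes two bases: each link's part[0].st is relative to that link's own rootsector (partsect), while part[1].st, the pointer to the next link, is relative to the start of the whole extended partition (extensect). A standalone sketch of that addressing (editorial, not part of the diff; sector numbers are made up):

#include <stdio.h>

int main(void)
{
	unsigned long extensect = 100000;   /* start of the XGM partition     */
	unsigned long partsect = extensect; /* rootsector of the current link */
	unsigned long sub_start = 2;        /* xrs->part[0].st of this link   */
	unsigned long next_link = 5000;     /* xrs->part[1].st of this link   */

	/* data sub-partition as registered by put_partition() */
	printf("sub-partition starts at sector %lu\n", partsect + sub_start);

	/* where the loop reads the next link's rootsector from */
	printf("next rootsector at sector %lu\n", extensect + next_link);
	return 0;
}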
diff --git a/block/partitions/atari.h b/block/partitions/atari.h
new file mode 100644
index 00000000..fe2d32a8
--- /dev/null
+++ b/block/partitions/atari.h
@@ -0,0 +1,34 @@
+/*
+ * fs/partitions/atari.h
+ * Moved by Russell King from:
+ *
+ * linux/include/linux/atari_rootsec.h
+ * definitions for Atari Rootsector layout
+ * by Andreas Schwab (schwab@ls5.informatik.uni-dortmund.de)
+ *
+ * modified for ICD/Supra partitioning scheme restricted to at most 12
+ * partitions
+ * by Guenther Kelleter (guenther@pool.informatik.rwth-aachen.de)
+ */
+
+struct partition_info
+{
+ u8 flg; /* bit 0: active; bit 7: bootable */
+ char id[3]; /* "GEM", "BGM", "XGM", or other */
+ __be32 st; /* start of partition */
+ __be32 siz; /* length of partition */
+};
+
+struct rootsector
+{
+ char unused[0x156]; /* room for boot code */
+ struct partition_info icdpart[8]; /* info for ICD-partitions 5..12 */
+ char unused2[0xc];
+ u32 hd_siz; /* size of disk in blocks */
+ struct partition_info part[4];
+ u32 bsl_st; /* start of bad sector list */
+ u32 bsl_cnt; /* length of bad sector list */
+ u16 checksum; /* checksum for bootable disks */
+} __attribute__((__packed__));
+
+int atari_partition(struct parsed_partitions *state);
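
The packed rootsector layout above adds up to exactly one 512-byte sector, which is why atari_partition() can read it with a single read_part_sector() call. A quick standalone check of that arithmetic (editorial, not part of the diff):

#include <stdio.h>

int main(void)
{
	unsigned part_info = 1 + 3 + 4 + 4;   /* flg, id[3], st, siz           */
	unsigned sz = 0x156                   /* boot code                     */
		    + 8 * part_info           /* icdpart[8]                    */
		    + 0xc                     /* unused2                       */
		    + 4                       /* hd_siz                        */
		    + 4 * part_info           /* part[4]                       */
		    + 4 + 4 + 2;              /* bsl_st, bsl_cnt, checksum     */

	printf("sizeof(struct rootsector) = %u bytes\n", sz);   /* prints 512 */
	return 0;
}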
diff --git a/block/partitions/check.c b/block/partitions/check.c
new file mode 100644
index 00000000..bc908672
--- /dev/null
+++ b/block/partitions/check.c
@@ -0,0 +1,166 @@
+/*
+ * fs/partitions/check.c
+ *
+ * Code extracted from drivers/block/genhd.c
+ * Copyright (C) 1991-1998 Linus Torvalds
+ * Re-organised Feb 1998 Russell King
+ *
+ * We now have independent partition support from the
+ * block drivers, which allows all the partition code to
+ * be grouped in one location, and to be mostly
+ * self-contained.
+ *
+ * Added needed MAJORS for new pairs, {hdi,hdj}, {hdk,hdl}
+ */
+
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/genhd.h>
+
+#include "check.h"
+
+#include "acorn.h"
+#include "amiga.h"
+#include "atari.h"
+#include "ldm.h"
+#include "mac.h"
+#include "msdos.h"
+#include "osf.h"
+#include "sgi.h"
+#include "sun.h"
+#include "ibm.h"
+#include "ultrix.h"
+#include "efi.h"
+#include "karma.h"
+#include "sysv68.h"
+
+int warn_no_part = 1; /*This is ugly: should make genhd removable media aware*/
+
+static int (*check_part[])(struct parsed_partitions *) = {
+ /*
+ * Probe partition formats with tables at disk address 0
+ * that also have an ADFS boot block at 0xdc0.
+ */
+#ifdef CONFIG_ACORN_PARTITION_ICS
+ adfspart_check_ICS,
+#endif
+#ifdef CONFIG_ACORN_PARTITION_POWERTEC
+ adfspart_check_POWERTEC,
+#endif
+#ifdef CONFIG_ACORN_PARTITION_EESOX
+ adfspart_check_EESOX,
+#endif
+
+ /*
+ * Now move on to formats that only have partition info at
+ * disk address 0xdc0. Since these may also have stale
+ * PC/BIOS partition tables, they need to come before
+ * the msdos entry.
+ */
+#ifdef CONFIG_ACORN_PARTITION_CUMANA
+ adfspart_check_CUMANA,
+#endif
+#ifdef CONFIG_ACORN_PARTITION_ADFS
+ adfspart_check_ADFS,
+#endif
+
+#ifdef CONFIG_EFI_PARTITION
+ efi_partition, /* this must come before msdos */
+#endif
+#ifdef CONFIG_SGI_PARTITION
+ sgi_partition,
+#endif
+#ifdef CONFIG_LDM_PARTITION
+ ldm_partition, /* this must come before msdos */
+#endif
+#ifdef CONFIG_MSDOS_PARTITION
+ msdos_partition,
+#endif
+#ifdef CONFIG_OSF_PARTITION
+ osf_partition,
+#endif
+#ifdef CONFIG_SUN_PARTITION
+ sun_partition,
+#endif
+#ifdef CONFIG_AMIGA_PARTITION
+ amiga_partition,
+#endif
+#ifdef CONFIG_ATARI_PARTITION
+ atari_partition,
+#endif
+#ifdef CONFIG_MAC_PARTITION
+ mac_partition,
+#endif
+#ifdef CONFIG_ULTRIX_PARTITION
+ ultrix_partition,
+#endif
+#ifdef CONFIG_IBM_PARTITION
+ ibm_partition,
+#endif
+#ifdef CONFIG_KARMA_PARTITION
+ karma_partition,
+#endif
+#ifdef CONFIG_SYSV68_PARTITION
+ sysv68_partition,
+#endif
+ NULL
+};
+
+struct parsed_partitions *
+check_partition(struct gendisk *hd, struct block_device *bdev)
+{
+ struct parsed_partitions *state;
+ int i, res, err;
+
+ state = kzalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
+ if (!state)
+ return NULL;
+ state->pp_buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!state->pp_buf) {
+ kfree(state);
+ return NULL;
+ }
+ state->pp_buf[0] = '\0';
+
+ state->bdev = bdev;
+ disk_name(hd, 0, state->name);
+ snprintf(state->pp_buf, PAGE_SIZE, " %s:", state->name);
+ if (isdigit(state->name[strlen(state->name)-1]))
+ sprintf(state->name, "p");
+
+ state->limit = disk_max_parts(hd);
+ i = res = err = 0;
+ while (!res && check_part[i]) {
+ memset(&state->parts, 0, sizeof(state->parts));
+ res = check_part[i++](state);
+ if (res < 0) {
+ /* We have hit an I/O error which we don't report now.
+ * But record it, and let the others do their job.
+ */
+ err = res;
+ res = 0;
+ }
+
+ }
+ if (res > 0) {
+ printk(KERN_INFO "%s", state->pp_buf);
+
+ free_page((unsigned long)state->pp_buf);
+ return state;
+ }
+ if (state->access_beyond_eod)
+ err = -ENOSPC;
+ if (err)
+ /* The partition is unrecognized. So report I/O errors if there were any */
+ res = err;
+ if (!res)
+ strlcat(state->pp_buf, " unknown partition table\n", PAGE_SIZE);
+ else if (warn_no_part)
+ strlcat(state->pp_buf, " unable to read partition table\n", PAGE_SIZE);
+
+ printk(KERN_INFO "%s", state->pp_buf);
+
+ free_page((unsigned long)state->pp_buf);
+ kfree(state);
+ return ERR_PTR(res);
+}
diff --git a/block/partitions/check.h b/block/partitions/check.h
new file mode 100644
index 00000000..52b10031
--- /dev/null
+++ b/block/partitions/check.h
@@ -0,0 +1,52 @@
+#include <linux/pagemap.h>
+#include <linux/blkdev.h>
+#include <linux/genhd.h>
+
+/*
+ * add_gd_partition adds a partition's details to the device's partition
+ * description.
+ */
+struct parsed_partitions {
+ struct block_device *bdev;
+ char name[BDEVNAME_SIZE];
+ struct {
+ sector_t from;
+ sector_t size;
+ int flags;
+ bool has_info;
+ struct partition_meta_info info;
+ } parts[DISK_MAX_PARTS];
+ int next;
+ int limit;
+ bool access_beyond_eod;
+ char *pp_buf;
+};
+
+struct parsed_partitions *
+check_partition(struct gendisk *, struct block_device *);
+
+static inline void *read_part_sector(struct parsed_partitions *state,
+ sector_t n, Sector *p)
+{
+ if (n >= get_capacity(state->bdev->bd_disk)) {
+ state->access_beyond_eod = true;
+ return NULL;
+ }
+ return read_dev_sector(state->bdev, n, p);
+}
+
+static inline void
+put_partition(struct parsed_partitions *p, int n, sector_t from, sector_t size)
+{
+ if (n < p->limit) {
+ char tmp[1 + BDEVNAME_SIZE + 10 + 1];
+
+ p->parts[n].from = from;
+ p->parts[n].size = size;
+ snprintf(tmp, sizeof(tmp), " %s%d", p->name, n);
+ strlcat(p->pp_buf, tmp, PAGE_SIZE);
+ }
+}
+
+extern int warn_no_part;
+
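
For reference, every parser wired into check_part[] follows the same shape around these helpers: read a sector, decide whether the on-disk format is recognised, register slots with put_partition(), and return -1/0/1 for I/O error / not ours / success, matching what check_partition() expects. A minimal sketch of that skeleton (editorial, not part of the diff; the parser name and magic check are hypothetical):

#include "check.h"

int example_partition(struct parsed_partitions *state)
{
	Sector sect;
	unsigned char *data = read_part_sector(state, 0, &sect);

	if (!data)
		return -1;			/* I/O error                  */
	if (data[510] != 0x55) {		/* hypothetical magic check   */
		put_dev_sector(sect);
		return 0;			/* not our format             */
	}
	put_partition(state, 1, 2048, 409600);	/* slot, start, size in 512B  */
	put_dev_sector(sect);
	strlcat(state->pp_buf, "\n", PAGE_SIZE);
	return 1;
}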
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
new file mode 100644
index 00000000..6296b403
--- /dev/null
+++ b/block/partitions/efi.c
@@ -0,0 +1,675 @@
+/************************************************************
+ * EFI GUID Partition Table handling
+ *
+ * http://www.uefi.org/specs/
+ * http://www.intel.com/technology/efi/
+ *
+ * efi.[ch] by Matt Domsch <Matt_Domsch@dell.com>
+ * Copyright 2000,2001,2002,2004 Dell Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * TODO:
+ *
+ * Changelog:
+ * Mon Nov 09 2004 Matt Domsch <Matt_Domsch@dell.com>
+ * - test for valid PMBR and valid PGPT before ever reading
+ * AGPT, allow override with 'gpt' kernel command line option.
+ * - check for first/last_usable_lba outside of size of disk
+ *
+ * Tue Mar 26 2002 Matt Domsch <Matt_Domsch@dell.com>
+ * - Ported to 2.5.7-pre1 and 2.5.7-dj2
+ * - Applied patch to avoid fault in alternate header handling
+ * - cleaned up find_valid_gpt
+ * - On-disk structure and copy in memory is *always* LE now -
+ * swab fields as needed
+ * - remove print_gpt_header()
+ * - only use first max_p partition entries, to keep the kernel minor number
+ * and partition numbers tied.
+ *
+ * Mon Feb 04 2002 Matt Domsch <Matt_Domsch@dell.com>
+ * - Removed __PRIPTR_PREFIX - not being used
+ *
+ * Mon Jan 14 2002 Matt Domsch <Matt_Domsch@dell.com>
+ * - Ported to 2.5.2-pre11 + library crc32 patch Linus applied
+ *
+ * Thu Dec 6 2001 Matt Domsch <Matt_Domsch@dell.com>
+ * - Added compare_gpts().
+ * - moved le_efi_guid_to_cpus() back into this file. GPT is the only
+ * thing that keeps EFI GUIDs on disk.
+ * - Changed gpt structure names and members to be simpler and more Linux-like.
+ *
+ * Wed Oct 17 2001 Matt Domsch <Matt_Domsch@dell.com>
+ * - Removed CONFIG_DEVFS_VOLUMES_UUID code entirely per Martin Wilck
+ *
+ * Wed Oct 10 2001 Matt Domsch <Matt_Domsch@dell.com>
+ * - Changed function comments to DocBook style per Andreas Dilger suggestion.
+ *
+ * Mon Oct 08 2001 Matt Domsch <Matt_Domsch@dell.com>
+ * - Change read_lba() to use the page cache per Al Viro's work.
+ * - print u64s properly on all architectures
+ * - fixed debug_printk(), now Dprintk()
+ *
+ * Mon Oct 01 2001 Matt Domsch <Matt_Domsch@dell.com>
+ * - Style cleanups
+ * - made most functions static
+ * - Endianness addition
+ * - remove test for second alternate header, as it's not per spec,
+ * and is unnecessary. There's now a method to read/write the last
+ * sector of an odd-sized disk from user space. No tools have ever
+ * been released which used this code, so it's effectively dead.
+ * - Per Asit Mallick of Intel, added a test for a valid PMBR.
+ * - Added kernel command line option 'gpt' to override valid PMBR test.
+ *
+ * Wed Jun 6 2001 Martin Wilck <Martin.Wilck@Fujitsu-Siemens.com>
+ * - added devfs volume UUID support (/dev/volumes/uuids) for
+ * mounting file systems by the partition GUID.
+ *
+ * Tue Dec 5 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * - Moved crc32() to linux/lib, added efi_crc32().
+ *
+ * Thu Nov 30 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * - Replaced Intel's CRC32 function with an equivalent
+ * non-license-restricted version.
+ *
+ * Wed Oct 25 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * - Fixed the last_lba() call to return the proper last block
+ *
+ * Thu Oct 12 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * - Thanks to Andries Brouwer for his debugging assistance.
+ * - Code works, detects all the partitions.
+ *
+ ************************************************************/
+#include <linux/crc32.h>
+#include <linux/ctype.h>
+#include <linux/math64.h>
+#include <linux/slab.h>
+#include "check.h"
+#include "efi.h"
+
+/* This allows a kernel command line option 'gpt' to override
+ * the test for invalid PMBR. Not __initdata because reloading
+ * the partition tables happens after init too.
+ */
+static int force_gpt;
+static int __init
+force_gpt_fn(char *str)
+{
+ force_gpt = 1;
+ return 1;
+}
+__setup("gpt", force_gpt_fn);
+
+
+/**
+ * efi_crc32() - EFI version of crc32 function
+ * @buf: buffer to calculate crc32 of
+ * @len - length of buf
+ *
+ * Description: Returns EFI-style CRC32 value for @buf
+ *
+ * This function uses the little endian Ethernet polynomial
+ * but seeds the function with ~0, and xor's with ~0 at the end.
+ * Note, the EFI Specification, v1.02, has a reference to
+ * Dr. Dobbs Journal, May 1994 (actually it's in May 1992).
+ */
+static inline u32
+efi_crc32(const void *buf, unsigned long len)
+{
+ return (crc32(~0L, buf, len) ^ ~0L);
+}
+
+/**
+ * last_lba(): return number of last logical block of device
+ * @bdev: block device
+ *
+ * Description: Returns last LBA value on success, 0 on error.
+ * This is stored (by sd and ide-geometry) in
+ * the part[0] entry for this disk, and is the number of
+ * physical sectors available on the disk.
+ */
+static u64 last_lba(struct block_device *bdev)
+{
+ if (!bdev || !bdev->bd_inode)
+ return 0;
+ return div_u64(bdev->bd_inode->i_size,
+ bdev_logical_block_size(bdev)) - 1ULL;
+}
+
+static inline int
+pmbr_part_valid(struct partition *part)
+{
+ if (part->sys_ind == EFI_PMBR_OSTYPE_EFI_GPT &&
+ le32_to_cpu(part->start_sect) == 1UL)
+ return 1;
+ return 0;
+}
+
+/**
+ * is_pmbr_valid(): test Protective MBR for validity
+ * @mbr: pointer to a legacy mbr structure
+ *
+ * Description: Returns 1 if PMBR is valid, 0 otherwise.
+ * Validity depends on two things:
+ * 1) MSDOS signature is in the last two bytes of the MBR
+ * 2) One partition of type 0xEE is found
+ */
+static int
+is_pmbr_valid(legacy_mbr *mbr)
+{
+ int i;
+ if (!mbr || le16_to_cpu(mbr->signature) != MSDOS_MBR_SIGNATURE)
+ return 0;
+ for (i = 0; i < 4; i++)
+ if (pmbr_part_valid(&mbr->partition_record[i]))
+ return 1;
+ return 0;
+}
+
+/**
+ * read_lba(): Read bytes from disk, starting at given LBA
+ * @state
+ * @lba
+ * @buffer
+ * @count
+ *
+ * Description: Reads @count bytes from @state->bdev into @buffer.
+ * Returns number of bytes read on success, 0 on error.
+ */
+static size_t read_lba(struct parsed_partitions *state,
+ u64 lba, u8 *buffer, size_t count)
+{
+ size_t totalreadcount = 0;
+ struct block_device *bdev = state->bdev;
+ sector_t n = lba * (bdev_logical_block_size(bdev) / 512);
+
+ if (!buffer || lba > last_lba(bdev))
+ return 0;
+
+ while (count) {
+ int copied = 512;
+ Sector sect;
+ unsigned char *data = read_part_sector(state, n++, &sect);
+ if (!data)
+ break;
+ if (copied > count)
+ copied = count;
+ memcpy(buffer, data, copied);
+ put_dev_sector(sect);
+ buffer += copied;
+ totalreadcount +=copied;
+ count -= copied;
+ }
+ return totalreadcount;
+}
+
+/**
+ * alloc_read_gpt_entries(): reads partition entries from disk
+ * @state
+ * @gpt - GPT header
+ *
+ * Description: Returns ptes on success, NULL on error.
+ * Allocates space for PTEs based on information found in @gpt.
+ * Notes: remember to free pte when you're done!
+ */
+static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
+ gpt_header *gpt)
+{
+ size_t count;
+ gpt_entry *pte;
+
+ if (!gpt)
+ return NULL;
+
+ count = le32_to_cpu(gpt->num_partition_entries) *
+ le32_to_cpu(gpt->sizeof_partition_entry);
+ if (!count)
+ return NULL;
+ pte = kzalloc(count, GFP_KERNEL);
+ if (!pte)
+ return NULL;
+
+ if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
+ (u8 *) pte,
+ count) < count) {
+ kfree(pte);
+ pte=NULL;
+ return NULL;
+ }
+ return pte;
+}
+
+/**
+ * alloc_read_gpt_header(): Allocates GPT header, reads into it from disk
+ * @state
+ * @lba is the Logical Block Address of the partition table
+ *
+ * Description: returns GPT header on success, NULL on error. Allocates
+ * and fills a GPT header starting at @lba from @state->bdev.
+ * Note: remember to free gpt when finished with it.
+ */
+static gpt_header *alloc_read_gpt_header(struct parsed_partitions *state,
+ u64 lba)
+{
+ gpt_header *gpt;
+ unsigned ssz = bdev_logical_block_size(state->bdev);
+
+ gpt = kzalloc(ssz, GFP_KERNEL);
+ if (!gpt)
+ return NULL;
+
+ if (read_lba(state, lba, (u8 *) gpt, ssz) < ssz) {
+ kfree(gpt);
+ gpt=NULL;
+ return NULL;
+ }
+
+ return gpt;
+}
+
+/**
+ * is_gpt_valid() - tests one GPT header and PTEs for validity
+ * @state
+ * @lba is the logical block address of the GPT header to test
+ * @gpt is a GPT header ptr, filled on return.
+ * @ptes is a PTEs ptr, filled on return.
+ *
+ * Description: returns 1 if valid, 0 on error.
+ * If valid, returns pointers to newly allocated GPT header and PTEs.
+ */
+static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
+ gpt_header **gpt, gpt_entry **ptes)
+{
+ u32 crc, origcrc;
+ u64 lastlba;
+
+ if (!ptes)
+ return 0;
+ if (!(*gpt = alloc_read_gpt_header(state, lba)))
+ return 0;
+
+ /* Check the GUID Partition Table signature */
+ if (le64_to_cpu((*gpt)->signature) != GPT_HEADER_SIGNATURE) {
+ pr_debug("GUID Partition Table Header signature is wrong:"
+ "%lld != %lld\n",
+ (unsigned long long)le64_to_cpu((*gpt)->signature),
+ (unsigned long long)GPT_HEADER_SIGNATURE);
+ goto fail;
+ }
+
+ /* Check the GUID Partition Table header size */
+ if (le32_to_cpu((*gpt)->header_size) >
+ bdev_logical_block_size(state->bdev)) {
+ pr_debug("GUID Partition Table Header size is wrong: %u > %u\n",
+ le32_to_cpu((*gpt)->header_size),
+ bdev_logical_block_size(state->bdev));
+ goto fail;
+ }
+
+ /* Check the GUID Partition Table CRC */
+ origcrc = le32_to_cpu((*gpt)->header_crc32);
+ (*gpt)->header_crc32 = 0;
+ crc = efi_crc32((const unsigned char *) (*gpt), le32_to_cpu((*gpt)->header_size));
+
+ if (crc != origcrc) {
+ pr_debug("GUID Partition Table Header CRC is wrong: %x != %x\n",
+ crc, origcrc);
+ goto fail;
+ }
+ (*gpt)->header_crc32 = cpu_to_le32(origcrc);
+
+ /* Check that the my_lba entry points to the LBA that contains
+ * the GUID Partition Table */
+ if (le64_to_cpu((*gpt)->my_lba) != lba) {
+ pr_debug("GPT my_lba incorrect: %lld != %lld\n",
+ (unsigned long long)le64_to_cpu((*gpt)->my_lba),
+ (unsigned long long)lba);
+ goto fail;
+ }
+
+ /* Check the first_usable_lba and last_usable_lba are
+ * within the disk.
+ */
+ lastlba = last_lba(state->bdev);
+ if (le64_to_cpu((*gpt)->first_usable_lba) > lastlba) {
+ pr_debug("GPT: first_usable_lba incorrect: %lld > %lld\n",
+ (unsigned long long)le64_to_cpu((*gpt)->first_usable_lba),
+ (unsigned long long)lastlba);
+ goto fail;
+ }
+ if (le64_to_cpu((*gpt)->last_usable_lba) > lastlba) {
+ pr_debug("GPT: last_usable_lba incorrect: %lld > %lld\n",
+ (unsigned long long)le64_to_cpu((*gpt)->last_usable_lba),
+ (unsigned long long)lastlba);
+ goto fail;
+ }
+
+ /* Check that sizeof_partition_entry has the correct value */
+ if (le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) {
+		pr_debug("GUID Partition Entry Size check failed.\n");
+ goto fail;
+ }
+
+ if (!(*ptes = alloc_read_gpt_entries(state, *gpt)))
+ goto fail;
+
+ /* Check the GUID Partition Entry Array CRC */
+ crc = efi_crc32((const unsigned char *) (*ptes),
+ le32_to_cpu((*gpt)->num_partition_entries) *
+ le32_to_cpu((*gpt)->sizeof_partition_entry));
+
+ if (crc != le32_to_cpu((*gpt)->partition_entry_array_crc32)) {
+		pr_debug("GUID Partition Entry Array CRC check failed.\n");
+ goto fail_ptes;
+ }
+
+ /* We're done, all's well */
+ return 1;
+
+ fail_ptes:
+ kfree(*ptes);
+ *ptes = NULL;
+ fail:
+ kfree(*gpt);
+ *gpt = NULL;
+ return 0;
+}
+
+/**
+ * is_pte_valid() - tests one PTE for validity
+ * @pte is the pte to check
+ * @lastlba is last lba of the disk
+ *
+ * Description: returns 1 if valid, 0 on error.
+ */
+static inline int
+is_pte_valid(const gpt_entry *pte, const u64 lastlba)
+{
+ if ((!efi_guidcmp(pte->partition_type_guid, NULL_GUID)) ||
+ le64_to_cpu(pte->starting_lba) > lastlba ||
+ le64_to_cpu(pte->ending_lba) > lastlba)
+ return 0;
+ return 1;
+}
+
+/**
+ * compare_gpts() - Search disk for valid GPT headers and PTEs
+ * @pgpt is the primary GPT header
+ * @agpt is the alternate GPT header
+ * @lastlba is the last LBA number
+ * Description: Returns nothing. Sanity checks pgpt and agpt fields
+ * and prints warnings on discrepancies.
+ *
+ */
+static void
+compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba)
+{
+ int error_found = 0;
+ if (!pgpt || !agpt)
+ return;
+ if (le64_to_cpu(pgpt->my_lba) != le64_to_cpu(agpt->alternate_lba)) {
+ printk(KERN_WARNING
+ "GPT:Primary header LBA != Alt. header alternate_lba\n");
+ printk(KERN_WARNING "GPT:%lld != %lld\n",
+ (unsigned long long)le64_to_cpu(pgpt->my_lba),
+ (unsigned long long)le64_to_cpu(agpt->alternate_lba));
+ error_found++;
+ }
+ if (le64_to_cpu(pgpt->alternate_lba) != le64_to_cpu(agpt->my_lba)) {
+ printk(KERN_WARNING
+ "GPT:Primary header alternate_lba != Alt. header my_lba\n");
+ printk(KERN_WARNING "GPT:%lld != %lld\n",
+ (unsigned long long)le64_to_cpu(pgpt->alternate_lba),
+ (unsigned long long)le64_to_cpu(agpt->my_lba));
+ error_found++;
+ }
+ if (le64_to_cpu(pgpt->first_usable_lba) !=
+ le64_to_cpu(agpt->first_usable_lba)) {
+ printk(KERN_WARNING "GPT:first_usable_lbas don't match.\n");
+ printk(KERN_WARNING "GPT:%lld != %lld\n",
+ (unsigned long long)le64_to_cpu(pgpt->first_usable_lba),
+ (unsigned long long)le64_to_cpu(agpt->first_usable_lba));
+ error_found++;
+ }
+ if (le64_to_cpu(pgpt->last_usable_lba) !=
+ le64_to_cpu(agpt->last_usable_lba)) {
+ printk(KERN_WARNING "GPT:last_usable_lbas don't match.\n");
+ printk(KERN_WARNING "GPT:%lld != %lld\n",
+ (unsigned long long)le64_to_cpu(pgpt->last_usable_lba),
+ (unsigned long long)le64_to_cpu(agpt->last_usable_lba));
+ error_found++;
+ }
+ if (efi_guidcmp(pgpt->disk_guid, agpt->disk_guid)) {
+ printk(KERN_WARNING "GPT:disk_guids don't match.\n");
+ error_found++;
+ }
+ if (le32_to_cpu(pgpt->num_partition_entries) !=
+ le32_to_cpu(agpt->num_partition_entries)) {
+ printk(KERN_WARNING "GPT:num_partition_entries don't match: "
+ "0x%x != 0x%x\n",
+ le32_to_cpu(pgpt->num_partition_entries),
+ le32_to_cpu(agpt->num_partition_entries));
+ error_found++;
+ }
+ if (le32_to_cpu(pgpt->sizeof_partition_entry) !=
+ le32_to_cpu(agpt->sizeof_partition_entry)) {
+ printk(KERN_WARNING
+ "GPT:sizeof_partition_entry values don't match: "
+ "0x%x != 0x%x\n",
+ le32_to_cpu(pgpt->sizeof_partition_entry),
+ le32_to_cpu(agpt->sizeof_partition_entry));
+ error_found++;
+ }
+ if (le32_to_cpu(pgpt->partition_entry_array_crc32) !=
+ le32_to_cpu(agpt->partition_entry_array_crc32)) {
+ printk(KERN_WARNING
+ "GPT:partition_entry_array_crc32 values don't match: "
+ "0x%x != 0x%x\n",
+ le32_to_cpu(pgpt->partition_entry_array_crc32),
+ le32_to_cpu(agpt->partition_entry_array_crc32));
+ error_found++;
+ }
+ if (le64_to_cpu(pgpt->alternate_lba) != lastlba) {
+ printk(KERN_WARNING
+ "GPT:Primary header thinks Alt. header is not at the end of the disk.\n");
+ printk(KERN_WARNING "GPT:%lld != %lld\n",
+ (unsigned long long)le64_to_cpu(pgpt->alternate_lba),
+ (unsigned long long)lastlba);
+ error_found++;
+ }
+
+ if (le64_to_cpu(agpt->my_lba) != lastlba) {
+ printk(KERN_WARNING
+ "GPT:Alternate GPT header not at the end of the disk.\n");
+ printk(KERN_WARNING "GPT:%lld != %lld\n",
+ (unsigned long long)le64_to_cpu(agpt->my_lba),
+ (unsigned long long)lastlba);
+ error_found++;
+ }
+
+ if (error_found)
+ printk(KERN_WARNING
+ "GPT: Use GNU Parted to correct GPT errors.\n");
+ return;
+}
+
+/**
+ * find_valid_gpt() - Search disk for valid GPT headers and PTEs
+ * @state
+ * @gpt is a GPT header ptr, filled on return.
+ * @ptes is a PTEs ptr, filled on return.
+ * Description: Returns 1 if valid, 0 on error.
+ * If valid, returns pointers to newly allocated GPT header and PTEs.
+ * Validity depends on PMBR being valid (or being overridden by the
+ * 'gpt' kernel command line option) and finding either the Primary
+ * GPT header and PTEs valid, or the Alternate GPT header and PTEs
+ * valid. If the Primary GPT header is not valid, the Alternate GPT header
+ * is not checked unless the 'gpt' kernel command line option is passed.
+ * This protects against devices which misreport their size, and forces
+ * the user to decide to use the Alternate GPT.
+ */
+static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
+ gpt_entry **ptes)
+{
+ int good_pgpt = 0, good_agpt = 0, good_pmbr = 0;
+ gpt_header *pgpt = NULL, *agpt = NULL;
+ gpt_entry *pptes = NULL, *aptes = NULL;
+ legacy_mbr *legacymbr;
+ u64 lastlba;
+
+ if (!ptes)
+ return 0;
+
+ lastlba = last_lba(state->bdev);
+ if (!force_gpt) {
+ /* This will be added to the EFI Spec. per Intel after v1.02. */
+ legacymbr = kzalloc(sizeof (*legacymbr), GFP_KERNEL);
+ if (legacymbr) {
+ read_lba(state, 0, (u8 *) legacymbr,
+ sizeof (*legacymbr));
+ good_pmbr = is_pmbr_valid(legacymbr);
+ kfree(legacymbr);
+ }
+ if (!good_pmbr)
+ goto fail;
+ }
+
+ good_pgpt = is_gpt_valid(state, GPT_PRIMARY_PARTITION_TABLE_LBA,
+ &pgpt, &pptes);
+ if (good_pgpt)
+ good_agpt = is_gpt_valid(state,
+ le64_to_cpu(pgpt->alternate_lba),
+ &agpt, &aptes);
+ if (!good_agpt && force_gpt)
+ good_agpt = is_gpt_valid(state, lastlba, &agpt, &aptes);
+
+ /* The obviously unsuccessful case */
+ if (!good_pgpt && !good_agpt)
+ goto fail;
+
+ compare_gpts(pgpt, agpt, lastlba);
+
+ /* The good cases */
+ if (good_pgpt) {
+ *gpt = pgpt;
+ *ptes = pptes;
+ kfree(agpt);
+ kfree(aptes);
+ if (!good_agpt) {
+ printk(KERN_WARNING
+ "Alternate GPT is invalid, "
+ "using primary GPT.\n");
+ }
+ return 1;
+ }
+ else if (good_agpt) {
+ *gpt = agpt;
+ *ptes = aptes;
+ kfree(pgpt);
+ kfree(pptes);
+ printk(KERN_WARNING
+ "Primary GPT is invalid, using alternate GPT.\n");
+ return 1;
+ }
+
+ fail:
+ kfree(pgpt);
+ kfree(agpt);
+ kfree(pptes);
+ kfree(aptes);
+ *gpt = NULL;
+ *ptes = NULL;
+ return 0;
+}
+
+/**
+ * efi_partition(struct parsed_partitions *state)
+ * @state
+ *
+ * Description: called from check.c, if the disk contains GPT
+ * partitions, sets up partition entries in the kernel.
+ *
+ * If the first block on the disk is a legacy MBR,
+ * it will get handled by msdos_partition().
+ * If it's a Protective MBR, we'll handle it here.
+ *
+ * We do not create a Linux partition for GPT, but
+ * only for the actual data partitions.
+ * Returns:
+ * -1 if unable to read the partition table
+ * 0 if this isn't our partition table
+ * 1 if successful
+ *
+ */
+int efi_partition(struct parsed_partitions *state)
+{
+ gpt_header *gpt = NULL;
+ gpt_entry *ptes = NULL;
+ u32 i;
+ unsigned ssz = bdev_logical_block_size(state->bdev) / 512;
+ u8 unparsed_guid[37];
+
+ if (!find_valid_gpt(state, &gpt, &ptes) || !gpt || !ptes) {
+ kfree(gpt);
+ kfree(ptes);
+ return 0;
+ }
+
+ pr_debug("GUID Partition Table is valid! Yea!\n");
+
+ for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) {
+ struct partition_meta_info *info;
+ unsigned label_count = 0;
+ unsigned label_max;
+ u64 start = le64_to_cpu(ptes[i].starting_lba);
+ u64 size = le64_to_cpu(ptes[i].ending_lba) -
+ le64_to_cpu(ptes[i].starting_lba) + 1ULL;
+
+ if (!is_pte_valid(&ptes[i], last_lba(state->bdev)))
+ continue;
+
+ put_partition(state, i+1, start * ssz, size * ssz);
+
+ /* If this is a RAID volume, tell md */
+ if (!efi_guidcmp(ptes[i].partition_type_guid,
+ PARTITION_LINUX_RAID_GUID))
+ state->parts[i + 1].flags = ADDPART_FLAG_RAID;
+
+ info = &state->parts[i + 1].info;
+ /* Instead of doing a manual swap to big endian, reuse the
+ * common ASCII hex format as the interim.
+ */
+ efi_guid_unparse(&ptes[i].unique_partition_guid, unparsed_guid);
+ part_pack_uuid(unparsed_guid, info->uuid);
+
+ /* Naively convert UTF16-LE to 7 bits. */
+ label_max = min(sizeof(info->volname) - 1,
+ sizeof(ptes[i].partition_name));
+ info->volname[label_max] = 0;
+ while (label_count < label_max) {
+ u8 c = ptes[i].partition_name[label_count] & 0xff;
+ if (c && !isprint(c))
+ c = '!';
+ info->volname[label_count] = c;
+ label_count++;
+ }
+ state->parts[i + 1].has_info = true;
+ }
+ kfree(ptes);
+ kfree(gpt);
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+ return 1;
+}
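
efi_partition() above hands put_partition() values in 512-byte sectors, so the GPT LBAs (which are in logical blocks) are scaled by ssz, and the size is inclusive of the ending LBA. A standalone sketch of that unit handling (editorial, not part of the diff; the 4K-sector disk and LBA values are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long long starting_lba = 256, ending_lba = 262399;
	unsigned int logical_block_size = 4096;       /* a 4Kn disk          */
	unsigned int ssz = logical_block_size / 512;  /* = 8                 */

	unsigned long long size = ending_lba - starting_lba + 1;  /* inclusive */

	printf("start %llu, size %llu (512-byte sectors)\n",
	       starting_lba * ssz, size * ssz);
	return 0;
}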
diff --git a/block/partitions/efi.h b/block/partitions/efi.h
new file mode 100644
index 00000000..b69ab729
--- /dev/null
+++ b/block/partitions/efi.h
@@ -0,0 +1,134 @@
+/************************************************************
+ * EFI GUID Partition Table
+ * Per Intel EFI Specification v1.02
+ * http://developer.intel.com/technology/efi/efi.htm
+ *
+ * By Matt Domsch <Matt_Domsch@dell.com> Fri Sep 22 22:15:56 CDT 2000
+ * Copyright 2000,2001 Dell Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ ************************************************************/
+
+#ifndef FS_PART_EFI_H_INCLUDED
+#define FS_PART_EFI_H_INCLUDED
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/efi.h>
+
+#define MSDOS_MBR_SIGNATURE 0xaa55
+#define EFI_PMBR_OSTYPE_EFI 0xEF
+#define EFI_PMBR_OSTYPE_EFI_GPT 0xEE
+
+#define GPT_HEADER_SIGNATURE 0x5452415020494645ULL
+#define GPT_HEADER_REVISION_V1 0x00010000
+#define GPT_PRIMARY_PARTITION_TABLE_LBA 1
+
+#define PARTITION_SYSTEM_GUID \
+ EFI_GUID( 0xC12A7328, 0xF81F, 0x11d2, \
+ 0xBA, 0x4B, 0x00, 0xA0, 0xC9, 0x3E, 0xC9, 0x3B)
+#define LEGACY_MBR_PARTITION_GUID \
+ EFI_GUID( 0x024DEE41, 0x33E7, 0x11d3, \
+ 0x9D, 0x69, 0x00, 0x08, 0xC7, 0x81, 0xF3, 0x9F)
+#define PARTITION_MSFT_RESERVED_GUID \
+ EFI_GUID( 0xE3C9E316, 0x0B5C, 0x4DB8, \
+ 0x81, 0x7D, 0xF9, 0x2D, 0xF0, 0x02, 0x15, 0xAE)
+#define PARTITION_BASIC_DATA_GUID \
+ EFI_GUID( 0xEBD0A0A2, 0xB9E5, 0x4433, \
+ 0x87, 0xC0, 0x68, 0xB6, 0xB7, 0x26, 0x99, 0xC7)
+#define PARTITION_LINUX_RAID_GUID \
+ EFI_GUID( 0xa19d880f, 0x05fc, 0x4d3b, \
+ 0xa0, 0x06, 0x74, 0x3f, 0x0f, 0x84, 0x91, 0x1e)
+#define PARTITION_LINUX_SWAP_GUID \
+ EFI_GUID( 0x0657fd6d, 0xa4ab, 0x43c4, \
+ 0x84, 0xe5, 0x09, 0x33, 0xc8, 0x4b, 0x4f, 0x4f)
+#define PARTITION_LINUX_LVM_GUID \
+ EFI_GUID( 0xe6d6d379, 0xf507, 0x44c2, \
+ 0xa2, 0x3c, 0x23, 0x8f, 0x2a, 0x3d, 0xf9, 0x28)
+
+typedef struct _gpt_header {
+ __le64 signature;
+ __le32 revision;
+ __le32 header_size;
+ __le32 header_crc32;
+ __le32 reserved1;
+ __le64 my_lba;
+ __le64 alternate_lba;
+ __le64 first_usable_lba;
+ __le64 last_usable_lba;
+ efi_guid_t disk_guid;
+ __le64 partition_entry_lba;
+ __le32 num_partition_entries;
+ __le32 sizeof_partition_entry;
+ __le32 partition_entry_array_crc32;
+
+ /* The rest of the logical block is reserved by UEFI and must be zero.
+ * EFI standard handles this by:
+ *
+ * uint8_t reserved2[ BlockSize - 92 ];
+ */
+} __attribute__ ((packed)) gpt_header;
+
+typedef struct _gpt_entry_attributes {
+ u64 required_to_function:1;
+ u64 reserved:47;
+ u64 type_guid_specific:16;
+} __attribute__ ((packed)) gpt_entry_attributes;
+
+typedef struct _gpt_entry {
+ efi_guid_t partition_type_guid;
+ efi_guid_t unique_partition_guid;
+ __le64 starting_lba;
+ __le64 ending_lba;
+ gpt_entry_attributes attributes;
+ efi_char16_t partition_name[72 / sizeof (efi_char16_t)];
+} __attribute__ ((packed)) gpt_entry;
+
+typedef struct _legacy_mbr {
+ u8 boot_code[440];
+ __le32 unique_mbr_signature;
+ __le16 unknown;
+ struct partition partition_record[4];
+ __le16 signature;
+} __attribute__ ((packed)) legacy_mbr;
+
+/* Functions */
+extern int efi_partition(struct parsed_partitions *state);
+
+#endif
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * --------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
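
The header_crc32 field of gpt_header covers the header with the field itself zeroed, which is why is_gpt_valid() in efi.c above saves the stored value, clears the field, recomputes the CRC and then restores it. A standalone sketch of that round trip (editorial, not part of the diff): crc32_ieee() is a plain bitwise CRC-32 believed to be equivalent to efi_crc32() (init ~0, reflected polynomial 0xEDB88320, final XOR ~0), and the 92-byte buffer stands in for a header whose crc field sits at byte offset 16.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Plain bitwise CRC-32 with the same conditioning efi_crc32() applies. */
static uint32_t crc32_ieee(const uint8_t *buf, size_t len)
{
	uint32_t crc = 0xFFFFFFFFu;

	while (len--) {
		crc ^= *buf++;
		for (int k = 0; k < 8; k++)
			crc = (crc & 1u) ? (crc >> 1) ^ 0xEDB88320u
					 : (crc >> 1);
	}
	return crc ^ 0xFFFFFFFFu;
}

int main(void)
{
	uint8_t header[92] = { 'E', 'F', 'I', ' ', 'P', 'A', 'R', 'T' };
	uint32_t stored, computed;

	/* Writing side: CRC is taken with the crc field (bytes 16..19)
	 * held at zero, then the result is stored there. */
	stored = crc32_ieee(header, sizeof(header));
	memcpy(header + 16, &stored, sizeof(stored));

	/* Validation side, as in is_gpt_valid(): save, zero, recompute,
	 * compare, restore. */
	memcpy(&stored, header + 16, sizeof(stored));
	memset(header + 16, 0, sizeof(stored));
	computed = crc32_ieee(header, sizeof(header));
	memcpy(header + 16, &stored, sizeof(stored));

	printf("header CRC %s\n", computed == stored ? "OK" : "BAD");
	return 0;
}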
diff --git a/block/partitions/ibm.c b/block/partitions/ibm.c
new file mode 100644
index 00000000..d513a07f
--- /dev/null
+++ b/block/partitions/ibm.c
@@ -0,0 +1,275 @@
+/*
+ * File...........: linux/fs/partitions/ibm.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Volker Sameske <sameske@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/hdreg.h>
+#include <linux/slab.h>
+#include <asm/dasd.h>
+#include <asm/ebcdic.h>
+#include <asm/uaccess.h>
+#include <asm/vtoc.h>
+
+#include "check.h"
+#include "ibm.h"
+
+/*
+ * compute the block number from a
+ * cyl-cyl-head-head structure
+ */
+static sector_t
+cchh2blk (struct vtoc_cchh *ptr, struct hd_geometry *geo) {
+
+ sector_t cyl;
+ __u16 head;
+
+ /*decode cylinder and heads for large volumes */
+ cyl = ptr->hh & 0xFFF0;
+ cyl <<= 12;
+ cyl |= ptr->cc;
+ head = ptr->hh & 0x000F;
+ return cyl * geo->heads * geo->sectors +
+ head * geo->sectors;
+}
+
+/*
+ * compute the block number from a
+ * cyl-cyl-head-head-block structure
+ */
+static sector_t
+cchhb2blk (struct vtoc_cchhb *ptr, struct hd_geometry *geo) {
+
+ sector_t cyl;
+ __u16 head;
+
+ /*decode cylinder and heads for large volumes */
+ cyl = ptr->hh & 0xFFF0;
+ cyl <<= 12;
+ cyl |= ptr->cc;
+ head = ptr->hh & 0x000F;
+ return cyl * geo->heads * geo->sectors +
+ head * geo->sectors +
+ ptr->b;
+}
+
+/*
+ */
+int ibm_partition(struct parsed_partitions *state)
+{
+ struct block_device *bdev = state->bdev;
+ int blocksize, res;
+ loff_t i_size, offset, size, fmt_size;
+ dasd_information2_t *info;
+ struct hd_geometry *geo;
+ char type[5] = {0,};
+ char name[7] = {0,};
+ union label_t {
+ struct vtoc_volume_label_cdl vol;
+ struct vtoc_volume_label_ldl lnx;
+ struct vtoc_cms_label cms;
+ } *label;
+ unsigned char *data;
+ Sector sect;
+ sector_t labelsect;
+ char tmp[64];
+
+ res = 0;
+ blocksize = bdev_logical_block_size(bdev);
+ if (blocksize <= 0)
+ goto out_exit;
+ i_size = i_size_read(bdev->bd_inode);
+ if (i_size == 0)
+ goto out_exit;
+
+ info = kmalloc(sizeof(dasd_information2_t), GFP_KERNEL);
+ if (info == NULL)
+ goto out_exit;
+ geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL);
+ if (geo == NULL)
+ goto out_nogeo;
+ label = kmalloc(sizeof(union label_t), GFP_KERNEL);
+ if (label == NULL)
+ goto out_nolab;
+
+ if (ioctl_by_bdev(bdev, BIODASDINFO2, (unsigned long)info) != 0 ||
+ ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0)
+ goto out_freeall;
+
+ /*
+ * Special case for FBA disks: label sector does not depend on
+ * blocksize.
+ */
+ if ((info->cu_type == 0x6310 && info->dev_type == 0x9336) ||
+ (info->cu_type == 0x3880 && info->dev_type == 0x3370))
+ labelsect = info->label_block;
+ else
+ labelsect = info->label_block * (blocksize >> 9);
+
+ /*
+ * Get volume label, extract name and type.
+ */
+ data = read_part_sector(state, labelsect, &sect);
+ if (data == NULL)
+ goto out_readerr;
+
+ memcpy(label, data, sizeof(union label_t));
+ put_dev_sector(sect);
+
+ if ((!info->FBA_layout) && (!strcmp(info->type, "ECKD"))) {
+ strncpy(type, label->vol.vollbl, 4);
+ strncpy(name, label->vol.volid, 6);
+ } else {
+ strncpy(type, label->lnx.vollbl, 4);
+ strncpy(name, label->lnx.volid, 6);
+ }
+ EBCASC(type, 4);
+ EBCASC(name, 6);
+
+ res = 1;
+
+ /*
+	 * Three different formats: LDL, CDL and unformatted disk
+ *
+ * identified by info->format
+ *
+	 * unformatted disks we do not have to care about
+ */
+ if (info->format == DASD_FORMAT_LDL) {
+ if (strncmp(type, "CMS1", 4) == 0) {
+ /*
+ * VM style CMS1 labeled disk
+ */
+ blocksize = label->cms.block_size;
+ if (label->cms.disk_offset != 0) {
+ snprintf(tmp, sizeof(tmp), "CMS1/%8s(MDSK):", name);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ /* disk is reserved minidisk */
+ offset = label->cms.disk_offset;
+ size = (label->cms.block_count - 1)
+ * (blocksize >> 9);
+ } else {
+ snprintf(tmp, sizeof(tmp), "CMS1/%8s:", name);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ offset = (info->label_block + 1);
+ size = label->cms.block_count
+ * (blocksize >> 9);
+ }
+ put_partition(state, 1, offset*(blocksize >> 9),
+ size-offset*(blocksize >> 9));
+ } else {
+ if (strncmp(type, "LNX1", 4) == 0) {
+ snprintf(tmp, sizeof(tmp), "LNX1/%8s:", name);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ if (label->lnx.ldl_version == 0xf2) {
+ fmt_size = label->lnx.formatted_blocks
+ * (blocksize >> 9);
+ } else if (!strcmp(info->type, "ECKD")) {
+				/* formatted w/o large volume support */
+ fmt_size = geo->cylinders * geo->heads
+ * geo->sectors * (blocksize >> 9);
+ } else {
+ /* old label and no usable disk geometry
+ * (e.g. DIAG) */
+ fmt_size = i_size >> 9;
+ }
+ size = i_size >> 9;
+ if (fmt_size < size)
+ size = fmt_size;
+ offset = (info->label_block + 1);
+ } else {
+ /* unlabeled disk */
+ strlcat(state->pp_buf, "(nonl)", PAGE_SIZE);
+ size = i_size >> 9;
+ offset = (info->label_block + 1);
+ }
+ put_partition(state, 1, offset*(blocksize >> 9),
+ size-offset*(blocksize >> 9));
+ }
+ } else if (info->format == DASD_FORMAT_CDL) {
+ /*
+ * New style CDL formatted disk
+ */
+ sector_t blk;
+ int counter;
+
+ /*
+ * check if VOL1 label is available
+ * if not, something is wrong, skipping partition detection
+ */
+ if (strncmp(type, "VOL1", 4) == 0) {
+ snprintf(tmp, sizeof(tmp), "VOL1/%8s:", name);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ /*
+ * get block number and read then go through format1
+ * labels
+ */
+ blk = cchhb2blk(&label->vol.vtoc, geo) + 1;
+ counter = 0;
+ data = read_part_sector(state, blk * (blocksize/512),
+ &sect);
+ while (data != NULL) {
+ struct vtoc_format1_label f1;
+
+ memcpy(&f1, data,
+ sizeof(struct vtoc_format1_label));
+ put_dev_sector(sect);
+
+ /* skip FMT4 / FMT5 / FMT7 labels */
+ if (f1.DS1FMTID == _ascebc['4']
+ || f1.DS1FMTID == _ascebc['5']
+ || f1.DS1FMTID == _ascebc['7']
+ || f1.DS1FMTID == _ascebc['9']) {
+ blk++;
+ data = read_part_sector(state,
+ blk * (blocksize/512), &sect);
+ continue;
+ }
+
+ /* only FMT1 and 8 labels valid at this point */
+ if (f1.DS1FMTID != _ascebc['1'] &&
+ f1.DS1FMTID != _ascebc['8'])
+ break;
+
+ /* OK, we got valid partition data */
+ offset = cchh2blk(&f1.DS1EXT1.llimit, geo);
+ size = cchh2blk(&f1.DS1EXT1.ulimit, geo) -
+ offset + geo->sectors;
+ if (counter >= state->limit)
+ break;
+ put_partition(state, counter + 1,
+ offset * (blocksize >> 9),
+ size * (blocksize >> 9));
+ counter++;
+ blk++;
+ data = read_part_sector(state,
+ blk * (blocksize/512), &sect);
+ }
+
+ if (!data)
+ /* Are we not supposed to report this ? */
+ goto out_readerr;
+ } else
+ printk(KERN_WARNING "Warning, expected Label VOL1 not "
+			       "found, treating as CDL formatted disk");
+
+ }
+
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+ goto out_freeall;
+
+
+out_readerr:
+ res = -1;
+out_freeall:
+ kfree(label);
+out_nolab:
+ kfree(geo);
+out_nogeo:
+ kfree(info);
+out_exit:
+ return res;
+}
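
cchh2blk()/cchhb2blk() above decode the large-volume cylinder addressing, where the upper 12 cylinder bits are carried in the high bits of the hh halfword. A standalone sketch of that decoding (editorial, not part of the diff; the label fields and geometry are made up):

#include <stdio.h>

int main(void)
{
	unsigned int cc = 0x1234, hh = 0x5A03;   /* raw cyl-cyl-head-head     */
	unsigned int heads = 15, sectors = 12;   /* geometry from HDIO_GETGEO */

	unsigned long cyl = ((unsigned long)(hh & 0xFFF0) << 12) | cc;
	unsigned int head = hh & 0x000F;
	unsigned long block = cyl * heads * sectors + head * sectors;

	printf("cylinder %lu, head %u -> block %lu\n", cyl, head, block);
	return 0;
}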
diff --git a/block/partitions/ibm.h b/block/partitions/ibm.h
new file mode 100644
index 00000000..08fb0804
--- /dev/null
+++ b/block/partitions/ibm.h
@@ -0,0 +1 @@
+int ibm_partition(struct parsed_partitions *);
diff --git a/block/partitions/karma.c b/block/partitions/karma.c
new file mode 100644
index 00000000..0ea19312
--- /dev/null
+++ b/block/partitions/karma.c
@@ -0,0 +1,57 @@
+/*
+ * fs/partitions/karma.c
+ * Rio Karma partition info.
+ *
+ * Copyright (C) 2006 Bob Copeland (me@bobcopeland.com)
+ * based on osf.c
+ */
+
+#include "check.h"
+#include "karma.h"
+
+int karma_partition(struct parsed_partitions *state)
+{
+ int i;
+ int slot = 1;
+ Sector sect;
+ unsigned char *data;
+ struct disklabel {
+ u8 d_reserved[270];
+ struct d_partition {
+ __le32 p_res;
+ u8 p_fstype;
+ u8 p_res2[3];
+ __le32 p_offset;
+ __le32 p_size;
+ } d_partitions[2];
+ u8 d_blank[208];
+ __le16 d_magic;
+ } __attribute__((packed)) *label;
+ struct d_partition *p;
+
+ data = read_part_sector(state, 0, &sect);
+ if (!data)
+ return -1;
+
+ label = (struct disklabel *)data;
+ if (le16_to_cpu(label->d_magic) != KARMA_LABEL_MAGIC) {
+ put_dev_sector(sect);
+ return 0;
+ }
+
+ p = label->d_partitions;
+ for (i = 0 ; i < 2; i++, p++) {
+ if (slot == state->limit)
+ break;
+
+ if (p->p_fstype == 0x4d && le32_to_cpu(p->p_size)) {
+ put_partition(state, slot, le32_to_cpu(p->p_offset),
+ le32_to_cpu(p->p_size));
+ }
+ slot++;
+ }
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+ put_dev_sector(sect);
+ return 1;
+}
+
diff --git a/block/partitions/karma.h b/block/partitions/karma.h
new file mode 100644
index 00000000..c764b2e9
--- /dev/null
+++ b/block/partitions/karma.h
@@ -0,0 +1,8 @@
+/*
+ * fs/partitions/karma.h
+ */
+
+#define KARMA_LABEL_MAGIC 0xAB56
+
+int karma_partition(struct parsed_partitions *state);
+
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
new file mode 100644
index 00000000..e507cfbd
--- /dev/null
+++ b/block/partitions/ldm.c
@@ -0,0 +1,1567 @@
+/**
+ * ldm - Support for Windows Logical Disk Manager (Dynamic Disks)
+ *
+ * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
+ * Copyright (c) 2001-2012 Anton Altaparmakov
+ * Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
+ *
+ * Documentation is available at http://www.linux-ntfs.org/doku.php?id=downloads
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program (in the main directory of the source in the file COPYING); if
+ * not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
+ * Boston, MA 02111-1307 USA
+ */
+
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/stringify.h>
+#include <linux/kernel.h>
+#include "ldm.h"
+#include "check.h"
+#include "msdos.h"
+
+/**
+ * ldm_debug/info/error/crit - Output an error message
+ * @f: A printf format string containing the message
+ * @...: Variables to substitute into @f
+ *
+ * ldm_debug() writes a DEBUG level message to the syslog but only if the
+ * driver was compiled with debug enabled. Otherwise, the call turns into a NOP.
+ */
+#ifndef CONFIG_LDM_DEBUG
+#define ldm_debug(...) do {} while (0)
+#else
+#define ldm_debug(f, a...) _ldm_printk (KERN_DEBUG, __func__, f, ##a)
+#endif
+
+#define ldm_crit(f, a...) _ldm_printk (KERN_CRIT, __func__, f, ##a)
+#define ldm_error(f, a...) _ldm_printk (KERN_ERR, __func__, f, ##a)
+#define ldm_info(f, a...) _ldm_printk (KERN_INFO, __func__, f, ##a)
+
+static __printf(3, 4)
+void _ldm_printk(const char *level, const char *function, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start (args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk("%s%s(): %pV\n", level, function, &vaf);
+
+ va_end(args);
+}
+
+/**
+ * ldm_parse_hexbyte - Convert an ASCII hex number to a byte
+ * @src: Pointer to at least 2 characters to convert.
+ *
+ * Convert a two character ASCII hex string to a number.
+ *
+ * Return: 0-255 Success, the byte was parsed correctly
+ * -1 Error, an invalid character was supplied
+ */
+static int ldm_parse_hexbyte (const u8 *src)
+{
+ unsigned int x; /* For correct wrapping */
+ int h;
+
+ /* high part */
+ x = h = hex_to_bin(src[0]);
+ if (h < 0)
+ return -1;
+
+ /* low part */
+ h = hex_to_bin(src[1]);
+ if (h < 0)
+ return -1;
+
+ return (x << 4) + h;
+}
+
+/**
+ * ldm_parse_guid - Convert GUID from ASCII to binary
+ * @src: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @dest: Memory block to hold binary GUID (16 bytes)
+ *
+ * N.B. The GUID need not be NULL terminated.
+ *
+ * Return: 'true' @dest contains binary GUID
+ * 'false' @dest contents are undefined
+ */
+static bool ldm_parse_guid (const u8 *src, u8 *dest)
+{
+ static const int size[] = { 4, 2, 2, 2, 6 };
+ int i, j, v;
+
+ if (src[8] != '-' || src[13] != '-' ||
+ src[18] != '-' || src[23] != '-')
+ return false;
+
+ for (j = 0; j < 5; j++, src++)
+ for (i = 0; i < size[j]; i++, src+=2, *dest++ = v)
+ if ((v = ldm_parse_hexbyte (src)) < 0)
+ return false;
+
+ return true;
+}
+
+/**
+ * ldm_parse_privhead - Read the LDM Database PRIVHEAD structure
+ * @data: Raw database PRIVHEAD structure loaded from the device
+ * @ph: In-memory privhead structure in which to return parsed information
+ *
+ * This parses the LDM database PRIVHEAD structure supplied in @data and
+ * sets up the in-memory privhead structure @ph with the obtained information.
+ *
+ * Return: 'true' @ph contains the PRIVHEAD data
+ * 'false' @ph contents are undefined
+ */
+static bool ldm_parse_privhead(const u8 *data, struct privhead *ph)
+{
+ bool is_vista = false;
+
+ BUG_ON(!data || !ph);
+ if (MAGIC_PRIVHEAD != get_unaligned_be64(data)) {
+ ldm_error("Cannot find PRIVHEAD structure. LDM database is"
+ " corrupt. Aborting.");
+ return false;
+ }
+ ph->ver_major = get_unaligned_be16(data + 0x000C);
+ ph->ver_minor = get_unaligned_be16(data + 0x000E);
+ ph->logical_disk_start = get_unaligned_be64(data + 0x011B);
+ ph->logical_disk_size = get_unaligned_be64(data + 0x0123);
+ ph->config_start = get_unaligned_be64(data + 0x012B);
+ ph->config_size = get_unaligned_be64(data + 0x0133);
+ /* Version 2.11 is Win2k/XP and version 2.12 is Vista. */
+ if (ph->ver_major == 2 && ph->ver_minor == 12)
+ is_vista = true;
+ if (!is_vista && (ph->ver_major != 2 || ph->ver_minor != 11)) {
+ ldm_error("Expected PRIVHEAD version 2.11 or 2.12, got %d.%d."
+ " Aborting.", ph->ver_major, ph->ver_minor);
+ return false;
+ }
+ ldm_debug("PRIVHEAD version %d.%d (Windows %s).", ph->ver_major,
+ ph->ver_minor, is_vista ? "Vista" : "2000/XP");
+ if (ph->config_size != LDM_DB_SIZE) { /* 1 MiB in sectors. */
+ /* Warn the user and continue, carefully. */
+ ldm_info("Database is normally %u bytes, it claims to "
+ "be %llu bytes.", LDM_DB_SIZE,
+ (unsigned long long)ph->config_size);
+ }
+ if ((ph->logical_disk_size == 0) || (ph->logical_disk_start +
+ ph->logical_disk_size > ph->config_start)) {
+ ldm_error("PRIVHEAD disk size doesn't match real disk size");
+ return false;
+ }
+ if (!ldm_parse_guid(data + 0x0030, ph->disk_id)) {
+ ldm_error("PRIVHEAD contains an invalid GUID.");
+ return false;
+ }
+ ldm_debug("Parsed PRIVHEAD successfully.");
+ return true;
+}
+
+/**
+ * ldm_parse_tocblock - Read the LDM Database TOCBLOCK structure
+ * @data: Raw database TOCBLOCK structure loaded from the device
+ * @toc: In-memory toc structure in which to return parsed information
+ *
+ * This parses the LDM Database TOCBLOCK (table of contents) structure supplied
+ * in @data and sets up the in-memory tocblock structure @toc with the obtained
+ * information.
+ *
+ * N.B. The *_start and *_size values returned in @toc are not range-checked.
+ *
+ * Return: 'true' @toc contains the TOCBLOCK data
+ * 'false' @toc contents are undefined
+ */
+static bool ldm_parse_tocblock (const u8 *data, struct tocblock *toc)
+{
+ BUG_ON (!data || !toc);
+
+ if (MAGIC_TOCBLOCK != get_unaligned_be64(data)) {
+ ldm_crit ("Cannot find TOCBLOCK, database may be corrupt.");
+ return false;
+ }
+ strncpy (toc->bitmap1_name, data + 0x24, sizeof (toc->bitmap1_name));
+ toc->bitmap1_name[sizeof (toc->bitmap1_name) - 1] = 0;
+ toc->bitmap1_start = get_unaligned_be64(data + 0x2E);
+ toc->bitmap1_size = get_unaligned_be64(data + 0x36);
+
+ if (strncmp (toc->bitmap1_name, TOC_BITMAP1,
+ sizeof (toc->bitmap1_name)) != 0) {
+ ldm_crit ("TOCBLOCK's first bitmap is '%s', should be '%s'.",
+ TOC_BITMAP1, toc->bitmap1_name);
+ return false;
+ }
+ strncpy (toc->bitmap2_name, data + 0x46, sizeof (toc->bitmap2_name));
+ toc->bitmap2_name[sizeof (toc->bitmap2_name) - 1] = 0;
+ toc->bitmap2_start = get_unaligned_be64(data + 0x50);
+ toc->bitmap2_size = get_unaligned_be64(data + 0x58);
+ if (strncmp (toc->bitmap2_name, TOC_BITMAP2,
+ sizeof (toc->bitmap2_name)) != 0) {
+ ldm_crit ("TOCBLOCK's second bitmap is '%s', should be '%s'.",
+ TOC_BITMAP2, toc->bitmap2_name);
+ return false;
+ }
+ ldm_debug ("Parsed TOCBLOCK successfully.");
+ return true;
+}
+
+/**
+ * ldm_parse_vmdb - Read the LDM Database VMDB structure
+ * @data: Raw database VMDB structure loaded from the device
+ * @vm: In-memory vmdb structure in which to return parsed information
+ *
+ * This parses the LDM Database VMDB structure supplied in @data and sets up
+ * the in-memory vmdb structure @vm with the obtained information.
+ *
+ * N.B. The *_start, *_size and *_seq values will be range-checked later.
+ *
+ * Return: 'true' @vm contains VMDB info
+ * 'false' @vm contents are undefined
+ */
+static bool ldm_parse_vmdb (const u8 *data, struct vmdb *vm)
+{
+ BUG_ON (!data || !vm);
+
+ if (MAGIC_VMDB != get_unaligned_be32(data)) {
+ ldm_crit ("Cannot find the VMDB, database may be corrupt.");
+ return false;
+ }
+
+ vm->ver_major = get_unaligned_be16(data + 0x12);
+ vm->ver_minor = get_unaligned_be16(data + 0x14);
+ if ((vm->ver_major != 4) || (vm->ver_minor != 10)) {
+ ldm_error ("Expected VMDB version %d.%d, got %d.%d. "
+ "Aborting.", 4, 10, vm->ver_major, vm->ver_minor);
+ return false;
+ }
+
+ vm->vblk_size = get_unaligned_be32(data + 0x08);
+ if (vm->vblk_size == 0) {
+ ldm_error ("Illegal VBLK size");
+ return false;
+ }
+
+ vm->vblk_offset = get_unaligned_be32(data + 0x0C);
+ vm->last_vblk_seq = get_unaligned_be32(data + 0x04);
+
+ ldm_debug ("Parsed VMDB successfully.");
+ return true;
+}
+
+/**
+ * ldm_compare_privheads - Compare two privhead objects
+ * @ph1: First privhead
+ * @ph2: Second privhead
+ *
+ * This compares the two privhead structures @ph1 and @ph2.
+ *
+ * Return: 'true' Identical
+ * 'false' Different
+ */
+static bool ldm_compare_privheads (const struct privhead *ph1,
+ const struct privhead *ph2)
+{
+ BUG_ON (!ph1 || !ph2);
+
+ return ((ph1->ver_major == ph2->ver_major) &&
+ (ph1->ver_minor == ph2->ver_minor) &&
+ (ph1->logical_disk_start == ph2->logical_disk_start) &&
+ (ph1->logical_disk_size == ph2->logical_disk_size) &&
+ (ph1->config_start == ph2->config_start) &&
+ (ph1->config_size == ph2->config_size) &&
+ !memcmp (ph1->disk_id, ph2->disk_id, GUID_SIZE));
+}
+
+/**
+ * ldm_compare_tocblocks - Compare two tocblock objects
+ * @toc1: First toc
+ * @toc2: Second toc
+ *
+ * This compares the two tocblock structures @toc1 and @toc2.
+ *
+ * Return: 'true' Identical
+ * 'false' Different
+ */
+static bool ldm_compare_tocblocks (const struct tocblock *toc1,
+ const struct tocblock *toc2)
+{
+ BUG_ON (!toc1 || !toc2);
+
+ return ((toc1->bitmap1_start == toc2->bitmap1_start) &&
+ (toc1->bitmap1_size == toc2->bitmap1_size) &&
+ (toc1->bitmap2_start == toc2->bitmap2_start) &&
+ (toc1->bitmap2_size == toc2->bitmap2_size) &&
+ !strncmp (toc1->bitmap1_name, toc2->bitmap1_name,
+ sizeof (toc1->bitmap1_name)) &&
+ !strncmp (toc1->bitmap2_name, toc2->bitmap2_name,
+ sizeof (toc1->bitmap2_name)));
+}
+
+/**
+ * ldm_validate_privheads - Compare the primary privhead with its backups
+ * @state: Partition check state including device holding the LDM Database
+ * @ph1: Memory struct to fill with ph contents
+ *
+ * Read and compare all three privheads from disk.
+ *
+ * The privheads on disk show the size and location of the main disk area and
+ * the configuration area (the database). The values are range-checked against
+ * the real size of the disk (taken from @state->bdev).
+ *
+ * Return: 'true' Success
+ * 'false' Error
+ */
+static bool ldm_validate_privheads(struct parsed_partitions *state,
+ struct privhead *ph1)
+{
+ static const int off[3] = { OFF_PRIV1, OFF_PRIV2, OFF_PRIV3 };
+ struct privhead *ph[3] = { ph1 };
+ Sector sect;
+ u8 *data;
+ bool result = false;
+ long num_sects;
+ int i;
+
+ BUG_ON (!state || !ph1);
+
+ ph[1] = kmalloc (sizeof (*ph[1]), GFP_KERNEL);
+ ph[2] = kmalloc (sizeof (*ph[2]), GFP_KERNEL);
+ if (!ph[1] || !ph[2]) {
+ ldm_crit ("Out of memory.");
+ goto out;
+ }
+
+ /* off[1 & 2] are relative to ph[0]->config_start */
+ ph[0]->config_start = 0;
+
+ /* Read and parse privheads */
+ for (i = 0; i < 3; i++) {
+ data = read_part_sector(state, ph[0]->config_start + off[i],
+ &sect);
+ if (!data) {
+ ldm_crit ("Disk read failed.");
+ goto out;
+ }
+ result = ldm_parse_privhead (data, ph[i]);
+ put_dev_sector (sect);
+ if (!result) {
+ ldm_error ("Cannot find PRIVHEAD %d.", i+1); /* Log again */
+ if (i < 2)
+ goto out; /* Already logged */
+ else
+ break; /* FIXME ignore for now, 3rd PH can fail on odd-sized disks */
+ }
+ }
+
+ num_sects = state->bdev->bd_inode->i_size >> 9;
+
+ if ((ph[0]->config_start > num_sects) ||
+ ((ph[0]->config_start + ph[0]->config_size) > num_sects)) {
+ ldm_crit ("Database extends beyond the end of the disk.");
+ goto out;
+ }
+
+ if ((ph[0]->logical_disk_start > ph[0]->config_start) ||
+ ((ph[0]->logical_disk_start + ph[0]->logical_disk_size)
+ > ph[0]->config_start)) {
+ ldm_crit ("Disk and database overlap.");
+ goto out;
+ }
+
+ if (!ldm_compare_privheads (ph[0], ph[1])) {
+ ldm_crit ("Primary and backup PRIVHEADs don't match.");
+ goto out;
+ }
+ /* FIXME ignore this for now
+ if (!ldm_compare_privheads (ph[0], ph[2])) {
+ ldm_crit ("Primary and backup PRIVHEADs don't match.");
+ goto out;
+ }*/
+ ldm_debug ("Validated PRIVHEADs successfully.");
+ result = true;
+out:
+ kfree (ph[1]);
+ kfree (ph[2]);
+ return result;
+}
+
+/**
+ * ldm_validate_tocblocks - Validate the table of contents and its backups
+ * @state: Partition check state including device holding the LDM Database
+ * @base: Offset, into @state->bdev, of the database
+ * @ldb: Cache of the database structures
+ *
+ * Find and compare the four tables of contents of the LDM Database stored on
+ * @state->bdev and return the parsed information in @ldb->toc.
+ *
+ * The offsets and sizes of the configs are range-checked against a privhead.
+ *
+ * Return: 'true' @ldb->toc contains validated TOCBLOCK info
+ * 'false' @ldb->toc contents are undefined
+ */
+static bool ldm_validate_tocblocks(struct parsed_partitions *state,
+ unsigned long base, struct ldmdb *ldb)
+{
+ static const int off[4] = { OFF_TOCB1, OFF_TOCB2, OFF_TOCB3, OFF_TOCB4};
+ struct tocblock *tb[4];
+ struct privhead *ph;
+ Sector sect;
+ u8 *data;
+ int i, nr_tbs;
+ bool result = false;
+
+ BUG_ON(!state || !ldb);
+ ph = &ldb->ph;
+ tb[0] = &ldb->toc;
+ tb[1] = kmalloc(sizeof(*tb[1]) * 3, GFP_KERNEL);
+ if (!tb[1]) {
+ ldm_crit("Out of memory.");
+ goto err;
+ }
+ tb[2] = (struct tocblock*)((u8*)tb[1] + sizeof(*tb[1]));
+ tb[3] = (struct tocblock*)((u8*)tb[2] + sizeof(*tb[2]));
+ /*
+ * Try to read and parse all four TOCBLOCKs.
+ *
+ * Windows Vista LDM v2.12 does not always have all four TOCBLOCKs so
+ * skip any that fail as long as we get at least one valid TOCBLOCK.
+ */
+ for (nr_tbs = i = 0; i < 4; i++) {
+ data = read_part_sector(state, base + off[i], &sect);
+ if (!data) {
+ ldm_error("Disk read failed for TOCBLOCK %d.", i);
+ continue;
+ }
+ if (ldm_parse_tocblock(data, tb[nr_tbs]))
+ nr_tbs++;
+ put_dev_sector(sect);
+ }
+ if (!nr_tbs) {
+ ldm_crit("Failed to find a valid TOCBLOCK.");
+ goto err;
+ }
+ /* Range check the TOCBLOCK against a privhead. */
+ if (((tb[0]->bitmap1_start + tb[0]->bitmap1_size) > ph->config_size) ||
+ ((tb[0]->bitmap2_start + tb[0]->bitmap2_size) >
+ ph->config_size)) {
+ ldm_crit("The bitmaps are out of range. Giving up.");
+ goto err;
+ }
+ /* Compare all loaded TOCBLOCKs. */
+ for (i = 1; i < nr_tbs; i++) {
+ if (!ldm_compare_tocblocks(tb[0], tb[i])) {
+ ldm_crit("TOCBLOCKs 0 and %d do not match.", i);
+ goto err;
+ }
+ }
+ ldm_debug("Validated %d TOCBLOCKs successfully.", nr_tbs);
+ result = true;
+err:
+ kfree(tb[1]);
+ return result;
+}
+
+/**
+ * ldm_validate_vmdb - Read the VMDB and validate it
+ * @state: Partition check state including device holding the LDM Database
+ * @base: Offset, into @state->bdev, of the database
+ * @ldb: Cache of the database structures
+ *
+ * Find the vmdb of the LDM Database stored on @state->bdev and return the parsed
+ * information in @ldb.
+ *
+ * Return: 'true' @ldb contains validated VMDB info
+ * 'false' @ldb contents are undefined
+ */
+static bool ldm_validate_vmdb(struct parsed_partitions *state,
+ unsigned long base, struct ldmdb *ldb)
+{
+ Sector sect;
+ u8 *data;
+ bool result = false;
+ struct vmdb *vm;
+ struct tocblock *toc;
+
+ BUG_ON (!state || !ldb);
+
+ vm = &ldb->vm;
+ toc = &ldb->toc;
+
+ data = read_part_sector(state, base + OFF_VMDB, &sect);
+ if (!data) {
+ ldm_crit ("Disk read failed.");
+ return false;
+ }
+
+ if (!ldm_parse_vmdb (data, vm))
+ goto out; /* Already logged */
+
+ /* Are there uncommitted transactions? */
+ if (get_unaligned_be16(data + 0x10) != 0x01) {
+ ldm_crit ("Database is not in a consistent state. Aborting.");
+ goto out;
+ }
+
+ if (vm->vblk_offset != 512)
+ ldm_info ("VBLKs start at offset 0x%04x.", vm->vblk_offset);
+
+ /*
+ * The last_vblk_seq can be before the end of the vmdb, just make sure
+ * it is not out of bounds.
+ */
+ if ((vm->vblk_size * vm->last_vblk_seq) > (toc->bitmap1_size << 9)) {
+ ldm_crit ("VMDB exceeds allowed size specified by TOCBLOCK. "
+ "Database is corrupt. Aborting.");
+ goto out;
+ }
+
+ result = true;
+out:
+ put_dev_sector (sect);
+ return result;
+}
+
+
+/**
+ * ldm_validate_partition_table - Determine whether bdev might be a dynamic disk
+ * @state: Partition check state including device holding the LDM Database
+ *
+ * This function provides a weak test to decide whether the device is a dynamic
+ * disk or not. It looks for an MS-DOS-style partition table containing at
+ * least one partition of type 0x42 (formerly SFS, now used by Windows for
+ * dynamic disks).
+ *
+ * N.B. The only possible error can come from read_part_sector() and that is
+ * only likely to happen if the underlying device is strange. If that IS
+ * the case we should return zero to let someone else try.
+ *
+ * Return: 'true' @state->bdev is a dynamic disk
+ * 'false' @state->bdev is not a dynamic disk, or an error occurred
+ */
+static bool ldm_validate_partition_table(struct parsed_partitions *state)
+{
+ Sector sect;
+ u8 *data;
+ struct partition *p;
+ int i;
+ bool result = false;
+
+ BUG_ON(!state);
+
+ data = read_part_sector(state, 0, &sect);
+ if (!data) {
+ ldm_info ("Disk read failed.");
+ return false;
+ }
+
+ if (*(__le16*) (data + 0x01FE) != cpu_to_le16 (MSDOS_LABEL_MAGIC))
+ goto out;
+
+ p = (struct partition*)(data + 0x01BE);
+ for (i = 0; i < 4; i++, p++)
+ if (SYS_IND (p) == LDM_PARTITION) {
+ result = true;
+ break;
+ }
+
+ if (result)
+ ldm_debug ("Found W2K dynamic disk partition type.");
+
+out:
+ put_dev_sector (sect);
+ return result;
+}
+
+/**
+ * ldm_get_disk_objid - Search a linked list of vblk's for a given Disk Id
+ * @ldb: Cache of the database structures
+ *
+ * The LDM Database contains a list of all partitions on all dynamic disks.
+ * The primary PRIVHEAD, at the beginning of the physical disk, tells us
+ * the GUID of this disk. This function searches for the GUID in a linked
+ * list of vblk's.
+ *
+ * Return: Pointer, A matching vblk was found
+ * NULL, No match, or an error
+ */
+static struct vblk * ldm_get_disk_objid (const struct ldmdb *ldb)
+{
+ struct list_head *item;
+
+ BUG_ON (!ldb);
+
+ list_for_each (item, &ldb->v_disk) {
+ struct vblk *v = list_entry (item, struct vblk, list);
+ if (!memcmp (v->vblk.disk.disk_id, ldb->ph.disk_id, GUID_SIZE))
+ return v;
+ }
+
+ return NULL;
+}
+
+/**
+ * ldm_create_data_partitions - Create data partitions for this device
+ * @pp: List of the partitions parsed so far
+ * @ldb: Cache of the database structures
+ *
+ * The database contains ALL the partitions for ALL disk groups, so we need to
+ * single out this specific disk. Using the disk's object id, we can find all
+ * the partitions in the database that belong to this disk.
+ *
+ * Add each partition in our database, to the parsed_partitions structure.
+ *
+ * N.B. This function creates the partitions in the order it finds partition
+ * objects in the linked list.
+ *
+ * Return: 'true' Partition created
+ * 'false' Error, probably a range checking problem
+ */
+static bool ldm_create_data_partitions (struct parsed_partitions *pp,
+ const struct ldmdb *ldb)
+{
+ struct list_head *item;
+ struct vblk *vb;
+ struct vblk *disk;
+ struct vblk_part *part;
+ int part_num = 1;
+
+ BUG_ON (!pp || !ldb);
+
+ disk = ldm_get_disk_objid (ldb);
+ if (!disk) {
+ ldm_crit ("Can't find the ID of this disk in the database.");
+ return false;
+ }
+
+ strlcat(pp->pp_buf, " [LDM]", PAGE_SIZE);
+
+ /* Create the data partitions */
+ list_for_each (item, &ldb->v_part) {
+ vb = list_entry (item, struct vblk, list);
+ part = &vb->vblk.part;
+
+ if (part->disk_id != disk->obj_id)
+ continue;
+
+ put_partition (pp, part_num, ldb->ph.logical_disk_start +
+ part->start, part->size);
+ part_num++;
+ }
+
+ strlcat(pp->pp_buf, "\n", PAGE_SIZE);
+ return true;
+}
+
+
+/**
+ * ldm_relative - Calculate the next relative offset
+ * @buffer: Block of data being worked on
+ * @buflen: Size of the block of data
+ * @base: Size of the previous fixed width fields
+ * @offset: Cumulative size of the previous variable-width fields
+ *
+ * Because many of the VBLK fields are variable-width, it's necessary
+ * to calculate each offset based on the previous one and the length
+ * of the field it pointed to.
+ *
+ * Return: -1 Error, the calculated offset exceeded the size of the buffer
+ * n OK, a range-checked offset into buffer
+ */
+static int ldm_relative(const u8 *buffer, int buflen, int base, int offset)
+{
+
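+ /*
+ * base + offset indexes the length byte of the current variable-width
+ * field, so the next field begins at offset + buffer[base] + 1,
+ * still relative to the original @base.
+ */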
+ base += offset;
+ if (!buffer || offset < 0 || base > buflen) {
+ if (!buffer)
+ ldm_error("!buffer");
+ if (offset < 0)
+ ldm_error("offset (%d) < 0", offset);
+ if (base > buflen)
+ ldm_error("base (%d) > buflen (%d)", base, buflen);
+ return -1;
+ }
+ if (base + buffer[base] >= buflen) {
+ ldm_error("base (%d) + buffer[base] (%d) >= buflen (%d)", base,
+ buffer[base], buflen);
+ return -1;
+ }
+ return buffer[base] + offset + 1;
+}
+
+/**
+ * ldm_get_vnum - Convert a variable-width, big endian number, into cpu order
+ * @block: Pointer to the variable-width number to convert
+ *
+ * Large numbers in the LDM Database are often stored in a packed format. Each
+ * number is prefixed by a one byte width marker. All numbers in the database
+ * are stored in big-endian byte order. This function reads one of these
+ * numbers and returns the result.
+ *
+ * N.B. This function DOES NOT perform any range checking, though the most
+ * it will read is eight bytes.
+ *
+ * Return: n A number
+ * 0 Zero, or an error occurred
+ */
+static u64 ldm_get_vnum (const u8 *block)
+{
+ u64 tmp = 0;
+ u8 length;
+
+ BUG_ON (!block);
+
+ length = *block++;
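+ /*
+ * Example: the bytes 02 01 F4 decode to 0x01F4 (500): a one-byte
+ * length followed by that many big-endian data bytes.
+ */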
+
+ if (length && length <= 8)
+ while (length--)
+ tmp = (tmp << 8) | *block++;
+ else
+ ldm_error ("Illegal length %d.", length);
+
+ return tmp;
+}
+
+/**
+ * ldm_get_vstr - Read a length-prefixed string into a buffer
+ * @block: Pointer to the length marker
+ * @buffer: Location to copy string to
+ * @buflen: Size of the output buffer
+ *
+ * Many of the strings in the LDM Database are not NULL terminated. Instead
+ * they are prefixed by a one byte length marker. This function copies one of
+ * these strings into a buffer.
+ *
+ * N.B. This function DOES NOT perform any range checking on the input.
+ * If the buffer is too small, the output will be truncated.
+ *
+ * Return: 0, Error and @buffer contents are undefined
+ * n, String length in characters (excluding NULL)
+ * buflen-1, String was truncated.
+ */
+static int ldm_get_vstr (const u8 *block, u8 *buffer, int buflen)
+{
+ int length;
+
+ BUG_ON (!block || !buffer);
+
+ length = block[0];
+ if (length >= buflen) {
+ ldm_error ("Truncating string %d -> %d.", length, buflen);
+ length = buflen - 1;
+ }
+ memcpy (buffer, block + 1, length);
+ buffer[length] = 0;
+ return length;
+}
+
+
+/**
+ * ldm_parse_cmp3 - Read a raw VBLK Component object into a vblk structure
+ * @buffer: Block of data being worked on
+ * @buflen: Size of the block of data
+ * @vb: In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Component object (version 3) into a vblk structure.
+ *
+ * Return: 'true' @vb contains a Component VBLK
+ * 'false' @vb contents are not defined
+ */
+static bool ldm_parse_cmp3 (const u8 *buffer, int buflen, struct vblk *vb)
+{
+ int r_objid, r_name, r_vstate, r_child, r_parent, r_stripe, r_cols, len;
+ struct vblk_comp *comp;
+
+ BUG_ON (!buffer || !vb);
+
+ r_objid = ldm_relative (buffer, buflen, 0x18, 0);
+ r_name = ldm_relative (buffer, buflen, 0x18, r_objid);
+ r_vstate = ldm_relative (buffer, buflen, 0x18, r_name);
+ r_child = ldm_relative (buffer, buflen, 0x1D, r_vstate);
+ r_parent = ldm_relative (buffer, buflen, 0x2D, r_child);
+
+ if (buffer[0x12] & VBLK_FLAG_COMP_STRIPE) {
+ r_stripe = ldm_relative (buffer, buflen, 0x2E, r_parent);
+ r_cols = ldm_relative (buffer, buflen, 0x2E, r_stripe);
+ len = r_cols;
+ } else {
+ r_stripe = 0;
+ r_cols = 0;
+ len = r_parent;
+ }
+ if (len < 0)
+ return false;
+
+ len += VBLK_SIZE_CMP3;
+ if (len != get_unaligned_be32(buffer + 0x14))
+ return false;
+
+ comp = &vb->vblk.comp;
+ ldm_get_vstr (buffer + 0x18 + r_name, comp->state,
+ sizeof (comp->state));
+ comp->type = buffer[0x18 + r_vstate];
+ comp->children = ldm_get_vnum (buffer + 0x1D + r_vstate);
+ comp->parent_id = ldm_get_vnum (buffer + 0x2D + r_child);
+ comp->chunksize = r_stripe ? ldm_get_vnum (buffer+r_parent+0x2E) : 0;
+
+ return true;
+}
+
+/**
+ * ldm_parse_dgr3 - Read a raw VBLK Disk Group object into a vblk structure
+ * @buffer: Block of data being worked on
+ * @buflen: Size of the block of data
+ * @vb: In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Disk Group object (version 3) into a vblk structure.
+ *
+ * Return: 'true' @vb contains a Disk Group VBLK
+ * 'false' @vb contents are not defined
+ */
+static bool ldm_parse_dgr3 (const u8 *buffer, int buflen, struct vblk *vb)
+{
+ int r_objid, r_name, r_diskid, r_id1, r_id2, len;
+ struct vblk_dgrp *dgrp;
+
+ BUG_ON (!buffer || !vb);
+
+ r_objid = ldm_relative (buffer, buflen, 0x18, 0);
+ r_name = ldm_relative (buffer, buflen, 0x18, r_objid);
+ r_diskid = ldm_relative (buffer, buflen, 0x18, r_name);
+
+ if (buffer[0x12] & VBLK_FLAG_DGR3_IDS) {
+ r_id1 = ldm_relative (buffer, buflen, 0x24, r_diskid);
+ r_id2 = ldm_relative (buffer, buflen, 0x24, r_id1);
+ len = r_id2;
+ } else {
+ r_id1 = 0;
+ r_id2 = 0;
+ len = r_diskid;
+ }
+ if (len < 0)
+ return false;
+
+ len += VBLK_SIZE_DGR3;
+ if (len != get_unaligned_be32(buffer + 0x14))
+ return false;
+
+ dgrp = &vb->vblk.dgrp;
+ ldm_get_vstr (buffer + 0x18 + r_name, dgrp->disk_id,
+ sizeof (dgrp->disk_id));
+ return true;
+}
+
+/**
+ * ldm_parse_dgr4 - Read a raw VBLK Disk Group object into a vblk structure
+ * @buffer: Block of data being worked on
+ * @buflen: Size of the block of data
+ * @vb: In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Disk Group object (version 4) into a vblk structure.
+ *
+ * Return: 'true' @vb contains a Disk Group VBLK
+ * 'false' @vb contents are not defined
+ */
+static bool ldm_parse_dgr4 (const u8 *buffer, int buflen, struct vblk *vb)
+{
+ char buf[64];
+ int r_objid, r_name, r_id1, r_id2, len;
+ struct vblk_dgrp *dgrp;
+
+ BUG_ON (!buffer || !vb);
+
+ r_objid = ldm_relative (buffer, buflen, 0x18, 0);
+ r_name = ldm_relative (buffer, buflen, 0x18, r_objid);
+
+ if (buffer[0x12] & VBLK_FLAG_DGR4_IDS) {
+ r_id1 = ldm_relative (buffer, buflen, 0x44, r_name);
+ r_id2 = ldm_relative (buffer, buflen, 0x44, r_id1);
+ len = r_id2;
+ } else {
+ r_id1 = 0;
+ r_id2 = 0;
+ len = r_name;
+ }
+ if (len < 0)
+ return false;
+
+ len += VBLK_SIZE_DGR4;
+ if (len != get_unaligned_be32(buffer + 0x14))
+ return false;
+
+ dgrp = &vb->vblk.dgrp;
+
+ ldm_get_vstr (buffer + 0x18 + r_objid, buf, sizeof (buf));
+ return true;
+}
+
+/**
+ * ldm_parse_dsk3 - Read a raw VBLK Disk object into a vblk structure
+ * @buffer: Block of data being worked on
+ * @buflen: Size of the block of data
+ * @vb: In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Disk object (version 3) into a vblk structure.
+ *
+ * Return: 'true' @vb contains a Disk VBLK
+ * 'false' @vb contents are not defined
+ */
+static bool ldm_parse_dsk3 (const u8 *buffer, int buflen, struct vblk *vb)
+{
+ int r_objid, r_name, r_diskid, r_altname, len;
+ struct vblk_disk *disk;
+
+ BUG_ON (!buffer || !vb);
+
+ r_objid = ldm_relative (buffer, buflen, 0x18, 0);
+ r_name = ldm_relative (buffer, buflen, 0x18, r_objid);
+ r_diskid = ldm_relative (buffer, buflen, 0x18, r_name);
+ r_altname = ldm_relative (buffer, buflen, 0x18, r_diskid);
+ len = r_altname;
+ if (len < 0)
+ return false;
+
+ len += VBLK_SIZE_DSK3;
+ if (len != get_unaligned_be32(buffer + 0x14))
+ return false;
+
+ disk = &vb->vblk.disk;
+ ldm_get_vstr (buffer + 0x18 + r_diskid, disk->alt_name,
+ sizeof (disk->alt_name));
+ if (!ldm_parse_guid (buffer + 0x19 + r_name, disk->disk_id))
+ return false;
+
+ return true;
+}
+
+/**
+ * ldm_parse_dsk4 - Read a raw VBLK Disk object into a vblk structure
+ * @buffer: Block of data being worked on
+ * @buflen: Size of the block of data
+ * @vb: In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Disk object (version 4) into a vblk structure.
+ *
+ * Return: 'true' @vb contains a Disk VBLK
+ * 'false' @vb contents are not defined
+ */
+static bool ldm_parse_dsk4 (const u8 *buffer, int buflen, struct vblk *vb)
+{
+ int r_objid, r_name, len;
+ struct vblk_disk *disk;
+
+ BUG_ON (!buffer || !vb);
+
+ r_objid = ldm_relative (buffer, buflen, 0x18, 0);
+ r_name = ldm_relative (buffer, buflen, 0x18, r_objid);
+ len = r_name;
+ if (len < 0)
+ return false;
+
+ len += VBLK_SIZE_DSK4;
+ if (len != get_unaligned_be32(buffer + 0x14))
+ return false;
+
+ disk = &vb->vblk.disk;
+ memcpy (disk->disk_id, buffer + 0x18 + r_name, GUID_SIZE);
+ return true;
+}
+
+/**
+ * ldm_parse_prt3 - Read a raw VBLK Partition object into a vblk structure
+ * @buffer: Block of data being worked on
+ * @buflen: Size of the block of data
+ * @vb: In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Partition object (version 3) into a vblk structure.
+ *
+ * Return: 'true' @vb contains a Partition VBLK
+ * 'false' @vb contents are not defined
+ */
+static bool ldm_parse_prt3(const u8 *buffer, int buflen, struct vblk *vb)
+{
+ int r_objid, r_name, r_size, r_parent, r_diskid, r_index, len;
+ struct vblk_part *part;
+
+ BUG_ON(!buffer || !vb);
+ r_objid = ldm_relative(buffer, buflen, 0x18, 0);
+ if (r_objid < 0) {
+ ldm_error("r_objid %d < 0", r_objid);
+ return false;
+ }
+ r_name = ldm_relative(buffer, buflen, 0x18, r_objid);
+ if (r_name < 0) {
+ ldm_error("r_name %d < 0", r_name);
+ return false;
+ }
+ r_size = ldm_relative(buffer, buflen, 0x34, r_name);
+ if (r_size < 0) {
+ ldm_error("r_size %d < 0", r_size);
+ return false;
+ }
+ r_parent = ldm_relative(buffer, buflen, 0x34, r_size);
+ if (r_parent < 0) {
+ ldm_error("r_parent %d < 0", r_parent);
+ return false;
+ }
+ r_diskid = ldm_relative(buffer, buflen, 0x34, r_parent);
+ if (r_diskid < 0) {
+ ldm_error("r_diskid %d < 0", r_diskid);
+ return false;
+ }
+ if (buffer[0x12] & VBLK_FLAG_PART_INDEX) {
+ r_index = ldm_relative(buffer, buflen, 0x34, r_diskid);
+ if (r_index < 0) {
+ ldm_error("r_index %d < 0", r_index);
+ return false;
+ }
+ len = r_index;
+ } else {
+ r_index = 0;
+ len = r_diskid;
+ }
+ if (len < 0) {
+ ldm_error("len %d < 0", len);
+ return false;
+ }
+ len += VBLK_SIZE_PRT3;
+ if (len > get_unaligned_be32(buffer + 0x14)) {
+ ldm_error("len %d > BE32(buffer + 0x14) %d", len,
+ get_unaligned_be32(buffer + 0x14));
+ return false;
+ }
+ part = &vb->vblk.part;
+ part->start = get_unaligned_be64(buffer + 0x24 + r_name);
+ part->volume_offset = get_unaligned_be64(buffer + 0x2C + r_name);
+ part->size = ldm_get_vnum(buffer + 0x34 + r_name);
+ part->parent_id = ldm_get_vnum(buffer + 0x34 + r_size);
+ part->disk_id = ldm_get_vnum(buffer + 0x34 + r_parent);
+ if (vb->flags & VBLK_FLAG_PART_INDEX)
+ part->partnum = buffer[0x35 + r_diskid];
+ else
+ part->partnum = 0;
+ return true;
+}
+
+/**
+ * ldm_parse_vol5 - Read a raw VBLK Volume object into a vblk structure
+ * @buffer: Block of data being worked on
+ * @buflen: Size of the block of data
+ * @vb: In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Volume object (version 5) into a vblk structure.
+ *
+ * Return: 'true' @vb contains a Volume VBLK
+ * 'false' @vb contents are not defined
+ */
+static bool ldm_parse_vol5(const u8 *buffer, int buflen, struct vblk *vb)
+{
+ int r_objid, r_name, r_vtype, r_disable_drive_letter, r_child, r_size;
+ int r_id1, r_id2, r_size2, r_drive, len;
+ struct vblk_volu *volu;
+
+ BUG_ON(!buffer || !vb);
+ r_objid = ldm_relative(buffer, buflen, 0x18, 0);
+ if (r_objid < 0) {
+ ldm_error("r_objid %d < 0", r_objid);
+ return false;
+ }
+ r_name = ldm_relative(buffer, buflen, 0x18, r_objid);
+ if (r_name < 0) {
+ ldm_error("r_name %d < 0", r_name);
+ return false;
+ }
+ r_vtype = ldm_relative(buffer, buflen, 0x18, r_name);
+ if (r_vtype < 0) {
+ ldm_error("r_vtype %d < 0", r_vtype);
+ return false;
+ }
+ r_disable_drive_letter = ldm_relative(buffer, buflen, 0x18, r_vtype);
+ if (r_disable_drive_letter < 0) {
+ ldm_error("r_disable_drive_letter %d < 0",
+ r_disable_drive_letter);
+ return false;
+ }
+ r_child = ldm_relative(buffer, buflen, 0x2D, r_disable_drive_letter);
+ if (r_child < 0) {
+ ldm_error("r_child %d < 0", r_child);
+ return false;
+ }
+ r_size = ldm_relative(buffer, buflen, 0x3D, r_child);
+ if (r_size < 0) {
+ ldm_error("r_size %d < 0", r_size);
+ return false;
+ }
+ if (buffer[0x12] & VBLK_FLAG_VOLU_ID1) {
+ r_id1 = ldm_relative(buffer, buflen, 0x52, r_size);
+ if (r_id1 < 0) {
+ ldm_error("r_id1 %d < 0", r_id1);
+ return false;
+ }
+ } else
+ r_id1 = r_size;
+ if (buffer[0x12] & VBLK_FLAG_VOLU_ID2) {
+ r_id2 = ldm_relative(buffer, buflen, 0x52, r_id1);
+ if (r_id2 < 0) {
+ ldm_error("r_id2 %d < 0", r_id2);
+ return false;
+ }
+ } else
+ r_id2 = r_id1;
+ if (buffer[0x12] & VBLK_FLAG_VOLU_SIZE) {
+ r_size2 = ldm_relative(buffer, buflen, 0x52, r_id2);
+ if (r_size2 < 0) {
+ ldm_error("r_size2 %d < 0", r_size2);
+ return false;
+ }
+ } else
+ r_size2 = r_id2;
+ if (buffer[0x12] & VBLK_FLAG_VOLU_DRIVE) {
+ r_drive = ldm_relative(buffer, buflen, 0x52, r_size2);
+ if (r_drive < 0) {
+ ldm_error("r_drive %d < 0", r_drive);
+ return false;
+ }
+ } else
+ r_drive = r_size2;
+ len = r_drive;
+ if (len < 0) {
+ ldm_error("len %d < 0", len);
+ return false;
+ }
+ len += VBLK_SIZE_VOL5;
+ if (len > get_unaligned_be32(buffer + 0x14)) {
+ ldm_error("len %d > BE32(buffer + 0x14) %d", len,
+ get_unaligned_be32(buffer + 0x14));
+ return false;
+ }
+ volu = &vb->vblk.volu;
+ ldm_get_vstr(buffer + 0x18 + r_name, volu->volume_type,
+ sizeof(volu->volume_type));
+ memcpy(volu->volume_state, buffer + 0x18 + r_disable_drive_letter,
+ sizeof(volu->volume_state));
+ volu->size = ldm_get_vnum(buffer + 0x3D + r_child);
+ volu->partition_type = buffer[0x41 + r_size];
+ memcpy(volu->guid, buffer + 0x42 + r_size, sizeof(volu->guid));
+ if (buffer[0x12] & VBLK_FLAG_VOLU_DRIVE) {
+ ldm_get_vstr(buffer + 0x52 + r_size, volu->drive_hint,
+ sizeof(volu->drive_hint));
+ }
+ return true;
+}
+
+/**
+ * ldm_parse_vblk - Read a raw VBLK object into a vblk structure
+ * @buf: Block of data being worked on
+ * @len: Size of the block of data
+ * @vb: In-memory vblk in which to return information
+ *
+ * Read a raw VBLK object into a vblk structure. This function just reads the
+ * information common to all VBLK types, then delegates the rest of the work to
+ * helper functions: ldm_parse_*.
+ *
+ * Return: 'true' @vb contains a VBLK
+ * 'false' @vb contents are not defined
+ */
+static bool ldm_parse_vblk (const u8 *buf, int len, struct vblk *vb)
+{
+ bool result = false;
+ int r_objid;
+
+ BUG_ON (!buf || !vb);
+
+ r_objid = ldm_relative (buf, len, 0x18, 0);
+ if (r_objid < 0) {
+ ldm_error ("VBLK header is corrupt.");
+ return false;
+ }
+
+ vb->flags = buf[0x12];
+ vb->type = buf[0x13];
+ vb->obj_id = ldm_get_vnum (buf + 0x18);
+ ldm_get_vstr (buf+0x18+r_objid, vb->name, sizeof (vb->name));
+
+ switch (vb->type) {
+ case VBLK_CMP3: result = ldm_parse_cmp3 (buf, len, vb); break;
+ case VBLK_DSK3: result = ldm_parse_dsk3 (buf, len, vb); break;
+ case VBLK_DSK4: result = ldm_parse_dsk4 (buf, len, vb); break;
+ case VBLK_DGR3: result = ldm_parse_dgr3 (buf, len, vb); break;
+ case VBLK_DGR4: result = ldm_parse_dgr4 (buf, len, vb); break;
+ case VBLK_PRT3: result = ldm_parse_prt3 (buf, len, vb); break;
+ case VBLK_VOL5: result = ldm_parse_vol5 (buf, len, vb); break;
+ }
+
+ if (result)
+ ldm_debug ("Parsed VBLK 0x%llx (type: 0x%02x) ok.",
+ (unsigned long long) vb->obj_id, vb->type);
+ else
+ ldm_error ("Failed to parse VBLK 0x%llx (type: 0x%02x).",
+ (unsigned long long) vb->obj_id, vb->type);
+
+ return result;
+}
+
+
+/**
+ * ldm_ldmdb_add - Adds a raw VBLK entry to the ldmdb database
+ * @data: Raw VBLK to add to the database
+ * @len: Size of the raw VBLK
+ * @ldb: Cache of the database structures
+ *
+ * The VBLKs are sorted into categories. Partitions are also sorted by offset.
+ *
+ * N.B. This function does not check the validity of the VBLKs.
+ *
+ * Return: 'true' The VBLK was added
+ * 'false' An error occurred
+ */
+static bool ldm_ldmdb_add (u8 *data, int len, struct ldmdb *ldb)
+{
+ struct vblk *vb;
+ struct list_head *item;
+
+ BUG_ON (!data || !ldb);
+
+ vb = kmalloc (sizeof (*vb), GFP_KERNEL);
+ if (!vb) {
+ ldm_crit ("Out of memory.");
+ return false;
+ }
+
+ if (!ldm_parse_vblk (data, len, vb)) {
+ kfree(vb);
+ return false; /* Already logged */
+ }
+
+ /* Put vblk into the correct list. */
+ switch (vb->type) {
+ case VBLK_DGR3:
+ case VBLK_DGR4:
+ list_add (&vb->list, &ldb->v_dgrp);
+ break;
+ case VBLK_DSK3:
+ case VBLK_DSK4:
+ list_add (&vb->list, &ldb->v_disk);
+ break;
+ case VBLK_VOL5:
+ list_add (&vb->list, &ldb->v_volu);
+ break;
+ case VBLK_CMP3:
+ list_add (&vb->list, &ldb->v_comp);
+ break;
+ case VBLK_PRT3:
+ /* Sort by the partition's start sector. */
+ list_for_each (item, &ldb->v_part) {
+ struct vblk *v = list_entry (item, struct vblk, list);
+ if ((v->vblk.part.disk_id == vb->vblk.part.disk_id) &&
+ (v->vblk.part.start > vb->vblk.part.start)) {
+ list_add_tail (&vb->list, &v->list);
+ return true;
+ }
+ }
+ list_add_tail (&vb->list, &ldb->v_part);
+ break;
+ }
+ return true;
+}
+
+/**
+ * ldm_frag_add - Add a VBLK fragment to a list
+ * @data: Raw fragment to be added to the list
+ * @size: Size of the raw fragment
+ * @frags: Linked list of VBLK fragments
+ *
+ * Fragmented VBLKs may not be consecutive in the database, so they are
+ * collected in a list and pieced together later.
+ *
+ * Return: 'true' Success, the VBLK was added to the list
+ * 'false' Error, a problem occurred
+ */
+static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
+{
+ struct frag *f;
+ struct list_head *item;
+ int rec, num, group;
+
+ BUG_ON (!data || !frags);
+
+ if (size < 2 * VBLK_SIZE_HEAD) {
+ ldm_error("Value of size is to small.");
+ return false;
+ }
+
+ group = get_unaligned_be32(data + 0x08);
+ rec = get_unaligned_be16(data + 0x0C);
+ num = get_unaligned_be16(data + 0x0E);
+ if ((num < 1) || (num > 4)) {
+ ldm_error ("A VBLK claims to have %d parts.", num);
+ return false;
+ }
+ if (rec >= num) {
+ ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
+ return false;
+ }
+
+ list_for_each (item, frags) {
+ f = list_entry (item, struct frag, list);
+ if (f->group == group)
+ goto found;
+ }
+
+ f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
+ if (!f) {
+ ldm_crit ("Out of memory.");
+ return false;
+ }
+
+ f->group = group;
+ f->num = num;
+ f->rec = rec;
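+ /*
+ * Pre-set the high bits so that map reads 0xFF once all num
+ * fragments have arrived (checked later in ldm_frag_commit).
+ */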
+ f->map = 0xFF << num;
+
+ list_add_tail (&f->list, frags);
+found:
+ if (rec >= f->num) {
+ ldm_error("REC value (%d) exceeds NUM value (%d)", rec, f->num);
+ return false;
+ }
+ if (f->map & (1 << rec)) {
+ ldm_error ("Duplicate VBLK, part %d.", rec);
+ f->map &= 0x7F; /* Mark the group as broken */
+ return false;
+ }
+ f->map |= (1 << rec);
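+ /*
+ * Fragment 0 also supplies the common VBLK header; each fragment's
+ * payload (header stripped) is stored at its record slot.
+ */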
+ if (!rec)
+ memcpy(f->data, data, VBLK_SIZE_HEAD);
+ data += VBLK_SIZE_HEAD;
+ size -= VBLK_SIZE_HEAD;
+ memcpy(f->data + VBLK_SIZE_HEAD + rec * size, data, size);
+ return true;
+}
+
+/**
+ * ldm_frag_free - Free a linked list of VBLK fragments
+ * @list: Linked list of fragments
+ *
+ * Free a linked list of VBLK fragments
+ *
+ * Return: none
+ */
+static void ldm_frag_free (struct list_head *list)
+{
+ struct list_head *item, *tmp;
+
+ BUG_ON (!list);
+
+ list_for_each_safe (item, tmp, list)
+ kfree (list_entry (item, struct frag, list));
+}
+
+/**
+ * ldm_frag_commit - Validate fragmented VBLKs and add them to the database
+ * @frags: Linked list of VBLK fragments
+ * @ldb: Cache of the database structures
+ *
+ * Now that all the fragmented VBLKs have been collected, they must be added to
+ * the database for later use.
+ *
+ * Return: 'true' All the fragments were added successfully
+ * 'false' One or more of the fragments were invalid
+ */
+static bool ldm_frag_commit (struct list_head *frags, struct ldmdb *ldb)
+{
+ struct frag *f;
+ struct list_head *item;
+
+ BUG_ON (!frags || !ldb);
+
+ list_for_each (item, frags) {
+ f = list_entry (item, struct frag, list);
+
+ if (f->map != 0xFF) {
+ ldm_error ("VBLK group %d is incomplete (0x%02x).",
+ f->group, f->map);
+ return false;
+ }
+
+ if (!ldm_ldmdb_add (f->data, f->num*ldb->vm.vblk_size, ldb))
+ return false; /* Already logged */
+ }
+ return true;
+}
+
+/**
+ * ldm_get_vblks - Read the on-disk database of VBLKs into memory
+ * @state: Partition check state including device holding the LDM Database
+ * @base: Offset, into @state->bdev, of the database
+ * @ldb: Cache of the database structures
+ *
+ * To use the information from the VBLKs, they need to be read from the disk,
+ * unpacked and validated. We cache them in @ldb according to their type.
+ *
+ * Return: 'true' All the VBLKs were read successfully
+ * 'false' An error occurred
+ */
+static bool ldm_get_vblks(struct parsed_partitions *state, unsigned long base,
+ struct ldmdb *ldb)
+{
+ int size, perbuf, skip, finish, s, v, recs;
+ u8 *data = NULL;
+ Sector sect;
+ bool result = false;
+ LIST_HEAD (frags);
+
+ BUG_ON(!state || !ldb);
+
+ size = ldb->vm.vblk_size;
+ perbuf = 512 / size;
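+ /*
+ * perbuf is the number of VBLKs per 512-byte sector; vblk_size is
+ * commonly 128 bytes, giving four records per sector.
+ */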
+ skip = ldb->vm.vblk_offset >> 9; /* Bytes to sectors */
+ finish = (size * ldb->vm.last_vblk_seq) >> 9;
+
+ for (s = skip; s < finish; s++) { /* For each sector */
+ data = read_part_sector(state, base + OFF_VMDB + s, &sect);
+ if (!data) {
+ ldm_crit ("Disk read failed.");
+ goto out;
+ }
+
+ for (v = 0; v < perbuf; v++, data+=size) { /* For each vblk */
+ if (MAGIC_VBLK != get_unaligned_be32(data)) {
+ ldm_error ("Expected to find a VBLK.");
+ goto out;
+ }
+
+ recs = get_unaligned_be16(data + 0x0E); /* Number of records */
+ if (recs == 1) {
+ if (!ldm_ldmdb_add (data, size, ldb))
+ goto out; /* Already logged */
+ } else if (recs > 1) {
+ if (!ldm_frag_add (data, size, &frags))
+ goto out; /* Already logged */
+ }
+ /* else Record is not in use, ignore it. */
+ }
+ put_dev_sector (sect);
+ data = NULL;
+ }
+
+ result = ldm_frag_commit (&frags, ldb); /* Failures, already logged */
+out:
+ if (data)
+ put_dev_sector (sect);
+ ldm_frag_free (&frags);
+
+ return result;
+}
+
+/**
+ * ldm_free_vblks - Free a linked list of vblk's
+ * @lh: Head of a linked list of struct vblk
+ *
+ * Free a list of vblk's and free the memory used to maintain the list.
+ *
+ * Return: none
+ */
+static void ldm_free_vblks (struct list_head *lh)
+{
+ struct list_head *item, *tmp;
+
+ BUG_ON (!lh);
+
+ list_for_each_safe (item, tmp, lh)
+ kfree (list_entry (item, struct vblk, list));
+}
+
+
+/**
+ * ldm_partition - Find out whether a device is a dynamic disk and handle it
+ * @state: Partition check state including device holding the LDM Database
+ *
+ * This determines whether the device @state->bdev is a dynamic disk and, if so,
+ * creates the necessary partitions in the parsed_partitions structure @state.
+ *
+ * We create a dummy device 1, which contains the LDM database, and then create
+ * each partition described by the LDM database in sequence as devices 2+. For
+ * example, if the device is hda, we would have: hda1: LDM database, hda2, hda3,
+ * and so on: the actual data-containing partitions.
+ *
+ * Return: 1 Success, @state->bdev is a dynamic disk and we handled it
+ * 0 Success, @state->bdev is not a dynamic disk
+ * -1 An error occurred before enough information had been read
+ * Or @state->bdev is a dynamic disk, but it may be corrupted
+ */
+int ldm_partition(struct parsed_partitions *state)
+{
+ struct ldmdb *ldb;
+ unsigned long base;
+ int result = -1;
+
+ BUG_ON(!state);
+
+ /* Look for signs of a Dynamic Disk */
+ if (!ldm_validate_partition_table(state))
+ return 0;
+
+ ldb = kmalloc (sizeof (*ldb), GFP_KERNEL);
+ if (!ldb) {
+ ldm_crit ("Out of memory.");
+ goto out;
+ }
+
+ /* Parse and check privheads. */
+ if (!ldm_validate_privheads(state, &ldb->ph))
+ goto out; /* Already logged */
+
+ /* All further references are relative to base (database start). */
+ base = ldb->ph.config_start;
+
+ /* Parse and check tocs and vmdb. */
+ if (!ldm_validate_tocblocks(state, base, ldb) ||
+ !ldm_validate_vmdb(state, base, ldb))
+ goto out; /* Already logged */
+
+ /* Initialize vblk lists in ldmdb struct */
+ INIT_LIST_HEAD (&ldb->v_dgrp);
+ INIT_LIST_HEAD (&ldb->v_disk);
+ INIT_LIST_HEAD (&ldb->v_volu);
+ INIT_LIST_HEAD (&ldb->v_comp);
+ INIT_LIST_HEAD (&ldb->v_part);
+
+ if (!ldm_get_vblks(state, base, ldb)) {
+ ldm_crit ("Failed to read the VBLKs from the database.");
+ goto cleanup;
+ }
+
+ /* Finally, create the data partition devices. */
+ if (ldm_create_data_partitions(state, ldb)) {
+ ldm_debug ("Parsed LDM database successfully.");
+ result = 1;
+ }
+ /* else Already logged */
+
+cleanup:
+ ldm_free_vblks (&ldb->v_dgrp);
+ ldm_free_vblks (&ldb->v_disk);
+ ldm_free_vblks (&ldb->v_volu);
+ ldm_free_vblks (&ldb->v_comp);
+ ldm_free_vblks (&ldb->v_part);
+out:
+ kfree (ldb);
+ return result;
+}
diff --git a/block/partitions/ldm.h b/block/partitions/ldm.h
new file mode 100644
index 00000000..374242c0
--- /dev/null
+++ b/block/partitions/ldm.h
@@ -0,0 +1,215 @@
+/**
+ * ldm - Part of the Linux-NTFS project.
+ *
+ * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
+ * Copyright (c) 2001-2007 Anton Altaparmakov
+ * Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
+ *
+ * Documentation is available at http://www.linux-ntfs.org/doku.php?id=downloads
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS source
+ * in the file COPYING); if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _FS_PT_LDM_H_
+#define _FS_PT_LDM_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/genhd.h>
+#include <linux/fs.h>
+#include <asm/unaligned.h>
+#include <asm/byteorder.h>
+
+struct parsed_partitions;
+
+/* Magic numbers in CPU format. */
+#define MAGIC_VMDB 0x564D4442 /* VMDB */
+#define MAGIC_VBLK 0x56424C4B /* VBLK */
+#define MAGIC_PRIVHEAD 0x5052495648454144ULL /* PRIVHEAD */
+#define MAGIC_TOCBLOCK 0x544F43424C4F434BULL /* TOCBLOCK */
+
+/* The defined vblk types. */
+#define VBLK_VOL5 0x51 /* Volume, version 5 */
+#define VBLK_CMP3 0x32 /* Component, version 3 */
+#define VBLK_PRT3 0x33 /* Partition, version 3 */
+#define VBLK_DSK3 0x34 /* Disk, version 3 */
+#define VBLK_DSK4 0x44 /* Disk, version 4 */
+#define VBLK_DGR3 0x35 /* Disk Group, version 3 */
+#define VBLK_DGR4 0x45 /* Disk Group, version 4 */
+
+/* vblk flags indicating extra information will be present */
+#define VBLK_FLAG_COMP_STRIPE 0x10
+#define VBLK_FLAG_PART_INDEX 0x08
+#define VBLK_FLAG_DGR3_IDS 0x08
+#define VBLK_FLAG_DGR4_IDS 0x08
+#define VBLK_FLAG_VOLU_ID1 0x08
+#define VBLK_FLAG_VOLU_ID2 0x20
+#define VBLK_FLAG_VOLU_SIZE 0x80
+#define VBLK_FLAG_VOLU_DRIVE 0x02
+
+/* size of a vblk's static parts */
+#define VBLK_SIZE_HEAD 16
+#define VBLK_SIZE_CMP3 22 /* Name and version */
+#define VBLK_SIZE_DGR3 12
+#define VBLK_SIZE_DGR4 44
+#define VBLK_SIZE_DSK3 12
+#define VBLK_SIZE_DSK4 45
+#define VBLK_SIZE_PRT3 28
+#define VBLK_SIZE_VOL5 58
+
+/* component types */
+#define COMP_STRIPE 0x01 /* Stripe-set */
+#define COMP_BASIC 0x02 /* Basic disk */
+#define COMP_RAID 0x03 /* Raid-set */
+
+/* Other constants. */
+#define LDM_DB_SIZE 2048 /* Size in sectors (= 1MiB). */
+
+#define OFF_PRIV1 6 /* Offset of the first privhead
+ relative to the start of the
+ device in sectors */
+
+/* Offsets to structures within the LDM Database in sectors. */
+#define OFF_PRIV2 1856 /* Backup private headers. */
+#define OFF_PRIV3 2047
+
+#define OFF_TOCB1 1 /* Tables of contents. */
+#define OFF_TOCB2 2
+#define OFF_TOCB3 2045
+#define OFF_TOCB4 2046
+
+#define OFF_VMDB 17 /* List of partitions. */
+
+#define LDM_PARTITION 0x42 /* Formerly SFS (Landis). */
+
+#define TOC_BITMAP1 "config" /* Names of the two defined */
+#define TOC_BITMAP2 "log" /* bitmaps in the TOCBLOCK. */
+
+/* Borrowed from msdos.c */
+#define SYS_IND(p) (get_unaligned(&(p)->sys_ind))
+
+struct frag { /* VBLK Fragment handling */
+ struct list_head list;
+ u32 group;
+ u8 num; /* Total number of records */
+ u8 rec; /* This is record number n */
+ u8 map; /* Which portions are in use */
+ u8 data[0];
+};
+
+/* In memory LDM database structures. */
+
+#define GUID_SIZE 16
+
+struct privhead { /* Offsets and sizes are in sectors. */
+ u16 ver_major;
+ u16 ver_minor;
+ u64 logical_disk_start;
+ u64 logical_disk_size;
+ u64 config_start;
+ u64 config_size;
+ u8 disk_id[GUID_SIZE];
+};
+
+struct tocblock { /* We have exactly two bitmaps. */
+ u8 bitmap1_name[16];
+ u64 bitmap1_start;
+ u64 bitmap1_size;
+ u8 bitmap2_name[16];
+ u64 bitmap2_start;
+ u64 bitmap2_size;
+};
+
+struct vmdb { /* VMDB: The database header */
+ u16 ver_major;
+ u16 ver_minor;
+ u32 vblk_size;
+ u32 vblk_offset;
+ u32 last_vblk_seq;
+};
+
+struct vblk_comp { /* VBLK Component */
+ u8 state[16];
+ u64 parent_id;
+ u8 type;
+ u8 children;
+ u16 chunksize;
+};
+
+struct vblk_dgrp { /* VBLK Disk Group */
+ u8 disk_id[64];
+};
+
+struct vblk_disk { /* VBLK Disk */
+ u8 disk_id[GUID_SIZE];
+ u8 alt_name[128];
+};
+
+struct vblk_part { /* VBLK Partition */
+ u64 start;
+ u64 size; /* start, size and vol_off in sectors */
+ u64 volume_offset;
+ u64 parent_id;
+ u64 disk_id;
+ u8 partnum;
+};
+
+struct vblk_volu { /* VBLK Volume */
+ u8 volume_type[16];
+ u8 volume_state[16];
+ u8 guid[16];
+ u8 drive_hint[4];
+ u64 size;
+ u8 partition_type;
+};
+
+struct vblk_head { /* VBLK standard header */
+ u32 group;
+ u16 rec;
+ u16 nrec;
+};
+
+struct vblk { /* Generalised VBLK */
+ u8 name[64];
+ u64 obj_id;
+ u32 sequence;
+ u8 flags;
+ u8 type;
+ union {
+ struct vblk_comp comp;
+ struct vblk_dgrp dgrp;
+ struct vblk_disk disk;
+ struct vblk_part part;
+ struct vblk_volu volu;
+ } vblk;
+ struct list_head list;
+};
+
+struct ldmdb { /* Cache of the database */
+ struct privhead ph;
+ struct tocblock toc;
+ struct vmdb vm;
+ struct list_head v_dgrp;
+ struct list_head v_disk;
+ struct list_head v_volu;
+ struct list_head v_comp;
+ struct list_head v_part;
+};
+
+int ldm_partition(struct parsed_partitions *state);
+
+#endif /* _FS_PT_LDM_H_ */
+
diff --git a/block/partitions/mac.c b/block/partitions/mac.c
new file mode 100644
index 00000000..11f688bd
--- /dev/null
+++ b/block/partitions/mac.c
@@ -0,0 +1,134 @@
+/*
+ * fs/partitions/mac.c
+ *
+ * Code extracted from drivers/block/genhd.c
+ * Copyright (C) 1991-1998 Linus Torvalds
+ * Re-organised Feb 1998 Russell King
+ */
+
+#include <linux/ctype.h>
+#include "check.h"
+#include "mac.h"
+
+#ifdef CONFIG_PPC_PMAC
+#include <asm/machdep.h>
+extern void note_bootable_part(dev_t dev, int part, int goodness);
+#endif
+
+/*
+ * Code to understand MacOS partition tables.
+ */
+
+static inline void mac_fix_string(char *stg, int len)
+{
+ int i;
+
+ for (i = len - 1; i >= 0 && stg[i] == ' '; i--)
+ stg[i] = 0;
+}
+
+int mac_partition(struct parsed_partitions *state)
+{
+ Sector sect;
+ unsigned char *data;
+ int slot, blocks_in_map;
+ unsigned secsize;
+#ifdef CONFIG_PPC_PMAC
+ int found_root = 0;
+ int found_root_goodness = 0;
+#endif
+ struct mac_partition *part;
+ struct mac_driver_desc *md;
+
+ /* Get 0th block and look at the first partition map entry. */
+ md = read_part_sector(state, 0, &sect);
+ if (!md)
+ return -1;
+ if (be16_to_cpu(md->signature) != MAC_DRIVER_MAGIC) {
+ put_dev_sector(sect);
+ return 0;
+ }
+ secsize = be16_to_cpu(md->block_size);
+ put_dev_sector(sect);
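+ /*
+ * The Apple partition map starts at physical block 1; convert the
+ * native block size to 512-byte sector units for the read.
+ */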
+ data = read_part_sector(state, secsize/512, &sect);
+ if (!data)
+ return -1;
+ part = (struct mac_partition *) (data + secsize%512);
+ if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
+ put_dev_sector(sect);
+ return 0; /* not a MacOS disk */
+ }
+ blocks_in_map = be32_to_cpu(part->map_count);
+ if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
+ put_dev_sector(sect);
+ return 0;
+ }
+ strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
+ for (slot = 1; slot <= blocks_in_map; ++slot) {
+ int pos = slot * secsize;
+ put_dev_sector(sect);
+ data = read_part_sector(state, pos/512, &sect);
+ if (!data)
+ return -1;
+ part = (struct mac_partition *) (data + pos%512);
+ if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC)
+ break;
+ put_partition(state, slot,
+ be32_to_cpu(part->start_block) * (secsize/512),
+ be32_to_cpu(part->block_count) * (secsize/512));
+
+ if (!strnicmp(part->type, "Linux_RAID", 10))
+ state->parts[slot].flags = ADDPART_FLAG_RAID;
+#ifdef CONFIG_PPC_PMAC
+ /*
+ * If this is the first bootable partition, tell the
+ * setup code, in case it wants to make this the root.
+ */
+ if (machine_is(powermac)) {
+ int goodness = 0;
+
+ mac_fix_string(part->processor, 16);
+ mac_fix_string(part->name, 32);
+ mac_fix_string(part->type, 32);
+
+ if ((be32_to_cpu(part->status) & MAC_STATUS_BOOTABLE)
+ && strcasecmp(part->processor, "powerpc") == 0)
+ goodness++;
+
+ if (strcasecmp(part->type, "Apple_UNIX_SVR2") == 0
+ || (strnicmp(part->type, "Linux", 5) == 0
+ && strcasecmp(part->type, "Linux_swap") != 0)) {
+ int i, l;
+
+ goodness++;
+ l = strlen(part->name);
+ if (strcmp(part->name, "/") == 0)
+ goodness++;
+ for (i = 0; i <= l - 4; ++i) {
+ if (strnicmp(part->name + i, "root",
+ 4) == 0) {
+ goodness += 2;
+ break;
+ }
+ }
+ if (strnicmp(part->name, "swap", 4) == 0)
+ goodness--;
+ }
+
+ if (goodness > found_root_goodness) {
+ found_root = slot;
+ found_root_goodness = goodness;
+ }
+ }
+#endif /* CONFIG_PPC_PMAC */
+ }
+#ifdef CONFIG_PPC_PMAC
+ if (found_root_goodness)
+ note_bootable_part(state->bdev->bd_dev, found_root,
+ found_root_goodness);
+#endif
+
+ put_dev_sector(sect);
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+ return 1;
+}
diff --git a/block/partitions/mac.h b/block/partitions/mac.h
new file mode 100644
index 00000000..3c7d9843
--- /dev/null
+++ b/block/partitions/mac.h
@@ -0,0 +1,44 @@
+/*
+ * fs/partitions/mac.h
+ */
+
+#define MAC_PARTITION_MAGIC 0x504d
+
+/* type field value for A/UX or other Unix partitions */
+#define APPLE_AUX_TYPE "Apple_UNIX_SVR2"
+
+struct mac_partition {
+ __be16 signature; /* expected to be MAC_PARTITION_MAGIC */
+ __be16 res1;
+ __be32 map_count; /* # blocks in partition map */
+ __be32 start_block; /* absolute starting block # of partition */
+ __be32 block_count; /* number of blocks in partition */
+ char name[32]; /* partition name */
+ char type[32]; /* string type description */
+ __be32 data_start; /* rel block # of first data block */
+ __be32 data_count; /* number of data blocks */
+ __be32 status; /* partition status bits */
+ __be32 boot_start;
+ __be32 boot_size;
+ __be32 boot_load;
+ __be32 boot_load2;
+ __be32 boot_entry;
+ __be32 boot_entry2;
+ __be32 boot_cksum;
+ char processor[16]; /* identifies ISA of boot */
+ /* there is more stuff after this that we don't need */
+};
+
+#define MAC_STATUS_BOOTABLE 8 /* partition is bootable */
+
+#define MAC_DRIVER_MAGIC 0x4552
+
+/* Driver descriptor structure, in block 0 */
+struct mac_driver_desc {
+ __be16 signature; /* expected to be MAC_DRIVER_MAGIC */
+ __be16 block_size;
+ __be32 block_count;
+ /* ... more stuff */
+};
+
+int mac_partition(struct parsed_partitions *state);
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
new file mode 100644
index 00000000..5f79a667
--- /dev/null
+++ b/block/partitions/msdos.c
@@ -0,0 +1,552 @@
+/*
+ * fs/partitions/msdos.c
+ *
+ * Code extracted from drivers/block/genhd.c
+ * Copyright (C) 1991-1998 Linus Torvalds
+ *
+ * Thanks to Branko Lankester, lankeste@fwi.uva.nl, who found a bug
+ * in the early extended-partition checks and added DM partitions
+ *
+ * Support for DiskManager v6.0x added by Mark Lord,
+ * with information provided by OnTrack. This now works for linux fdisk
+ * and LILO, as well as loadlin and bootln. Note that disks other than
+ * /dev/hda *must* have a "DOS" type 0x51 partition in the first slot (hda1).
+ *
+ * More flexible handling of extended partitions - aeb, 950831
+ *
+ * Check partition table on IDE disks for common CHS translations
+ *
+ * Re-organised Feb 1998 Russell King
+ */
+#include <linux/msdos_fs.h>
+
+#include "check.h"
+#include "msdos.h"
+#include "efi.h"
+
+/*
+ * Many architectures don't like unaligned accesses, while
+ * the nr_sects and start_sect partition table entries are
+ * at a 2 (mod 4) address.
+ */
+#include <asm/unaligned.h>
+
+#define SYS_IND(p) get_unaligned(&p->sys_ind)
+
+static inline sector_t nr_sects(struct partition *p)
+{
+ return (sector_t)get_unaligned_le32(&p->nr_sects);
+}
+
+static inline sector_t start_sect(struct partition *p)
+{
+ return (sector_t)get_unaligned_le32(&p->start_sect);
+}
+
+static inline int is_extended_partition(struct partition *p)
+{
+ return (SYS_IND(p) == DOS_EXTENDED_PARTITION ||
+ SYS_IND(p) == WIN98_EXTENDED_PARTITION ||
+ SYS_IND(p) == LINUX_EXTENDED_PARTITION);
+}
+
+#define MSDOS_LABEL_MAGIC1 0x55
+#define MSDOS_LABEL_MAGIC2 0xAA
+
+static inline int
+msdos_magic_present(unsigned char *p)
+{
+ return (p[0] == MSDOS_LABEL_MAGIC1 && p[1] == MSDOS_LABEL_MAGIC2);
+}
+
+/* Value is EBCDIC 'IBMA' */
+#define AIX_LABEL_MAGIC1 0xC9
+#define AIX_LABEL_MAGIC2 0xC2
+#define AIX_LABEL_MAGIC3 0xD4
+#define AIX_LABEL_MAGIC4 0xC1
+static int aix_magic_present(struct parsed_partitions *state, unsigned char *p)
+{
+ struct partition *pt = (struct partition *) (p + 0x1be);
+ Sector sect;
+ unsigned char *d;
+ int slot, ret = 0;
+
+ if (!(p[0] == AIX_LABEL_MAGIC1 &&
+ p[1] == AIX_LABEL_MAGIC2 &&
+ p[2] == AIX_LABEL_MAGIC3 &&
+ p[3] == AIX_LABEL_MAGIC4))
+ return 0;
+ /* Assume the partition table is valid if Linux partitions exist */
+ for (slot = 1; slot <= 4; slot++, pt++) {
+ if (pt->sys_ind == LINUX_SWAP_PARTITION ||
+ pt->sys_ind == LINUX_RAID_PARTITION ||
+ pt->sys_ind == LINUX_DATA_PARTITION ||
+ pt->sys_ind == LINUX_LVM_PARTITION ||
+ is_extended_partition(pt))
+ return 0;
+ }
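+ /* Sector 7 of an AIX physical volume carries the "_LVM" signature. */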
+ d = read_part_sector(state, 7, &sect);
+ if (d) {
+ if (d[0] == '_' && d[1] == 'L' && d[2] == 'V' && d[3] == 'M')
+ ret = 1;
+ put_dev_sector(sect);
+ }
+ return ret;
+}
+
+/*
+ * Create devices for each logical partition in an extended partition.
+ * The logical partitions form a linked list, with each entry being
+ * a partition table with two entries. The first entry
+ * is the real data partition (with a start relative to the partition
+ * table start). The second is a pointer to the next logical partition
+ * (with a start relative to the entire extended partition).
+ * We do not create a Linux partition for the partition tables, but
+ * only for the actual data partitions.
+ */
+
+static void parse_extended(struct parsed_partitions *state,
+ sector_t first_sector, sector_t first_size)
+{
+ struct partition *p;
+ Sector sect;
+ unsigned char *data;
+ sector_t this_sector, this_size;
+ sector_t sector_size = bdev_logical_block_size(state->bdev) / 512;
+ int loopct = 0; /* number of links followed
+ without finding a data partition */
+ int i;
+
+ this_sector = first_sector;
+ this_size = first_size;
+
+ while (1) {
+ if (++loopct > 100)
+ return;
+ if (state->next == state->limit)
+ return;
+ data = read_part_sector(state, this_sector, &sect);
+ if (!data)
+ return;
+
+ if (!msdos_magic_present(data + 510))
+ goto done;
+
+ p = (struct partition *) (data + 0x1be);
+
+ /*
+ * Usually, the first entry is the real data partition,
+ * the 2nd entry is the next extended partition, or empty,
+ * and the 3rd and 4th entries are unused.
+ * However, DRDOS sometimes has the extended partition as
+ * the first entry (when the data partition is empty),
+ * and OS/2 seems to use all four entries.
+ */
+
+ /*
+ * First process the data partition(s)
+ */
+ for (i=0; i<4; i++, p++) {
+ sector_t offs, size, next;
+ if (!nr_sects(p) || is_extended_partition(p))
+ continue;
+
+ /* Check the 3rd and 4th entries -
+ these sometimes contain random garbage */
+ offs = start_sect(p)*sector_size;
+ size = nr_sects(p)*sector_size;
+ next = this_sector + offs;
+ if (i >= 2) {
+ if (offs + size > this_size)
+ continue;
+ if (next < first_sector)
+ continue;
+ if (next + size > first_sector + first_size)
+ continue;
+ }
+
+ put_partition(state, state->next, next, size);
+ if (SYS_IND(p) == LINUX_RAID_PARTITION)
+ state->parts[state->next].flags = ADDPART_FLAG_RAID;
+ loopct = 0;
+ if (++state->next == state->limit)
+ goto done;
+ }
+ /*
+ * Next, process the (first) extended partition, if present.
+ * (So far, there seems to be no reason to make
+ * parse_extended() recursive and allow a tree
+ * of extended partitions.)
+ * It should be a link to the next logical partition.
+ */
+ p -= 4;
+ for (i=0; i<4; i++, p++)
+ if (nr_sects(p) && is_extended_partition(p))
+ break;
+ if (i == 4)
+ goto done; /* nothing left to do */
+
+ this_sector = first_sector + start_sect(p) * sector_size;
+ this_size = nr_sects(p) * sector_size;
+ put_dev_sector(sect);
+ }
+done:
+ put_dev_sector(sect);
+}
+
+/* james@bpgc.com: Solaris has a nasty indicator: 0x82 which also
+ indicates linux swap. Be careful before believing this is Solaris. */
+
+static void parse_solaris_x86(struct parsed_partitions *state,
+ sector_t offset, sector_t size, int origin)
+{
+#ifdef CONFIG_SOLARIS_X86_PARTITION
+ Sector sect;
+ struct solaris_x86_vtoc *v;
+ int i;
+ short max_nparts;
+
+ v = read_part_sector(state, offset + 1, &sect);
+ if (!v)
+ return;
+ if (le32_to_cpu(v->v_sanity) != SOLARIS_X86_VTOC_SANE) {
+ put_dev_sector(sect);
+ return;
+ }
+ {
+ char tmp[1 + BDEVNAME_SIZE + 10 + 11 + 1];
+
+ snprintf(tmp, sizeof(tmp), " %s%d: <solaris:", state->name, origin);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ }
+ if (le32_to_cpu(v->v_version) != 1) {
+ char tmp[64];
+
+ snprintf(tmp, sizeof(tmp), " cannot handle version %d vtoc>\n",
+ le32_to_cpu(v->v_version));
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ put_dev_sector(sect);
+ return;
+ }
+ /* Ensure we can handle previous case of VTOC with 8 entries gracefully */
+ max_nparts = le16_to_cpu (v->v_nparts) > 8 ? SOLARIS_X86_NUMSLICE : 8;
+ for (i=0; i<max_nparts && state->next<state->limit; i++) {
+ struct solaris_x86_slice *s = &v->v_slice[i];
+ char tmp[3 + 10 + 1 + 1];
+
+ if (s->s_size == 0)
+ continue;
+ snprintf(tmp, sizeof(tmp), " [s%d]", i);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ /* solaris partitions are relative to current MS-DOS
+ * one; must add the offset of the current partition */
+ put_partition(state, state->next++,
+ le32_to_cpu(s->s_start)+offset,
+ le32_to_cpu(s->s_size));
+ }
+ put_dev_sector(sect);
+ strlcat(state->pp_buf, " >\n", PAGE_SIZE);
+#endif
+}
+
+#if defined(CONFIG_BSD_DISKLABEL)
+/*
+ * Create devices for BSD partitions listed in a disklabel, under a
+ * dos-like partition. See parse_extended() for more information.
+ */
+static void parse_bsd(struct parsed_partitions *state,
+ sector_t offset, sector_t size, int origin, char *flavour,
+ int max_partitions)
+{
+ Sector sect;
+ struct bsd_disklabel *l;
+ struct bsd_partition *p;
+ char tmp[64];
+
+ l = read_part_sector(state, offset + 1, &sect);
+ if (!l)
+ return;
+ if (le32_to_cpu(l->d_magic) != BSD_DISKMAGIC) {
+ put_dev_sector(sect);
+ return;
+ }
+
+ snprintf(tmp, sizeof(tmp), " %s%d: <%s:", state->name, origin, flavour);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+
+ if (le16_to_cpu(l->d_npartitions) < max_partitions)
+ max_partitions = le16_to_cpu(l->d_npartitions);
+ for (p = l->d_partitions; p - l->d_partitions < max_partitions; p++) {
+ sector_t bsd_start, bsd_size;
+
+ if (state->next == state->limit)
+ break;
+ if (p->p_fstype == BSD_FS_UNUSED)
+ continue;
+ bsd_start = le32_to_cpu(p->p_offset);
+ bsd_size = le32_to_cpu(p->p_size);
+ if (offset == bsd_start && size == bsd_size)
+ /* full parent partition, we have it already */
+ continue;
+ if (offset > bsd_start || offset+size < bsd_start+bsd_size) {
+ strlcat(state->pp_buf, "bad subpartition - ignored\n", PAGE_SIZE);
+ continue;
+ }
+ put_partition(state, state->next++, bsd_start, bsd_size);
+ }
+ put_dev_sector(sect);
+ if (le16_to_cpu(l->d_npartitions) > max_partitions) {
+ snprintf(tmp, sizeof(tmp), " (ignored %d more)",
+ le16_to_cpu(l->d_npartitions) - max_partitions);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ }
+ strlcat(state->pp_buf, " >\n", PAGE_SIZE);
+}
+#endif
+
+static void parse_freebsd(struct parsed_partitions *state,
+ sector_t offset, sector_t size, int origin)
+{
+#ifdef CONFIG_BSD_DISKLABEL
+ parse_bsd(state, offset, size, origin, "bsd", BSD_MAXPARTITIONS);
+#endif
+}
+
+static void parse_netbsd(struct parsed_partitions *state,
+ sector_t offset, sector_t size, int origin)
+{
+#ifdef CONFIG_BSD_DISKLABEL
+ parse_bsd(state, offset, size, origin, "netbsd", BSD_MAXPARTITIONS);
+#endif
+}
+
+static void parse_openbsd(struct parsed_partitions *state,
+ sector_t offset, sector_t size, int origin)
+{
+#ifdef CONFIG_BSD_DISKLABEL
+ parse_bsd(state, offset, size, origin, "openbsd",
+ OPENBSD_MAXPARTITIONS);
+#endif
+}
+
+/*
+ * Create devices for Unixware partitions listed in a disklabel, under a
+ * dos-like partition. See parse_extended() for more information.
+ */
+static void parse_unixware(struct parsed_partitions *state,
+ sector_t offset, sector_t size, int origin)
+{
+#ifdef CONFIG_UNIXWARE_DISKLABEL
+ Sector sect;
+ struct unixware_disklabel *l;
+ struct unixware_slice *p;
+
+ l = read_part_sector(state, offset + 29, &sect);
+ if (!l)
+ return;
+ if (le32_to_cpu(l->d_magic) != UNIXWARE_DISKMAGIC ||
+ le32_to_cpu(l->vtoc.v_magic) != UNIXWARE_DISKMAGIC2) {
+ put_dev_sector(sect);
+ return;
+ }
+ {
+ char tmp[1 + BDEVNAME_SIZE + 10 + 12 + 1];
+
+ snprintf(tmp, sizeof(tmp), " %s%d: <unixware:", state->name, origin);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ }
+ p = &l->vtoc.v_slice[1];
+ /* I omit the 0th slice as it is the same as the whole disk. */
+ while (p - &l->vtoc.v_slice[0] < UNIXWARE_NUMSLICE) {
+ if (state->next == state->limit)
+ break;
+
+ if (p->s_label != UNIXWARE_FS_UNUSED)
+ put_partition(state, state->next++,
+ le32_to_cpu(p->start_sect),
+ le32_to_cpu(p->nr_sects));
+ p++;
+ }
+ put_dev_sector(sect);
+ strlcat(state->pp_buf, " >\n", PAGE_SIZE);
+#endif
+}
+
+/*
+ * Minix 2.0.0/2.0.2 subpartition support.
+ * Anand Krishnamurthy <anandk@wiproge.med.ge.com>
+ * Rajeev V. Pillai <rajeevvp@yahoo.com>
+ */
+static void parse_minix(struct parsed_partitions *state,
+ sector_t offset, sector_t size, int origin)
+{
+#ifdef CONFIG_MINIX_SUBPARTITION
+ Sector sect;
+ unsigned char *data;
+ struct partition *p;
+ int i;
+
+ data = read_part_sector(state, offset, &sect);
+ if (!data)
+ return;
+
+ p = (struct partition *)(data + 0x1be);
+
+ /* The first sector of a Minix partition can have either
+ * a secondary MBR describing its subpartitions, or
+ * the normal boot sector. */
+ if (msdos_magic_present (data + 510) &&
+ SYS_IND(p) == MINIX_PARTITION) { /* subpartition table present */
+ char tmp[1 + BDEVNAME_SIZE + 10 + 9 + 1];
+
+ snprintf(tmp, sizeof(tmp), " %s%d: <minix:", state->name, origin);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ for (i = 0; i < MINIX_NR_SUBPARTITIONS; i++, p++) {
+ if (state->next == state->limit)
+ break;
+ /* add each partition in use */
+ if (SYS_IND(p) == MINIX_PARTITION)
+ put_partition(state, state->next++,
+ start_sect(p), nr_sects(p));
+ }
+ strlcat(state->pp_buf, " >\n", PAGE_SIZE);
+ }
+ put_dev_sector(sect);
+#endif /* CONFIG_MINIX_SUBPARTITION */
+}
+
+static struct {
+ unsigned char id;
+ void (*parse)(struct parsed_partitions *, sector_t, sector_t, int);
+} subtypes[] = {
+ {FREEBSD_PARTITION, parse_freebsd},
+ {NETBSD_PARTITION, parse_netbsd},
+ {OPENBSD_PARTITION, parse_openbsd},
+ {MINIX_PARTITION, parse_minix},
+ {UNIXWARE_PARTITION, parse_unixware},
+ {SOLARIS_X86_PARTITION, parse_solaris_x86},
+ {NEW_SOLARIS_X86_PARTITION, parse_solaris_x86},
+ {0, NULL},
+};
+
+int msdos_partition(struct parsed_partitions *state)
+{
+ sector_t sector_size = bdev_logical_block_size(state->bdev) / 512;
+ Sector sect;
+ unsigned char *data;
+ struct partition *p;
+ struct fat_boot_sector *fb;
+ int slot;
+
+ data = read_part_sector(state, 0, &sect);
+ if (!data)
+ return -1;
+ if (!msdos_magic_present(data + 510)) {
+ put_dev_sector(sect);
+ return 0;
+ }
+
+ if (aix_magic_present(state, data)) {
+ put_dev_sector(sect);
+ strlcat(state->pp_buf, " [AIX]", PAGE_SIZE);
+ return 0;
+ }
+
+ /*
+ * Now that the 55aa signature is present, this is probably
+ * either the boot sector of a FAT filesystem or a DOS-type
+ * partition table. Reject this in case the boot indicator
+ * is not 0 or 0x80.
+ */
+ p = (struct partition *) (data + 0x1be);
+ for (slot = 1; slot <= 4; slot++, p++) {
+ if (p->boot_ind != 0 && p->boot_ind != 0x80) {
+ /*
+ * Even without a valid boot indicator value
+ * it is still possible this is a valid FAT filesystem
+ * without a partition table.
+ */
+ fb = (struct fat_boot_sector *) data;
+ if (slot == 1 && fb->reserved && fb->fats
+ && fat_valid_media(fb->media)) {
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+ put_dev_sector(sect);
+ return 1;
+ } else {
+ put_dev_sector(sect);
+ return 0;
+ }
+ }
+ }
+
+#ifdef CONFIG_EFI_PARTITION
+ p = (struct partition *) (data + 0x1be);
+ for (slot = 1 ; slot <= 4 ; slot++, p++) {
+ /* If this is an EFI GPT disk, msdos should ignore it. */
+ if (SYS_IND(p) == EFI_PMBR_OSTYPE_EFI_GPT) {
+ put_dev_sector(sect);
+ return 0;
+ }
+ }
+#endif
+ p = (struct partition *) (data + 0x1be);
+
+ /*
+ * Look for partitions in two passes:
+ * First find the primary and DOS-type extended partitions.
+ * On the second pass look inside *BSD, Unixware and Solaris partitions.
+ */
+
+ state->next = 5;
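+ /*
+ * Primary slots occupy minors 1-4; logical partitions found inside an
+ * extended partition are numbered from 5 onwards.
+ */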
+ for (slot = 1 ; slot <= 4 ; slot++, p++) {
+ sector_t start = start_sect(p)*sector_size;
+ sector_t size = nr_sects(p)*sector_size;
+ if (!size)
+ continue;
+ if (is_extended_partition(p)) {
+ /*
+ * prevent someone doing mkfs or mkswap on an
+ * extended partition, but leave room for LILO
+ * FIXME: this uses one logical sector for > 512b
+ * sector, although it may not be enough/proper.
+ */
+ sector_t n = 2;
+ n = min(size, max(sector_size, n));
+ put_partition(state, slot, start, n);
+
+ strlcat(state->pp_buf, " <", PAGE_SIZE);
+ parse_extended(state, start, size);
+ strlcat(state->pp_buf, " >", PAGE_SIZE);
+ continue;
+ }
+ put_partition(state, slot, start, size);
+ if (SYS_IND(p) == LINUX_RAID_PARTITION)
+ state->parts[slot].flags = ADDPART_FLAG_RAID;
+ if (SYS_IND(p) == DM6_PARTITION)
+ strlcat(state->pp_buf, "[DM]", PAGE_SIZE);
+ if (SYS_IND(p) == EZD_PARTITION)
+ strlcat(state->pp_buf, "[EZD]", PAGE_SIZE);
+ }
+
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+
+ /* second pass - output for each on a separate line */
+ p = (struct partition *) (0x1be + data);
+ for (slot = 1 ; slot <= 4 ; slot++, p++) {
+ unsigned char id = SYS_IND(p);
+ int n;
+
+ if (!nr_sects(p))
+ continue;
+
+ for (n = 0; subtypes[n].parse && id != subtypes[n].id; n++)
+ ;
+
+ if (!subtypes[n].parse)
+ continue;
+ subtypes[n].parse(state, start_sect(p) * sector_size,
+ nr_sects(p) * sector_size, slot);
+ }
+ put_dev_sector(sect);
+ return 1;
+}
diff --git a/block/partitions/msdos.h b/block/partitions/msdos.h
new file mode 100644
index 00000000..38c781c4
--- /dev/null
+++ b/block/partitions/msdos.h
@@ -0,0 +1,8 @@
+/*
+ * fs/partitions/msdos.h
+ */
+
+#define MSDOS_LABEL_MAGIC 0xAA55
+
+int msdos_partition(struct parsed_partitions *state);
+
diff --git a/block/partitions/osf.c b/block/partitions/osf.c
new file mode 100644
index 00000000..764b86a0
--- /dev/null
+++ b/block/partitions/osf.c
@@ -0,0 +1,86 @@
+/*
+ * fs/partitions/osf.c
+ *
+ * Code extracted from drivers/block/genhd.c
+ *
+ * Copyright (C) 1991-1998 Linus Torvalds
+ * Re-organised Feb 1998 Russell King
+ */
+
+#include "check.h"
+#include "osf.h"
+
+#define MAX_OSF_PARTITIONS 18
+
+int osf_partition(struct parsed_partitions *state)
+{
+ int i;
+ int slot = 1;
+ unsigned int npartitions;
+ Sector sect;
+ unsigned char *data;
+ struct disklabel {
+ __le32 d_magic;
+ __le16 d_type,d_subtype;
+ u8 d_typename[16];
+ u8 d_packname[16];
+ __le32 d_secsize;
+ __le32 d_nsectors;
+ __le32 d_ntracks;
+ __le32 d_ncylinders;
+ __le32 d_secpercyl;
+ __le32 d_secprtunit;
+ __le16 d_sparespertrack;
+ __le16 d_sparespercyl;
+ __le32 d_acylinders;
+ __le16 d_rpm, d_interleave, d_trackskew, d_cylskew;
+ __le32 d_headswitch, d_trkseek, d_flags;
+ __le32 d_drivedata[5];
+ __le32 d_spare[5];
+ __le32 d_magic2;
+ __le16 d_checksum;
+ __le16 d_npartitions;
+ __le32 d_bbsize, d_sbsize;
+ struct d_partition {
+ __le32 p_size;
+ __le32 p_offset;
+ __le32 p_fsize;
+ u8 p_fstype;
+ u8 p_frag;
+ __le16 p_cpg;
+ } d_partitions[MAX_OSF_PARTITIONS];
+ } * label;
+ struct d_partition * partition;
+
+ data = read_part_sector(state, 0, &sect);
+ if (!data)
+ return -1;
+
+ label = (struct disklabel *) (data+64);
+ partition = label->d_partitions;
+ if (le32_to_cpu(label->d_magic) != DISKLABELMAGIC) {
+ put_dev_sector(sect);
+ return 0;
+ }
+ if (le32_to_cpu(label->d_magic2) != DISKLABELMAGIC) {
+ put_dev_sector(sect);
+ return 0;
+ }
+ npartitions = le16_to_cpu(label->d_npartitions);
+ if (npartitions > MAX_OSF_PARTITIONS) {
+ put_dev_sector(sect);
+ return 0;
+ }
+ for (i = 0 ; i < npartitions; i++, partition++) {
+ if (slot == state->limit)
+ break;
+ if (le32_to_cpu(partition->p_size))
+ put_partition(state, slot,
+ le32_to_cpu(partition->p_offset),
+ le32_to_cpu(partition->p_size));
+ slot++;
+ }
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+ put_dev_sector(sect);
+ return 1;
+}
diff --git a/block/partitions/osf.h b/block/partitions/osf.h
new file mode 100644
index 00000000..20ed2315
--- /dev/null
+++ b/block/partitions/osf.h
@@ -0,0 +1,7 @@
+/*
+ * fs/partitions/osf.h
+ */
+
+#define DISKLABELMAGIC (0x82564557UL)
+
+int osf_partition(struct parsed_partitions *state);
diff --git a/block/partitions/sgi.c b/block/partitions/sgi.c
new file mode 100644
index 00000000..ea8a86dc
--- /dev/null
+++ b/block/partitions/sgi.c
@@ -0,0 +1,82 @@
+/*
+ * fs/partitions/sgi.c
+ *
+ * Code extracted from drivers/block/genhd.c
+ */
+
+#include "check.h"
+#include "sgi.h"
+
+struct sgi_disklabel {
+ __be32 magic_mushroom; /* Big fat spliff... */
+ __be16 root_part_num; /* Root partition number */
+ __be16 swap_part_num; /* Swap partition number */
+ s8 boot_file[16]; /* Name of boot file for ARCS */
+ u8 _unused0[48]; /* Device parameter useless crapola.. */
+ struct sgi_volume {
+ s8 name[8]; /* Name of volume */
+ __be32 block_num; /* Logical block number */
+ __be32 num_bytes; /* How big, in bytes */
+ } volume[15];
+ struct sgi_partition {
+ __be32 num_blocks; /* Size in logical blocks */
+ __be32 first_block; /* First logical block */
+ __be32 type; /* Type of this partition */
+ } partitions[16];
+ __be32 csum; /* Disk label checksum */
+ __be32 _unused1; /* Padding */
+};
+
+int sgi_partition(struct parsed_partitions *state)
+{
+ int i, csum;
+ __be32 magic;
+ int slot = 1;
+ unsigned int start, blocks;
+ __be32 *ui, cs;
+ Sector sect;
+ struct sgi_disklabel *label;
+ struct sgi_partition *p;
+ char b[BDEVNAME_SIZE];
+
+ label = read_part_sector(state, 0, &sect);
+ if (!label)
+ return -1;
+ p = &label->partitions[0];
+ magic = label->magic_mushroom;
+ if(be32_to_cpu(magic) != SGI_LABEL_MAGIC) {
+ /*printk("Dev %s SGI disklabel: bad magic %08x\n",
+ bdevname(bdev, b), be32_to_cpu(magic));*/
+ put_dev_sector(sect);
+ return 0;
+ }
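+	/*
+	 * Checksum the label: summing every 32-bit big-endian word of the
+	 * structure, the stored csum included, must yield zero.
+	 */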
+ ui = ((__be32 *) (label + 1)) - 1;
+ for(csum = 0; ui >= ((__be32 *) label);) {
+ cs = *ui--;
+ csum += be32_to_cpu(cs);
+ }
+ if(csum) {
+ printk(KERN_WARNING "Dev %s SGI disklabel: csum bad, label corrupted\n",
+ bdevname(state->bdev, b));
+ put_dev_sector(sect);
+ return 0;
+ }
+ /* All SGI disk labels have 16 partitions, disks under Linux only
+ * have 15 minors. Luckily there are always a few zero length
+ * partitions which we don't care about so we never overflow the
+ * current_minor.
+ */
+ for(i = 0; i < 16; i++, p++) {
+ blocks = be32_to_cpu(p->num_blocks);
+ start = be32_to_cpu(p->first_block);
+ if (blocks) {
+ put_partition(state, slot, start, blocks);
+ if (be32_to_cpu(p->type) == LINUX_RAID_PARTITION)
+ state->parts[slot].flags = ADDPART_FLAG_RAID;
+ }
+ slot++;
+ }
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+ put_dev_sector(sect);
+ return 1;
+}
diff --git a/block/partitions/sgi.h b/block/partitions/sgi.h
new file mode 100644
index 00000000..b9553ebd
--- /dev/null
+++ b/block/partitions/sgi.h
@@ -0,0 +1,8 @@
+/*
+ * fs/partitions/sgi.h
+ */
+
+extern int sgi_partition(struct parsed_partitions *state);
+
+#define SGI_LABEL_MAGIC 0x0be5a941
+
diff --git a/block/partitions/sun.c b/block/partitions/sun.c
new file mode 100644
index 00000000..b5b6fcfb
--- /dev/null
+++ b/block/partitions/sun.c
@@ -0,0 +1,122 @@
+/*
+ * fs/partitions/sun.c
+ *
+ * Code extracted from drivers/block/genhd.c
+ *
+ * Copyright (C) 1991-1998 Linus Torvalds
+ * Re-organised Feb 1998 Russell King
+ */
+
+#include "check.h"
+#include "sun.h"
+
+int sun_partition(struct parsed_partitions *state)
+{
+ int i;
+ __be16 csum;
+ int slot = 1;
+ __be16 *ush;
+ Sector sect;
+ struct sun_disklabel {
+ unsigned char info[128]; /* Informative text string */
+ struct sun_vtoc {
+ __be32 version; /* Layout version */
+ char volume[8]; /* Volume name */
+ __be16 nparts; /* Number of partitions */
+ struct sun_info { /* Partition hdrs, sec 2 */
+ __be16 id;
+ __be16 flags;
+ } infos[8];
+ __be16 padding; /* Alignment padding */
+ __be32 bootinfo[3]; /* Info needed by mboot */
+ __be32 sanity; /* To verify vtoc sanity */
+ __be32 reserved[10]; /* Free space */
+ __be32 timestamp[8]; /* Partition timestamp */
+ } vtoc;
+ __be32 write_reinstruct; /* sectors to skip, writes */
+ __be32 read_reinstruct; /* sectors to skip, reads */
+ unsigned char spare[148]; /* Padding */
+ __be16 rspeed; /* Disk rotational speed */
+ __be16 pcylcount; /* Physical cylinder count */
+ __be16 sparecyl; /* extra sects per cylinder */
+ __be16 obs1; /* gap1 */
+ __be16 obs2; /* gap2 */
+ __be16 ilfact; /* Interleave factor */
+ __be16 ncyl; /* Data cylinder count */
+ __be16 nacyl; /* Alt. cylinder count */
+ __be16 ntrks; /* Tracks per cylinder */
+ __be16 nsect; /* Sectors per track */
+ __be16 obs3; /* bhead - Label head offset */
+ __be16 obs4; /* ppart - Physical Partition */
+ struct sun_partition {
+ __be32 start_cylinder;
+ __be32 num_sectors;
+ } partitions[8];
+ __be16 magic; /* Magic number */
+ __be16 csum; /* Label xor'd checksum */
+ } * label;
+ struct sun_partition *p;
+ unsigned long spc;
+ char b[BDEVNAME_SIZE];
+ int use_vtoc;
+ int nparts;
+
+ label = read_part_sector(state, 0, &sect);
+ if (!label)
+ return -1;
+
+ p = label->partitions;
+ if (be16_to_cpu(label->magic) != SUN_LABEL_MAGIC) {
+/* printk(KERN_INFO "Dev %s Sun disklabel: bad magic %04x\n",
+ bdevname(bdev, b), be16_to_cpu(label->magic)); */
+ put_dev_sector(sect);
+ return 0;
+ }
+ /* Look at the checksum */
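+	/* the XOR of every 16-bit word in the label, the stored csum included, must be zero */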
+ ush = ((__be16 *) (label+1)) - 1;
+ for (csum = 0; ush >= ((__be16 *) label);)
+ csum ^= *ush--;
+ if (csum) {
+ printk("Dev %s Sun disklabel: Csum bad, label corrupted\n",
+ bdevname(state->bdev, b));
+ put_dev_sector(sect);
+ return 0;
+ }
+
+ /* Check to see if we can use the VTOC table */
+ use_vtoc = ((be32_to_cpu(label->vtoc.sanity) == SUN_VTOC_SANITY) &&
+ (be32_to_cpu(label->vtoc.version) == 1) &&
+ (be16_to_cpu(label->vtoc.nparts) <= 8));
+
+ /* Use 8 partition entries if not specified in validated VTOC */
+ nparts = (use_vtoc) ? be16_to_cpu(label->vtoc.nparts) : 8;
+
+ /*
+ * So that old Linux-Sun partitions continue to work,
+ * allow the VTOC to be used under the additional condition ...
+ */
+ use_vtoc = use_vtoc || !(label->vtoc.sanity ||
+ label->vtoc.version || label->vtoc.nparts);
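+	/*
+	 * Partition starts are recorded in cylinders; convert them to sectors
+	 * using sectors per cylinder (tracks times sectors per track).
+	 */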
+ spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect);
+ for (i = 0; i < nparts; i++, p++) {
+ unsigned long st_sector;
+ unsigned int num_sectors;
+
+ st_sector = be32_to_cpu(p->start_cylinder) * spc;
+ num_sectors = be32_to_cpu(p->num_sectors);
+ if (num_sectors) {
+ put_partition(state, slot, st_sector, num_sectors);
+ state->parts[slot].flags = 0;
+ if (use_vtoc) {
+ if (be16_to_cpu(label->vtoc.infos[i].id) == LINUX_RAID_PARTITION)
+ state->parts[slot].flags |= ADDPART_FLAG_RAID;
+ else if (be16_to_cpu(label->vtoc.infos[i].id) == SUN_WHOLE_DISK)
+ state->parts[slot].flags |= ADDPART_FLAG_WHOLEDISK;
+ }
+ }
+ slot++;
+ }
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+ put_dev_sector(sect);
+ return 1;
+}
diff --git a/block/partitions/sun.h b/block/partitions/sun.h
new file mode 100644
index 00000000..2424baa8
--- /dev/null
+++ b/block/partitions/sun.h
@@ -0,0 +1,8 @@
+/*
+ * fs/partitions/sun.h
+ */
+
+#define SUN_LABEL_MAGIC 0xDABE
+#define SUN_VTOC_SANITY 0x600DDEEE
+
+int sun_partition(struct parsed_partitions *state);
diff --git a/block/partitions/sysv68.c b/block/partitions/sysv68.c
new file mode 100644
index 00000000..9627ccff
--- /dev/null
+++ b/block/partitions/sysv68.c
@@ -0,0 +1,95 @@
+/*
+ * fs/partitions/sysv68.c
+ *
+ * Copyright (C) 2007 Philippe De Muyter <phdm@macqel.be>
+ */
+
+#include "check.h"
+#include "sysv68.h"
+
+/*
+ * Volume ID structure: on first 256-bytes sector of disk
+ */
+
+struct volumeid {
+ u8 vid_unused[248];
+ u8 vid_mac[8]; /* ASCII string "MOTOROLA" */
+};
+
+/*
+ * config block: second 256-bytes sector on disk
+ */
+
+struct dkconfig {
+ u8 ios_unused0[128];
+ __be32 ios_slcblk; /* Slice table block number */
+ __be16 ios_slccnt; /* Number of entries in slice table */
+ u8 ios_unused1[122];
+};
+
+/*
+ * combined volumeid and dkconfig block
+ */
+
+struct dkblk0 {
+ struct volumeid dk_vid;
+ struct dkconfig dk_ios;
+};
+
+/*
+ * Slice Table Structure
+ */
+
+struct slice {
+ __be32 nblocks; /* slice size (in blocks) */
+ __be32 blkoff; /* block offset of slice */
+};
+
+
+int sysv68_partition(struct parsed_partitions *state)
+{
+ int i, slices;
+ int slot = 1;
+ Sector sect;
+ unsigned char *data;
+ struct dkblk0 *b;
+ struct slice *slice;
+ char tmp[64];
+
+ data = read_part_sector(state, 0, &sect);
+ if (!data)
+ return -1;
+
+ b = (struct dkblk0 *)data;
+ if (memcmp(b->dk_vid.vid_mac, "MOTOROLA", sizeof(b->dk_vid.vid_mac))) {
+ put_dev_sector(sect);
+ return 0;
+ }
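+	/* The config block supplies the slice count and the block number of the slice table. */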
+ slices = be16_to_cpu(b->dk_ios.ios_slccnt);
+ i = be32_to_cpu(b->dk_ios.ios_slcblk);
+ put_dev_sector(sect);
+
+ data = read_part_sector(state, i, &sect);
+ if (!data)
+ return -1;
+
+ slices -= 1; /* last slice is the whole disk */
+ snprintf(tmp, sizeof(tmp), "sysV68: %s(s%u)", state->name, slices);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ slice = (struct slice *)data;
+ for (i = 0; i < slices; i++, slice++) {
+ if (slot == state->limit)
+ break;
+ if (be32_to_cpu(slice->nblocks)) {
+ put_partition(state, slot,
+ be32_to_cpu(slice->blkoff),
+ be32_to_cpu(slice->nblocks));
+ snprintf(tmp, sizeof(tmp), "(s%u)", i);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ }
+ slot++;
+ }
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+ put_dev_sector(sect);
+ return 1;
+}
diff --git a/block/partitions/sysv68.h b/block/partitions/sysv68.h
new file mode 100644
index 00000000..bf2f5ffa
--- /dev/null
+++ b/block/partitions/sysv68.h
@@ -0,0 +1 @@
+extern int sysv68_partition(struct parsed_partitions *state);
diff --git a/block/partitions/ultrix.c b/block/partitions/ultrix.c
new file mode 100644
index 00000000..8dbaf9f7
--- /dev/null
+++ b/block/partitions/ultrix.c
@@ -0,0 +1,48 @@
+/*
+ * fs/partitions/ultrix.c
+ *
+ * Code extracted from drivers/block/genhd.c
+ *
+ * Re-organised Jul 1999 Russell King
+ */
+
+#include "check.h"
+#include "ultrix.h"
+
+int ultrix_partition(struct parsed_partitions *state)
+{
+ int i;
+ Sector sect;
+ unsigned char *data;
+ struct ultrix_disklabel {
+ s32 pt_magic; /* magic no. indicating part. info exists */
+ s32 pt_valid; /* set by driver if pt is current */
+ struct pt_info {
+ s32 pi_nblocks; /* no. of sectors */
+ u32 pi_blkoff; /* block offset for start */
+ } pt_part[8];
+ } *label;
+
+#define PT_MAGIC 0x032957 /* Partition magic number */
+#define PT_VALID 1 /* Indicates if struct is valid */
+
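+	/* The disklabel occupies the last bytes of the 16 KB boot area; read the 512-byte sector containing it. */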
+ data = read_part_sector(state, (16384 - sizeof(*label))/512, &sect);
+ if (!data)
+ return -1;
+
+ label = (struct ultrix_disklabel *)(data + 512 - sizeof(*label));
+
+ if (label->pt_magic == PT_MAGIC && label->pt_valid == PT_VALID) {
+ for (i=0; i<8; i++)
+ if (label->pt_part[i].pi_nblocks)
+ put_partition(state, i+1,
+ label->pt_part[i].pi_blkoff,
+ label->pt_part[i].pi_nblocks);
+ put_dev_sector(sect);
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+ return 1;
+ } else {
+ put_dev_sector(sect);
+ return 0;
+ }
+}
diff --git a/block/partitions/ultrix.h b/block/partitions/ultrix.h
new file mode 100644
index 00000000..a3cc00b2
--- /dev/null
+++ b/block/partitions/ultrix.h
@@ -0,0 +1,5 @@
+/*
+ * fs/partitions/ultrix.h
+ */
+
+int ultrix_partition(struct parsed_partitions *state);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
new file mode 100644
index 00000000..260fa80e
--- /dev/null
+++ b/block/scsi_ioctl.c
@@ -0,0 +1,750 @@
+/*
+ * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/capability.h>
+#include <linux/completion.h>
+#include <linux/cdrom.h>
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <linux/times.h>
+#include <asm/uaccess.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_cmnd.h>
+
+struct blk_cmd_filter {
+ unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
+ unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
+};
+
+static struct blk_cmd_filter blk_default_cmd_filter;
+
+/* Command group 3 is reserved and should never be used. */
+const unsigned char scsi_command_size_tbl[8] =
+{
+ 6, 10, 10, 12,
+ 16, 12, 10, 10
+};
+EXPORT_SYMBOL(scsi_command_size_tbl);
+
+#include <scsi/sg.h>
+
+static int sg_get_version(int __user *p)
+{
+ static const int sg_version_num = 30527;
+ return put_user(sg_version_num, p);
+}
+
+static int scsi_get_idlun(struct request_queue *q, int __user *p)
+{
+ return put_user(0, p);
+}
+
+static int scsi_get_bus(struct request_queue *q, int __user *p)
+{
+ return put_user(0, p);
+}
+
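+/*
+ * SG timeouts are exchanged with user space in clock ticks (USER_HZ)
+ * and kept in the queue in jiffies, hence the conversions below.
+ */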
+static int sg_get_timeout(struct request_queue *q)
+{
+ return jiffies_to_clock_t(q->sg_timeout);
+}
+
+static int sg_set_timeout(struct request_queue *q, int __user *p)
+{
+ int timeout, err = get_user(timeout, p);
+
+ if (!err)
+ q->sg_timeout = clock_t_to_jiffies(timeout);
+
+ return err;
+}
+
+static int sg_get_reserved_size(struct request_queue *q, int __user *p)
+{
+ unsigned val = min(q->sg_reserved_size, queue_max_sectors(q) << 9);
+
+ return put_user(val, p);
+}
+
+static int sg_set_reserved_size(struct request_queue *q, int __user *p)
+{
+ int size, err = get_user(size, p);
+
+ if (err)
+ return err;
+
+ if (size < 0)
+ return -EINVAL;
+ if (size > (queue_max_sectors(q) << 9))
+ size = queue_max_sectors(q) << 9;
+
+ q->sg_reserved_size = size;
+ return 0;
+}
+
+/*
+ * will always return that we are ATAPI even for a real SCSI drive, I'm not
+ * so sure this is worth doing anything about (why would you care??)
+ */
+static int sg_emulated_host(struct request_queue *q, int __user *p)
+{
+ return put_user(1, p);
+}
+
+static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
+{
+ /* Basic read-only commands */
+ __set_bit(TEST_UNIT_READY, filter->read_ok);
+ __set_bit(REQUEST_SENSE, filter->read_ok);
+ __set_bit(READ_6, filter->read_ok);
+ __set_bit(READ_10, filter->read_ok);
+ __set_bit(READ_12, filter->read_ok);
+ __set_bit(READ_16, filter->read_ok);
+ __set_bit(READ_BUFFER, filter->read_ok);
+ __set_bit(READ_DEFECT_DATA, filter->read_ok);
+ __set_bit(READ_CAPACITY, filter->read_ok);
+ __set_bit(READ_LONG, filter->read_ok);
+ __set_bit(INQUIRY, filter->read_ok);
+ __set_bit(MODE_SENSE, filter->read_ok);
+ __set_bit(MODE_SENSE_10, filter->read_ok);
+ __set_bit(LOG_SENSE, filter->read_ok);
+ __set_bit(START_STOP, filter->read_ok);
+ __set_bit(GPCMD_VERIFY_10, filter->read_ok);
+ __set_bit(VERIFY_16, filter->read_ok);
+ __set_bit(REPORT_LUNS, filter->read_ok);
+ __set_bit(SERVICE_ACTION_IN, filter->read_ok);
+ __set_bit(RECEIVE_DIAGNOSTIC, filter->read_ok);
+ __set_bit(MAINTENANCE_IN, filter->read_ok);
+ __set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok);
+
+ /* Audio CD commands */
+ __set_bit(GPCMD_PLAY_CD, filter->read_ok);
+ __set_bit(GPCMD_PLAY_AUDIO_10, filter->read_ok);
+ __set_bit(GPCMD_PLAY_AUDIO_MSF, filter->read_ok);
+ __set_bit(GPCMD_PLAY_AUDIO_TI, filter->read_ok);
+ __set_bit(GPCMD_PAUSE_RESUME, filter->read_ok);
+
+ /* CD/DVD data reading */
+ __set_bit(GPCMD_READ_CD, filter->read_ok);
+ __set_bit(GPCMD_READ_CD_MSF, filter->read_ok);
+ __set_bit(GPCMD_READ_DISC_INFO, filter->read_ok);
+ __set_bit(GPCMD_READ_CDVD_CAPACITY, filter->read_ok);
+ __set_bit(GPCMD_READ_DVD_STRUCTURE, filter->read_ok);
+ __set_bit(GPCMD_READ_HEADER, filter->read_ok);
+ __set_bit(GPCMD_READ_TRACK_RZONE_INFO, filter->read_ok);
+ __set_bit(GPCMD_READ_SUBCHANNEL, filter->read_ok);
+ __set_bit(GPCMD_READ_TOC_PMA_ATIP, filter->read_ok);
+ __set_bit(GPCMD_REPORT_KEY, filter->read_ok);
+ __set_bit(GPCMD_SCAN, filter->read_ok);
+ __set_bit(GPCMD_GET_CONFIGURATION, filter->read_ok);
+ __set_bit(GPCMD_READ_FORMAT_CAPACITIES, filter->read_ok);
+ __set_bit(GPCMD_GET_EVENT_STATUS_NOTIFICATION, filter->read_ok);
+ __set_bit(GPCMD_GET_PERFORMANCE, filter->read_ok);
+ __set_bit(GPCMD_SEEK, filter->read_ok);
+ __set_bit(GPCMD_STOP_PLAY_SCAN, filter->read_ok);
+
+ /* Basic writing commands */
+ __set_bit(WRITE_6, filter->write_ok);
+ __set_bit(WRITE_10, filter->write_ok);
+ __set_bit(WRITE_VERIFY, filter->write_ok);
+ __set_bit(WRITE_12, filter->write_ok);
+ __set_bit(WRITE_VERIFY_12, filter->write_ok);
+ __set_bit(WRITE_16, filter->write_ok);
+ __set_bit(WRITE_LONG, filter->write_ok);
+ __set_bit(WRITE_LONG_2, filter->write_ok);
+ __set_bit(ERASE, filter->write_ok);
+ __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
+ __set_bit(MODE_SELECT, filter->write_ok);
+ __set_bit(LOG_SELECT, filter->write_ok);
+ __set_bit(GPCMD_BLANK, filter->write_ok);
+ __set_bit(GPCMD_CLOSE_TRACK, filter->write_ok);
+ __set_bit(GPCMD_FLUSH_CACHE, filter->write_ok);
+ __set_bit(GPCMD_FORMAT_UNIT, filter->write_ok);
+ __set_bit(GPCMD_REPAIR_RZONE_TRACK, filter->write_ok);
+ __set_bit(GPCMD_RESERVE_RZONE_TRACK, filter->write_ok);
+ __set_bit(GPCMD_SEND_DVD_STRUCTURE, filter->write_ok);
+ __set_bit(GPCMD_SEND_EVENT, filter->write_ok);
+ __set_bit(GPCMD_SEND_KEY, filter->write_ok);
+ __set_bit(GPCMD_SEND_OPC, filter->write_ok);
+ __set_bit(GPCMD_SEND_CUE_SHEET, filter->write_ok);
+ __set_bit(GPCMD_SET_SPEED, filter->write_ok);
+ __set_bit(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, filter->write_ok);
+ __set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok);
+ __set_bit(GPCMD_SET_STREAMING, filter->write_ok);
+ __set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
+}
+
+int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm)
+{
+ struct blk_cmd_filter *filter = &blk_default_cmd_filter;
+
+ /* root can do any command. */
+ if (capable(CAP_SYS_RAWIO))
+ return 0;
+
+ /* if there's no filter set, assume we're filtering everything out */
+ if (!filter)
+ return -EPERM;
+
+ /* Anybody who can open the device can do a read-safe command */
+ if (test_bit(cmd[0], filter->read_ok))
+ return 0;
+
+ /* Write-safe commands require a writable open */
+ if (test_bit(cmd[0], filter->write_ok) && has_write_perm)
+ return 0;
+
+ return -EPERM;
+}
+EXPORT_SYMBOL(blk_verify_command);
+
+static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
+ struct sg_io_hdr *hdr, fmode_t mode)
+{
+ if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
+ return -EFAULT;
+ if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
+ return -EPERM;
+
+ /*
+ * fill in request structure
+ */
+ rq->cmd_len = hdr->cmd_len;
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+
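+	/*
+	 * Use the caller's timeout (given in milliseconds) when set; otherwise
+	 * fall back to the queue or global default, never below the minimum.
+	 */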
+ rq->timeout = msecs_to_jiffies(hdr->timeout);
+ if (!rq->timeout)
+ rq->timeout = q->sg_timeout;
+ if (!rq->timeout)
+ rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+ if (rq->timeout < BLK_MIN_SG_TIMEOUT)
+ rq->timeout = BLK_MIN_SG_TIMEOUT;
+
+ return 0;
+}
+
+static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
+ struct bio *bio)
+{
+ int r, ret = 0;
+
+ /*
+ * fill in all the output members
+ */
+ hdr->status = rq->errors & 0xff;
+ hdr->masked_status = status_byte(rq->errors);
+ hdr->msg_status = msg_byte(rq->errors);
+ hdr->host_status = host_byte(rq->errors);
+ hdr->driver_status = driver_byte(rq->errors);
+ hdr->info = 0;
+ if (hdr->masked_status || hdr->host_status || hdr->driver_status)
+ hdr->info |= SG_INFO_CHECK;
+ hdr->resid = rq->resid_len;
+ hdr->sb_len_wr = 0;
+
+ if (rq->sense_len && hdr->sbp) {
+ int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);
+
+ if (!copy_to_user(hdr->sbp, rq->sense, len))
+ hdr->sb_len_wr = len;
+ else
+ ret = -EFAULT;
+ }
+
+ r = blk_rq_unmap_user(bio);
+ if (!ret)
+ ret = r;
+ blk_put_request(rq);
+
+ return ret;
+}
+
+static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
+ struct sg_io_hdr *hdr, fmode_t mode)
+{
+ unsigned long start_time;
+ int writing = 0, ret = 0;
+ struct request *rq;
+ char sense[SCSI_SENSE_BUFFERSIZE];
+ struct bio *bio;
+
+ if (hdr->interface_id != 'S')
+ return -EINVAL;
+ if (hdr->cmd_len > BLK_MAX_CDB)
+ return -EINVAL;
+
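+	/* max_hw_sectors is in 512-byte sectors; refuse transfers beyond the hardware limit */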
+ if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
+ return -EIO;
+
+ if (hdr->dxfer_len)
+ switch (hdr->dxfer_direction) {
+ default:
+ return -EINVAL;
+ case SG_DXFER_TO_DEV:
+ writing = 1;
+ break;
+ case SG_DXFER_TO_FROM_DEV:
+ case SG_DXFER_FROM_DEV:
+ break;
+ }
+
+ rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
+ if (!rq)
+ return -ENOMEM;
+
+ if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
+ blk_put_request(rq);
+ return -EFAULT;
+ }
+
+ if (hdr->iovec_count) {
+ const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
+ size_t iov_data_len;
+ struct sg_iovec *sg_iov;
+ struct iovec *iov;
+ int i;
+
+ sg_iov = kmalloc(size, GFP_KERNEL);
+ if (!sg_iov) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(sg_iov, hdr->dxferp, size)) {
+ kfree(sg_iov);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /*
+ * Sum up the vecs, making sure they don't overflow
+ */
+ iov = (struct iovec *) sg_iov;
+ iov_data_len = 0;
+ for (i = 0; i < hdr->iovec_count; i++) {
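+			/* an unsigned wraparound of the running total means the lengths overflowed */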
+ if (iov_data_len + iov[i].iov_len < iov_data_len) {
+ kfree(sg_iov);
+ ret = -EINVAL;
+ goto out;
+ }
+ iov_data_len += iov[i].iov_len;
+ }
+
+ /* SG_IO howto says that the shorter of the two wins */
+ if (hdr->dxfer_len < iov_data_len) {
+ hdr->iovec_count = iov_shorten(iov,
+ hdr->iovec_count,
+ hdr->dxfer_len);
+ iov_data_len = hdr->dxfer_len;
+ }
+
+ ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
+ iov_data_len, GFP_KERNEL);
+ kfree(sg_iov);
+ } else if (hdr->dxfer_len)
+ ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
+ GFP_KERNEL);
+
+ if (ret)
+ goto out;
+
+ bio = rq->bio;
+ memset(sense, 0, sizeof(sense));
+ rq->sense = sense;
+ rq->sense_len = 0;
+ rq->retries = 0;
+
+ start_time = jiffies;
+
+ /* ignore return value. All information is passed back to caller
+ * (if he doesn't check that is his problem).
+ * N.B. a non-zero SCSI status is _not_ necessarily an error.
+ */
+ blk_execute_rq(q, bd_disk, rq, 0);
+
+ hdr->duration = jiffies_to_msecs(jiffies - start_time);
+
+ return blk_complete_sghdr_rq(rq, hdr, bio);
+out:
+ blk_put_request(rq);
+ return ret;
+}
+
+/**
+ * sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
+ * @file: file this ioctl operates on (optional)
+ * @q: request queue to send scsi commands down
+ * @disk: gendisk to operate on (optional)
+ * @sic: userspace structure describing the command to perform
+ *
+ * Send down the scsi command described by @sic to the device below
+ * the request queue @q. If @file is non-NULL it's used to perform
+ * fine-grained permission checks that allow users to send down
+ * non-destructive SCSI commands. If the caller has a struct gendisk
+ * available it should be passed in as @disk to allow the low level
+ * driver to use the information contained in it. A non-NULL @disk
+ * is only allowed if the caller knows that the low level driver doesn't
+ * need it (e.g. in the scsi subsystem).
+ *
+ * Notes:
+ * - This interface is deprecated - users should use the SG_IO
+ * interface instead, as this is a more flexible approach to
+ * performing SCSI commands on a device.
+ * - The SCSI command length is determined by examining the 1st byte
+ * of the given command. There is no way to override this.
+ * - Data transfers are limited to PAGE_SIZE
+ * - The length (x + y) must be at least OMAX_SB_LEN bytes long to
+ * accommodate the sense buffer when an error occurs.
+ * The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
+ * old code will not be surprised.
+ * - If a Unix error occurs (e.g. ENOMEM) then the user will receive
+ * a negative return and the Unix error code in 'errno'.
+ * If the SCSI command succeeds then 0 is returned.
+ * Positive numbers returned are the compacted SCSI error codes (4
+ * bytes in one int) where the lowest byte is the SCSI status.
+ */
+#define OMAX_SB_LEN 16 /* For backward compatibility */
+int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
+ struct scsi_ioctl_command __user *sic)
+{
+ struct request *rq;
+ int err;
+ unsigned int in_len, out_len, bytes, opcode, cmdlen;
+ char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
+
+ if (!sic)
+ return -EINVAL;
+
+ /*
+ * get in and out lengths, verify they don't exceed a page worth of data
+ */
+ if (get_user(in_len, &sic->inlen))
+ return -EFAULT;
+ if (get_user(out_len, &sic->outlen))
+ return -EFAULT;
+ if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
+ return -EINVAL;
+ if (get_user(opcode, sic->data))
+ return -EFAULT;
+
+ bytes = max(in_len, out_len);
+ if (bytes) {
+ buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN);
+ if (!buffer)
+ return -ENOMEM;
+
+ }
+
+ rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
+
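+	/* the CDB length is implied by the opcode's command group (its top three bits) */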
+ cmdlen = COMMAND_SIZE(opcode);
+
+ /*
+ * get command and data to send to device, if any
+ */
+ err = -EFAULT;
+ rq->cmd_len = cmdlen;
+ if (copy_from_user(rq->cmd, sic->data, cmdlen))
+ goto error;
+
+ if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
+ goto error;
+
+ err = blk_verify_command(rq->cmd, mode & FMODE_WRITE);
+ if (err)
+ goto error;
+
+ /* default; possibly overridden later */
+ rq->retries = 5;
+
+ switch (opcode) {
+ case SEND_DIAGNOSTIC:
+ case FORMAT_UNIT:
+ rq->timeout = FORMAT_UNIT_TIMEOUT;
+ rq->retries = 1;
+ break;
+ case START_STOP:
+ rq->timeout = START_STOP_TIMEOUT;
+ break;
+ case MOVE_MEDIUM:
+ rq->timeout = MOVE_MEDIUM_TIMEOUT;
+ break;
+ case READ_ELEMENT_STATUS:
+ rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
+ break;
+ case READ_DEFECT_DATA:
+ rq->timeout = READ_DEFECT_DATA_TIMEOUT;
+ rq->retries = 1;
+ break;
+ default:
+ rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+ break;
+ }
+
+ if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
+ err = DRIVER_ERROR << 24;
+ goto out;
+ }
+
+ memset(sense, 0, sizeof(sense));
+ rq->sense = sense;
+ rq->sense_len = 0;
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+
+ blk_execute_rq(q, disk, rq, 0);
+
+out:
+ err = rq->errors & 0xff; /* only 8 bit SCSI status */
+ if (err) {
+ if (rq->sense_len && rq->sense) {
+ bytes = (OMAX_SB_LEN > rq->sense_len) ?
+ rq->sense_len : OMAX_SB_LEN;
+ if (copy_to_user(sic->data, rq->sense, bytes))
+ err = -EFAULT;
+ }
+ } else {
+ if (copy_to_user(sic->data, buffer, out_len))
+ err = -EFAULT;
+ }
+
+error:
+ kfree(buffer);
+ blk_put_request(rq);
+ return err;
+}
+EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
+
+/* Send basic block requests */
+static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
+ int cmd, int data)
+{
+ struct request *rq;
+ int err;
+
+ rq = blk_get_request(q, WRITE, __GFP_WAIT);
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+ rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
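+	/* Build a six-byte CDB: the opcode goes in byte 0 and the parameter in byte 4. */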
+ rq->cmd[0] = cmd;
+ rq->cmd[4] = data;
+ rq->cmd_len = 6;
+ err = blk_execute_rq(q, bd_disk, rq, 0);
+ blk_put_request(rq);
+
+ return err;
+}
+
+static inline int blk_send_start_stop(struct request_queue *q,
+ struct gendisk *bd_disk, int data)
+{
+ return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
+}
+
+int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mode,
+ unsigned int cmd, void __user *arg)
+{
+ int err;
+
+ if (!q)
+ return -ENXIO;
+
+ switch (cmd) {
+ /*
+ * new sgv3 interface
+ */
+ case SG_GET_VERSION_NUM:
+ err = sg_get_version(arg);
+ break;
+ case SCSI_IOCTL_GET_IDLUN:
+ err = scsi_get_idlun(q, arg);
+ break;
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ err = scsi_get_bus(q, arg);
+ break;
+ case SG_SET_TIMEOUT:
+ err = sg_set_timeout(q, arg);
+ break;
+ case SG_GET_TIMEOUT:
+ err = sg_get_timeout(q);
+ break;
+ case SG_GET_RESERVED_SIZE:
+ err = sg_get_reserved_size(q, arg);
+ break;
+ case SG_SET_RESERVED_SIZE:
+ err = sg_set_reserved_size(q, arg);
+ break;
+ case SG_EMULATED_HOST:
+ err = sg_emulated_host(q, arg);
+ break;
+ case SG_IO: {
+ struct sg_io_hdr hdr;
+
+ err = -EFAULT;
+ if (copy_from_user(&hdr, arg, sizeof(hdr)))
+ break;
+ err = sg_io(q, bd_disk, &hdr, mode);
+ if (err == -EFAULT)
+ break;
+
+ if (copy_to_user(arg, &hdr, sizeof(hdr)))
+ err = -EFAULT;
+ break;
+ }
+ case CDROM_SEND_PACKET: {
+ struct cdrom_generic_command cgc;
+ struct sg_io_hdr hdr;
+
+ err = -EFAULT;
+ if (copy_from_user(&cgc, arg, sizeof(cgc)))
+ break;
+ cgc.timeout = clock_t_to_jiffies(cgc.timeout);
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.interface_id = 'S';
+ hdr.cmd_len = sizeof(cgc.cmd);
+ hdr.dxfer_len = cgc.buflen;
+ err = 0;
+ switch (cgc.data_direction) {
+ case CGC_DATA_UNKNOWN:
+ hdr.dxfer_direction = SG_DXFER_UNKNOWN;
+ break;
+ case CGC_DATA_WRITE:
+ hdr.dxfer_direction = SG_DXFER_TO_DEV;
+ break;
+ case CGC_DATA_READ:
+ hdr.dxfer_direction = SG_DXFER_FROM_DEV;
+ break;
+ case CGC_DATA_NONE:
+ hdr.dxfer_direction = SG_DXFER_NONE;
+ break;
+ default:
+ err = -EINVAL;
+ }
+ if (err)
+ break;
+
+ hdr.dxferp = cgc.buffer;
+ hdr.sbp = cgc.sense;
+ if (hdr.sbp)
+ hdr.mx_sb_len = sizeof(struct request_sense);
+ hdr.timeout = jiffies_to_msecs(cgc.timeout);
+ hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
+ hdr.cmd_len = sizeof(cgc.cmd);
+
+ err = sg_io(q, bd_disk, &hdr, mode);
+ if (err == -EFAULT)
+ break;
+
+ if (hdr.status)
+ err = -EIO;
+
+ cgc.stat = err;
+ cgc.buflen = hdr.resid;
+ if (copy_to_user(arg, &cgc, sizeof(cgc)))
+ err = -EFAULT;
+
+ break;
+ }
+
+ /*
+ * old junk scsi send command ioctl
+ */
+ case SCSI_IOCTL_SEND_COMMAND:
+ printk(KERN_WARNING "program %s is using a deprecated SCSI ioctl, please convert it to SG_IO\n", current->comm);
+ err = -EINVAL;
+ if (!arg)
+ break;
+
+ err = sg_scsi_ioctl(q, bd_disk, mode, arg);
+ break;
+ case CDROMCLOSETRAY:
+ err = blk_send_start_stop(q, bd_disk, 0x03);
+ break;
+ case CDROMEJECT:
+ err = blk_send_start_stop(q, bd_disk, 0x02);
+ break;
+ default:
+ err = -ENOTTY;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(scsi_cmd_ioctl);
+
+int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
+{
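+	/* bd_contains points back to bd for a whole device; only partitions need filtering */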
+ if (bd && bd == bd->bd_contains)
+ return 0;
+
+ /* Actually none of these is particularly useful on a partition,
+ * but they are safe.
+ */
+ switch (cmd) {
+ case SCSI_IOCTL_GET_IDLUN:
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ case SCSI_IOCTL_GET_PCI:
+ case SCSI_IOCTL_PROBE_HOST:
+ case SG_GET_VERSION_NUM:
+ case SG_SET_TIMEOUT:
+ case SG_GET_TIMEOUT:
+ case SG_GET_RESERVED_SIZE:
+ case SG_SET_RESERVED_SIZE:
+ case SG_EMULATED_HOST:
+ return 0;
+ case CDROM_GET_CAPABILITY:
+ /* Keep this until we remove the printk below. udev sends it
+ * and we do not want to spam dmesg about it. CD-ROMs do
+ * not have partitions, so we get here only for disks.
+ */
+ return -ENOIOCTLCMD;
+ default:
+ break;
+ }
+
+ /* In particular, rule out all resets and host-specific ioctls. */
+ printk_ratelimited(KERN_WARNING
+ "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
+
+ return capable(CAP_SYS_RAWIO) ? 0 : -ENOIOCTLCMD;
+}
+EXPORT_SYMBOL(scsi_verify_blk_ioctl);
+
+int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
+ unsigned int cmd, void __user *arg)
+{
+ int ret;
+
+ ret = scsi_verify_blk_ioctl(bd, cmd);
+ if (ret < 0)
+ return ret;
+
+ return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
+}
+EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
+
+static int __init blk_scsi_ioctl_init(void)
+{
+ blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
+ return 0;
+}
+fs_initcall(blk_scsi_ioctl_init);