Diffstat (limited to 'block')
-rw-r--r--  block/Kconfig | 13
-rw-r--r--  block/Kconfig.iosched | 4
-rw-r--r--  block/Makefile | 11
-rw-r--r--  block/bio-integrity.c | 657
-rw-r--r--  block/bio.c | 2052
-rw-r--r--  block/blk-cgroup.c | 2322
-rw-r--r--  block/blk-cgroup.h | 866
-rw-r--r--  block/blk-core.c | 1337
-rw-r--r--  block/blk-exec.c | 55
-rw-r--r--  block/blk-flush.c | 181
-rw-r--r--  block/blk-integrity.c | 46
-rw-r--r--  block/blk-ioc.c | 140
-rw-r--r--  block/blk-iopoll.c | 23
-rw-r--r--  block/blk-lib.c | 169
-rw-r--r--  block/blk-map.c | 13
-rw-r--r--  block/blk-merge.c | 316
-rw-r--r--  block/blk-mq-cpu.c | 67
-rw-r--r--  block/blk-mq-cpumap.c | 119
-rw-r--r--  block/blk-mq-sysfs.c | 456
-rw-r--r--  block/blk-mq-tag.c | 618
-rw-r--r--  block/blk-mq-tag.h | 88
-rw-r--r--  block/blk-mq.c | 2058
-rw-r--r--  block/blk-mq.h | 117
-rw-r--r--  block/blk-settings.c | 59
-rw-r--r--  block/blk-softirq.c | 48
-rw-r--r--  block/blk-sysfs.c | 124
-rw-r--r--  block/blk-tag.c | 50
-rw-r--r--  block/blk-throttle.c | 1670
-rw-r--r--  block/blk-timeout.c | 121
-rw-r--r--  block/blk.h | 86
-rw-r--r--  block/bounce.c | 290
-rw-r--r--  block/bsg-lib.c | 66
-rw-r--r--  block/bsg.c | 34
-rw-r--r--  block/cfq-iosched.c | 1696
-rw-r--r--  block/cfq.h | 115
-rw-r--r--  block/cmdline-parser.c | 254
-rw-r--r--  block/compat_ioctl.c | 4
-rw-r--r--  block/deadline-iosched.c | 32
-rw-r--r--  block/elevator.c | 278
-rw-r--r--  block/genhd.c | 130
-rw-r--r--  block/ioctl.c | 90
-rw-r--r--  block/ioprio.c | 241
-rw-r--r--  block/noop-iosched.c | 21
-rw-r--r--  block/partition-generic.c | 10
-rw-r--r--  block/partitions/Kconfig | 22
-rw-r--r--  block/partitions/Makefile | 2
-rw-r--r--  block/partitions/aix.c | 293
-rw-r--r--  block/partitions/aix.h | 1
-rw-r--r--  block/partitions/atari.h | 4
-rw-r--r--  block/partitions/check.c | 41
-rw-r--r--  block/partitions/check.h | 4
-rw-r--r--  block/partitions/cmdline.c | 99
-rw-r--r--  block/partitions/cmdline.h | 2
-rw-r--r--  block/partitions/efi.c | 208
-rw-r--r--  block/partitions/efi.h | 47
-rw-r--r--  block/partitions/ibm.c | 455
-rw-r--r--  block/partitions/karma.c | 3
-rw-r--r--  block/partitions/mac.c | 4
-rw-r--r--  block/partitions/msdos.c | 49
-rw-r--r--  block/scsi_ioctl.c | 57
60 files changed, 13761 insertions, 4677 deletions
diff --git a/block/Kconfig b/block/Kconfig
index 09acf1b3990..2429515c05c 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -89,7 +89,7 @@ config BLK_DEV_INTEGRITY
config BLK_DEV_THROTTLING
bool "Block layer bio throttling support"
- depends on BLK_CGROUP=y && EXPERIMENTAL
+ depends on BLK_CGROUP=y
default n
---help---
Block layer bio throttling support. It can be used to limit
@@ -99,6 +99,17 @@ config BLK_DEV_THROTTLING
See Documentation/cgroups/blkio-controller.txt for more information.
+config BLK_CMDLINE_PARSER
+ bool "Block device command line partition parser"
+ default n
+ ---help---
+ Enabling this option allows you to specify the partition layout from
+ the kernel boot args. This is typically of use for embedded devices
+ which don't otherwise have any standardized method for listing the
+ partitions on a block device.
+
+ See Documentation/block/cmdline-partition.txt for more information.
+
menu "Partition Types"
source "block/partitions/Kconfig"
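The new parser added by this patch is driven by a blkdevparts= kernel boot argument; the full grammar is documented in Documentation/block/cmdline-partition.txt. As a rough illustration only (the device name and sizes below are made up, not part of this patch), a command line might look like:

    blkdevparts=mmcblk0:1M(boot),512K(env),-(rootfs)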
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 3199b76f795..421bef9c4c4 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -23,8 +23,6 @@ config IOSCHED_DEADLINE
config IOSCHED_CFQ
tristate "CFQ I/O scheduler"
- # If BLK_CGROUP is a module, CFQ has to be built as module.
- depends on (BLK_CGROUP=m && m) || !BLK_CGROUP || BLK_CGROUP=y
default y
---help---
The CFQ I/O scheduler tries to distribute bandwidth equally
@@ -34,8 +32,6 @@ config IOSCHED_CFQ
This is the default I/O scheduler.
- Note: If BLK_CGROUP=m, then CFQ can be built only as module.
-
config CFQ_GROUP_IOSCHED
bool "CFQ Group Scheduling support"
depends on IOSCHED_CFQ && BLK_CGROUP
diff --git a/block/Makefile b/block/Makefile
index 39b76ba66ff..a2ce6ac935e 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -2,12 +2,15 @@
# Makefile for the kernel block layer
#
-obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
+obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
- blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o \
- partition-generic.o partitions/
+ blk-iopoll.o blk-lib.o blk-mq.o blk-mq-tag.o \
+ blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \
+ genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
+ partitions/
+obj-$(CONFIG_BOUNCE) += bounce.o
obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o
obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o
@@ -18,3 +21,5 @@ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
+obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
+obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
new file mode 100644
index 00000000000..9e241063a61
--- /dev/null
+++ b/block/bio-integrity.c
@@ -0,0 +1,657 @@
+/*
+ * bio-integrity.c - bio data integrity extensions
+ *
+ * Copyright (C) 2007, 2008, 2009 Oracle Corporation
+ * Written by: Martin K. Petersen <martin.petersen@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ *
+ */
+
+#include <linux/blkdev.h>
+#include <linux/mempool.h>
+#include <linux/export.h>
+#include <linux/bio.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#define BIP_INLINE_VECS 4
+
+static struct kmem_cache *bip_slab;
+static struct workqueue_struct *kintegrityd_wq;
+
+/**
+ * bio_integrity_alloc - Allocate integrity payload and attach it to bio
+ * @bio: bio to attach integrity metadata to
+ * @gfp_mask: Memory allocation mask
+ * @nr_vecs: Number of integrity metadata scatter-gather elements
+ *
+ * Description: This function prepares a bio for attaching integrity
+ * metadata. nr_vecs specifies the maximum number of pages containing
+ * integrity metadata that can be attached.
+ */
+struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
+ gfp_t gfp_mask,
+ unsigned int nr_vecs)
+{
+ struct bio_integrity_payload *bip;
+ struct bio_set *bs = bio->bi_pool;
+ unsigned long idx = BIO_POOL_NONE;
+ unsigned inline_vecs;
+
+ if (!bs) {
+ bip = kmalloc(sizeof(struct bio_integrity_payload) +
+ sizeof(struct bio_vec) * nr_vecs, gfp_mask);
+ inline_vecs = nr_vecs;
+ } else {
+ bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
+ inline_vecs = BIP_INLINE_VECS;
+ }
+
+ if (unlikely(!bip))
+ return NULL;
+
+ memset(bip, 0, sizeof(*bip));
+
+ if (nr_vecs > inline_vecs) {
+ bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx,
+ bs->bvec_integrity_pool);
+ if (!bip->bip_vec)
+ goto err;
+ } else {
+ bip->bip_vec = bip->bip_inline_vecs;
+ }
+
+ bip->bip_slab = idx;
+ bip->bip_bio = bio;
+ bio->bi_integrity = bip;
+
+ return bip;
+err:
+ mempool_free(bip, bs->bio_integrity_pool);
+ return NULL;
+}
+EXPORT_SYMBOL(bio_integrity_alloc);
+
+/**
+ * bio_integrity_free - Free bio integrity payload
+ * @bio: bio containing bip to be freed
+ *
+ * Description: Used to free the integrity portion of a bio. Usually
+ * called from bio_free().
+ */
+void bio_integrity_free(struct bio *bio)
+{
+ struct bio_integrity_payload *bip = bio->bi_integrity;
+ struct bio_set *bs = bio->bi_pool;
+
+ if (bip->bip_owns_buf)
+ kfree(bip->bip_buf);
+
+ if (bs) {
+ if (bip->bip_slab != BIO_POOL_NONE)
+ bvec_free(bs->bvec_integrity_pool, bip->bip_vec,
+ bip->bip_slab);
+
+ mempool_free(bip, bs->bio_integrity_pool);
+ } else {
+ kfree(bip);
+ }
+
+ bio->bi_integrity = NULL;
+}
+EXPORT_SYMBOL(bio_integrity_free);
+
+static inline unsigned int bip_integrity_vecs(struct bio_integrity_payload *bip)
+{
+ if (bip->bip_slab == BIO_POOL_NONE)
+ return BIP_INLINE_VECS;
+
+ return bvec_nr_vecs(bip->bip_slab);
+}
+
+/**
+ * bio_integrity_add_page - Attach integrity metadata
+ * @bio: bio to update
+ * @page: page containing integrity metadata
+ * @len: number of bytes of integrity metadata in page
+ * @offset: start offset within page
+ *
+ * Description: Attach a page containing integrity metadata to bio.
+ */
+int bio_integrity_add_page(struct bio *bio, struct page *page,
+ unsigned int len, unsigned int offset)
+{
+ struct bio_integrity_payload *bip = bio->bi_integrity;
+ struct bio_vec *iv;
+
+ if (bip->bip_vcnt >= bip_integrity_vecs(bip)) {
+ printk(KERN_ERR "%s: bip_vec full\n", __func__);
+ return 0;
+ }
+
+ iv = bip->bip_vec + bip->bip_vcnt;
+
+ iv->bv_page = page;
+ iv->bv_len = len;
+ iv->bv_offset = offset;
+ bip->bip_vcnt++;
+
+ return len;
+}
+EXPORT_SYMBOL(bio_integrity_add_page);
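A minimal sketch of how a caller could pair the two helpers above, assuming it already holds a page of protection information. The function name is hypothetical; real users normally go through bio_integrity_prep() further down instead:

#include <linux/bio.h>

/*
 * Hypothetical caller: attach one page of externally prepared
 * protection information to @bio.
 */
static int example_attach_pi(struct bio *bio, struct page *page,
			     unsigned int len, unsigned int offset)
{
	struct bio_integrity_payload *bip;

	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
	if (!bip)
		return -ENOMEM;

	/* bio_integrity_add_page() returns the bytes accepted, 0 if full */
	if (bio_integrity_add_page(bio, page, len, offset) < len) {
		bio_integrity_free(bio);
		return -EIO;
	}

	return 0;
}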
+
+static int bdev_integrity_enabled(struct block_device *bdev, int rw)
+{
+ struct blk_integrity *bi = bdev_get_integrity(bdev);
+
+ if (bi == NULL)
+ return 0;
+
+ if (rw == READ && bi->verify_fn != NULL &&
+ (bi->flags & INTEGRITY_FLAG_READ))
+ return 1;
+
+ if (rw == WRITE && bi->generate_fn != NULL &&
+ (bi->flags & INTEGRITY_FLAG_WRITE))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * bio_integrity_enabled - Check whether integrity can be passed
+ * @bio: bio to check
+ *
+ * Description: Determines whether bio_integrity_prep() can be called
+ * on this bio or not. bio data direction and target device must be
+ * set prior to calling. The function honors the write_generate and
+ * read_verify flags in sysfs.
+ */
+int bio_integrity_enabled(struct bio *bio)
+{
+ if (!bio_is_rw(bio))
+ return 0;
+
+ /* Already protected? */
+ if (bio_integrity(bio))
+ return 0;
+
+ return bdev_integrity_enabled(bio->bi_bdev, bio_data_dir(bio));
+}
+EXPORT_SYMBOL(bio_integrity_enabled);
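The expected call sequence in a submission path is check-then-prepare; a sketch (the actual hookup into blk-core/blk-mq is not shown in this hunk, and the function name is hypothetical):

/*
 * Only allocate/generate protection information when the target
 * device advertises it and the bio is not already protected.
 */
static int example_prep_integrity(struct bio *bio)
{
	if (bio_integrity_enabled(bio))
		return bio_integrity_prep(bio);	/* 0 or -ENOMEM/-EIO */

	return 0;
}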
+
+/**
+ * bio_integrity_hw_sectors - Convert 512b sectors to hardware ditto
+ * @bi: blk_integrity profile for device
+ * @sectors: Number of 512-byte sectors to convert
+ *
+ * Description: The block layer calculates everything in 512 byte
+ * sectors but integrity metadata is done in terms of the hardware
+ * sector size of the storage device. Convert the block layer sectors
+ * to physical sectors.
+ */
+static inline unsigned int bio_integrity_hw_sectors(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ /* At this point there are only 512b or 4096b DIF/EPP devices */
+ if (bi->sector_size == 4096)
+ return sectors >>= 3;
+
+ return sectors;
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return bio_integrity_hw_sectors(bi, sectors) * bi->tuple_size;
+}
+
+/**
+ * bio_integrity_tag_size - Retrieve integrity tag space
+ * @bio: bio to inspect
+ *
+ * Description: Returns the maximum number of tag bytes that can be
+ * attached to this bio. Filesystems can use this to determine how
+ * much metadata to attach to an I/O.
+ */
+unsigned int bio_integrity_tag_size(struct bio *bio)
+{
+ struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+
+ BUG_ON(bio->bi_iter.bi_size == 0);
+
+ return bi->tag_size * (bio->bi_iter.bi_size / bi->sector_size);
+}
+EXPORT_SYMBOL(bio_integrity_tag_size);
+
+static int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len,
+ int set)
+{
+ struct bio_integrity_payload *bip = bio->bi_integrity;
+ struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+ unsigned int nr_sectors;
+
+ BUG_ON(bip->bip_buf == NULL);
+
+ if (bi->tag_size == 0)
+ return -1;
+
+ nr_sectors = bio_integrity_hw_sectors(bi,
+ DIV_ROUND_UP(len, bi->tag_size));
+
+ if (nr_sectors * bi->tuple_size > bip->bip_iter.bi_size) {
+ printk(KERN_ERR "%s: tag too big for bio: %u > %u\n", __func__,
+ nr_sectors * bi->tuple_size, bip->bip_iter.bi_size);
+ return -1;
+ }
+
+ if (set)
+ bi->set_tag_fn(bip->bip_buf, tag_buf, nr_sectors);
+ else
+ bi->get_tag_fn(bip->bip_buf, tag_buf, nr_sectors);
+
+ return 0;
+}
+
+/**
+ * bio_integrity_set_tag - Attach a tag buffer to a bio
+ * @bio: bio to attach buffer to
+ * @tag_buf: Pointer to a buffer containing tag data
+ * @len: Length of the included buffer
+ *
+ * Description: Use this function to tag a bio by leveraging the extra
+ * space provided by devices formatted with integrity protection. The
+ * size of the integrity buffer must be <= the size reported by
+ * bio_integrity_tag_size().
+ */
+int bio_integrity_set_tag(struct bio *bio, void *tag_buf, unsigned int len)
+{
+ BUG_ON(bio_data_dir(bio) != WRITE);
+
+ return bio_integrity_tag(bio, tag_buf, len, 1);
+}
+EXPORT_SYMBOL(bio_integrity_set_tag);
+
+/**
+ * bio_integrity_get_tag - Retrieve a tag buffer from a bio
+ * @bio: bio to retrieve buffer from
+ * @tag_buf: Pointer to a buffer for the tag data
+ * @len: Length of the target buffer
+ *
+ * Description: Use this function to retrieve the tag buffer from a
+ * completed I/O. The size of the integrity buffer must be <= the
+ * size reported by bio_integrity_tag_size().
+ */
+int bio_integrity_get_tag(struct bio *bio, void *tag_buf, unsigned int len)
+{
+ BUG_ON(bio_data_dir(bio) != READ);
+
+ return bio_integrity_tag(bio, tag_buf, len, 0);
+}
+EXPORT_SYMBOL(bio_integrity_get_tag);
+
+/**
+ * bio_integrity_generate_verify - Generate/verify integrity metadata for a bio
+ * @bio: bio to generate/verify integrity metadata for
+ * @operate: operation selector, 1 to generate metadata, 0 to verify it
+ */
+static int bio_integrity_generate_verify(struct bio *bio, int operate)
+{
+ struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+ struct blk_integrity_exchg bix;
+ struct bio_vec *bv;
+ sector_t sector;
+ unsigned int sectors, ret = 0, i;
+ void *prot_buf = bio->bi_integrity->bip_buf;
+
+ if (operate)
+ sector = bio->bi_iter.bi_sector;
+ else
+ sector = bio->bi_integrity->bip_iter.bi_sector;
+
+ bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
+ bix.sector_size = bi->sector_size;
+
+ bio_for_each_segment_all(bv, bio, i) {
+ void *kaddr = kmap_atomic(bv->bv_page);
+ bix.data_buf = kaddr + bv->bv_offset;
+ bix.data_size = bv->bv_len;
+ bix.prot_buf = prot_buf;
+ bix.sector = sector;
+
+ if (operate)
+ bi->generate_fn(&bix);
+ else {
+ ret = bi->verify_fn(&bix);
+ if (ret) {
+ kunmap_atomic(kaddr);
+ return ret;
+ }
+ }
+
+ sectors = bv->bv_len / bi->sector_size;
+ sector += sectors;
+ prot_buf += sectors * bi->tuple_size;
+
+ kunmap_atomic(kaddr);
+ }
+ return ret;
+}
+
+/**
+ * bio_integrity_generate - Generate integrity metadata for a bio
+ * @bio: bio to generate integrity metadata for
+ *
+ * Description: Generates integrity metadata for a bio by calling the
+ * block device's generation callback function. The bio must have a
+ * bip attached with enough room to accommodate the generated
+ * integrity metadata.
+ */
+static void bio_integrity_generate(struct bio *bio)
+{
+ bio_integrity_generate_verify(bio, 1);
+}
+
+static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
+{
+ if (bi)
+ return bi->tuple_size;
+
+ return 0;
+}
+
+/**
+ * bio_integrity_prep - Prepare bio for integrity I/O
+ * @bio: bio to prepare
+ *
+ * Description: Allocates a buffer for integrity metadata, maps the
+ * pages and attaches them to a bio. The bio must have data
+ * direction, target device and start sector set prior to calling. In
+ * the WRITE case, integrity metadata will be generated using the
+ * block device's integrity function. In the READ case, the buffer
+ * will be prepared for DMA and a suitable end_io handler set up.
+ */
+int bio_integrity_prep(struct bio *bio)
+{
+ struct bio_integrity_payload *bip;
+ struct blk_integrity *bi;
+ struct request_queue *q;
+ void *buf;
+ unsigned long start, end;
+ unsigned int len, nr_pages;
+ unsigned int bytes, offset, i;
+ unsigned int sectors;
+
+ bi = bdev_get_integrity(bio->bi_bdev);
+ q = bdev_get_queue(bio->bi_bdev);
+ BUG_ON(bi == NULL);
+ BUG_ON(bio_integrity(bio));
+
+ sectors = bio_integrity_hw_sectors(bi, bio_sectors(bio));
+
+ /* Allocate kernel buffer for protection data */
+ len = sectors * blk_integrity_tuple_size(bi);
+ buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
+ if (unlikely(buf == NULL)) {
+ printk(KERN_ERR "could not allocate integrity buffer\n");
+ return -ENOMEM;
+ }
+
+ end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ start = ((unsigned long) buf) >> PAGE_SHIFT;
+ nr_pages = end - start;
+
+ /* Allocate bio integrity payload and integrity vectors */
+ bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages);
+ if (unlikely(bip == NULL)) {
+ printk(KERN_ERR "could not allocate data integrity bioset\n");
+ kfree(buf);
+ return -EIO;
+ }
+
+ bip->bip_owns_buf = 1;
+ bip->bip_buf = buf;
+ bip->bip_iter.bi_size = len;
+ bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
+
+ /* Map it */
+ offset = offset_in_page(buf);
+ for (i = 0 ; i < nr_pages ; i++) {
+ int ret;
+ bytes = PAGE_SIZE - offset;
+
+ if (len <= 0)
+ break;
+
+ if (bytes > len)
+ bytes = len;
+
+ ret = bio_integrity_add_page(bio, virt_to_page(buf),
+ bytes, offset);
+
+ if (ret == 0)
+ return 0;
+
+ if (ret < bytes)
+ break;
+
+ buf += bytes;
+ len -= bytes;
+ offset = 0;
+ }
+
+ /* Install custom I/O completion handler if read verify is enabled */
+ if (bio_data_dir(bio) == READ) {
+ bip->bip_end_io = bio->bi_end_io;
+ bio->bi_end_io = bio_integrity_endio;
+ }
+
+ /* Auto-generate integrity metadata if this is a write */
+ if (bio_data_dir(bio) == WRITE)
+ bio_integrity_generate(bio);
+
+ return 0;
+}
+EXPORT_SYMBOL(bio_integrity_prep);
+
+/**
+ * bio_integrity_verify - Verify integrity metadata for a bio
+ * @bio: bio to verify
+ *
+ * Description: This function is called to verify the integrity of a
+ * bio. The data in the bio io_vec is compared to the integrity
+ * metadata returned by the HBA.
+ */
+static int bio_integrity_verify(struct bio *bio)
+{
+ return bio_integrity_generate_verify(bio, 0);
+}
+
+/**
+ * bio_integrity_verify_fn - Integrity I/O completion worker
+ * @work: Work struct stored in bio to be verified
+ *
+ * Description: This workqueue function is called to complete a READ
+ * request. The function verifies the transferred integrity metadata
+ * and then calls the original bio end_io function.
+ */
+static void bio_integrity_verify_fn(struct work_struct *work)
+{
+ struct bio_integrity_payload *bip =
+ container_of(work, struct bio_integrity_payload, bip_work);
+ struct bio *bio = bip->bip_bio;
+ int error;
+
+ error = bio_integrity_verify(bio);
+
+ /* Restore original bio completion handler */
+ bio->bi_end_io = bip->bip_end_io;
+ bio_endio_nodec(bio, error);
+}
+
+/**
+ * bio_integrity_endio - Integrity I/O completion function
+ * @bio: Protected bio
+ * @error: Pointer to errno
+ *
+ * Description: Completion for integrity I/O
+ *
+ * Normally I/O completion is done in interrupt context. However,
+ * verifying I/O integrity is a time-consuming task which must be run
+ * in process context. This function postpones completion
+ * accordingly.
+ */
+void bio_integrity_endio(struct bio *bio, int error)
+{
+ struct bio_integrity_payload *bip = bio->bi_integrity;
+
+ BUG_ON(bip->bip_bio != bio);
+
+ /* In case of an I/O error there is no point in verifying the
+ * integrity metadata. Restore original bio end_io handler
+ * and run it.
+ */
+ if (error) {
+ bio->bi_end_io = bip->bip_end_io;
+ bio_endio(bio, error);
+
+ return;
+ }
+
+ INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
+ queue_work(kintegrityd_wq, &bip->bip_work);
+}
+EXPORT_SYMBOL(bio_integrity_endio);
+
+/**
+ * bio_integrity_advance - Advance integrity vector
+ * @bio: bio whose integrity vector to update
+ * @bytes_done: number of data bytes that have been completed
+ *
+ * Description: This function calculates how many integrity bytes the
+ * number of completed data bytes correspond to and advances the
+ * integrity vector accordingly.
+ */
+void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
+{
+ struct bio_integrity_payload *bip = bio->bi_integrity;
+ struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+ unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
+
+ bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
+}
+EXPORT_SYMBOL(bio_integrity_advance);
+
+/**
+ * bio_integrity_trim - Trim integrity vector
+ * @bio: bio whose integrity vector to update
+ * @offset: offset to first data sector
+ * @sectors: number of data sectors
+ *
+ * Description: Used to trim the integrity vector in a cloned bio.
+ * The ivec will be advanced corresponding to 'offset' data sectors
+ * and the length will be truncated corresponding to 'len' data
+ * sectors.
+ */
+void bio_integrity_trim(struct bio *bio, unsigned int offset,
+ unsigned int sectors)
+{
+ struct bio_integrity_payload *bip = bio->bi_integrity;
+ struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+
+ bio_integrity_advance(bio, offset << 9);
+ bip->bip_iter.bi_size = bio_integrity_bytes(bi, sectors);
+}
+EXPORT_SYMBOL(bio_integrity_trim);
+
+/**
+ * bio_integrity_clone - Callback for cloning bios with integrity metadata
+ * @bio: New bio
+ * @bio_src: Original bio
+ * @gfp_mask: Memory allocation mask
+ *
+ * Description: Called to allocate a bip when cloning a bio
+ */
+int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
+ gfp_t gfp_mask)
+{
+ struct bio_integrity_payload *bip_src = bio_src->bi_integrity;
+ struct bio_integrity_payload *bip;
+
+ BUG_ON(bip_src == NULL);
+
+ bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
+
+ if (bip == NULL)
+ return -EIO;
+
+ memcpy(bip->bip_vec, bip_src->bip_vec,
+ bip_src->bip_vcnt * sizeof(struct bio_vec));
+
+ bip->bip_vcnt = bip_src->bip_vcnt;
+ bip->bip_iter = bip_src->bip_iter;
+
+ return 0;
+}
+EXPORT_SYMBOL(bio_integrity_clone);
+
+int bioset_integrity_create(struct bio_set *bs, int pool_size)
+{
+ if (bs->bio_integrity_pool)
+ return 0;
+
+ bs->bio_integrity_pool = mempool_create_slab_pool(pool_size, bip_slab);
+ if (!bs->bio_integrity_pool)
+ return -1;
+
+ bs->bvec_integrity_pool = biovec_create_pool(pool_size);
+ if (!bs->bvec_integrity_pool) {
+ mempool_destroy(bs->bio_integrity_pool);
+ return -1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(bioset_integrity_create);
+
+void bioset_integrity_free(struct bio_set *bs)
+{
+ if (bs->bio_integrity_pool)
+ mempool_destroy(bs->bio_integrity_pool);
+
+ if (bs->bvec_integrity_pool)
+ mempool_destroy(bs->bvec_integrity_pool);
+}
+EXPORT_SYMBOL(bioset_integrity_free);
+
+void __init bio_integrity_init(void)
+{
+ /*
+ * kintegrityd won't block much but may burn a lot of CPU cycles.
+ * Make it highpri CPU intensive wq with max concurrency of 1.
+ */
+ kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
+ WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
+ if (!kintegrityd_wq)
+ panic("Failed to create kintegrityd\n");
+
+ bip_slab = kmem_cache_create("bio_integrity_payload",
+ sizeof(struct bio_integrity_payload) +
+ sizeof(struct bio_vec) * BIP_INLINE_VECS,
+ 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ if (!bip_slab)
+ panic("Failed to create slab\n");
+}
diff --git a/block/bio.c b/block/bio.c
new file mode 100644
index 00000000000..0ec61c9e536
--- /dev/null
+++ b/block/bio.c
@@ -0,0 +1,2052 @@
+/*
+ * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/uio.h>
+#include <linux/iocontext.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/mempool.h>
+#include <linux/workqueue.h>
+#include <linux/cgroup.h>
+#include <scsi/sg.h> /* for struct sg_iovec */
+
+#include <trace/events/block.h>
+
+/*
+ * Test patch to inline a certain number of bi_io_vec's inside the bio
+ * itself, to shrink a bio data allocation from two mempool calls to one
+ */
+#define BIO_INLINE_VECS 4
+
+/*
+ * if you change this list, also change bvec_alloc or things will
+ * break badly! cannot be bigger than what you can fit into an
+ * unsigned short
+ */
+#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
+static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
+ BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
+};
+#undef BV
+
+/*
+ * fs_bio_set is the bio_set containing bio and iovec memory pools used by
+ * IO code that does not need private memory pools.
+ */
+struct bio_set *fs_bio_set;
+EXPORT_SYMBOL(fs_bio_set);
+
+/*
+ * Our slab pool management
+ */
+struct bio_slab {
+ struct kmem_cache *slab;
+ unsigned int slab_ref;
+ unsigned int slab_size;
+ char name[8];
+};
+static DEFINE_MUTEX(bio_slab_lock);
+static struct bio_slab *bio_slabs;
+static unsigned int bio_slab_nr, bio_slab_max;
+
+static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
+{
+ unsigned int sz = sizeof(struct bio) + extra_size;
+ struct kmem_cache *slab = NULL;
+ struct bio_slab *bslab, *new_bio_slabs;
+ unsigned int new_bio_slab_max;
+ unsigned int i, entry = -1;
+
+ mutex_lock(&bio_slab_lock);
+
+ i = 0;
+ while (i < bio_slab_nr) {
+ bslab = &bio_slabs[i];
+
+ if (!bslab->slab && entry == -1)
+ entry = i;
+ else if (bslab->slab_size == sz) {
+ slab = bslab->slab;
+ bslab->slab_ref++;
+ break;
+ }
+ i++;
+ }
+
+ if (slab)
+ goto out_unlock;
+
+ if (bio_slab_nr == bio_slab_max && entry == -1) {
+ new_bio_slab_max = bio_slab_max << 1;
+ new_bio_slabs = krealloc(bio_slabs,
+ new_bio_slab_max * sizeof(struct bio_slab),
+ GFP_KERNEL);
+ if (!new_bio_slabs)
+ goto out_unlock;
+ bio_slab_max = new_bio_slab_max;
+ bio_slabs = new_bio_slabs;
+ }
+ if (entry == -1)
+ entry = bio_slab_nr++;
+
+ bslab = &bio_slabs[entry];
+
+ snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
+ slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!slab)
+ goto out_unlock;
+
+ bslab->slab = slab;
+ bslab->slab_ref = 1;
+ bslab->slab_size = sz;
+out_unlock:
+ mutex_unlock(&bio_slab_lock);
+ return slab;
+}
+
+static void bio_put_slab(struct bio_set *bs)
+{
+ struct bio_slab *bslab = NULL;
+ unsigned int i;
+
+ mutex_lock(&bio_slab_lock);
+
+ for (i = 0; i < bio_slab_nr; i++) {
+ if (bs->bio_slab == bio_slabs[i].slab) {
+ bslab = &bio_slabs[i];
+ break;
+ }
+ }
+
+ if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
+ goto out;
+
+ WARN_ON(!bslab->slab_ref);
+
+ if (--bslab->slab_ref)
+ goto out;
+
+ kmem_cache_destroy(bslab->slab);
+ bslab->slab = NULL;
+
+out:
+ mutex_unlock(&bio_slab_lock);
+}
+
+unsigned int bvec_nr_vecs(unsigned short idx)
+{
+ return bvec_slabs[idx].nr_vecs;
+}
+
+void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
+{
+ BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);
+
+ if (idx == BIOVEC_MAX_IDX)
+ mempool_free(bv, pool);
+ else {
+ struct biovec_slab *bvs = bvec_slabs + idx;
+
+ kmem_cache_free(bvs->slab, bv);
+ }
+}
+
+struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
+ mempool_t *pool)
+{
+ struct bio_vec *bvl;
+
+ /*
+ * see comment near bvec_array define!
+ */
+ switch (nr) {
+ case 1:
+ *idx = 0;
+ break;
+ case 2 ... 4:
+ *idx = 1;
+ break;
+ case 5 ... 16:
+ *idx = 2;
+ break;
+ case 17 ... 64:
+ *idx = 3;
+ break;
+ case 65 ... 128:
+ *idx = 4;
+ break;
+ case 129 ... BIO_MAX_PAGES:
+ *idx = 5;
+ break;
+ default:
+ return NULL;
+ }
+
+ /*
+ * idx now points to the pool we want to allocate from. only the
+ * 1-vec entry pool is mempool backed.
+ */
+ if (*idx == BIOVEC_MAX_IDX) {
+fallback:
+ bvl = mempool_alloc(pool, gfp_mask);
+ } else {
+ struct biovec_slab *bvs = bvec_slabs + *idx;
+ gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
+
+ /*
+ * Make this allocation restricted and don't dump info on
+ * allocation failures, since we'll fallback to the mempool
+ * in case of failure.
+ */
+ __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
+
+ /*
+ * Try a slab allocation. If this fails and __GFP_WAIT
+ * is set, retry with the 1-entry mempool
+ */
+ bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
+ if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
+ *idx = BIOVEC_MAX_IDX;
+ goto fallback;
+ }
+ }
+
+ return bvl;
+}
+
+static void __bio_free(struct bio *bio)
+{
+ bio_disassociate_task(bio);
+
+ if (bio_integrity(bio))
+ bio_integrity_free(bio);
+}
+
+static void bio_free(struct bio *bio)
+{
+ struct bio_set *bs = bio->bi_pool;
+ void *p;
+
+ __bio_free(bio);
+
+ if (bs) {
+ if (bio_flagged(bio, BIO_OWNS_VEC))
+ bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio));
+
+ /*
+ * If we have front padding, adjust the bio pointer before freeing
+ */
+ p = bio;
+ p -= bs->front_pad;
+
+ mempool_free(p, bs->bio_pool);
+ } else {
+ /* Bio was allocated by bio_kmalloc() */
+ kfree(bio);
+ }
+}
+
+void bio_init(struct bio *bio)
+{
+ memset(bio, 0, sizeof(*bio));
+ bio->bi_flags = 1 << BIO_UPTODATE;
+ atomic_set(&bio->bi_remaining, 1);
+ atomic_set(&bio->bi_cnt, 1);
+}
+EXPORT_SYMBOL(bio_init);
+
+/**
+ * bio_reset - reinitialize a bio
+ * @bio: bio to reset
+ *
+ * Description:
+ * After calling bio_reset(), @bio will be in the same state as a freshly
+ * allocated bio returned by bio_alloc_bioset() - the only fields that are
+ * preserved are the ones that are initialized by bio_alloc_bioset(). See
+ * comment in struct bio.
+ */
+void bio_reset(struct bio *bio)
+{
+ unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
+
+ __bio_free(bio);
+
+ memset(bio, 0, BIO_RESET_BYTES);
+ bio->bi_flags = flags|(1 << BIO_UPTODATE);
+ atomic_set(&bio->bi_remaining, 1);
+}
+EXPORT_SYMBOL(bio_reset);
+
+static void bio_chain_endio(struct bio *bio, int error)
+{
+ bio_endio(bio->bi_private, error);
+ bio_put(bio);
+}
+
+/**
+ * bio_chain - chain bio completions
+ * @bio: the target bio
+ * @parent: the @bio's parent bio
+ *
+ * The caller won't have a bi_end_io called when @bio completes - instead,
+ * @parent's bi_end_io won't be called until both @parent and @bio have
+ * completed; the chained bio will also be freed when it completes.
+ *
+ * The caller must not set bi_private or bi_end_io in @bio.
+ */
+void bio_chain(struct bio *bio, struct bio *parent)
+{
+ BUG_ON(bio->bi_private || bio->bi_end_io);
+
+ bio->bi_private = parent;
+ bio->bi_end_io = bio_chain_endio;
+ atomic_inc(&parent->bi_remaining);
+}
+EXPORT_SYMBOL(bio_chain);
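One way a driver might use bio_chain(): issue an extra bio whose completion the parent must also wait for. This is a hypothetical sketch, not code from this patch; fs_bio_set is the global pool declared above, and GFP_NOIO guarantees the mempool-backed allocation succeeds:

/* Tie an empty flush bio to @parent's completion. */
static void example_flush_with_parent(struct bio *parent,
				      struct block_device *bdev)
{
	struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0, fs_bio_set);

	flush->bi_bdev = bdev;
	bio_chain(flush, parent);	/* parent now waits for both bios */
	submit_bio(WRITE_FLUSH, flush);
}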
+
+static void bio_alloc_rescue(struct work_struct *work)
+{
+ struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
+ struct bio *bio;
+
+ while (1) {
+ spin_lock(&bs->rescue_lock);
+ bio = bio_list_pop(&bs->rescue_list);
+ spin_unlock(&bs->rescue_lock);
+
+ if (!bio)
+ break;
+
+ generic_make_request(bio);
+ }
+}
+
+static void punt_bios_to_rescuer(struct bio_set *bs)
+{
+ struct bio_list punt, nopunt;
+ struct bio *bio;
+
+ /*
+ * In order to guarantee forward progress we must punt only bios that
+ * were allocated from this bio_set; otherwise, if there was a bio on
+ * there for a stacking driver higher up in the stack, processing it
+ * could require allocating bios from this bio_set, and doing that from
+ * our own rescuer would be bad.
+ *
+ * Since bio lists are singly linked, pop them all instead of trying to
+ * remove from the middle of the list:
+ */
+
+ bio_list_init(&punt);
+ bio_list_init(&nopunt);
+
+ while ((bio = bio_list_pop(current->bio_list)))
+ bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+
+ *current->bio_list = nopunt;
+
+ spin_lock(&bs->rescue_lock);
+ bio_list_merge(&bs->rescue_list, &punt);
+ spin_unlock(&bs->rescue_lock);
+
+ queue_work(bs->rescue_workqueue, &bs->rescue_work);
+}
+
+/**
+ * bio_alloc_bioset - allocate a bio for I/O
+ * @gfp_mask: the GFP_ mask given to the slab allocator
+ * @nr_iovecs: number of iovecs to pre-allocate
+ * @bs: the bio_set to allocate from.
+ *
+ * Description:
+ * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
+ * backed by the @bs's mempool.
+ *
+ * When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be
+ * able to allocate a bio. This is due to the mempool guarantees. To make this
+ * work, callers must never allocate more than 1 bio at a time from this pool.
+ * Callers that need to allocate more than 1 bio must always submit the
+ * previously allocated bio for IO before attempting to allocate a new one.
+ * Failure to do so can cause deadlocks under memory pressure.
+ *
+ * Note that when running under generic_make_request() (i.e. any block
+ * driver), bios are not submitted until after you return - see the code in
+ * generic_make_request() that converts recursion into iteration, to prevent
+ * stack overflows.
+ *
+ * This would normally mean allocating multiple bios under
+ * generic_make_request() would be susceptible to deadlocks, but we have
+ * deadlock avoidance code that resubmits any blocked bios from a rescuer
+ * thread.
+ *
+ * However, we do not guarantee forward progress for allocations from other
+ * mempools. Doing multiple allocations from the same mempool under
+ * generic_make_request() should be avoided - instead, use bio_set's front_pad
+ * for per bio allocations.
+ *
+ * RETURNS:
+ * Pointer to new bio on success, NULL on failure.
+ */
+struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
+{
+ gfp_t saved_gfp = gfp_mask;
+ unsigned front_pad;
+ unsigned inline_vecs;
+ unsigned long idx = BIO_POOL_NONE;
+ struct bio_vec *bvl = NULL;
+ struct bio *bio;
+ void *p;
+
+ if (!bs) {
+ if (nr_iovecs > UIO_MAXIOV)
+ return NULL;
+
+ p = kmalloc(sizeof(struct bio) +
+ nr_iovecs * sizeof(struct bio_vec),
+ gfp_mask);
+ front_pad = 0;
+ inline_vecs = nr_iovecs;
+ } else {
+ /*
+ * generic_make_request() converts recursion to iteration; this
+ * means if we're running beneath it, any bios we allocate and
+ * submit will not be submitted (and thus freed) until after we
+ * return.
+ *
+ * This exposes us to a potential deadlock if we allocate
+ * multiple bios from the same bio_set() while running
+ * underneath generic_make_request(). If we were to allocate
+ * multiple bios (say a stacking block driver that was splitting
+ * bios), we would deadlock if we exhausted the mempool's
+ * reserve.
+ *
+ * We solve this, and guarantee forward progress, with a rescuer
+ * workqueue per bio_set. If we go to allocate and there are
+ * bios on current->bio_list, we first try the allocation
+ * without __GFP_WAIT; if that fails, we punt those bios we
+ * would be blocking to the rescuer workqueue before we retry
+ * with the original gfp_flags.
+ */
+
+ if (current->bio_list && !bio_list_empty(current->bio_list))
+ gfp_mask &= ~__GFP_WAIT;
+
+ p = mempool_alloc(bs->bio_pool, gfp_mask);
+ if (!p && gfp_mask != saved_gfp) {
+ punt_bios_to_rescuer(bs);
+ gfp_mask = saved_gfp;
+ p = mempool_alloc(bs->bio_pool, gfp_mask);
+ }
+
+ front_pad = bs->front_pad;
+ inline_vecs = BIO_INLINE_VECS;
+ }
+
+ if (unlikely(!p))
+ return NULL;
+
+ bio = p + front_pad;
+ bio_init(bio);
+
+ if (nr_iovecs > inline_vecs) {
+ bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
+ if (!bvl && gfp_mask != saved_gfp) {
+ punt_bios_to_rescuer(bs);
+ gfp_mask = saved_gfp;
+ bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
+ }
+
+ if (unlikely(!bvl))
+ goto err_free;
+
+ bio->bi_flags |= 1 << BIO_OWNS_VEC;
+ } else if (nr_iovecs) {
+ bvl = bio->bi_inline_vecs;
+ }
+
+ bio->bi_pool = bs;
+ bio->bi_flags |= idx << BIO_POOL_OFFSET;
+ bio->bi_max_vecs = nr_iovecs;
+ bio->bi_io_vec = bvl;
+ return bio;
+
+err_free:
+ mempool_free(p, bs->bio_pool);
+ return NULL;
+}
+EXPORT_SYMBOL(bio_alloc_bioset);
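A sketch of the intended usage pattern for private bio_sets: a driver embeds its per-I/O state in the front_pad area so the bio and its bookkeeping come from a single mempool-backed allocation. The structure and function names here are hypothetical:

#include <linux/bio.h>

/* Hypothetical per-I/O driver state; the bio must be the last member. */
struct example_io {
	void		*ctx;
	struct bio	bio;
};

static struct bio_set *example_bs;

static int __init example_init(void)
{
	/* 64 reserved bios, each preceded by room for struct example_io */
	example_bs = bioset_create(64, offsetof(struct example_io, bio));
	return example_bs ? 0 : -ENOMEM;
}

static struct example_io *example_alloc(unsigned int nr_vecs)
{
	/* GFP_NOIO includes __GFP_WAIT, so this cannot fail (see above) */
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, example_bs);

	return container_of(bio, struct example_io, bio);
}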
+
+void zero_fill_bio(struct bio *bio)
+{
+ unsigned long flags;
+ struct bio_vec bv;
+ struct bvec_iter iter;
+
+ bio_for_each_segment(bv, bio, iter) {
+ char *data = bvec_kmap_irq(&bv, &flags);
+ memset(data, 0, bv.bv_len);
+ flush_dcache_page(bv.bv_page);
+ bvec_kunmap_irq(data, &flags);
+ }
+}
+EXPORT_SYMBOL(zero_fill_bio);
+
+/**
+ * bio_put - release a reference to a bio
+ * @bio: bio to release reference to
+ *
+ * Description:
+ * Put a reference to a &struct bio, either one you have gotten with
+ * bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
+ **/
+void bio_put(struct bio *bio)
+{
+ BIO_BUG_ON(!atomic_read(&bio->bi_cnt));
+
+ /*
+ * last put frees it
+ */
+ if (atomic_dec_and_test(&bio->bi_cnt))
+ bio_free(bio);
+}
+EXPORT_SYMBOL(bio_put);
+
+inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
+{
+ if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+ blk_recount_segments(q, bio);
+
+ return bio->bi_phys_segments;
+}
+EXPORT_SYMBOL(bio_phys_segments);
+
+/**
+ * __bio_clone_fast - clone a bio that shares the original bio's biovec
+ * @bio: destination bio
+ * @bio_src: bio to clone
+ *
+ * Clone a &bio. Caller will own the returned bio, but not
+ * the actual data it points to. Reference count of returned
+ * bio will be one.
+ *
+ * Caller must ensure that @bio_src is not freed before @bio.
+ */
+void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
+{
+ BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE);
+
+ /*
+ * most users will be overriding ->bi_bdev with a new target,
+ * so we don't set nor calculate new physical/hw segment counts here
+ */
+ bio->bi_bdev = bio_src->bi_bdev;
+ bio->bi_flags |= 1 << BIO_CLONED;
+ bio->bi_rw = bio_src->bi_rw;
+ bio->bi_iter = bio_src->bi_iter;
+ bio->bi_io_vec = bio_src->bi_io_vec;
+}
+EXPORT_SYMBOL(__bio_clone_fast);
+
+/**
+ * bio_clone_fast - clone a bio that shares the original bio's biovec
+ * @bio: bio to clone
+ * @gfp_mask: allocation priority
+ * @bs: bio_set to allocate from
+ *
+ * Like __bio_clone_fast, only also allocates the returned bio
+ */
+struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
+{
+ struct bio *b;
+
+ b = bio_alloc_bioset(gfp_mask, 0, bs);
+ if (!b)
+ return NULL;
+
+ __bio_clone_fast(b, bio);
+
+ if (bio_integrity(bio)) {
+ int ret;
+
+ ret = bio_integrity_clone(b, bio, gfp_mask);
+
+ if (ret < 0) {
+ bio_put(b);
+ return NULL;
+ }
+ }
+
+ return b;
+}
+EXPORT_SYMBOL(bio_clone_fast);
+
+/**
+ * bio_clone_bioset - clone a bio
+ * @bio_src: bio to clone
+ * @gfp_mask: allocation priority
+ * @bs: bio_set to allocate from
+ *
+ * Clone bio. Caller will own the returned bio, but not the actual data it
+ * points to. Reference count of returned bio will be one.
+ */
+struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
+ struct bio_set *bs)
+{
+ struct bvec_iter iter;
+ struct bio_vec bv;
+ struct bio *bio;
+
+ /*
+ * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
+ * bio_src->bi_io_vec to bio->bi_io_vec.
+ *
+ * We can't do that anymore, because:
+ *
+ * - The point of cloning the biovec is to produce a bio with a biovec
+ * the caller can modify: bi_idx and bi_bvec_done should be 0.
+ *
+ * - The original bio could've had more than BIO_MAX_PAGES biovecs; if
+ * we tried to clone the whole thing bio_alloc_bioset() would fail.
+ * But the clone should succeed as long as the number of biovecs we
+ * actually need to allocate is fewer than BIO_MAX_PAGES.
+ *
+ * - Lastly, bi_vcnt should not be looked at or relied upon by code
+ * that does not own the bio - reason being drivers don't use it for
+ * iterating over the biovec anymore, so expecting it to be kept up
+ * to date (i.e. for clones that share the parent biovec) is just
+ * asking for trouble and would force extra work on
+ * __bio_clone_fast() anyways.
+ */
+
+ bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
+ if (!bio)
+ return NULL;
+
+ bio->bi_bdev = bio_src->bi_bdev;
+ bio->bi_rw = bio_src->bi_rw;
+ bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
+ bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
+
+ if (bio->bi_rw & REQ_DISCARD)
+ goto integrity_clone;
+
+ if (bio->bi_rw & REQ_WRITE_SAME) {
+ bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
+ goto integrity_clone;
+ }
+
+ bio_for_each_segment(bv, bio_src, iter)
+ bio->bi_io_vec[bio->bi_vcnt++] = bv;
+
+integrity_clone:
+ if (bio_integrity(bio_src)) {
+ int ret;
+
+ ret = bio_integrity_clone(bio, bio_src, gfp_mask);
+ if (ret < 0) {
+ bio_put(bio);
+ return NULL;
+ }
+ }
+
+ return bio;
+}
+EXPORT_SYMBOL(bio_clone_bioset);
+
+/**
+ * bio_get_nr_vecs - return approx number of vecs
+ * @bdev: I/O target
+ *
+ * Return the approximate number of pages we can send to this target.
+ * There's no guarantee that you will be able to fit this number of pages
+ * into a bio, it does not account for dynamic restrictions that vary
+ * on offset.
+ */
+int bio_get_nr_vecs(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+ int nr_pages;
+
+ nr_pages = min_t(unsigned,
+ queue_max_segments(q),
+ queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);
+
+ return min_t(unsigned, nr_pages, BIO_MAX_PAGES);
+
+}
+EXPORT_SYMBOL(bio_get_nr_vecs);
+
+static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
+ *page, unsigned int len, unsigned int offset,
+ unsigned int max_sectors)
+{
+ int retried_segments = 0;
+ struct bio_vec *bvec;
+
+ /*
+ * cloned bio must not modify vec list
+ */
+ if (unlikely(bio_flagged(bio, BIO_CLONED)))
+ return 0;
+
+ if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
+ return 0;
+
+ /*
+ * For filesystems with a blocksize smaller than the pagesize
+ * we will often be called with the same page as last time and
+ * a consecutive offset. Optimize this special case.
+ */
+ if (bio->bi_vcnt > 0) {
+ struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
+
+ if (page == prev->bv_page &&
+ offset == prev->bv_offset + prev->bv_len) {
+ unsigned int prev_bv_len = prev->bv_len;
+ prev->bv_len += len;
+
+ if (q->merge_bvec_fn) {
+ struct bvec_merge_data bvm = {
+ /* prev_bvec is already charged in
+ bi_size, discharge it in order to
+ simulate merging updated prev_bvec
+ as new bvec. */
+ .bi_bdev = bio->bi_bdev,
+ .bi_sector = bio->bi_iter.bi_sector,
+ .bi_size = bio->bi_iter.bi_size -
+ prev_bv_len,
+ .bi_rw = bio->bi_rw,
+ };
+
+ if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) {
+ prev->bv_len -= len;
+ return 0;
+ }
+ }
+
+ goto done;
+ }
+
+ /*
+ * If the queue doesn't support SG gaps and adding this
+ * offset would create a gap, disallow it.
+ */
+ if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
+ bvec_gap_to_prev(prev, offset))
+ return 0;
+ }
+
+ if (bio->bi_vcnt >= bio->bi_max_vecs)
+ return 0;
+
+ /*
+ * we might lose a segment or two here, but rather that than
+ * make this too complex.
+ */
+
+ while (bio->bi_phys_segments >= queue_max_segments(q)) {
+
+ if (retried_segments)
+ return 0;
+
+ retried_segments = 1;
+ blk_recount_segments(q, bio);
+ }
+
+ /*
+ * setup the new entry, we might clear it again later if we
+ * cannot add the page
+ */
+ bvec = &bio->bi_io_vec[bio->bi_vcnt];
+ bvec->bv_page = page;
+ bvec->bv_len = len;
+ bvec->bv_offset = offset;
+
+ /*
+ * if queue has other restrictions (eg varying max sector size
+ * depending on offset), it can specify a merge_bvec_fn in the
+ * queue to get further control
+ */
+ if (q->merge_bvec_fn) {
+ struct bvec_merge_data bvm = {
+ .bi_bdev = bio->bi_bdev,
+ .bi_sector = bio->bi_iter.bi_sector,
+ .bi_size = bio->bi_iter.bi_size,
+ .bi_rw = bio->bi_rw,
+ };
+
+ /*
+ * merge_bvec_fn() returns number of bytes it can accept
+ * at this offset
+ */
+ if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) {
+ bvec->bv_page = NULL;
+ bvec->bv_len = 0;
+ bvec->bv_offset = 0;
+ return 0;
+ }
+ }
+
+ /* If we may be able to merge these biovecs, force a recount */
+ if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
+ bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+
+ bio->bi_vcnt++;
+ bio->bi_phys_segments++;
+ done:
+ bio->bi_iter.bi_size += len;
+ return len;
+}
+
+/**
+ * bio_add_pc_page - attempt to add page to bio
+ * @q: the target queue
+ * @bio: destination bio
+ * @page: page to add
+ * @len: vec entry length
+ * @offset: vec entry offset
+ *
+ * Attempt to add a page to the bio_vec maplist. This can fail for a
+ * number of reasons, such as the bio being full or target block device
+ * limitations. The target block device must allow bios up to PAGE_SIZE,
+ * so it is always possible to add a single page to an empty bio.
+ *
+ * This should only be used by REQ_PC bios.
+ */
+int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
+ unsigned int len, unsigned int offset)
+{
+ return __bio_add_page(q, bio, page, len, offset,
+ queue_max_hw_sectors(q));
+}
+EXPORT_SYMBOL(bio_add_pc_page);
+
+/**
+ * bio_add_page - attempt to add page to bio
+ * @bio: destination bio
+ * @page: page to add
+ * @len: vec entry length
+ * @offset: vec entry offset
+ *
+ * Attempt to add a page to the bio_vec maplist. This can fail for a
+ * number of reasons, such as the bio being full or target block device
+ * limitations. The target block device must allow bios up to PAGE_SIZE,
+ * so it is always possible to add a single page to an empty bio.
+ */
+int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
+ unsigned int offset)
+{
+ struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+ unsigned int max_sectors;
+
+ max_sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
+ if ((max_sectors < (len >> 9)) && !bio->bi_iter.bi_size)
+ max_sectors = len >> 9;
+
+ return __bio_add_page(q, bio, page, len, offset, max_sectors);
+}
+EXPORT_SYMBOL(bio_add_page);
+
+struct submit_bio_ret {
+ struct completion event;
+ int error;
+};
+
+static void submit_bio_wait_endio(struct bio *bio, int error)
+{
+ struct submit_bio_ret *ret = bio->bi_private;
+
+ ret->error = error;
+ complete(&ret->event);
+}
+
+/**
+ * submit_bio_wait - submit a bio, and wait until it completes
+ * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
+ * @bio: The &struct bio which describes the I/O
+ *
+ * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
+ * bio_endio() on failure.
+ */
+int submit_bio_wait(int rw, struct bio *bio)
+{
+ struct submit_bio_ret ret;
+
+ rw |= REQ_SYNC;
+ init_completion(&ret.event);
+ bio->bi_private = &ret;
+ bio->bi_end_io = submit_bio_wait_endio;
+ submit_bio(rw, bio);
+ wait_for_completion(&ret.event);
+
+ return ret.error;
+}
+EXPORT_SYMBOL(submit_bio_wait);
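Taken together with bio_add_page() above, this gives a compact way to do small synchronous I/O. A hedged sketch with a hypothetical function name and minimal error handling:

#include <linux/bio.h>

/* Synchronously read one page from @bdev at @sector. */
static int example_read_page(struct block_device *bdev, sector_t sector,
			     struct page *page)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);	/* fs_bio_set backed */
	int ret;

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(READ, bio);	/* 0 on success, -errno on error */
	bio_put(bio);
	return ret;
}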
+
+/**
+ * bio_advance - increment/complete a bio by some number of bytes
+ * @bio: bio to advance
+ * @bytes: number of bytes to complete
+ *
+ * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
+ * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
+ * be updated on the last bvec as well.
+ *
+ * @bio will then represent the remaining, uncompleted portion of the io.
+ */
+void bio_advance(struct bio *bio, unsigned bytes)
+{
+ if (bio_integrity(bio))
+ bio_integrity_advance(bio, bytes);
+
+ bio_advance_iter(bio, &bio->bi_iter, bytes);
+}
+EXPORT_SYMBOL(bio_advance);
+
+/**
+ * bio_alloc_pages - allocates a single page for each bvec in a bio
+ * @bio: bio to allocate pages for
+ * @gfp_mask: flags for allocation
+ *
+ * Allocates pages up to @bio->bi_vcnt.
+ *
+ * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
+ * freed.
+ */
+int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
+{
+ int i;
+ struct bio_vec *bv;
+
+ bio_for_each_segment_all(bv, bio, i) {
+ bv->bv_page = alloc_page(gfp_mask);
+ if (!bv->bv_page) {
+ while (--bv >= bio->bi_io_vec)
+ __free_page(bv->bv_page);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(bio_alloc_pages);
+
+/**
+ * bio_copy_data - copy contents of data buffers from one chain of bios to
+ * another
+ * @src: source bio list
+ * @dst: destination bio list
+ *
+ * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
+ * @src and @dst as linked lists of bios.
+ *
+ * Stops when it reaches the end of either @src or @dst - that is, copies
+ * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
+ */
+void bio_copy_data(struct bio *dst, struct bio *src)
+{
+ struct bvec_iter src_iter, dst_iter;
+ struct bio_vec src_bv, dst_bv;
+ void *src_p, *dst_p;
+ unsigned bytes;
+
+ src_iter = src->bi_iter;
+ dst_iter = dst->bi_iter;
+
+ while (1) {
+ if (!src_iter.bi_size) {
+ src = src->bi_next;
+ if (!src)
+ break;
+
+ src_iter = src->bi_iter;
+ }
+
+ if (!dst_iter.bi_size) {
+ dst = dst->bi_next;
+ if (!dst)
+ break;
+
+ dst_iter = dst->bi_iter;
+ }
+
+ src_bv = bio_iter_iovec(src, src_iter);
+ dst_bv = bio_iter_iovec(dst, dst_iter);
+
+ bytes = min(src_bv.bv_len, dst_bv.bv_len);
+
+ src_p = kmap_atomic(src_bv.bv_page);
+ dst_p = kmap_atomic(dst_bv.bv_page);
+
+ memcpy(dst_p + dst_bv.bv_offset,
+ src_p + src_bv.bv_offset,
+ bytes);
+
+ kunmap_atomic(dst_p);
+ kunmap_atomic(src_p);
+
+ bio_advance_iter(src, &src_iter, bytes);
+ bio_advance_iter(dst, &dst_iter, bytes);
+ }
+}
+EXPORT_SYMBOL(bio_copy_data);
+
+struct bio_map_data {
+ int nr_sgvecs;
+ int is_our_pages;
+ struct sg_iovec sgvecs[];
+};
+
+static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
+ const struct sg_iovec *iov, int iov_count,
+ int is_our_pages)
+{
+ memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
+ bmd->nr_sgvecs = iov_count;
+ bmd->is_our_pages = is_our_pages;
+ bio->bi_private = bmd;
+}
+
+static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
+ gfp_t gfp_mask)
+{
+ if (iov_count > UIO_MAXIOV)
+ return NULL;
+
+ return kmalloc(sizeof(struct bio_map_data) +
+ sizeof(struct sg_iovec) * iov_count, gfp_mask);
+}
+
+static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_count,
+ int to_user, int from_user, int do_free_page)
+{
+ int ret = 0, i;
+ struct bio_vec *bvec;
+ int iov_idx = 0;
+ unsigned int iov_off = 0;
+
+ bio_for_each_segment_all(bvec, bio, i) {
+ char *bv_addr = page_address(bvec->bv_page);
+ unsigned int bv_len = bvec->bv_len;
+
+ while (bv_len && iov_idx < iov_count) {
+ unsigned int bytes;
+ char __user *iov_addr;
+
+ bytes = min_t(unsigned int,
+ iov[iov_idx].iov_len - iov_off, bv_len);
+ iov_addr = iov[iov_idx].iov_base + iov_off;
+
+ if (!ret) {
+ if (to_user)
+ ret = copy_to_user(iov_addr, bv_addr,
+ bytes);
+
+ if (from_user)
+ ret = copy_from_user(bv_addr, iov_addr,
+ bytes);
+
+ if (ret)
+ ret = -EFAULT;
+ }
+
+ bv_len -= bytes;
+ bv_addr += bytes;
+ iov_addr += bytes;
+ iov_off += bytes;
+
+ if (iov[iov_idx].iov_len == iov_off) {
+ iov_idx++;
+ iov_off = 0;
+ }
+ }
+
+ if (do_free_page)
+ __free_page(bvec->bv_page);
+ }
+
+ return ret;
+}
+
+/**
+ * bio_uncopy_user - finish previously mapped bio
+ * @bio: bio being terminated
+ *
+ * Free pages allocated from bio_copy_user() and write back data
+ * to user space in case of a read.
+ */
+int bio_uncopy_user(struct bio *bio)
+{
+ struct bio_map_data *bmd = bio->bi_private;
+ struct bio_vec *bvec;
+ int ret = 0, i;
+
+ if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
+ /*
+ * if we're in a workqueue, the request is orphaned, so
+ * don't copy into a random user address space, just free.
+ */
+ if (current->mm)
+ ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs,
+ bio_data_dir(bio) == READ,
+ 0, bmd->is_our_pages);
+ else if (bmd->is_our_pages)
+ bio_for_each_segment_all(bvec, bio, i)
+ __free_page(bvec->bv_page);
+ }
+ kfree(bmd);
+ bio_put(bio);
+ return ret;
+}
+EXPORT_SYMBOL(bio_uncopy_user);
+
+/**
+ * bio_copy_user_iov - copy user data to bio
+ * @q: destination block queue
+ * @map_data: pointer to the rq_map_data holding pages (if necessary)
+ * @iov: the iovec.
+ * @iov_count: number of elements in the iovec
+ * @write_to_vm: bool indicating writing to pages or not
+ * @gfp_mask: memory allocation flags
+ *
+ * Prepares and returns a bio for indirect user io, bouncing data
+ * to/from kernel pages as necessary. Must be paired with
+ * call bio_uncopy_user() on io completion.
+ */
+struct bio *bio_copy_user_iov(struct request_queue *q,
+ struct rq_map_data *map_data,
+ const struct sg_iovec *iov, int iov_count,
+ int write_to_vm, gfp_t gfp_mask)
+{
+ struct bio_map_data *bmd;
+ struct bio_vec *bvec;
+ struct page *page;
+ struct bio *bio;
+ int i, ret;
+ int nr_pages = 0;
+ unsigned int len = 0;
+ unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;
+
+ for (i = 0; i < iov_count; i++) {
+ unsigned long uaddr;
+ unsigned long end;
+ unsigned long start;
+
+ uaddr = (unsigned long)iov[i].iov_base;
+ end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ start = uaddr >> PAGE_SHIFT;
+
+ /*
+ * Overflow, abort
+ */
+ if (end < start)
+ return ERR_PTR(-EINVAL);
+
+ nr_pages += end - start;
+ len += iov[i].iov_len;
+ }
+
+ if (offset)
+ nr_pages++;
+
+ bmd = bio_alloc_map_data(iov_count, gfp_mask);
+ if (!bmd)
+ return ERR_PTR(-ENOMEM);
+
+ ret = -ENOMEM;
+ bio = bio_kmalloc(gfp_mask, nr_pages);
+ if (!bio)
+ goto out_bmd;
+
+ if (!write_to_vm)
+ bio->bi_rw |= REQ_WRITE;
+
+ ret = 0;
+
+ if (map_data) {
+ nr_pages = 1 << map_data->page_order;
+ i = map_data->offset / PAGE_SIZE;
+ }
+ while (len) {
+ unsigned int bytes = PAGE_SIZE;
+
+ bytes -= offset;
+
+ if (bytes > len)
+ bytes = len;
+
+ if (map_data) {
+ if (i == map_data->nr_entries * nr_pages) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ page = map_data->pages[i / nr_pages];
+ page += (i % nr_pages);
+
+ i++;
+ } else {
+ page = alloc_page(q->bounce_gfp | gfp_mask);
+ if (!page) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
+ break;
+
+ len -= bytes;
+ offset = 0;
+ }
+
+ if (ret)
+ goto cleanup;
+
+ /*
+ * success
+ */
+ if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
+ (map_data && map_data->from_user)) {
+ ret = __bio_copy_iov(bio, iov, iov_count, 0, 1, 0);
+ if (ret)
+ goto cleanup;
+ }
+
+ bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
+ return bio;
+cleanup:
+ if (!map_data)
+ bio_for_each_segment_all(bvec, bio, i)
+ __free_page(bvec->bv_page);
+
+ bio_put(bio);
+out_bmd:
+ kfree(bmd);
+ return ERR_PTR(ret);
+}
+
+/**
+ * bio_copy_user - copy user data to bio
+ * @q: destination block queue
+ * @map_data: pointer to the rq_map_data holding pages (if necessary)
+ * @uaddr: start of user address
+ * @len: length in bytes
+ * @write_to_vm: bool indicating writing to pages or not
+ * @gfp_mask: memory allocation flags
+ *
+ * Prepares and returns a bio for indirect user io, bouncing data
+ * to/from kernel pages as necessary. Must be paired with
+ * call bio_uncopy_user() on io completion.
+ */
+struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
+ unsigned long uaddr, unsigned int len,
+ int write_to_vm, gfp_t gfp_mask)
+{
+ struct sg_iovec iov;
+
+ iov.iov_base = (void __user *)uaddr;
+ iov.iov_len = len;
+
+ return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
+}
+EXPORT_SYMBOL(bio_copy_user);
+
+static struct bio *__bio_map_user_iov(struct request_queue *q,
+ struct block_device *bdev,
+ const struct sg_iovec *iov, int iov_count,
+ int write_to_vm, gfp_t gfp_mask)
+{
+ int i, j;
+ int nr_pages = 0;
+ struct page **pages;
+ struct bio *bio;
+ int cur_page = 0;
+ int ret, offset;
+
+ for (i = 0; i < iov_count; i++) {
+ unsigned long uaddr = (unsigned long)iov[i].iov_base;
+ unsigned long len = iov[i].iov_len;
+ unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long start = uaddr >> PAGE_SHIFT;
+
+ /*
+ * Overflow, abort
+ */
+ if (end < start)
+ return ERR_PTR(-EINVAL);
+
+ nr_pages += end - start;
+ /*
+ * buffer must be aligned to at least hardsector size for now
+ */
+ if (uaddr & queue_dma_alignment(q))
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!nr_pages)
+ return ERR_PTR(-EINVAL);
+
+ bio = bio_kmalloc(gfp_mask, nr_pages);
+ if (!bio)
+ return ERR_PTR(-ENOMEM);
+
+ ret = -ENOMEM;
+ pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
+ if (!pages)
+ goto out;
+
+ for (i = 0; i < iov_count; i++) {
+ unsigned long uaddr = (unsigned long)iov[i].iov_base;
+ unsigned long len = iov[i].iov_len;
+ unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long start = uaddr >> PAGE_SHIFT;
+ const int local_nr_pages = end - start;
+ const int page_limit = cur_page + local_nr_pages;
+
+ ret = get_user_pages_fast(uaddr, local_nr_pages,
+ write_to_vm, &pages[cur_page]);
+ if (ret < local_nr_pages) {
+ ret = -EFAULT;
+ goto out_unmap;
+ }
+
+ offset = uaddr & ~PAGE_MASK;
+ for (j = cur_page; j < page_limit; j++) {
+ unsigned int bytes = PAGE_SIZE - offset;
+
+ if (len <= 0)
+ break;
+
+ if (bytes > len)
+ bytes = len;
+
+ /*
+ * stop once the queue refuses to take any more of this page
+ */
+ if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
+ bytes)
+ break;
+
+ len -= bytes;
+ offset = 0;
+ }
+
+ cur_page = j;
+ /*
+ * release the pages we didn't map into the bio, if any
+ */
+ while (j < page_limit)
+ page_cache_release(pages[j++]);
+ }
+
+ kfree(pages);
+
+ /*
+ * set data direction, and check if mapped pages need bouncing
+ */
+ if (!write_to_vm)
+ bio->bi_rw |= REQ_WRITE;
+
+ bio->bi_bdev = bdev;
+ bio->bi_flags |= (1 << BIO_USER_MAPPED);
+ return bio;
+
+ out_unmap:
+ for (i = 0; i < nr_pages; i++) {
+ if (!pages[i])
+ break;
+ page_cache_release(pages[i]);
+ }
+ out:
+ kfree(pages);
+ bio_put(bio);
+ return ERR_PTR(ret);
+}
+
+/**
+ * bio_map_user - map user address into bio
+ * @q: the struct request_queue for the bio
+ * @bdev: destination block device
+ * @uaddr: start of user address
+ * @len: length in bytes
+ * @write_to_vm: bool indicating writing to pages or not
+ * @gfp_mask: memory allocation flags
+ *
+ * Map the user space address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
+ unsigned long uaddr, unsigned int len, int write_to_vm,
+ gfp_t gfp_mask)
+{
+ struct sg_iovec iov;
+
+ iov.iov_base = (void __user *)uaddr;
+ iov.iov_len = len;
+
+ return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
+}
+EXPORT_SYMBOL(bio_map_user);
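
Illustration only (not part of this patch): a sketch of the bio_map_user()/bio_unmap_user() pairing described above; q, bdev, uaddr, len and the submission step are assumed to be supplied by the caller.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/err.h>

static int map_user_example(struct request_queue *q, struct block_device *bdev,
			    unsigned long uaddr, unsigned int len, int reading)
{
	struct bio *bio;

	bio = bio_map_user(q, bdev, uaddr, len, reading, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* ... submit the bio and wait for it to complete ... */

	/* dirties any pages that were written to and drops the mapping */
	bio_unmap_user(bio);
	return 0;
}
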
+
+/**
+ * bio_map_user_iov - map user sg_iovec table into bio
+ * @q: the struct request_queue for the bio
+ * @bdev: destination block device
+ * @iov: the iovec.
+ * @iov_count: number of elements in the iovec
+ * @write_to_vm: bool indicating writing to pages or not
+ * @gfp_mask: memory allocation flags
+ *
+ * Map the user space address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
+ const struct sg_iovec *iov, int iov_count,
+ int write_to_vm, gfp_t gfp_mask)
+{
+ struct bio *bio;
+
+ bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
+ gfp_mask);
+ if (IS_ERR(bio))
+ return bio;
+
+ /*
+ * subtle -- if __bio_map_user() ended up bouncing a bio,
+ * it would normally disappear when its bi_end_io is run.
+ * however, we need it for the unmap, so grab an extra
+ * reference to it
+ */
+ bio_get(bio);
+
+ return bio;
+}
+
+static void __bio_unmap_user(struct bio *bio)
+{
+ struct bio_vec *bvec;
+ int i;
+
+ /*
+ * make sure we dirty pages we wrote to
+ */
+ bio_for_each_segment_all(bvec, bio, i) {
+ if (bio_data_dir(bio) == READ)
+ set_page_dirty_lock(bvec->bv_page);
+
+ page_cache_release(bvec->bv_page);
+ }
+
+ bio_put(bio);
+}
+
+/**
+ * bio_unmap_user - unmap a bio
+ * @bio: the bio being unmapped
+ *
+ * Unmap a bio previously mapped by bio_map_user(). Must be called with
+ * a process context.
+ *
+ * bio_unmap_user() may sleep.
+ */
+void bio_unmap_user(struct bio *bio)
+{
+ __bio_unmap_user(bio);
+ bio_put(bio);
+}
+EXPORT_SYMBOL(bio_unmap_user);
+
+static void bio_map_kern_endio(struct bio *bio, int err)
+{
+ bio_put(bio);
+}
+
+static struct bio *__bio_map_kern(struct request_queue *q, void *data,
+ unsigned int len, gfp_t gfp_mask)
+{
+ unsigned long kaddr = (unsigned long)data;
+ unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long start = kaddr >> PAGE_SHIFT;
+ const int nr_pages = end - start;
+ int offset, i;
+ struct bio *bio;
+
+ bio = bio_kmalloc(gfp_mask, nr_pages);
+ if (!bio)
+ return ERR_PTR(-ENOMEM);
+
+ offset = offset_in_page(kaddr);
+ for (i = 0; i < nr_pages; i++) {
+ unsigned int bytes = PAGE_SIZE - offset;
+
+ if (len <= 0)
+ break;
+
+ if (bytes > len)
+ bytes = len;
+
+ if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
+ offset) < bytes)
+ break;
+
+ data += bytes;
+ len -= bytes;
+ offset = 0;
+ }
+
+ bio->bi_end_io = bio_map_kern_endio;
+ return bio;
+}
+
+/**
+ * bio_map_kern - map kernel address into bio
+ * @q: the struct request_queue for the bio
+ * @data: pointer to buffer to map
+ * @len: length in bytes
+ * @gfp_mask: allocation flags for bio allocation
+ *
+ * Map the kernel address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
+ gfp_t gfp_mask)
+{
+ struct bio *bio;
+
+ bio = __bio_map_kern(q, data, len, gfp_mask);
+ if (IS_ERR(bio))
+ return bio;
+
+ if (bio->bi_iter.bi_size == len)
+ return bio;
+
+ /*
+ * Don't support partial mappings.
+ */
+ bio_put(bio);
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL(bio_map_kern);
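
Illustration only (not part of this patch): a hedged sketch of bio_map_kern() error handling; the buffer and length are assumed, and the returned bio would normally be attached to a request (compare blk_rq_map_kern()) before being executed.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/err.h>

static struct bio *map_kern_example(struct request_queue *q, void *buf,
				    unsigned int len)
{
	struct bio *bio;

	bio = bio_map_kern(q, buf, len, GFP_KERNEL);
	if (IS_ERR(bio))
		return bio;

	/* partial mappings were rejected above, so all of buf is mapped here */
	return bio;
}
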
+
+static void bio_copy_kern_endio(struct bio *bio, int err)
+{
+ struct bio_vec *bvec;
+ const int read = bio_data_dir(bio) == READ;
+ struct bio_map_data *bmd = bio->bi_private;
+ int i;
+ char *p = bmd->sgvecs[0].iov_base;
+
+ bio_for_each_segment_all(bvec, bio, i) {
+ char *addr = page_address(bvec->bv_page);
+
+ if (read)
+ memcpy(p, addr, bvec->bv_len);
+
+ __free_page(bvec->bv_page);
+ p += bvec->bv_len;
+ }
+
+ kfree(bmd);
+ bio_put(bio);
+}
+
+/**
+ * bio_copy_kern - copy kernel address into bio
+ * @q: the struct request_queue for the bio
+ * @data: pointer to buffer to copy
+ * @len: length in bytes
+ * @gfp_mask: allocation flags for bio and page allocation
+ * @reading: data direction is READ
+ *
+ * copy the kernel address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
+ gfp_t gfp_mask, int reading)
+{
+ struct bio *bio;
+ struct bio_vec *bvec;
+ int i;
+
+ bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask);
+ if (IS_ERR(bio))
+ return bio;
+
+ if (!reading) {
+ void *p = data;
+
+ bio_for_each_segment_all(bvec, bio, i) {
+ char *addr = page_address(bvec->bv_page);
+
+ memcpy(addr, p, bvec->bv_len);
+ p += bvec->bv_len;
+ }
+ }
+
+ bio->bi_end_io = bio_copy_kern_endio;
+
+ return bio;
+}
+EXPORT_SYMBOL(bio_copy_kern);
+
+/*
+ * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
+ * for performing direct-IO in BIOs.
+ *
+ * The problem is that we cannot run set_page_dirty() from interrupt context
+ * because the required locks are not interrupt-safe. So what we can do is to
+ * mark the pages dirty _before_ performing IO. And in interrupt context,
+ * check that the pages are still dirty. If so, fine. If not, redirty them
+ * in process context.
+ *
+ * We special-case compound pages here: normally this means reads into hugetlb
+ * pages. The logic in here doesn't really work right for compound pages
+ * because the VM does not uniformly chase down the head page in all cases.
+ * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
+ * handle them at all. So we skip compound pages here at an early stage.
+ *
+ * Note that this code is very hard to test under normal circumstances because
+ * direct-io pins the pages with get_user_pages(). This makes
+ * is_page_cache_freeable return false, and the VM will not clean the pages.
+ * But other code (e.g., flusher threads) could clean the pages if they are mapped
+ * pagecache.
+ *
+ * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
+ * deferred bio dirtying paths.
+ */
+
+/*
+ * bio_set_pages_dirty() will mark all the bio's pages as dirty.
+ */
+void bio_set_pages_dirty(struct bio *bio)
+{
+ struct bio_vec *bvec;
+ int i;
+
+ bio_for_each_segment_all(bvec, bio, i) {
+ struct page *page = bvec->bv_page;
+
+ if (page && !PageCompound(page))
+ set_page_dirty_lock(page);
+ }
+}
+
+static void bio_release_pages(struct bio *bio)
+{
+ struct bio_vec *bvec;
+ int i;
+
+ bio_for_each_segment_all(bvec, bio, i) {
+ struct page *page = bvec->bv_page;
+
+ if (page)
+ put_page(page);
+ }
+}
+
+/*
+ * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
+ * If they are, then fine. If, however, some pages are clean then they must
+ * have been written out during the direct-IO read. So we take another ref on
+ * the BIO and the offending pages and re-dirty the pages in process context.
+ *
+ * It is expected that bio_check_pages_dirty() will wholly own the BIO from
+ * here on. It will run one page_cache_release() against each page and will
+ * run one bio_put() against the BIO.
+ */
+
+static void bio_dirty_fn(struct work_struct *work);
+
+static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
+static DEFINE_SPINLOCK(bio_dirty_lock);
+static struct bio *bio_dirty_list;
+
+/*
+ * This runs in process context
+ */
+static void bio_dirty_fn(struct work_struct *work)
+{
+ unsigned long flags;
+ struct bio *bio;
+
+ spin_lock_irqsave(&bio_dirty_lock, flags);
+ bio = bio_dirty_list;
+ bio_dirty_list = NULL;
+ spin_unlock_irqrestore(&bio_dirty_lock, flags);
+
+ while (bio) {
+ struct bio *next = bio->bi_private;
+
+ bio_set_pages_dirty(bio);
+ bio_release_pages(bio);
+ bio_put(bio);
+ bio = next;
+ }
+}
+
+void bio_check_pages_dirty(struct bio *bio)
+{
+ struct bio_vec *bvec;
+ int nr_clean_pages = 0;
+ int i;
+
+ bio_for_each_segment_all(bvec, bio, i) {
+ struct page *page = bvec->bv_page;
+
+ if (PageDirty(page) || PageCompound(page)) {
+ page_cache_release(page);
+ bvec->bv_page = NULL;
+ } else {
+ nr_clean_pages++;
+ }
+ }
+
+ if (nr_clean_pages) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&bio_dirty_lock, flags);
+ bio->bi_private = bio_dirty_list;
+ bio_dirty_list = bio;
+ spin_unlock_irqrestore(&bio_dirty_lock, flags);
+ schedule_work(&bio_dirty_work);
+ } else {
+ bio_put(bio);
+ }
+}
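
Illustration only (not part of this patch): the dirty-before-IO pattern described in the comment block above, for a READ whose pages were pinned with get_user_pages(); building and submitting the bio itself is assumed.

#include <linux/bio.h>
#include <linux/fs.h>

static void dio_read_done(struct bio *bio, int error)
{
	/*
	 * May run in interrupt context: re-dirties any page that was
	 * cleaned during the IO and releases the pages and the bio.
	 */
	bio_check_pages_dirty(bio);
}

static void dio_read_submit(struct bio *bio)
{
	/* mark the pinned pages dirty up front, while we may still sleep */
	bio_set_pages_dirty(bio);
	bio->bi_end_io = dio_read_done;
	submit_bio(READ, bio);
}
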
+
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+void bio_flush_dcache_pages(struct bio *bi)
+{
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+
+ bio_for_each_segment(bvec, bi, iter)
+ flush_dcache_page(bvec.bv_page);
+}
+EXPORT_SYMBOL(bio_flush_dcache_pages);
+#endif
+
+/**
+ * bio_endio - end I/O on a bio
+ * @bio: bio
+ * @error: error, if any
+ *
+ * Description:
+ * bio_endio() will end I/O on the whole bio. bio_endio() is the
+ * preferred way to end I/O on a bio; it takes care of clearing
+ * BIO_UPTODATE on error. @error is 0 on success, and one of the
+ * established -Exxxx (-EIO, for instance) error values in case
+ * something went wrong. No one should call bi_end_io() directly on a
+ * bio unless they own it and thus know that it has an end_io
+ * function.
+ **/
+void bio_endio(struct bio *bio, int error)
+{
+ while (bio) {
+ BUG_ON(atomic_read(&bio->bi_remaining) <= 0);
+
+ if (error)
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ error = -EIO;
+
+ if (!atomic_dec_and_test(&bio->bi_remaining))
+ return;
+
+ /*
+ * Need to have a real endio function for chained bios,
+ * otherwise various corner cases will break (like stacking
+ * block devices that save/restore bi_end_io) - however, we want
+ * to avoid unbounded recursion and blowing the stack. Tail call
+ * optimization would handle this, but compiling with frame
+ * pointers also disables gcc's sibling call optimization.
+ */
+ if (bio->bi_end_io == bio_chain_endio) {
+ struct bio *parent = bio->bi_private;
+ bio_put(bio);
+ bio = parent;
+ } else {
+ if (bio->bi_end_io)
+ bio->bi_end_io(bio, error);
+ bio = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL(bio_endio);
+
+/**
+ * bio_endio_nodec - end I/O on a bio, without decrementing bi_remaining
+ * @bio: bio
+ * @error: error, if any
+ *
+ * For code that has saved and restored bi_end_io; think hard before using
+ * this function - you probably should have cloned the entire bio.
+ **/
+void bio_endio_nodec(struct bio *bio, int error)
+{
+ atomic_inc(&bio->bi_remaining);
+ bio_endio(bio, error);
+}
+EXPORT_SYMBOL(bio_endio_nodec);
+
+/**
+ * bio_split - split a bio
+ * @bio: bio to split
+ * @sectors: number of sectors to split from the front of @bio
+ * @gfp: gfp mask
+ * @bs: bio set to allocate from
+ *
+ * Allocates and returns a new bio which represents @sectors from the start of
+ * @bio, and updates @bio to represent the remaining sectors.
+ *
+ * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
+ * responsibility to ensure that @bio is not freed before the split.
+ */
+struct bio *bio_split(struct bio *bio, int sectors,
+ gfp_t gfp, struct bio_set *bs)
+{
+ struct bio *split = NULL;
+
+ BUG_ON(sectors <= 0);
+ BUG_ON(sectors >= bio_sectors(bio));
+
+ split = bio_clone_fast(bio, gfp, bs);
+ if (!split)
+ return NULL;
+
+ split->bi_iter.bi_size = sectors << 9;
+
+ if (bio_integrity(split))
+ bio_integrity_trim(split, 0, sectors);
+
+ bio_advance(bio, split->bi_iter.bi_size);
+
+ return split;
+}
+EXPORT_SYMBOL(bio_split);
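
Illustration only (not part of this patch): a sketch of how a stacking driver might split a bio at an assumed boundary and chain the pieces so that the original bio completes only after the split-off front part does.

#include <linux/bio.h>
#include <linux/blkdev.h>

static void split_example(struct bio *bio, int boundary_sectors)
{
	struct bio *split;

	if (bio_sectors(bio) > boundary_sectors) {
		/* front part goes into 'split'; 'bio' keeps the remainder */
		split = bio_split(bio, boundary_sectors, GFP_NOIO, fs_bio_set);
		if (split) {
			/* 'bio' won't complete until 'split' has completed */
			bio_chain(split, bio);
			generic_make_request(split);
		}
	}
	generic_make_request(bio);
}
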
+
+/**
+ * bio_trim - trim a bio
+ * @bio: bio to trim
+ * @offset: number of sectors to trim from the front of @bio
+ * @size: size we want to trim @bio to, in sectors
+ */
+void bio_trim(struct bio *bio, int offset, int size)
+{
+ /* 'bio' is a cloned bio which we need to trim to match
+ * the given offset and size.
+ */
+
+ size <<= 9;
+ if (offset == 0 && size == bio->bi_iter.bi_size)
+ return;
+
+ clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+
+ bio_advance(bio, offset << 9);
+
+ bio->bi_iter.bi_size = size;
+}
+EXPORT_SYMBOL_GPL(bio_trim);
+
+/*
+ * create memory pools for biovecs in a bio_set.
+ * use the global biovec slabs created for general use.
+ */
+mempool_t *biovec_create_pool(int pool_entries)
+{
+ struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;
+
+ return mempool_create_slab_pool(pool_entries, bp->slab);
+}
+
+void bioset_free(struct bio_set *bs)
+{
+ if (bs->rescue_workqueue)
+ destroy_workqueue(bs->rescue_workqueue);
+
+ if (bs->bio_pool)
+ mempool_destroy(bs->bio_pool);
+
+ if (bs->bvec_pool)
+ mempool_destroy(bs->bvec_pool);
+
+ bioset_integrity_free(bs);
+ bio_put_slab(bs);
+
+ kfree(bs);
+}
+EXPORT_SYMBOL(bioset_free);
+
+/**
+ * bioset_create - Create a bio_set
+ * @pool_size: Number of bio and bio_vecs to cache in the mempool
+ * @front_pad: Number of bytes to allocate in front of the returned bio
+ *
+ * Description:
+ * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
+ * to ask for a number of bytes to be allocated in front of the bio.
+ * Front pad allocation is useful for embedding the bio inside
+ * another structure, to avoid allocating extra data to go with the bio.
+ * Note that the bio must be embedded at the END of that structure always,
+ * or things will break badly.
+ */
+struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
+{
+ unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
+ struct bio_set *bs;
+
+ bs = kzalloc(sizeof(*bs), GFP_KERNEL);
+ if (!bs)
+ return NULL;
+
+ bs->front_pad = front_pad;
+
+ spin_lock_init(&bs->rescue_lock);
+ bio_list_init(&bs->rescue_list);
+ INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
+
+ bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
+ if (!bs->bio_slab) {
+ kfree(bs);
+ return NULL;
+ }
+
+ bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
+ if (!bs->bio_pool)
+ goto bad;
+
+ bs->bvec_pool = biovec_create_pool(pool_size);
+ if (!bs->bvec_pool)
+ goto bad;
+
+ bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
+ if (!bs->rescue_workqueue)
+ goto bad;
+
+ return bs;
+bad:
+ bioset_free(bs);
+ return NULL;
+}
+EXPORT_SYMBOL(bioset_create);
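
Illustration only (not part of this patch): the front_pad embedding described in the comment above. struct my_io, my_bio_set and the pool-size choice are hypothetical; note that the bio must remain the last member of the containing structure.

#include <linux/bio.h>
#include <linux/errno.h>
#include <linux/stddef.h>

struct my_io {				/* hypothetical per-IO state */
	void		*private;
	struct bio	bio;		/* must be the last member */
};

static struct bio_set *my_bio_set;

static int my_init(void)
{
	my_bio_set = bioset_create(BIO_POOL_SIZE, offsetof(struct my_io, bio));
	return my_bio_set ? 0 : -ENOMEM;
}

static struct my_io *my_io_alloc(unsigned int nr_vecs)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, my_bio_set);

	return bio ? container_of(bio, struct my_io, bio) : NULL;
}
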
+
+#ifdef CONFIG_BLK_CGROUP
+/**
+ * bio_associate_current - associate a bio with %current
+ * @bio: target bio
+ *
+ * Associate @bio with %current if it hasn't been associated yet. Block
+ * layer will treat @bio as if it were issued by %current no matter which
+ * task actually issues it.
+ *
+ * This function takes an extra reference of @task's io_context and blkcg
+ * which will be put when @bio is released. The caller must own @bio,
+ * ensure %current->io_context exists, and be responsible for synchronizing
+ * calls to this function.
+ */
+int bio_associate_current(struct bio *bio)
+{
+ struct io_context *ioc;
+ struct cgroup_subsys_state *css;
+
+ if (bio->bi_ioc)
+ return -EBUSY;
+
+ ioc = current->io_context;
+ if (!ioc)
+ return -ENOENT;
+
+ /* acquire active ref on @ioc and associate */
+ get_io_context_active(ioc);
+ bio->bi_ioc = ioc;
+
+ /* associate blkcg if exists */
+ rcu_read_lock();
+ css = task_css(current, blkio_cgrp_id);
+ if (css && css_tryget_online(css))
+ bio->bi_css = css;
+ rcu_read_unlock();
+
+ return 0;
+}
+
+/**
+ * bio_disassociate_task - undo bio_associate_current()
+ * @bio: target bio
+ */
+void bio_disassociate_task(struct bio *bio)
+{
+ if (bio->bi_ioc) {
+ put_io_context(bio->bi_ioc);
+ bio->bi_ioc = NULL;
+ }
+ if (bio->bi_css) {
+ css_put(bio->bi_css);
+ bio->bi_css = NULL;
+ }
+}
+
+#endif /* CONFIG_BLK_CGROUP */
+
+static void __init biovec_init_slabs(void)
+{
+ int i;
+
+ for (i = 0; i < BIOVEC_NR_POOLS; i++) {
+ int size;
+ struct biovec_slab *bvs = bvec_slabs + i;
+
+ if (bvs->nr_vecs <= BIO_INLINE_VECS) {
+ bvs->slab = NULL;
+ continue;
+ }
+
+ size = bvs->nr_vecs * sizeof(struct bio_vec);
+ bvs->slab = kmem_cache_create(bvs->name, size, 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ }
+}
+
+static int __init init_bio(void)
+{
+ bio_slab_max = 2;
+ bio_slab_nr = 0;
+ bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
+ if (!bio_slabs)
+ panic("bio: can't allocate bios\n");
+
+ bio_integrity_init();
+ biovec_init_slabs();
+
+ fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
+ if (!fs_bio_set)
+ panic("bio: can't allocate bios\n");
+
+ if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
+ panic("bio: can't create integrity pool\n");
+
+ return 0;
+}
+subsys_initcall(init_bio);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 75642a352a8..28d227c5ca7 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -11,1685 +11,1143 @@
* Nauman Rafique <nauman@google.com>
*/
#include <linux/ioprio.h>
-#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
-#include "blk-cgroup.h"
#include <linux/genhd.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include "blk-cgroup.h"
+#include "blk.h"
#define MAX_KEY_LEN 100
-static DEFINE_SPINLOCK(blkio_list_lock);
-static LIST_HEAD(blkio_list);
-
-struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
-EXPORT_SYMBOL_GPL(blkio_root_cgroup);
-
-static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
- struct cgroup *);
-static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
- struct cgroup_taskset *);
-static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
- struct cgroup_taskset *);
-static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
-static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
-
-/* for encoding cft->private value on file */
-#define BLKIOFILE_PRIVATE(x, val) (((x) << 16) | (val))
-/* What policy owns the file, proportional or throttle */
-#define BLKIOFILE_POLICY(val) (((val) >> 16) & 0xffff)
-#define BLKIOFILE_ATTR(val) ((val) & 0xffff)
-
-struct cgroup_subsys blkio_subsys = {
- .name = "blkio",
- .create = blkiocg_create,
- .can_attach = blkiocg_can_attach,
- .attach = blkiocg_attach,
- .destroy = blkiocg_destroy,
- .populate = blkiocg_populate,
-#ifdef CONFIG_BLK_CGROUP
- /* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
- .subsys_id = blkio_subsys_id,
-#endif
- .use_id = 1,
- .module = THIS_MODULE,
-};
-EXPORT_SYMBOL_GPL(blkio_subsys);
-
-static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
- struct blkio_policy_node *pn)
-{
- list_add(&pn->node, &blkcg->policy_list);
-}
-
-static inline bool cftype_blkg_same_policy(struct cftype *cft,
- struct blkio_group *blkg)
-{
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
-
- if (blkg->plid == plid)
- return 1;
-
- return 0;
-}
+static DEFINE_MUTEX(blkcg_pol_mutex);
-/* Determines if policy node matches cgroup file being accessed */
-static inline bool pn_matches_cftype(struct cftype *cft,
- struct blkio_policy_node *pn)
-{
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int fileid = BLKIOFILE_ATTR(cft->private);
+struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT,
+ .cfq_leaf_weight = 2 * CFQ_WEIGHT_DEFAULT, };
+EXPORT_SYMBOL_GPL(blkcg_root);
- return (plid == pn->plid && fileid == pn->fileid);
-}
+static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
-/* Must be called with blkcg->lock held */
-static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
+static bool blkcg_policy_enabled(struct request_queue *q,
+ const struct blkcg_policy *pol)
{
- list_del(&pn->node);
+ return pol && test_bit(pol->plid, q->blkcg_pols);
}
-/* Must be called with blkcg->lock held */
-static struct blkio_policy_node *
-blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
- enum blkio_policy_id plid, int fileid)
+/**
+ * blkg_free - free a blkg
+ * @blkg: blkg to free
+ *
+ * Free @blkg which may be partially allocated.
+ */
+static void blkg_free(struct blkcg_gq *blkg)
{
- struct blkio_policy_node *pn;
+ int i;
- list_for_each_entry(pn, &blkcg->policy_list, node) {
- if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
- return pn;
- }
+ if (!blkg)
+ return;
- return NULL;
-}
+ for (i = 0; i < BLKCG_MAX_POLS; i++)
+ kfree(blkg->pd[i]);
-struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
-{
- return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
- struct blkio_cgroup, css);
+ blk_exit_rl(&blkg->rl);
+ kfree(blkg);
}
-EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
-struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
-{
- return container_of(task_subsys_state(tsk, blkio_subsys_id),
- struct blkio_cgroup, css);
-}
-EXPORT_SYMBOL_GPL(task_blkio_cgroup);
-
-static inline void
-blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
+/**
+ * blkg_alloc - allocate a blkg
+ * @blkcg: block cgroup the new blkg is associated with
+ * @q: request_queue the new blkg is associated with
+ * @gfp_mask: allocation mask to use
+ *
+ * Allocate a new blkg associating @blkcg and @q.
+ */
+static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
+ gfp_t gfp_mask)
{
- struct blkio_policy_type *blkiop;
+ struct blkcg_gq *blkg;
+ int i;
- list_for_each_entry(blkiop, &blkio_list, list) {
- /* If this policy does not own the blkg, do not send updates */
- if (blkiop->plid != blkg->plid)
- continue;
- if (blkiop->ops.blkio_update_group_weight_fn)
- blkiop->ops.blkio_update_group_weight_fn(blkg->key,
- blkg, weight);
+ /* alloc and init base part */
+ blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
+ if (!blkg)
+ return NULL;
+
+ blkg->q = q;
+ INIT_LIST_HEAD(&blkg->q_node);
+ blkg->blkcg = blkcg;
+ atomic_set(&blkg->refcnt, 1);
+
+ /* root blkg uses @q->root_rl, init rl only for !root blkgs */
+ if (blkcg != &blkcg_root) {
+ if (blk_init_rl(&blkg->rl, q, gfp_mask))
+ goto err_free;
+ blkg->rl.blkg = blkg;
}
-}
-static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
- int fileid)
-{
- struct blkio_policy_type *blkiop;
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+ struct blkg_policy_data *pd;
- list_for_each_entry(blkiop, &blkio_list, list) {
-
- /* If this policy does not own the blkg, do not send updates */
- if (blkiop->plid != blkg->plid)
+ if (!blkcg_policy_enabled(q, pol))
continue;
- if (fileid == BLKIO_THROTL_read_bps_device
- && blkiop->ops.blkio_update_group_read_bps_fn)
- blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
- blkg, bps);
+ /* alloc per-policy data and attach it to blkg */
+ pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
+ if (!pd)
+ goto err_free;
- if (fileid == BLKIO_THROTL_write_bps_device
- && blkiop->ops.blkio_update_group_write_bps_fn)
- blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
- blkg, bps);
+ blkg->pd[i] = pd;
+ pd->blkg = blkg;
+ pd->plid = i;
}
-}
-static inline void blkio_update_group_iops(struct blkio_group *blkg,
- unsigned int iops, int fileid)
-{
- struct blkio_policy_type *blkiop;
+ return blkg;
- list_for_each_entry(blkiop, &blkio_list, list) {
+err_free:
+ blkg_free(blkg);
+ return NULL;
+}
- /* If this policy does not own the blkg, do not send updates */
- if (blkiop->plid != blkg->plid)
- continue;
+/**
+ * __blkg_lookup - internal version of blkg_lookup()
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ * @update_hint: whether to update lookup hint with the result or not
+ *
+ * This is the internal version and shouldn't be used by policy
+ * implementations. Looks up the blkg for the @blkcg - @q pair regardless of
+ * @q's bypass state. If @update_hint is %true, the caller should be
+ * holding @q->queue_lock and lookup hint is updated on success.
+ */
+struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
+ bool update_hint)
+{
+ struct blkcg_gq *blkg;
- if (fileid == BLKIO_THROTL_read_iops_device
- && blkiop->ops.blkio_update_group_read_iops_fn)
- blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
- blkg, iops);
+ blkg = rcu_dereference(blkcg->blkg_hint);
+ if (blkg && blkg->q == q)
+ return blkg;
- if (fileid == BLKIO_THROTL_write_iops_device
- && blkiop->ops.blkio_update_group_write_iops_fn)
- blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
- blkg,iops);
+ /*
+ * Hint didn't match. Look up from the radix tree. Note that the
+ * hint can only be updated under queue_lock as otherwise @blkg
+ * could have already been removed from blkg_tree. The caller is
+ * responsible for grabbing queue_lock if @update_hint.
+ */
+ blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
+ if (blkg && blkg->q == q) {
+ if (update_hint) {
+ lockdep_assert_held(q->queue_lock);
+ rcu_assign_pointer(blkcg->blkg_hint, blkg);
+ }
+ return blkg;
}
+
+ return NULL;
}
-/*
- * Add to the appropriate stat variable depending on the request type.
- * This should be called with the blkg->stats_lock held.
+/**
+ * blkg_lookup - lookup blkg for the specified blkcg - q pair
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ *
+ * Lookup blkg for the @blkcg - @q pair. This function should be called
+ * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
+ * - see blk_queue_bypass_start() for details.
*/
-static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
- bool sync)
+struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
- if (direction)
- stat[BLKIO_STAT_WRITE] += add;
- else
- stat[BLKIO_STAT_READ] += add;
- if (sync)
- stat[BLKIO_STAT_SYNC] += add;
- else
- stat[BLKIO_STAT_ASYNC] += add;
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (unlikely(blk_queue_bypass(q)))
+ return NULL;
+ return __blkg_lookup(blkcg, q, false);
}
+EXPORT_SYMBOL_GPL(blkg_lookup);
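
Illustration only (not part of this patch): the RCU calling convention for blkg_lookup(); the blkcg and queue are assumed to be pinned by the caller, and only group-local fields may be touched under the lock (see the comment above __blkg_release_rcu() further down).

#include <linux/rcupdate.h>
#include "blk-cgroup.h"

static void blkg_lookup_example(struct blkcg *blkcg, struct request_queue *q)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	blkg = blkg_lookup(blkcg, q);	/* NULL if missing or @q is bypassing */
	if (blkg) {
		/* use only values local to the group (stats, limits) here */
	}
	rcu_read_unlock();
}
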
/*
- * Decrements the appropriate stat variable if non-zero depending on the
- * request type. Panics on value being zero.
- * This should be called with the blkg->stats_lock held.
+ * If @new_blkg is %NULL, this function tries to allocate a new one as
+ * necessary using %GFP_ATOMIC. @new_blkg is always consumed on return.
*/
-static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
+static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
+ struct request_queue *q,
+ struct blkcg_gq *new_blkg)
{
- if (direction) {
- BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
- stat[BLKIO_STAT_WRITE]--;
- } else {
- BUG_ON(stat[BLKIO_STAT_READ] == 0);
- stat[BLKIO_STAT_READ]--;
- }
- if (sync) {
- BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
- stat[BLKIO_STAT_SYNC]--;
- } else {
- BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
- stat[BLKIO_STAT_ASYNC]--;
- }
-}
+ struct blkcg_gq *blkg;
+ int i, ret;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-/* This should be called with the blkg->stats_lock held. */
-static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
- struct blkio_group *curr_blkg)
-{
- if (blkio_blkg_waiting(&blkg->stats))
- return;
- if (blkg == curr_blkg)
- return;
- blkg->stats.start_group_wait_time = sched_clock();
- blkio_mark_blkg_waiting(&blkg->stats);
-}
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ lockdep_assert_held(q->queue_lock);
-/* This should be called with the blkg->stats_lock held. */
-static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
-{
- unsigned long long now;
+ /* blkg holds a reference to blkcg */
+ if (!css_tryget_online(&blkcg->css)) {
+ ret = -EINVAL;
+ goto err_free_blkg;
+ }
- if (!blkio_blkg_waiting(stats))
- return;
+ /* allocate */
+ if (!new_blkg) {
+ new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
+ if (unlikely(!new_blkg)) {
+ ret = -ENOMEM;
+ goto err_put_css;
+ }
+ }
+ blkg = new_blkg;
- now = sched_clock();
- if (time_after64(now, stats->start_group_wait_time))
- stats->group_wait_time += now - stats->start_group_wait_time;
- blkio_clear_blkg_waiting(stats);
-}
+ /* link parent */
+ if (blkcg_parent(blkcg)) {
+ blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
+ if (WARN_ON_ONCE(!blkg->parent)) {
+ ret = -EINVAL;
+ goto err_put_css;
+ }
+ blkg_get(blkg->parent);
+ }
-/* This should be called with the blkg->stats_lock held. */
-static void blkio_end_empty_time(struct blkio_group_stats *stats)
-{
- unsigned long long now;
+ /* invoke per-policy init */
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
- if (!blkio_blkg_empty(stats))
- return;
+ if (blkg->pd[i] && pol->pd_init_fn)
+ pol->pd_init_fn(blkg);
+ }
- now = sched_clock();
- if (time_after64(now, stats->start_empty_time))
- stats->empty_time += now - stats->start_empty_time;
- blkio_clear_blkg_empty(stats);
-}
+ /* insert */
+ spin_lock(&blkcg->lock);
+ ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
+ if (likely(!ret)) {
+ hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
+ list_add(&blkg->q_node, &q->blkg_list);
-void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
-{
- unsigned long flags;
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
- spin_lock_irqsave(&blkg->stats_lock, flags);
- BUG_ON(blkio_blkg_idling(&blkg->stats));
- blkg->stats.start_idle_time = sched_clock();
- blkio_mark_blkg_idling(&blkg->stats);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
+ if (blkg->pd[i] && pol->pd_online_fn)
+ pol->pd_online_fn(blkg);
+ }
+ }
+ blkg->online = true;
+ spin_unlock(&blkcg->lock);
-void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
-{
- unsigned long flags;
- unsigned long long now;
- struct blkio_group_stats *stats;
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &blkg->stats;
- if (blkio_blkg_idling(stats)) {
- now = sched_clock();
- if (time_after64(now, stats->start_idle_time))
- stats->idle_time += now - stats->start_idle_time;
- blkio_clear_blkg_idling(stats);
+ if (!ret) {
+ if (blkcg == &blkcg_root) {
+ q->root_blkg = blkg;
+ q->root_rl.blkg = blkg;
+ }
+ return blkg;
}
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
-void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
-{
- unsigned long flags;
- struct blkio_group_stats *stats;
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &blkg->stats;
- stats->avg_queue_size_sum +=
- stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
- stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
- stats->avg_queue_size_samples++;
- blkio_update_group_wait_time(stats);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
+ /* @blkg failed to initialize fully, use the usual release path */
+ blkg_put(blkg);
+ return ERR_PTR(ret);
+
+err_put_css:
+ css_put(&blkcg->css);
+err_free_blkg:
+ blkg_free(new_blkg);
+ return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
-void blkiocg_set_start_empty_time(struct blkio_group *blkg)
+/**
+ * blkg_lookup_create - lookup blkg, try to create one if not there
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ *
+ * Lookup blkg for the @blkcg - @q pair. If it doesn't exist, try to
+ * create one. blkg creation is performed recursively from blkcg_root such
+ * that all non-root blkgs have access to the parent blkg. This function
+ * should be called under RCU read lock and @q->queue_lock.
+ *
+ * Returns pointer to the looked up or created blkg on success, ERR_PTR()
+ * value on error. If @q is dead, returns ERR_PTR(-EINVAL). If @q is not
+ * dead and bypassing, returns ERR_PTR(-EBUSY).
+ */
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q)
{
- unsigned long flags;
- struct blkio_group_stats *stats;
+ struct blkcg_gq *blkg;
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &blkg->stats;
-
- if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
- stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
- return;
- }
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ lockdep_assert_held(q->queue_lock);
/*
- * group is already marked empty. This can happen if cfqq got new
- * request in parent group and moved to this group while being added
- * to service tree. Just ignore the event and move on.
+ * This could be the first entry point of blkcg implementation and
+ * we shouldn't allow anything to go through for a bypassing queue.
*/
- if(blkio_blkg_empty(stats)) {
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
- return;
- }
+ if (unlikely(blk_queue_bypass(q)))
+ return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
- stats->start_empty_time = sched_clock();
- blkio_mark_blkg_empty(stats);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
+ blkg = __blkg_lookup(blkcg, q, true);
+ if (blkg)
+ return blkg;
-void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue)
-{
- blkg->stats.dequeue += dequeue;
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
-#else
-static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
- struct blkio_group *curr_blkg) {}
-static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
-#endif
-
-void blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_group *curr_blkg, bool direction,
- bool sync)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
- sync);
- blkio_end_empty_time(&blkg->stats);
- blkio_set_start_group_wait_time(blkg, curr_blkg);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
+ /*
+ * Create blkgs walking down from blkcg_root to @blkcg, so that all
+ * non-root blkgs have access to their parents.
+ */
+ while (true) {
+ struct blkcg *pos = blkcg;
+ struct blkcg *parent = blkcg_parent(blkcg);
-void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- bool direction, bool sync)
-{
- unsigned long flags;
+ while (parent && !__blkg_lookup(parent, q, false)) {
+ pos = parent;
+ parent = blkcg_parent(parent);
+ }
- spin_lock_irqsave(&blkg->stats_lock, flags);
- blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
- direction, sync);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
+ blkg = blkg_create(pos, q, NULL);
+ if (pos == blkcg || IS_ERR(blkg))
+ return blkg;
+ }
}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
+EXPORT_SYMBOL_GPL(blkg_lookup_create);
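
Illustration only (not part of this patch): a caller of blkg_lookup_create() is expected to hold both the RCU read lock and @q->queue_lock; the irq-disabling lock variant below is an assumption about the caller's context.

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include "blk-cgroup.h"

static struct blkcg_gq *lookup_create_example(struct blkcg *blkcg,
					      struct request_queue *q)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_lookup_create(blkcg, q);	/* may return an ERR_PTR() */
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	return blkg;
}
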
-void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
- unsigned long unaccounted_time)
+static void blkg_destroy(struct blkcg_gq *blkg)
{
- unsigned long flags;
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- blkg->stats.time += time;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- blkg->stats.unaccounted_time += unaccounted_time;
-#endif
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
+ struct blkcg *blkcg = blkg->blkcg;
+ int i;
-/*
- * should be called under rcu read lock or queue lock to make sure blkg pointer
- * is valid.
- */
-void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
- uint64_t bytes, bool direction, bool sync)
-{
- struct blkio_group_stats_cpu *stats_cpu;
- unsigned long flags;
+ lockdep_assert_held(blkg->q->queue_lock);
+ lockdep_assert_held(&blkcg->lock);
- /*
- * Disabling interrupts to provide mutual exclusion between two
- * writes on same cpu. It probably is not needed for 64bit. Not
- * optimizing that case yet.
- */
- local_irq_save(flags);
-
- stats_cpu = this_cpu_ptr(blkg->stats_cpu);
-
- u64_stats_update_begin(&stats_cpu->syncp);
- stats_cpu->sectors += bytes >> 9;
- blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
- 1, direction, sync);
- blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
- bytes, direction, sync);
- u64_stats_update_end(&stats_cpu->syncp);
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
+ /* Something wrong if we are trying to remove same group twice */
+ WARN_ON_ONCE(list_empty(&blkg->q_node));
+ WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
-void blkiocg_update_completion_stats(struct blkio_group *blkg,
- uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
-{
- struct blkio_group_stats *stats;
- unsigned long flags;
- unsigned long long now = sched_clock();
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &blkg->stats;
- if (time_after64(now, io_start_time))
- blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
- now - io_start_time, direction, sync);
- if (time_after64(io_start_time, start_time))
- blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
- io_start_time - start_time, direction, sync);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
-/* Merged stats are per cpu. */
-void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
- bool sync)
-{
- struct blkio_group_stats_cpu *stats_cpu;
- unsigned long flags;
+ if (blkg->pd[i] && pol->pd_offline_fn)
+ pol->pd_offline_fn(blkg);
+ }
+ blkg->online = false;
+
+ radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
+ list_del_init(&blkg->q_node);
+ hlist_del_init_rcu(&blkg->blkcg_node);
/*
- * Disabling interrupts to provide mutual exclusion between two
- * writes on same cpu. It probably is not needed for 64bit. Not
- * optimizing that case yet.
+ * Both setting the lookup hint to @blkg and clearing it from @blkg are
+ * done under queue_lock. If it's not pointing to @blkg now, it never
+ * will be. Hint assignment itself can race safely.
*/
- local_irq_save(flags);
+ if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
+ rcu_assign_pointer(blkcg->blkg_hint, NULL);
- stats_cpu = this_cpu_ptr(blkg->stats_cpu);
+ /*
+ * If the root blkg is destroyed, just clear the pointer since root_rl
+ * does not take a reference on the root blkg.
+ */
+ if (blkcg == &blkcg_root) {
+ blkg->q->root_blkg = NULL;
+ blkg->q->root_rl.blkg = NULL;
+ }
- u64_stats_update_begin(&stats_cpu->syncp);
- blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
- direction, sync);
- u64_stats_update_end(&stats_cpu->syncp);
- local_irq_restore(flags);
+ /*
+ * Put the reference taken at the time of creation so that when all
+ * queues are gone, group can be destroyed.
+ */
+ blkg_put(blkg);
}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
-/*
- * This function allocates the per cpu stats for blkio_group. Should be called
- * from sleepable context as alloc_per_cpu() requires that.
+/**
+ * blkg_destroy_all - destroy all blkgs associated with a request_queue
+ * @q: request_queue of interest
+ *
+ * Destroy all blkgs associated with @q.
*/
-int blkio_alloc_blkg_stats(struct blkio_group *blkg)
+static void blkg_destroy_all(struct request_queue *q)
{
- /* Allocate memory for per cpu stats */
- blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
- if (!blkg->stats_cpu)
- return -ENOMEM;
- return 0;
-}
-EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
+ struct blkcg_gq *blkg, *n;
-void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev,
- enum blkio_policy_id plid)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&blkcg->lock, flags);
- spin_lock_init(&blkg->stats_lock);
- rcu_assign_pointer(blkg->key, key);
- blkg->blkcg_id = css_id(&blkcg->css);
- hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
- blkg->plid = plid;
- spin_unlock_irqrestore(&blkcg->lock, flags);
- /* Need to take css reference ? */
- cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
- blkg->dev = dev;
-}
-EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
+ lockdep_assert_held(q->queue_lock);
-static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
- hlist_del_init_rcu(&blkg->blkcg_node);
- blkg->blkcg_id = 0;
+ list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
+ struct blkcg *blkcg = blkg->blkcg;
+
+ spin_lock(&blkcg->lock);
+ blkg_destroy(blkg);
+ spin_unlock(&blkcg->lock);
+ }
}
/*
- * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
- * indicating that blk_group was unhashed by the time we got to it.
+ * A group is RCU protected, but having an rcu lock does not mean that one
+ * can access all the fields of blkg and assume these are valid. For
+ * example, don't try to follow throtl_data and request queue links.
+ *
+ * Having a reference to a blkg under rcu allows access only to values
+ * local to the group, like group stats and group rate limits.
*/
-int blkiocg_del_blkio_group(struct blkio_group *blkg)
+void __blkg_release_rcu(struct rcu_head *rcu_head)
{
- struct blkio_cgroup *blkcg;
- unsigned long flags;
- struct cgroup_subsys_state *css;
- int ret = 1;
+ struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
+ int i;
- rcu_read_lock();
- css = css_lookup(&blkio_subsys, blkg->blkcg_id);
- if (css) {
- blkcg = container_of(css, struct blkio_cgroup, css);
- spin_lock_irqsave(&blkcg->lock, flags);
- if (!hlist_unhashed(&blkg->blkcg_node)) {
- __blkiocg_del_blkio_group(blkg);
- ret = 0;
- }
- spin_unlock_irqrestore(&blkcg->lock, flags);
+ /* tell policies that this one is being freed */
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+
+ if (blkg->pd[i] && pol->pd_exit_fn)
+ pol->pd_exit_fn(blkg);
}
- rcu_read_unlock();
- return ret;
+ /* release the blkcg and parent blkg refs this blkg has been holding */
+ css_put(&blkg->blkcg->css);
+ if (blkg->parent)
+ blkg_put(blkg->parent);
+
+ blkg_free(blkg);
}
-EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
+EXPORT_SYMBOL_GPL(__blkg_release_rcu);
-/* called under rcu_read_lock(). */
-struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
+/*
+ * The next function used by blk_queue_for_each_rl(). It's a bit tricky
+ * because the root blkg uses @q->root_rl instead of its own rl.
+ */
+struct request_list *__blk_queue_next_rl(struct request_list *rl,
+ struct request_queue *q)
{
- struct blkio_group *blkg;
- struct hlist_node *n;
- void *__key;
+ struct list_head *ent;
+ struct blkcg_gq *blkg;
- hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
- __key = blkg->key;
- if (__key == key)
- return blkg;
+ /*
+ * Determine the current blkg list_head. The first entry is
+ * root_rl which is off @q->blkg_list and mapped to the head.
+ */
+ if (rl == &q->root_rl) {
+ ent = &q->blkg_list;
+ /* There are no more block groups, hence no request lists */
+ if (list_empty(ent))
+ return NULL;
+ } else {
+ blkg = container_of(rl, struct blkcg_gq, rl);
+ ent = &blkg->q_node;
}
- return NULL;
+ /* walk to the next list_head, skip root blkcg */
+ ent = ent->next;
+ if (ent == &q->root_blkg->q_node)
+ ent = ent->next;
+ if (ent == &q->blkg_list)
+ return NULL;
+
+ blkg = container_of(ent, struct blkcg_gq, q_node);
+ return &blkg->rl;
}
-EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
-static void blkio_reset_stats_cpu(struct blkio_group *blkg)
+static int blkcg_reset_stats(struct cgroup_subsys_state *css,
+ struct cftype *cftype, u64 val)
{
- struct blkio_group_stats_cpu *stats_cpu;
- int i, j, k;
+ struct blkcg *blkcg = css_to_blkcg(css);
+ struct blkcg_gq *blkg;
+ int i;
+
/*
- * Note: On 64 bit arch this should not be an issue. This has the
- * possibility of returning some inconsistent value on 32bit arch
- * as 64bit update on 32bit is non atomic. Taking care of this
- * corner case makes code very complicated, like sending IPIs to
- * cpus, taking care of stats of offline cpus etc.
+ * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex
+ * which ends up putting cgroup's internal cgroup_tree_mutex under
+ * it; however, cgroup_tree_mutex is nested above cgroup file
+ * active protection and grabbing blkcg_pol_mutex from a cgroup
+ * file operation creates a possible circular dependency. cgroup
+ * internal locking is planned to go through further simplification
+ * and this issue should go away soon. For now, let's trylock
+ * blkcg_pol_mutex and restart the write on failure.
*
- * reset stats is anyway more of a debug feature and this sounds a
- * corner case. So I am not complicating the code yet until and
- * unless this becomes a real issue.
+ * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com
*/
- for_each_possible_cpu(i) {
- stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
- stats_cpu->sectors = 0;
- for(j = 0; j < BLKIO_STAT_CPU_NR; j++)
- for (k = 0; k < BLKIO_STAT_TOTAL; k++)
- stats_cpu->stat_arr_cpu[j][k] = 0;
- }
-}
+ if (!mutex_trylock(&blkcg_pol_mutex))
+ return restart_syscall();
+ spin_lock_irq(&blkcg->lock);
-static int
-blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
-{
- struct blkio_cgroup *blkcg;
- struct blkio_group *blkg;
- struct blkio_group_stats *stats;
- struct hlist_node *n;
- uint64_t queued[BLKIO_STAT_TOTAL];
- int i;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- bool idling, waiting, empty;
- unsigned long long now = sched_clock();
-#endif
+ /*
+ * Note that stat reset is racy - it doesn't synchronize against
+ * stat updates. This is a debug feature which shouldn't exist
+ * anyway. If you get hit by a race, retry.
+ */
+ hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
- blkcg = cgroup_to_blkio_cgroup(cgroup);
- spin_lock_irq(&blkcg->lock);
- hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
- spin_lock(&blkg->stats_lock);
- stats = &blkg->stats;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- idling = blkio_blkg_idling(stats);
- waiting = blkio_blkg_waiting(stats);
- empty = blkio_blkg_empty(stats);
-#endif
- for (i = 0; i < BLKIO_STAT_TOTAL; i++)
- queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
- memset(stats, 0, sizeof(struct blkio_group_stats));
- for (i = 0; i < BLKIO_STAT_TOTAL; i++)
- stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- if (idling) {
- blkio_mark_blkg_idling(stats);
- stats->start_idle_time = now;
- }
- if (waiting) {
- blkio_mark_blkg_waiting(stats);
- stats->start_group_wait_time = now;
+ if (blkcg_policy_enabled(blkg->q, pol) &&
+ pol->pd_reset_stats_fn)
+ pol->pd_reset_stats_fn(blkg);
}
- if (empty) {
- blkio_mark_blkg_empty(stats);
- stats->start_empty_time = now;
- }
-#endif
- spin_unlock(&blkg->stats_lock);
-
- /* Reset Per cpu stats which don't take blkg->stats_lock */
- blkio_reset_stats_cpu(blkg);
}
spin_unlock_irq(&blkcg->lock);
+ mutex_unlock(&blkcg_pol_mutex);
return 0;
}
-static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
- int chars_left, bool diskname_only)
+static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
- snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
- chars_left -= strlen(str);
- if (chars_left <= 0) {
- printk(KERN_WARNING
- "Possibly incorrect cgroup stat display format");
- return;
- }
- if (diskname_only)
- return;
- switch (type) {
- case BLKIO_STAT_READ:
- strlcat(str, " Read", chars_left);
- break;
- case BLKIO_STAT_WRITE:
- strlcat(str, " Write", chars_left);
- break;
- case BLKIO_STAT_SYNC:
- strlcat(str, " Sync", chars_left);
- break;
- case BLKIO_STAT_ASYNC:
- strlcat(str, " Async", chars_left);
- break;
- case BLKIO_STAT_TOTAL:
- strlcat(str, " Total", chars_left);
- break;
- default:
- strlcat(str, " Invalid", chars_left);
- }
+ /* some drivers (floppy) instantiate a queue w/o disk registered */
+ if (blkg->q->backing_dev_info.dev)
+ return dev_name(blkg->q->backing_dev_info.dev);
+ return NULL;
}
-static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
- struct cgroup_map_cb *cb, dev_t dev)
+/**
+ * blkcg_print_blkgs - helper for printing per-blkg data
+ * @sf: seq_file to print to
+ * @blkcg: blkcg of interest
+ * @prfill: fill function to print out a blkg
+ * @pol: policy in question
+ * @data: data to be passed to @prfill
+ * @show_total: to print out sum of prfill return values or not
+ *
+ * This function invokes @prfill on each blkg of @blkcg if pd for the
+ * policy specified by @pol exists. @prfill is invoked with @sf, the
+ * policy data and @data and the matching queue lock held. If @show_total
+ * is %true, the sum of the return values from @prfill is printed with
+ * "Total" label at the end.
+ *
+ * This is to be used to construct print functions for
+ * cftype->read_seq_string method.
+ */
+void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
+ u64 (*prfill)(struct seq_file *,
+ struct blkg_policy_data *, int),
+ const struct blkcg_policy *pol, int data,
+ bool show_total)
{
- blkio_get_key_name(0, dev, str, chars_left, true);
- cb->fill(cb, str, val);
- return val;
-}
+ struct blkcg_gq *blkg;
+ u64 total = 0;
-
-static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
- enum stat_type_cpu type, enum stat_sub_type sub_type)
-{
- int cpu;
- struct blkio_group_stats_cpu *stats_cpu;
- u64 val = 0, tval;
-
- for_each_possible_cpu(cpu) {
- unsigned int start;
- stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);
-
- do {
- start = u64_stats_fetch_begin(&stats_cpu->syncp);
- if (type == BLKIO_STAT_CPU_SECTORS)
- tval = stats_cpu->sectors;
- else
- tval = stats_cpu->stat_arr_cpu[type][sub_type];
- } while(u64_stats_fetch_retry(&stats_cpu->syncp, start));
-
- val += tval;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
+ spin_lock_irq(blkg->q->queue_lock);
+ if (blkcg_policy_enabled(blkg->q, pol))
+ total += prfill(sf, blkg->pd[pol->plid], data);
+ spin_unlock_irq(blkg->q->queue_lock);
}
+ rcu_read_unlock();
- return val;
+ if (show_total)
+ seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
+EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
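
Illustration only (not part of this patch): how a policy might build a cftype seq_file handler on top of blkcg_print_blkgs() and blkg_prfill_stat() (defined further down). struct my_pd, my_stat and my_blkcg_policy are hypothetical, and seq_css() is assumed to be how cftype handlers reach their css in this tree.

#include <linux/seq_file.h>
#include <linux/stddef.h>
#include "blk-cgroup.h"

struct my_pd {				/* hypothetical per-blkg data */
	struct blkg_policy_data	pd;	/* must come first */
	struct blkg_stat	my_stat;
};

extern struct blkcg_policy my_blkcg_policy;	/* hypothetical policy */

static int my_print_stat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
			  &my_blkcg_policy, offsetof(struct my_pd, my_stat),
			  true);
	return 0;
}
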
-static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
- struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
+/**
+ * __blkg_prfill_u64 - prfill helper for a single u64 value
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @v: value to print
+ *
+ * Print @v to @sf for the device associated with @pd.
+ */
+u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
- uint64_t disk_total, val;
- char key_str[MAX_KEY_LEN];
- enum stat_sub_type sub_type;
+ const char *dname = blkg_dev_name(pd->blkg);
- if (type == BLKIO_STAT_CPU_SECTORS) {
- val = blkio_read_stat_cpu(blkg, type, 0);
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
- }
+ if (!dname)
+ return 0;
- for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
- sub_type++) {
- blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
- val = blkio_read_stat_cpu(blkg, type, sub_type);
- cb->fill(cb, key_str, val);
- }
+ seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
+ return v;
+}
+EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
+
+/**
+ * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @rwstat: rwstat to print
+ *
+ * Print @rwstat to @sf for the device associated with @pd.
+ */
+u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ const struct blkg_rwstat *rwstat)
+{
+ static const char *rwstr[] = {
+ [BLKG_RWSTAT_READ] = "Read",
+ [BLKG_RWSTAT_WRITE] = "Write",
+ [BLKG_RWSTAT_SYNC] = "Sync",
+ [BLKG_RWSTAT_ASYNC] = "Async",
+ };
+ const char *dname = blkg_dev_name(pd->blkg);
+ u64 v;
+ int i;
+
+ if (!dname)
+ return 0;
- disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
- blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
+ (unsigned long long)rwstat->cnt[i]);
- blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
- cb->fill(cb, key_str, disk_total);
- return disk_total;
+ v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
+ seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
+ return v;
}
+EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
-/* This should be called with blkg->stats_lock held */
-static uint64_t blkio_get_stat(struct blkio_group *blkg,
- struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
+/**
+ * blkg_prfill_stat - prfill callback for blkg_stat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_stat in @pd
+ *
+ * prfill callback for printing a blkg_stat.
+ */
+u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
- uint64_t disk_total;
- char key_str[MAX_KEY_LEN];
- enum stat_sub_type sub_type;
-
- if (type == BLKIO_STAT_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.time, cb, dev);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- if (type == BLKIO_STAT_UNACCOUNTED_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.unaccounted_time, cb, dev);
- if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
- uint64_t sum = blkg->stats.avg_queue_size_sum;
- uint64_t samples = blkg->stats.avg_queue_size_samples;
- if (samples)
- do_div(sum, samples);
- else
- sum = 0;
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
- }
- if (type == BLKIO_STAT_GROUP_WAIT_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.group_wait_time, cb, dev);
- if (type == BLKIO_STAT_IDLE_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.idle_time, cb, dev);
- if (type == BLKIO_STAT_EMPTY_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.empty_time, cb, dev);
- if (type == BLKIO_STAT_DEQUEUE)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.dequeue, cb, dev);
-#endif
-
- for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
- sub_type++) {
- blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
- cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
- }
- disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
- blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
- blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
- cb->fill(cb, key_str, disk_total);
- return disk_total;
+ return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
+EXPORT_SYMBOL_GPL(blkg_prfill_stat);
-static int blkio_policy_parse_and_set(char *buf,
- struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
+/**
+ * blkg_prfill_rwstat - prfill callback for blkg_rwstat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_rwstat in @pd
+ *
+ * prfill callback for printing a blkg_rwstat.
+ */
+u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
{
- struct gendisk *disk = NULL;
- char *s[4], *p, *major_s = NULL, *minor_s = NULL;
- unsigned long major, minor;
- int i = 0, ret = -EINVAL;
- int part;
- dev_t dev;
- u64 temp;
-
- memset(s, 0, sizeof(s));
-
- while ((p = strsep(&buf, " ")) != NULL) {
- if (!*p)
- continue;
-
- s[i++] = p;
-
- /* Prevent from inputing too many things */
- if (i == 3)
- break;
- }
-
- if (i != 2)
- goto out;
-
- p = strsep(&s[0], ":");
- if (p != NULL)
- major_s = p;
- else
- goto out;
-
- minor_s = s[0];
- if (!minor_s)
- goto out;
+ struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
- if (strict_strtoul(major_s, 10, &major))
- goto out;
+ return __blkg_prfill_rwstat(sf, pd, &rwstat);
+}
+EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
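A minimal usage sketch (not part of this patch): a policy would typically expose such stats through a cgroup file whose show method hands one of these prfill callbacks to blkcg_print_blkgs(). blkcg_policy_foo, struct foo_group, foo_print_serviced() and the use of a ->seq_show callback with seq_css() are assumptions for illustration only.

struct foo_group {				/* hypothetical per-blkg data */
	struct blkg_policy_data pd;		/* must be the first member */
	struct blkg_rwstat served_bytes;	/* bytes completed */
	struct blkg_rwstat serviced;		/* IOs completed */
	u64 limit;				/* some per-device config */
};

static int foo_print_serviced(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));

	/* one "MAJ:MIN Read/Write/Sync/Async/Total" block per blkg */
	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_foo,
			  offsetof(struct foo_group, serviced), true);
	return 0;
}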
- if (strict_strtoul(minor_s, 10, &minor))
- goto out;
+/**
+ * blkg_stat_recursive_sum - collect hierarchical blkg_stat
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_stat in @pd
+ *
+ * Collect the blkg_stat specified by @off from @pd and all its online
+ * descendants and return the sum. The caller must be holding the queue
+ * lock for online tests.
+ */
+u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
+{
+ struct blkcg_policy *pol = blkcg_policy[pd->plid];
+ struct blkcg_gq *pos_blkg;
+ struct cgroup_subsys_state *pos_css;
+ u64 sum = 0;
- dev = MKDEV(major, minor);
+ lockdep_assert_held(pd->blkg->q->queue_lock);
- if (strict_strtoull(s[1], 10, &temp))
- goto out;
+ rcu_read_lock();
+ blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
+ struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
+ struct blkg_stat *stat = (void *)pos_pd + off;
- /* For rule removal, do not check for device presence. */
- if (temp) {
- disk = get_gendisk(dev, &part);
- if (!disk || part) {
- ret = -ENODEV;
- goto out;
- }
+ if (pos_blkg->online)
+ sum += blkg_stat_read(stat);
}
+ rcu_read_unlock();
- newpn->dev = dev;
-
- switch (plid) {
- case BLKIO_POLICY_PROP:
- if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
- temp > BLKIO_WEIGHT_MAX)
- goto out;
-
- newpn->plid = plid;
- newpn->fileid = fileid;
- newpn->val.weight = temp;
- break;
- case BLKIO_POLICY_THROTL:
- switch(fileid) {
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- newpn->plid = plid;
- newpn->fileid = fileid;
- newpn->val.bps = temp;
- break;
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- if (temp > THROTL_IOPS_MAX)
- goto out;
-
- newpn->plid = plid;
- newpn->fileid = fileid;
- newpn->val.iops = (unsigned int)temp;
- break;
- }
- break;
- default:
- BUG();
- }
- ret = 0;
-out:
- put_disk(disk);
- return ret;
+ return sum;
}
+EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
-unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
- dev_t dev)
+/**
+ * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_rwstat in @pd
+ *
+ * Collect the blkg_rwstat specified by @off from @pd and all its online
+ * descendants and return the sum. The caller must be holding the queue
+ * lock for online tests.
+ */
+struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
+ int off)
{
- struct blkio_policy_node *pn;
- unsigned long flags;
- unsigned int weight;
+ struct blkcg_policy *pol = blkcg_policy[pd->plid];
+ struct blkcg_gq *pos_blkg;
+ struct cgroup_subsys_state *pos_css;
+ struct blkg_rwstat sum = { };
+ int i;
- spin_lock_irqsave(&blkcg->lock, flags);
+ lockdep_assert_held(pd->blkg->q->queue_lock);
- pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
- BLKIO_PROP_weight_device);
- if (pn)
- weight = pn->val.weight;
- else
- weight = blkcg->weight;
+ rcu_read_lock();
+ blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
+ struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
+ struct blkg_rwstat *rwstat = (void *)pos_pd + off;
+ struct blkg_rwstat tmp;
- spin_unlock_irqrestore(&blkcg->lock, flags);
+ if (!pos_blkg->online)
+ continue;
- return weight;
-}
-EXPORT_SYMBOL_GPL(blkcg_get_weight);
+ tmp = blkg_rwstat_read(rwstat);
-uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
-{
- struct blkio_policy_node *pn;
- unsigned long flags;
- uint64_t bps = -1;
-
- spin_lock_irqsave(&blkcg->lock, flags);
- pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
- BLKIO_THROTL_read_bps_device);
- if (pn)
- bps = pn->val.bps;
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- return bps;
-}
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ sum.cnt[i] += tmp.cnt[i];
+ }
+ rcu_read_unlock();
-uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
-{
- struct blkio_policy_node *pn;
- unsigned long flags;
- uint64_t bps = -1;
-
- spin_lock_irqsave(&blkcg->lock, flags);
- pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
- BLKIO_THROTL_write_bps_device);
- if (pn)
- bps = pn->val.bps;
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- return bps;
+ return sum;
}
+EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
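As a small sketch of how a policy might use the recursive helpers, a hierarchical variant of a prfill callback could look like the following (foo_* name hypothetical); blkcg_print_blkgs() is assumed to hold the queue lock around each call, satisfying the requirement documented above.

static u64 foo_prfill_rwstat_recursive(struct seq_file *sf,
				       struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd, off);

	return __blkg_prfill_rwstat(sf, pd, &sum);
}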
-unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
+/**
+ * blkg_conf_prep - parse and prepare for per-blkg config update
+ * @blkcg: target block cgroup
+ * @pol: target policy
+ * @input: input string
+ * @ctx: blkg_conf_ctx to be filled
+ *
+ * Parse per-blkg config update from @input and initialize @ctx with the
+ * result. @ctx->blkg points to the blkg to be updated and @ctx->v the new
+ * value. This function returns with RCU read lock and queue lock held and
+ * must be paired with blkg_conf_finish().
+ */
+int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+ const char *input, struct blkg_conf_ctx *ctx)
+ __acquires(rcu) __acquires(disk->queue->queue_lock)
{
- struct blkio_policy_node *pn;
- unsigned long flags;
- unsigned int iops = -1;
-
- spin_lock_irqsave(&blkcg->lock, flags);
- pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
- BLKIO_THROTL_read_iops_device);
- if (pn)
- iops = pn->val.iops;
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- return iops;
-}
+ struct gendisk *disk;
+ struct blkcg_gq *blkg;
+ unsigned int major, minor;
+ unsigned long long v;
+ int part, ret;
-unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
-{
- struct blkio_policy_node *pn;
- unsigned long flags;
- unsigned int iops = -1;
-
- spin_lock_irqsave(&blkcg->lock, flags);
- pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
- BLKIO_THROTL_write_iops_device);
- if (pn)
- iops = pn->val.iops;
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- return iops;
-}
+ if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
+ return -EINVAL;
-/* Checks whether user asked for deleting a policy rule */
-static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
-{
- switch(pn->plid) {
- case BLKIO_POLICY_PROP:
- if (pn->val.weight == 0)
- return 1;
- break;
- case BLKIO_POLICY_THROTL:
- switch(pn->fileid) {
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- if (pn->val.bps == 0)
- return 1;
- break;
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- if (pn->val.iops == 0)
- return 1;
- }
- break;
- default:
- BUG();
- }
+ disk = get_gendisk(MKDEV(major, minor), &part);
+ if (!disk || part)
+ return -EINVAL;
- return 0;
-}
+ rcu_read_lock();
+ spin_lock_irq(disk->queue->queue_lock);
-static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
- struct blkio_policy_node *newpn)
-{
- switch(oldpn->plid) {
- case BLKIO_POLICY_PROP:
- oldpn->val.weight = newpn->val.weight;
- break;
- case BLKIO_POLICY_THROTL:
- switch(newpn->fileid) {
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- oldpn->val.bps = newpn->val.bps;
- break;
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- oldpn->val.iops = newpn->val.iops;
+ if (blkcg_policy_enabled(disk->queue, pol))
+ blkg = blkg_lookup_create(blkcg, disk->queue);
+ else
+ blkg = ERR_PTR(-EINVAL);
+
+ if (IS_ERR(blkg)) {
+ ret = PTR_ERR(blkg);
+ rcu_read_unlock();
+ spin_unlock_irq(disk->queue->queue_lock);
+ put_disk(disk);
+ /*
+ * If queue was bypassing, we should retry. Do so after a
+ * short msleep(). It isn't strictly necessary but queue
+ * can be bypassing for some time and it's always nice to
+ * avoid busy looping.
+ */
+ if (ret == -EBUSY) {
+ msleep(10);
+ ret = restart_syscall();
}
- break;
- default:
- BUG();
+ return ret;
}
+
+ ctx->disk = disk;
+ ctx->blkg = blkg;
+ ctx->v = v;
+ return 0;
}
+EXPORT_SYMBOL_GPL(blkg_conf_prep);
-/*
- * Some rules/values in blkg have changed. Propagate those to respective
- * policies.
+/**
+ * blkg_conf_finish - finish up per-blkg config update
+ * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
+ *
+ * Finish up after per-blkg config update. This function must be paired
+ * with blkg_conf_prep().
*/
-static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, struct blkio_policy_node *pn)
+void blkg_conf_finish(struct blkg_conf_ctx *ctx)
+ __releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
- unsigned int weight, iops;
- u64 bps;
-
- switch(pn->plid) {
- case BLKIO_POLICY_PROP:
- weight = pn->val.weight ? pn->val.weight :
- blkcg->weight;
- blkio_update_group_weight(blkg, weight);
- break;
- case BLKIO_POLICY_THROTL:
- switch(pn->fileid) {
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- bps = pn->val.bps ? pn->val.bps : (-1);
- blkio_update_group_bps(blkg, bps, pn->fileid);
- break;
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- iops = pn->val.iops ? pn->val.iops : (-1);
- blkio_update_group_iops(blkg, iops, pn->fileid);
- break;
- }
- break;
- default:
- BUG();
- }
+ spin_unlock_irq(ctx->disk->queue->queue_lock);
+ rcu_read_unlock();
+ put_disk(ctx->disk);
}
+EXPORT_SYMBOL_GPL(blkg_conf_finish);
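A sketch of the intended prep/finish pairing from a policy's configuration-write path, reusing the hypothetical struct foo_group and blkcg_policy_foo from the earlier sketch:

static int foo_set_limit(struct blkcg *blkcg, const char *buf)
{
	struct blkg_conf_ctx ctx;
	struct blkg_policy_data *pd;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
	if (ret)
		return ret;

	/* RCU read lock and queue lock are held here; ctx.blkg and ctx.v are valid */
	pd = blkg_to_pd(ctx.blkg, &blkcg_policy_foo);
	if (pd)
		container_of(pd, struct foo_group, pd)->limit = ctx.v;

	blkg_conf_finish(&ctx);
	return 0;
}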
-/*
- * A policy node rule has been updated. Propagate this update to all the
- * block groups which might be affected by this update.
+struct cftype blkcg_files[] = {
+ {
+ .name = "reset_stats",
+ .write_u64 = blkcg_reset_stats,
+ },
+ { } /* terminate */
+};
+
+/**
+ * blkcg_css_offline - cgroup css_offline callback
+ * @css: css of interest
+ *
+ * This function is called when @css is about to go away and is responsible
+ * for shooting down all blkgs associated with @css. blkgs should be
+ * removed while holding both q and blkcg locks. As blkcg lock is nested
+ * inside q lock, this function performs reverse double lock dancing.
+ *
+ * This is the blkcg counterpart of ioc_release_fn().
*/
-static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
- struct blkio_policy_node *pn)
+static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
- struct blkio_group *blkg;
- struct hlist_node *n;
+ struct blkcg *blkcg = css_to_blkcg(css);
- spin_lock(&blkio_list_lock);
spin_lock_irq(&blkcg->lock);
- hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
- if (pn->dev != blkg->dev || pn->plid != blkg->plid)
- continue;
- blkio_update_blkg_policy(blkcg, blkg, pn);
+ while (!hlist_empty(&blkcg->blkg_list)) {
+ struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
+ struct blkcg_gq, blkcg_node);
+ struct request_queue *q = blkg->q;
+
+ if (spin_trylock(q->queue_lock)) {
+ blkg_destroy(blkg);
+ spin_unlock(q->queue_lock);
+ } else {
+ spin_unlock_irq(&blkcg->lock);
+ cpu_relax();
+ spin_lock_irq(&blkcg->lock);
+ }
}
spin_unlock_irq(&blkcg->lock);
- spin_unlock(&blkio_list_lock);
}
-static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
- const char *buffer)
+static void blkcg_css_free(struct cgroup_subsys_state *css)
{
- int ret = 0;
- char *buf;
- struct blkio_policy_node *newpn, *pn;
- struct blkio_cgroup *blkcg;
- int keep_newpn = 0;
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int fileid = BLKIOFILE_ATTR(cft->private);
-
- buf = kstrdup(buffer, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ struct blkcg *blkcg = css_to_blkcg(css);
- newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
- if (!newpn) {
- ret = -ENOMEM;
- goto free_buf;
- }
-
- ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
- if (ret)
- goto free_newpn;
-
- blkcg = cgroup_to_blkio_cgroup(cgrp);
-
- spin_lock_irq(&blkcg->lock);
+ if (blkcg != &blkcg_root)
+ kfree(blkcg);
+}
- pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
- if (!pn) {
- if (!blkio_delete_rule_command(newpn)) {
- blkio_policy_insert_node(blkcg, newpn);
- keep_newpn = 1;
- }
- spin_unlock_irq(&blkcg->lock);
- goto update_io_group;
- }
+static struct cgroup_subsys_state *
+blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
+{
+ static atomic64_t id_seq = ATOMIC64_INIT(0);
+ struct blkcg *blkcg;
- if (blkio_delete_rule_command(newpn)) {
- blkio_policy_delete_node(pn);
- kfree(pn);
- spin_unlock_irq(&blkcg->lock);
- goto update_io_group;
+ if (!parent_css) {
+ blkcg = &blkcg_root;
+ goto done;
}
- spin_unlock_irq(&blkcg->lock);
- blkio_update_policy_rule(pn, newpn);
+ blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
+ if (!blkcg)
+ return ERR_PTR(-ENOMEM);
-update_io_group:
- blkio_update_policy_node_blkg(blkcg, newpn);
+ blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
+ blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
+ blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
+done:
+ spin_lock_init(&blkcg->lock);
+ INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
+ INIT_HLIST_HEAD(&blkcg->blkg_list);
-free_newpn:
- if (!keep_newpn)
- kfree(newpn);
-free_buf:
- kfree(buf);
- return ret;
+ return &blkcg->css;
}
-static void
-blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
+/**
+ * blkcg_init_queue - initialize blkcg part of request queue
+ * @q: request_queue to initialize
+ *
+ * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
+ * part of new request_queue @q.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int blkcg_init_queue(struct request_queue *q)
{
- switch(pn->plid) {
- case BLKIO_POLICY_PROP:
- if (pn->fileid == BLKIO_PROP_weight_device)
- seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
- MINOR(pn->dev), pn->val.weight);
- break;
- case BLKIO_POLICY_THROTL:
- switch(pn->fileid) {
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
- MINOR(pn->dev), pn->val.bps);
- break;
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
- MINOR(pn->dev), pn->val.iops);
- break;
- }
- break;
- default:
- BUG();
- }
-}
+ might_sleep();
-/* cgroup files which read their data from policy nodes end up here */
-static void blkio_read_policy_node_files(struct cftype *cft,
- struct blkio_cgroup *blkcg, struct seq_file *m)
-{
- struct blkio_policy_node *pn;
-
- if (!list_empty(&blkcg->policy_list)) {
- spin_lock_irq(&blkcg->lock);
- list_for_each_entry(pn, &blkcg->policy_list, node) {
- if (!pn_matches_cftype(cft, pn))
- continue;
- blkio_print_policy_node(m, pn);
- }
- spin_unlock_irq(&blkcg->lock);
- }
+ return blk_throtl_init(q);
}
-static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *m)
+/**
+ * blkcg_drain_queue - drain blkcg part of request_queue
+ * @q: request_queue to drain
+ *
+ * Called from blk_drain_queue(). Responsible for draining blkcg part.
+ */
+void blkcg_drain_queue(struct request_queue *q)
{
- struct blkio_cgroup *blkcg;
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int name = BLKIOFILE_ATTR(cft->private);
-
- blkcg = cgroup_to_blkio_cgroup(cgrp);
-
- switch(plid) {
- case BLKIO_POLICY_PROP:
- switch(name) {
- case BLKIO_PROP_weight_device:
- blkio_read_policy_node_files(cft, blkcg, m);
- return 0;
- default:
- BUG();
- }
- break;
- case BLKIO_POLICY_THROTL:
- switch(name){
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- blkio_read_policy_node_files(cft, blkcg, m);
- return 0;
- default:
- BUG();
- }
- break;
- default:
- BUG();
- }
+ lockdep_assert_held(q->queue_lock);
- return 0;
+ /*
+ * @q could be exiting and already have destroyed all blkgs as
+ * indicated by NULL root_blkg. If so, don't confuse policies.
+ */
+ if (!q->root_blkg)
+ return;
+
+ blk_throtl_drain(q);
}
-static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
- struct cftype *cft, struct cgroup_map_cb *cb,
- enum stat_type type, bool show_total, bool pcpu)
+/**
+ * blkcg_exit_queue - exit and release blkcg part of request_queue
+ * @q: request_queue being released
+ *
+ * Called from blk_release_queue(). Responsible for exiting blkcg part.
+ */
+void blkcg_exit_queue(struct request_queue *q)
{
- struct blkio_group *blkg;
- struct hlist_node *n;
- uint64_t cgroup_total = 0;
+ spin_lock_irq(q->queue_lock);
+ blkg_destroy_all(q);
+ spin_unlock_irq(q->queue_lock);
- rcu_read_lock();
- hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
- if (blkg->dev) {
- if (!cftype_blkg_same_policy(cft, blkg))
- continue;
- if (pcpu)
- cgroup_total += blkio_get_stat_cpu(blkg, cb,
- blkg->dev, type);
- else {
- spin_lock_irq(&blkg->stats_lock);
- cgroup_total += blkio_get_stat(blkg, cb,
- blkg->dev, type);
- spin_unlock_irq(&blkg->stats_lock);
- }
- }
- }
- if (show_total)
- cb->fill(cb, "Total", cgroup_total);
- rcu_read_unlock();
- return 0;
+ blk_throtl_exit(q);
}
-/* All map kind of cgroup file get serviced by this function */
-static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
- struct cgroup_map_cb *cb)
+/*
+ * We cannot support shared io contexts, as we have no means to support
+ * two tasks with the same ioc in two different groups without major rework
+ * of the main cic data structures. For now we allow a task to change
+ * its cgroup only if it's the only owner of its ioc.
+ */
+static int blkcg_can_attach(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset)
{
- struct blkio_cgroup *blkcg;
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int name = BLKIOFILE_ATTR(cft->private);
-
- blkcg = cgroup_to_blkio_cgroup(cgrp);
-
- switch(plid) {
- case BLKIO_POLICY_PROP:
- switch(name) {
- case BLKIO_PROP_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_TIME, 0, 0);
- case BLKIO_PROP_sectors:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_SECTORS, 0, 1);
- case BLKIO_PROP_io_service_bytes:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
- case BLKIO_PROP_io_serviced:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_SERVICED, 1, 1);
- case BLKIO_PROP_io_service_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_SERVICE_TIME, 1, 0);
- case BLKIO_PROP_io_wait_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_WAIT_TIME, 1, 0);
- case BLKIO_PROP_io_merged:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_MERGED, 1, 1);
- case BLKIO_PROP_io_queued:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_QUEUED, 1, 0);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- case BLKIO_PROP_unaccounted_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
- case BLKIO_PROP_dequeue:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_DEQUEUE, 0, 0);
- case BLKIO_PROP_avg_queue_size:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
- case BLKIO_PROP_group_wait_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
- case BLKIO_PROP_idle_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_IDLE_TIME, 0, 0);
- case BLKIO_PROP_empty_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_EMPTY_TIME, 0, 0);
-#endif
- default:
- BUG();
- }
- break;
- case BLKIO_POLICY_THROTL:
- switch(name){
- case BLKIO_THROTL_io_service_bytes:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
- case BLKIO_THROTL_io_serviced:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_SERVICED, 1, 1);
- default:
- BUG();
- }
- break;
- default:
- BUG();
- }
+ struct task_struct *task;
+ struct io_context *ioc;
+ int ret = 0;
- return 0;
+ /* task_lock() is needed to avoid races with exit_io_context() */
+ cgroup_taskset_for_each(task, tset) {
+ task_lock(task);
+ ioc = task->io_context;
+ if (ioc && atomic_read(&ioc->nr_tasks) > 1)
+ ret = -EINVAL;
+ task_unlock(task);
+ if (ret)
+ break;
+ }
+ return ret;
}
-static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
+struct cgroup_subsys blkio_cgrp_subsys = {
+ .css_alloc = blkcg_css_alloc,
+ .css_offline = blkcg_css_offline,
+ .css_free = blkcg_css_free,
+ .can_attach = blkcg_can_attach,
+ .base_cftypes = blkcg_files,
+};
+EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);
+
+/**
+ * blkcg_activate_policy - activate a blkcg policy on a request_queue
+ * @q: request_queue of interest
+ * @pol: blkcg policy to activate
+ *
+ * Activate @pol on @q. Requires %GFP_KERNEL context. @q goes through
+ * bypass mode to populate its blkgs with policy_data for @pol.
+ *
+ * Activation happens with @q bypassed, so nobody would be accessing blkgs
+ * from IO path. Update of each blkg is protected by both queue and blkcg
+ * locks so that holding either lock and testing blkcg_policy_enabled() is
+ * always enough for dereferencing policy data.
+ *
+ * The caller is responsible for synchronizing [de]activations and policy
+ * [un]registrations. Returns 0 on success, -errno on failure.
+ */
+int blkcg_activate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol)
{
- struct blkio_group *blkg;
- struct hlist_node *n;
- struct blkio_policy_node *pn;
+ LIST_HEAD(pds);
+ struct blkcg_gq *blkg, *new_blkg;
+ struct blkg_policy_data *pd, *n;
+ int cnt = 0, ret;
+ bool preloaded;
- if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
- return -EINVAL;
+ if (blkcg_policy_enabled(q, pol))
+ return 0;
- spin_lock(&blkio_list_lock);
- spin_lock_irq(&blkcg->lock);
- blkcg->weight = (unsigned int)val;
+ /* preallocations for root blkg */
+ new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
+ if (!new_blkg)
+ return -ENOMEM;
- hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
- pn = blkio_policy_search_node(blkcg, blkg->dev,
- BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
- if (pn)
- continue;
+ blk_queue_bypass_start(q);
- blkio_update_group_weight(blkg, blkcg->weight);
- }
- spin_unlock_irq(&blkcg->lock);
- spin_unlock(&blkio_list_lock);
- return 0;
-}
+ preloaded = !radix_tree_preload(GFP_KERNEL);
-static u64 blkiocg_file_read_u64 (struct cgroup *cgrp, struct cftype *cft) {
- struct blkio_cgroup *blkcg;
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int name = BLKIOFILE_ATTR(cft->private);
+ /*
+ * Make sure the root blkg exists and count the existing blkgs. As
+ * @q is bypassing at this point, blkg_lookup_create() can't be
+ * used. Open code it.
+ */
+ spin_lock_irq(q->queue_lock);
- blkcg = cgroup_to_blkio_cgroup(cgrp);
+ rcu_read_lock();
+ blkg = __blkg_lookup(&blkcg_root, q, false);
+ if (blkg)
+ blkg_free(new_blkg);
+ else
+ blkg = blkg_create(&blkcg_root, q, new_blkg);
+ rcu_read_unlock();
- switch(plid) {
- case BLKIO_POLICY_PROP:
- switch(name) {
- case BLKIO_PROP_weight:
- return (u64)blkcg->weight;
- }
- break;
- default:
- BUG();
+ if (preloaded)
+ radix_tree_preload_end();
+
+ if (IS_ERR(blkg)) {
+ ret = PTR_ERR(blkg);
+ goto out_unlock;
}
- return 0;
-}
-static int
-blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
-{
- struct blkio_cgroup *blkcg;
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int name = BLKIOFILE_ATTR(cft->private);
+ list_for_each_entry(blkg, &q->blkg_list, q_node)
+ cnt++;
- blkcg = cgroup_to_blkio_cgroup(cgrp);
+ spin_unlock_irq(q->queue_lock);
- switch(plid) {
- case BLKIO_POLICY_PROP:
- switch(name) {
- case BLKIO_PROP_weight:
- return blkio_weight_write(blkcg, val);
+ /* allocate policy_data for all existing blkgs */
+ while (cnt--) {
+ pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
+ if (!pd) {
+ ret = -ENOMEM;
+ goto out_free;
}
- break;
- default:
- BUG();
+ list_add_tail(&pd->alloc_node, &pds);
}
- return 0;
-}
-
-struct cftype blkio_files[] = {
- {
- .name = "weight_device",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_weight_device),
- .read_seq_string = blkiocg_file_read,
- .write_string = blkiocg_file_write,
- .max_write_len = 256,
- },
- {
- .name = "weight",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_weight),
- .read_u64 = blkiocg_file_read_u64,
- .write_u64 = blkiocg_file_write_u64,
- },
- {
- .name = "time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "sectors",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_sectors),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_service_bytes",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_service_bytes),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_serviced",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_serviced),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_service_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_service_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_wait_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_wait_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_merged",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_merged),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_queued",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_queued),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "reset_stats",
- .write_u64 = blkiocg_reset_stats,
- },
-#ifdef CONFIG_BLK_DEV_THROTTLING
- {
- .name = "throttle.read_bps_device",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_read_bps_device),
- .read_seq_string = blkiocg_file_read,
- .write_string = blkiocg_file_write,
- .max_write_len = 256,
- },
+ /*
+ * Install the allocated pds. With @q bypassing, no new blkg
+ * should have been created while the queue lock was dropped.
+ */
+ spin_lock_irq(q->queue_lock);
- {
- .name = "throttle.write_bps_device",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_write_bps_device),
- .read_seq_string = blkiocg_file_read,
- .write_string = blkiocg_file_write,
- .max_write_len = 256,
- },
+ list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ if (WARN_ON(list_empty(&pds))) {
+ /* umm... this shouldn't happen, just abort */
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
+ list_del_init(&pd->alloc_node);
- {
- .name = "throttle.read_iops_device",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_read_iops_device),
- .read_seq_string = blkiocg_file_read,
- .write_string = blkiocg_file_write,
- .max_write_len = 256,
- },
+ /* grab blkcg lock too while installing @pd on @blkg */
+ spin_lock(&blkg->blkcg->lock);
- {
- .name = "throttle.write_iops_device",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_write_iops_device),
- .read_seq_string = blkiocg_file_read,
- .write_string = blkiocg_file_write,
- .max_write_len = 256,
- },
- {
- .name = "throttle.io_service_bytes",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_io_service_bytes),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "throttle.io_serviced",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_io_serviced),
- .read_map = blkiocg_file_read_map,
- },
-#endif /* CONFIG_BLK_DEV_THROTTLING */
+ blkg->pd[pol->plid] = pd;
+ pd->blkg = blkg;
+ pd->plid = pol->plid;
+ pol->pd_init_fn(blkg);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- {
- .name = "avg_queue_size",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_avg_queue_size),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "group_wait_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_group_wait_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "idle_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_idle_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "empty_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_empty_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "dequeue",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_dequeue),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "unaccounted_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_unaccounted_time),
- .read_map = blkiocg_file_read_map,
- },
-#endif
-};
+ spin_unlock(&blkg->blkcg->lock);
+ }
-static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
-{
- return cgroup_add_files(cgroup, subsys, blkio_files,
- ARRAY_SIZE(blkio_files));
+ __set_bit(pol->plid, q->blkcg_pols);
+ ret = 0;
+out_unlock:
+ spin_unlock_irq(q->queue_lock);
+out_free:
+ blk_queue_bypass_end(q);
+ list_for_each_entry_safe(pd, n, &pds, alloc_node)
+ kfree(pd);
+ return ret;
}
+EXPORT_SYMBOL_GPL(blkcg_activate_policy);
-static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
+/**
+ * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
+ * @q: request_queue of interest
+ * @pol: blkcg policy to deactivate
+ *
+ * Deactivate @pol on @q. Follows the same synchronization rules as
+ * blkcg_activate_policy().
+ */
+void blkcg_deactivate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol)
{
- struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
- unsigned long flags;
- struct blkio_group *blkg;
- void *key;
- struct blkio_policy_type *blkiop;
- struct blkio_policy_node *pn, *pntmp;
-
- rcu_read_lock();
- do {
- spin_lock_irqsave(&blkcg->lock, flags);
+ struct blkcg_gq *blkg;
- if (hlist_empty(&blkcg->blkg_list)) {
- spin_unlock_irqrestore(&blkcg->lock, flags);
- break;
- }
+ if (!blkcg_policy_enabled(q, pol))
+ return;
- blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
- blkcg_node);
- key = rcu_dereference(blkg->key);
- __blkiocg_del_blkio_group(blkg);
+ blk_queue_bypass_start(q);
+ spin_lock_irq(q->queue_lock);
- spin_unlock_irqrestore(&blkcg->lock, flags);
+ __clear_bit(pol->plid, q->blkcg_pols);
- /*
- * This blkio_group is being unlinked as associated cgroup is
- * going away. Let all the IO controlling policies know about
- * this event.
- */
- spin_lock(&blkio_list_lock);
- list_for_each_entry(blkiop, &blkio_list, list) {
- if (blkiop->plid != blkg->plid)
- continue;
- blkiop->ops.blkio_unlink_group_fn(key, blkg);
- }
- spin_unlock(&blkio_list_lock);
- } while (1);
+ /* if no policy is left, no need for blkgs - shoot them down */
+ if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
+ blkg_destroy_all(q);
- list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
- blkio_policy_delete_node(pn);
- kfree(pn);
- }
+ list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ /* grab blkcg lock too while removing @pd from @blkg */
+ spin_lock(&blkg->blkcg->lock);
- free_css_id(&blkio_subsys, &blkcg->css);
- rcu_read_unlock();
- if (blkcg != &blkio_root_cgroup)
- kfree(blkcg);
-}
+ if (pol->pd_offline_fn)
+ pol->pd_offline_fn(blkg);
+ if (pol->pd_exit_fn)
+ pol->pd_exit_fn(blkg);
-static struct cgroup_subsys_state *
-blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
-{
- struct blkio_cgroup *blkcg;
- struct cgroup *parent = cgroup->parent;
+ kfree(blkg->pd[pol->plid]);
+ blkg->pd[pol->plid] = NULL;
- if (!parent) {
- blkcg = &blkio_root_cgroup;
- goto done;
+ spin_unlock(&blkg->blkcg->lock);
}
- blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
- if (!blkcg)
- return ERR_PTR(-ENOMEM);
-
- blkcg->weight = BLKIO_WEIGHT_DEFAULT;
-done:
- spin_lock_init(&blkcg->lock);
- INIT_HLIST_HEAD(&blkcg->blkg_list);
-
- INIT_LIST_HEAD(&blkcg->policy_list);
- return &blkcg->css;
+ spin_unlock_irq(q->queue_lock);
+ blk_queue_bypass_end(q);
}
+EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
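Sketch of the expected activate/deactivate pairing from a policy's queue init and exit paths (names hypothetical):

static int foo_init_queue(struct request_queue *q)
{
	/* allocates and installs foo's policy data on every blkg of @q */
	return blkcg_activate_policy(q, &blkcg_policy_foo);
}

static void foo_exit_queue(struct request_queue *q)
{
	/* removes foo's policy data; blkgs are shot down if no policy remains */
	blkcg_deactivate_policy(q, &blkcg_policy_foo);
}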
-/*
- * We cannot support shared io contexts, as we have no mean to support
- * two tasks with the same ioc in two different groups without major rework
- * of the main cic data structures. For now we allow a task to change
- * its cgroup only if it's the only owner of its ioc.
+/**
+ * blkcg_policy_register - register a blkcg policy
+ * @pol: blkcg policy to register
+ *
+ * Register @pol with blkcg core. Might sleep and @pol may be modified on
+ * successful registration. Returns 0 on success and -errno on failure.
*/
-static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
- struct cgroup_taskset *tset)
+int blkcg_policy_register(struct blkcg_policy *pol)
{
- struct task_struct *task;
- struct io_context *ioc;
- int ret = 0;
+ int i, ret;
- /* task_lock() is needed to avoid races with exit_io_context() */
- cgroup_taskset_for_each(task, cgrp, tset) {
- task_lock(task);
- ioc = task->io_context;
- if (ioc && atomic_read(&ioc->nr_tasks) > 1)
- ret = -EINVAL;
- task_unlock(task);
- if (ret)
+ if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
+ return -EINVAL;
+
+ mutex_lock(&blkcg_pol_mutex);
+
+ /* find an empty slot */
+ ret = -ENOSPC;
+ for (i = 0; i < BLKCG_MAX_POLS; i++)
+ if (!blkcg_policy[i])
break;
- }
- return ret;
-}
+ if (i >= BLKCG_MAX_POLS)
+ goto out_unlock;
-static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
- struct cgroup_taskset *tset)
-{
- struct task_struct *task;
- struct io_context *ioc;
+ /* register and update blkgs */
+ pol->plid = i;
+ blkcg_policy[i] = pol;
- cgroup_taskset_for_each(task, cgrp, tset) {
- /* we don't lose anything even if ioc allocation fails */
- ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
- if (ioc) {
- ioc_cgroup_changed(ioc);
- put_io_context(ioc);
- }
- }
+ /* everything is in place, add intf files for the new policy */
+ if (pol->cftypes)
+ WARN_ON(cgroup_add_cftypes(&blkio_cgrp_subsys, pol->cftypes));
+ ret = 0;
+out_unlock:
+ mutex_unlock(&blkcg_pol_mutex);
+ return ret;
}
+EXPORT_SYMBOL_GPL(blkcg_policy_register);
-void blkio_policy_register(struct blkio_policy_type *blkiop)
+/**
+ * blkcg_policy_unregister - unregister a blkcg policy
+ * @pol: blkcg policy to unregister
+ *
+ * Undo blkcg_policy_register(@pol). Might sleep.
+ */
+void blkcg_policy_unregister(struct blkcg_policy *pol)
{
- spin_lock(&blkio_list_lock);
- list_add_tail(&blkiop->list, &blkio_list);
- spin_unlock(&blkio_list_lock);
-}
-EXPORT_SYMBOL_GPL(blkio_policy_register);
+ mutex_lock(&blkcg_pol_mutex);
-void blkio_policy_unregister(struct blkio_policy_type *blkiop)
-{
- spin_lock(&blkio_list_lock);
- list_del_init(&blkiop->list);
- spin_unlock(&blkio_list_lock);
-}
-EXPORT_SYMBOL_GPL(blkio_policy_unregister);
+ if (WARN_ON(blkcg_policy[pol->plid] != pol))
+ goto out_unlock;
-static int __init init_cgroup_blkio(void)
-{
- return cgroup_load_subsys(&blkio_subsys);
-}
+ /* kill the intf files first */
+ if (pol->cftypes)
+ cgroup_rm_cftypes(pol->cftypes);
-static void __exit exit_cgroup_blkio(void)
-{
- cgroup_unload_subsys(&blkio_subsys);
+ /* unregister and update blkgs */
+ blkcg_policy[pol->plid] = NULL;
+out_unlock:
+ mutex_unlock(&blkcg_pol_mutex);
}
-
-module_init(init_cgroup_blkio);
-module_exit(exit_cgroup_blkio);
-MODULE_LICENSE("GPL");
+EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
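For completeness, a sketch of how a module could define and register a policy against this interface; every foo_* identifier (including the foo_files cftype array and the pd callbacks) is hypothetical.

static struct blkcg_policy blkcg_policy_foo = {
	.pd_size	= sizeof(struct foo_group),	/* >= sizeof(struct blkg_policy_data) */
	.cftypes	= foo_files,
	.pd_init_fn	= foo_pd_init,
	.pd_exit_fn	= foo_pd_exit,
};

static int __init foo_init(void)
{
	return blkcg_policy_register(&blkcg_policy_foo);
}

static void __exit foo_exit(void)
{
	blkcg_policy_unregister(&blkcg_policy_foo);
}

module_init(foo_init);
module_exit(foo_exit);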
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 6f3ace7e792..d3fd7aa3d2a 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -15,350 +15,592 @@
#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
-
-enum blkio_policy_id {
- BLKIO_POLICY_PROP = 0, /* Proportional Bandwidth division */
- BLKIO_POLICY_THROTL, /* Throttling */
-};
+#include <linux/seq_file.h>
+#include <linux/radix-tree.h>
+#include <linux/blkdev.h>
+#include <linux/atomic.h>
/* Max limits for throttle policy */
#define THROTL_IOPS_MAX UINT_MAX
-#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
-
-#ifndef CONFIG_BLK_CGROUP
-/* When blk-cgroup is a module, its subsys_id isn't a compile-time constant */
-extern struct cgroup_subsys blkio_subsys;
-#define blkio_subsys_id blkio_subsys.subsys_id
-#endif
-
-enum stat_type {
- /* Total time spent (in ns) between request dispatch to the driver and
- * request completion for IOs doen by this cgroup. This may not be
- * accurate when NCQ is turned on. */
- BLKIO_STAT_SERVICE_TIME = 0,
- /* Total time spent waiting in scheduler queue in ns */
- BLKIO_STAT_WAIT_TIME,
- /* Number of IOs queued up */
- BLKIO_STAT_QUEUED,
- /* All the single valued stats go below this */
- BLKIO_STAT_TIME,
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- /* Time not charged to this cgroup */
- BLKIO_STAT_UNACCOUNTED_TIME,
- BLKIO_STAT_AVG_QUEUE_SIZE,
- BLKIO_STAT_IDLE_TIME,
- BLKIO_STAT_EMPTY_TIME,
- BLKIO_STAT_GROUP_WAIT_TIME,
- BLKIO_STAT_DEQUEUE
-#endif
+/* CFQ specific, out here for blkcg->cfq_weight */
+#define CFQ_WEIGHT_MIN 10
+#define CFQ_WEIGHT_MAX 1000
+#define CFQ_WEIGHT_DEFAULT 500
+
+#ifdef CONFIG_BLK_CGROUP
+
+enum blkg_rwstat_type {
+ BLKG_RWSTAT_READ,
+ BLKG_RWSTAT_WRITE,
+ BLKG_RWSTAT_SYNC,
+ BLKG_RWSTAT_ASYNC,
+
+ BLKG_RWSTAT_NR,
+ BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
+};
+
+struct blkcg_gq;
+
+struct blkcg {
+ struct cgroup_subsys_state css;
+ spinlock_t lock;
+
+ struct radix_tree_root blkg_tree;
+ struct blkcg_gq *blkg_hint;
+ struct hlist_head blkg_list;
+
+ /* for policies to test whether associated blkcg has changed */
+ uint64_t id;
+
+ /* TODO: per-policy storage in blkcg */
+ unsigned int cfq_weight; /* belongs to cfq */
+ unsigned int cfq_leaf_weight;
};
-/* Per cpu stats */
-enum stat_type_cpu {
- BLKIO_STAT_CPU_SECTORS,
- /* Total bytes transferred */
- BLKIO_STAT_CPU_SERVICE_BYTES,
- /* Total IOs serviced, post merge */
- BLKIO_STAT_CPU_SERVICED,
- /* Number of IOs merged */
- BLKIO_STAT_CPU_MERGED,
- BLKIO_STAT_CPU_NR
+struct blkg_stat {
+ struct u64_stats_sync syncp;
+ uint64_t cnt;
};
-enum stat_sub_type {
- BLKIO_STAT_READ = 0,
- BLKIO_STAT_WRITE,
- BLKIO_STAT_SYNC,
- BLKIO_STAT_ASYNC,
- BLKIO_STAT_TOTAL
+struct blkg_rwstat {
+ struct u64_stats_sync syncp;
+ uint64_t cnt[BLKG_RWSTAT_NR];
};
-/* blkg state flags */
-enum blkg_state_flags {
- BLKG_waiting = 0,
- BLKG_idling,
- BLKG_empty,
+/*
+ * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a
+ * request_queue (q). This is used by blkcg policies which need to track
+ * information per blkcg - q pair.
+ *
+ * There can be multiple active blkcg policies and each has its private
+ * data on each blkg, the size of which is determined by
+ * blkcg_policy->pd_size. blkcg core allocates and frees such areas
+ * together with blkg and invokes pd_init/exit_fn() methods.
+ *
+ * Such private data must embed struct blkg_policy_data (pd) at the
+ * beginning and pd_size can't be smaller than pd.
+ */
+struct blkg_policy_data {
+ /* the blkg and policy id this per-policy data belongs to */
+ struct blkcg_gq *blkg;
+ int plid;
+
+ /* used during policy activation */
+ struct list_head alloc_node;
};
-/* cgroup files owned by proportional weight policy */
-enum blkcg_file_name_prop {
- BLKIO_PROP_weight = 1,
- BLKIO_PROP_weight_device,
- BLKIO_PROP_io_service_bytes,
- BLKIO_PROP_io_serviced,
- BLKIO_PROP_time,
- BLKIO_PROP_sectors,
- BLKIO_PROP_unaccounted_time,
- BLKIO_PROP_io_service_time,
- BLKIO_PROP_io_wait_time,
- BLKIO_PROP_io_merged,
- BLKIO_PROP_io_queued,
- BLKIO_PROP_avg_queue_size,
- BLKIO_PROP_group_wait_time,
- BLKIO_PROP_idle_time,
- BLKIO_PROP_empty_time,
- BLKIO_PROP_dequeue,
+/* association between a blk cgroup and a request queue */
+struct blkcg_gq {
+ /* Pointer to the associated request_queue */
+ struct request_queue *q;
+ struct list_head q_node;
+ struct hlist_node blkcg_node;
+ struct blkcg *blkcg;
+
+ /* all non-root blkcg_gq's are guaranteed to have access to parent */
+ struct blkcg_gq *parent;
+
+ /* request allocation list for this blkcg-q pair */
+ struct request_list rl;
+
+ /* reference count */
+ atomic_t refcnt;
+
+ /* is this blkg online? protected by both blkcg and q locks */
+ bool online;
+
+ struct blkg_policy_data *pd[BLKCG_MAX_POLS];
+
+ struct rcu_head rcu_head;
};
-/* cgroup files owned by throttle policy */
-enum blkcg_file_name_throtl {
- BLKIO_THROTL_read_bps_device,
- BLKIO_THROTL_write_bps_device,
- BLKIO_THROTL_read_iops_device,
- BLKIO_THROTL_write_iops_device,
- BLKIO_THROTL_io_service_bytes,
- BLKIO_THROTL_io_serviced,
+typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
+
+struct blkcg_policy {
+ int plid;
+ /* policy specific private data size */
+ size_t pd_size;
+ /* cgroup files for the policy */
+ struct cftype *cftypes;
+
+ /* operations */
+ blkcg_pol_init_pd_fn *pd_init_fn;
+ blkcg_pol_online_pd_fn *pd_online_fn;
+ blkcg_pol_offline_pd_fn *pd_offline_fn;
+ blkcg_pol_exit_pd_fn *pd_exit_fn;
+ blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
};
-struct blkio_cgroup {
- struct cgroup_subsys_state css;
- unsigned int weight;
- spinlock_t lock;
- struct hlist_head blkg_list;
- struct list_head policy_list; /* list of blkio_policy_node */
+extern struct blkcg blkcg_root;
+
+struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q);
+int blkcg_init_queue(struct request_queue *q);
+void blkcg_drain_queue(struct request_queue *q);
+void blkcg_exit_queue(struct request_queue *q);
+
+/* Blkio controller policy registration */
+int blkcg_policy_register(struct blkcg_policy *pol);
+void blkcg_policy_unregister(struct blkcg_policy *pol);
+int blkcg_activate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol);
+void blkcg_deactivate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol);
+
+void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
+ u64 (*prfill)(struct seq_file *,
+ struct blkg_policy_data *, int),
+ const struct blkcg_policy *pol, int data,
+ bool show_total);
+u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
+u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ const struct blkg_rwstat *rwstat);
+u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
+u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off);
+
+u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
+struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
+ int off);
+
+struct blkg_conf_ctx {
+ struct gendisk *disk;
+ struct blkcg_gq *blkg;
+ u64 v;
};
-struct blkio_group_stats {
- /* total disk time and nr sectors dispatched by this group */
- uint64_t time;
- uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- /* Time not charged to this cgroup */
- uint64_t unaccounted_time;
-
- /* Sum of number of IOs queued across all samples */
- uint64_t avg_queue_size_sum;
- /* Count of samples taken for average */
- uint64_t avg_queue_size_samples;
- /* How many times this group has been removed from service tree */
- unsigned long dequeue;
-
- /* Total time spent waiting for it to be assigned a timeslice. */
- uint64_t group_wait_time;
- uint64_t start_group_wait_time;
-
- /* Time spent idling for this blkio_group */
- uint64_t idle_time;
- uint64_t start_idle_time;
+int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+ const char *input, struct blkg_conf_ctx *ctx);
+void blkg_conf_finish(struct blkg_conf_ctx *ctx);
+
+
+static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
+{
+ return css ? container_of(css, struct blkcg, css) : NULL;
+}
+
+static inline struct blkcg *task_blkcg(struct task_struct *tsk)
+{
+ return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
+}
+
+static inline struct blkcg *bio_blkcg(struct bio *bio)
+{
+ if (bio && bio->bi_css)
+ return css_to_blkcg(bio->bi_css);
+ return task_blkcg(current);
+}
+
+/**
+ * blkcg_parent - get the parent of a blkcg
+ * @blkcg: blkcg of interest
+ *
+ * Return the parent blkcg of @blkcg. Can be called anytime.
+ */
+static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
+{
+ return css_to_blkcg(blkcg->css.parent);
+}
+
+/**
+ * blkg_to_pd - get policy private data
+ * @blkg: blkg of interest
+ * @pol: policy of interest
+ *
+ * Return pointer to private data associated with the @blkg-@pol pair.
+ */
+static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol)
+{
+ return blkg ? blkg->pd[pol->plid] : NULL;
+}
+
+/**
+ * pd_to_blkg - get blkg associated with policy private data
+ * @pd: policy private data of interest
+ *
+ * @pd is policy private data. Determine the blkg it's associated with.
+ */
+static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
+{
+ return pd ? pd->blkg : NULL;
+}
+
+/**
+ * blkg_path - format cgroup path of blkg
+ * @blkg: blkg of interest
+ * @buf: target buffer
+ * @buflen: target buffer length
+ *
+ * Format the path of the cgroup of @blkg into @buf.
+ */
+static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
+{
+ char *p;
+
+ p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
+ if (!p) {
+ strncpy(buf, "<unavailable>", buflen);
+ return -ENAMETOOLONG;
+ }
+
+ memmove(buf, p, buf + buflen - p);
+ return 0;
+}
+
+/**
+ * blkg_get - get a blkg reference
+ * @blkg: blkg to get
+ *
+ * The caller should be holding an existing reference.
+ */
+static inline void blkg_get(struct blkcg_gq *blkg)
+{
+ WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+ atomic_inc(&blkg->refcnt);
+}
+
+void __blkg_release_rcu(struct rcu_head *rcu);
+
+/**
+ * blkg_put - put a blkg reference
+ * @blkg: blkg to put
+ */
+static inline void blkg_put(struct blkcg_gq *blkg)
+{
+ WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+ if (atomic_dec_and_test(&blkg->refcnt))
+ call_rcu(&blkg->rcu_head, __blkg_release_rcu);
+}
+
+struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
+ bool update_hint);
+
+/**
+ * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
+ * @d_blkg: loop cursor pointing to the current descendant
+ * @pos_css: used for iteration
+ * @p_blkg: target blkg to walk descendants of
+ *
+ * Walk @d_blkg through the descendants of @p_blkg. Must be used with RCU
+ * read locked. If called under either blkcg or queue lock, the iteration
+ * is guaranteed to include all and only online blkgs. The caller may
+ * update @pos_css by calling css_rightmost_descendant() to skip subtree.
+ * @p_blkg is included in the iteration and the first node to be visited.
+ */
+#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \
+ css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \
+ if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
+ (p_blkg)->q, false)))
+
+/**
+ * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
+ * @d_blkg: loop cursor pointing to the current descendant
+ * @pos_css: used for iteration
+ * @p_blkg: target blkg to walk descendants of
+ *
+ * Similar to blkg_for_each_descendant_pre() but performs post-order
+ * traversal instead. Synchronization rules are the same. @p_blkg is
+ * included in the iteration and the last node to be visited.
+ */
+#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \
+ css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
+ if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
+ (p_blkg)->q, false)))
+
+/**
+ * blk_get_rl - get request_list to use
+ * @q: request_queue of interest
+ * @bio: bio which will be attached to the allocated request (may be %NULL)
+ *
+ * The caller wants to allocate a request from @q to use for @bio. Find
+ * the request_list to use and obtain a reference on it. Should be called
+ * under queue_lock. This function is guaranteed to return non-%NULL
+ * request_list.
+ */
+static inline struct request_list *blk_get_rl(struct request_queue *q,
+ struct bio *bio)
+{
+ struct blkcg *blkcg;
+ struct blkcg_gq *blkg;
+
+ rcu_read_lock();
+
+ blkcg = bio_blkcg(bio);
+
+ /* bypass blkg lookup and use @q->root_rl directly for root */
+ if (blkcg == &blkcg_root)
+ goto root_rl;
+
/*
- * Total time when we have requests queued and do not contain the
- * current active queue.
+ * Try to use blkg->rl. blkg lookup may fail under memory pressure
+ * or if either the blkcg or queue is going away. Fall back to
+ * root_rl in such cases.
*/
- uint64_t empty_time;
- uint64_t start_empty_time;
- uint16_t flags;
-#endif
-};
+ blkg = blkg_lookup_create(blkcg, q);
+ if (unlikely(IS_ERR(blkg)))
+ goto root_rl;
+
+ blkg_get(blkg);
+ rcu_read_unlock();
+ return &blkg->rl;
+root_rl:
+ rcu_read_unlock();
+ return &q->root_rl;
+}
-/* Per cpu blkio group stats */
-struct blkio_group_stats_cpu {
- uint64_t sectors;
- uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL];
- struct u64_stats_sync syncp;
-};
+/**
+ * blk_put_rl - put request_list
+ * @rl: request_list to put
+ *
+ * Put the reference acquired by blk_get_rl(). Should be called under
+ * queue_lock.
+ */
+static inline void blk_put_rl(struct request_list *rl)
+{
+ /* root_rl may not have blkg set */
+ if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
+ blkg_put(rl->blkg);
+}
-struct blkio_group {
- /* An rcu protected unique identifier for the group */
- void *key;
- struct hlist_node blkcg_node;
- unsigned short blkcg_id;
- /* Store cgroup path */
- char path[128];
- /* The device MKDEV(major, minor), this group has been created for */
- dev_t dev;
- /* policy which owns this blk group */
- enum blkio_policy_id plid;
-
- /* Need to serialize the stats in the case of reset/update */
- spinlock_t stats_lock;
- struct blkio_group_stats stats;
- /* Per cpu stats pointer */
- struct blkio_group_stats_cpu __percpu *stats_cpu;
-};
+/**
+ * blk_rq_set_rl - associate a request with a request_list
+ * @rq: request of interest
+ * @rl: target request_list
+ *
+ * Associate @rq with @rl so that accounting and freeing can know the
+ * request_list @rq came from.
+ */
+static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
+{
+ rq->rl = rl;
+}
-struct blkio_policy_node {
- struct list_head node;
- dev_t dev;
- /* This node belongs to max bw policy or porportional weight policy */
- enum blkio_policy_id plid;
- /* cgroup file to which this rule belongs to */
- int fileid;
-
- union {
- unsigned int weight;
- /*
- * Rate read/write in terms of bytes per second
- * Whether this rate represents read or write is determined
- * by file type "fileid".
- */
- u64 bps;
- unsigned int iops;
- } val;
-};
+/**
+ * blk_rq_rl - return the request_list a request came from
+ * @rq: request of interest
+ *
+ * Return the request_list @rq is allocated from.
+ */
+static inline struct request_list *blk_rq_rl(struct request *rq)
+{
+ return rq->rl;
+}
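A sketch of how the request allocation path is expected to pair these helpers; the real callers live in blk-core.c, and __foo_get_request() stands in for the actual allocator.

static struct request *foo_alloc_request(struct request_queue *q,
					 struct bio *bio, int rw)
{
	struct request_list *rl;
	struct request *rq;

	/* caller is assumed to hold q->queue_lock */
	rl = blk_get_rl(q, bio);		/* never returns NULL */
	rq = __foo_get_request(rl, rw);		/* hypothetical allocator */
	if (rq)
		blk_rq_set_rl(rq, rl);		/* later released via blk_rq_rl(rq) */
	else
		blk_put_rl(rl);
	return rq;
}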
-extern unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
- dev_t dev);
-extern uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg,
- dev_t dev);
-extern uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg,
- dev_t dev);
-extern unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg,
- dev_t dev);
-extern unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg,
- dev_t dev);
-
-typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
-
-typedef void (blkio_update_group_weight_fn) (void *key,
- struct blkio_group *blkg, unsigned int weight);
-typedef void (blkio_update_group_read_bps_fn) (void * key,
- struct blkio_group *blkg, u64 read_bps);
-typedef void (blkio_update_group_write_bps_fn) (void *key,
- struct blkio_group *blkg, u64 write_bps);
-typedef void (blkio_update_group_read_iops_fn) (void *key,
- struct blkio_group *blkg, unsigned int read_iops);
-typedef void (blkio_update_group_write_iops_fn) (void *key,
- struct blkio_group *blkg, unsigned int write_iops);
-
-struct blkio_policy_ops {
- blkio_unlink_group_fn *blkio_unlink_group_fn;
- blkio_update_group_weight_fn *blkio_update_group_weight_fn;
- blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
- blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
- blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
- blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
-};
+struct request_list *__blk_queue_next_rl(struct request_list *rl,
+ struct request_queue *q);
+/**
+ * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
+ *
+ * Should be used under queue_lock.
+ */
+#define blk_queue_for_each_rl(rl, q) \
+ for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
-struct blkio_policy_type {
- struct list_head list;
- struct blkio_policy_ops ops;
- enum blkio_policy_id plid;
-};
+static inline void blkg_stat_init(struct blkg_stat *stat)
+{
+ u64_stats_init(&stat->syncp);
+}
-/* Blkio controller policy registration */
-extern void blkio_policy_register(struct blkio_policy_type *);
-extern void blkio_policy_unregister(struct blkio_policy_type *);
+/**
+ * blkg_stat_add - add a value to a blkg_stat
+ * @stat: target blkg_stat
+ * @val: value to add
+ *
+ * Add @val to @stat. The caller is responsible for synchronizing calls to
+ * this function.
+ */
+static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
+{
+ u64_stats_update_begin(&stat->syncp);
+ stat->cnt += val;
+ u64_stats_update_end(&stat->syncp);
+}
+
+/**
+ * blkg_stat_read - read the current value of a blkg_stat
+ * @stat: blkg_stat to read
+ *
+ * Read the current value of @stat. This function can be called without
+ * synchronization and takes care of u64 atomicity.
+ */
+static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
+{
+ unsigned int start;
+ uint64_t v;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&stat->syncp);
+ v = stat->cnt;
+ } while (u64_stats_fetch_retry_irq(&stat->syncp, start));
+
+ return v;
+}
+
+/**
+ * blkg_stat_reset - reset a blkg_stat
+ * @stat: blkg_stat to reset
+ */
+static inline void blkg_stat_reset(struct blkg_stat *stat)
+{
+ stat->cnt = 0;
+}
+
+/**
+ * blkg_stat_merge - merge a blkg_stat into another
+ * @to: the destination blkg_stat
+ * @from: the source
+ *
+ * Add @from's count to @to.
+ */
+static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
+{
+ blkg_stat_add(to, blkg_stat_read(from));
+}
-static inline char *blkg_path(struct blkio_group *blkg)
+static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
+{
+ u64_stats_init(&rwstat->syncp);
+}
+
+/**
+ * blkg_rwstat_add - add a value to a blkg_rwstat
+ * @rwstat: target blkg_rwstat
+ * @rw: mask of REQ_{WRITE|SYNC}
+ * @val: value to add
+ *
+ * Add @val to @rwstat. The counters are chosen according to @rw. The
+ * caller is responsible for synchronizing calls to this function.
+ */
+static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
+ int rw, uint64_t val)
{
- return blkg->path;
+ u64_stats_update_begin(&rwstat->syncp);
+
+ if (rw & REQ_WRITE)
+ rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
+ else
+ rwstat->cnt[BLKG_RWSTAT_READ] += val;
+ if (rw & REQ_SYNC)
+ rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
+ else
+ rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;
+
+ u64_stats_update_end(&rwstat->syncp);
}
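A sketch of per-blkg accounting in a policy's completion path, reusing the hypothetical struct foo_group from the blk-cgroup.c sketch; the caller is assumed to serialize updates as required above (e.g. under the queue lock).

static void foo_account_completion(struct foo_group *fg, struct request *rq)
{
	/* counters are split by direction and sync-ness via rq->cmd_flags */
	blkg_rwstat_add(&fg->served_bytes, rq->cmd_flags, blk_rq_bytes(rq));
	blkg_rwstat_add(&fg->serviced, rq->cmd_flags, 1);
}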
-#else
+/**
+ * blkg_rwstat_read - read the current values of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Read the current snapshot of @rwstat and return it as the return value.
+ * This function can be called without synchronization and takes care of
+ * u64 atomicity.
+ */
+static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
+{
+ unsigned int start;
+ struct blkg_rwstat tmp;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&rwstat->syncp);
+ tmp = *rwstat;
+ } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));
-struct blkio_group {
+ return tmp;
+}
+
+/**
+ * blkg_rwstat_total - read the total count of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Return the total count of @rwstat regardless of the IO direction. This
+ * function can be called without synchronization and takes care of u64
+ * atomicity.
+ */
+static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
+{
+ struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
+
+ return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
+}
+
+/**
+ * blkg_rwstat_reset - reset a blkg_rwstat
+ * @rwstat: blkg_rwstat to reset
+ */
+static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
+{
+ memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
+}
+
+/**
+ * blkg_rwstat_merge - merge a blkg_rwstat into another
+ * @to: the destination blkg_rwstat
+ * @from: the source
+ *
+ * Add @from's counts to @to.
+ */
+static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
+ struct blkg_rwstat *from)
+{
+ struct blkg_rwstat v = blkg_rwstat_read(from);
+ int i;
+
+ u64_stats_update_begin(&to->syncp);
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ to->cnt[i] += v.cnt[i];
+ u64_stats_update_end(&to->syncp);
+}
+
+#else /* CONFIG_BLK_CGROUP */
+
+struct cgroup;
+struct blkcg;
+
+struct blkg_policy_data {
};
-struct blkio_policy_type {
+struct blkcg_gq {
};
-static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
-static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
-
-static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
-
-#endif
-
-#define BLKIO_WEIGHT_MIN 10
-#define BLKIO_WEIGHT_MAX 1000
-#define BLKIO_WEIGHT_DEFAULT 500
-
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg);
-void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue);
-void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg);
-void blkiocg_update_idle_time_stats(struct blkio_group *blkg);
-void blkiocg_set_start_empty_time(struct blkio_group *blkg);
-
-#define BLKG_FLAG_FNS(name) \
-static inline void blkio_mark_blkg_##name( \
- struct blkio_group_stats *stats) \
-{ \
- stats->flags |= (1 << BLKG_##name); \
-} \
-static inline void blkio_clear_blkg_##name( \
- struct blkio_group_stats *stats) \
-{ \
- stats->flags &= ~(1 << BLKG_##name); \
-} \
-static inline int blkio_blkg_##name(struct blkio_group_stats *stats) \
-{ \
- return (stats->flags & (1 << BLKG_##name)) != 0; \
-} \
-
-BLKG_FLAG_FNS(waiting)
-BLKG_FLAG_FNS(idling)
-BLKG_FLAG_FNS(empty)
-#undef BLKG_FLAG_FNS
-#else
-static inline void blkiocg_update_avg_queue_size_stats(
- struct blkio_group *blkg) {}
-static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue) {}
-static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
-{}
-static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
-static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
-#endif
-
-#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
-extern struct blkio_cgroup blkio_root_cgroup;
-extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
-extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
-extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev,
- enum blkio_policy_id plid);
-extern int blkio_alloc_blkg_stats(struct blkio_group *blkg);
-extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
-extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
- void *key);
-void blkiocg_update_timeslice_used(struct blkio_group *blkg,
- unsigned long time,
- unsigned long unaccounted_time);
-void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
- bool direction, bool sync);
-void blkiocg_update_completion_stats(struct blkio_group *blkg,
- uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
-void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
- bool sync);
-void blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_group *curr_blkg, bool direction, bool sync);
-void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- bool direction, bool sync);
-#else
-struct cgroup;
-static inline struct blkio_cgroup *
-cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
-static inline struct blkio_cgroup *
-task_blkio_cgroup(struct task_struct *tsk) { return NULL; }
-
-static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev,
- enum blkio_policy_id plid) {}
-
-static inline int blkio_alloc_blkg_stats(struct blkio_group *blkg) { return 0; }
-
-static inline int
-blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
-
-static inline struct blkio_group *
-blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
-static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
- unsigned long time,
- unsigned long unaccounted_time)
-{}
-static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
- uint64_t bytes, bool direction, bool sync) {}
-static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
- uint64_t start_time, uint64_t io_start_time, bool direction,
- bool sync) {}
-static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
- bool direction, bool sync) {}
-static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_group *curr_blkg, bool direction, bool sync) {}
-static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- bool direction, bool sync) {}
-#endif
-#endif /* _BLK_CGROUP_H */
+struct blkcg_policy {
+};
+
+static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
+static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
+static inline void blkcg_drain_queue(struct request_queue *q) { }
+static inline void blkcg_exit_queue(struct request_queue *q) { }
+static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
+static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
+static inline int blkcg_activate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol) { return 0; }
+static inline void blkcg_deactivate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol) { }
+
+static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
+
+static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol) { return NULL; }
+static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
+static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
+static inline void blkg_get(struct blkcg_gq *blkg) { }
+static inline void blkg_put(struct blkcg_gq *blkg) { }
+
+static inline struct request_list *blk_get_rl(struct request_queue *q,
+ struct bio *bio) { return &q->root_rl; }
+static inline void blk_put_rl(struct request_list *rl) { }
+static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
+static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
+
+#define blk_queue_for_each_rl(rl, q) \
+ for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
+
+#endif /* CONFIG_BLK_CGROUP */
+#endif /* _BLK_CGROUP_H */
diff --git a/block/blk-core.c b/block/blk-core.c
index 3a78b00edd7..6f8dba161bf 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -16,6 +16,7 @@
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
@@ -29,22 +30,28 @@
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
+#include <linux/ratelimit.h>
+#include <linux/pm_runtime.h>
#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
#include "blk.h"
+#include "blk-cgroup.h"
+#include "blk-mq.h"
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
DEFINE_IDA(blk_queue_ida);
/*
* For the allocated request tables
*/
-static struct kmem_cache *request_cachep;
+struct kmem_cache *request_cachep = NULL;
/*
* For queue allocation
@@ -56,42 +63,6 @@ struct kmem_cache *blk_requestq_cachep;
*/
static struct workqueue_struct *kblockd_workqueue;
-static void drive_stat_acct(struct request *rq, int new_io)
-{
- struct hd_struct *part;
- int rw = rq_data_dir(rq);
- int cpu;
-
- if (!blk_do_io_stat(rq))
- return;
-
- cpu = part_stat_lock();
-
- if (!new_io) {
- part = rq->part;
- part_stat_inc(cpu, part, merges[rw]);
- } else {
- part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
- if (!hd_struct_try_get(part)) {
- /*
- * The partition is already being removed,
- * the request will be accounted on the disk only
- *
- * We take a reference on disk->part0 although that
- * partition will never be deleted, so we can treat
- * it as any other partition.
- */
- part = &rq->rq_disk->part0;
- hd_struct_get(part);
- }
- part_round_stats(cpu, part);
- part_inc_in_flight(part, rw);
- rq->part = part;
- }
-
- part_stat_unlock();
-}
-
void blk_queue_congestion_threshold(struct request_queue *q)
{
int nr;
@@ -141,7 +112,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
rq->cmd = rq->__cmd;
rq->cmd_len = BLK_MAX_CDB;
rq->tag = -1;
- rq->ref_count = 1;
rq->start_time = jiffies;
set_start_time_ns(rq);
rq->part = NULL;
@@ -156,23 +126,13 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
error = -EIO;
- if (unlikely(nbytes > bio->bi_size)) {
- printk(KERN_ERR "%s: want %u bytes done, %u left\n",
- __func__, nbytes, bio->bi_size);
- nbytes = bio->bi_size;
- }
-
if (unlikely(rq->cmd_flags & REQ_QUIET))
set_bit(BIO_QUIET, &bio->bi_flags);
- bio->bi_size -= nbytes;
- bio->bi_sector += (nbytes >> 9);
-
- if (bio_integrity(bio))
- bio_integrity_advance(bio, nbytes);
+ bio_advance(bio, nbytes);
/* don't actually finish bio if it's part of flush sequence */
- if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+ if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
bio_endio(bio, error);
}
@@ -180,15 +140,15 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
{
int bit;
- printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
+ printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg,
rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
- rq->cmd_flags);
+ (unsigned long long) rq->cmd_flags);
printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
(unsigned long long)blk_rq_pos(rq),
blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
- printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
- rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
+ printk(KERN_INFO " bio %p, biotail %p, len %u\n",
+ rq->bio, rq->biotail, blk_rq_bytes(rq));
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
printk(KERN_INFO " cdb: ");
@@ -217,12 +177,13 @@ static void blk_delay_work(struct work_struct *work)
* Description:
* Sometimes queueing needs to be postponed for a little while, to allow
* resources to come back. This function will make sure that queueing is
- * restarted around the specified time.
+ * restarted around the specified time. Queue lock must be held.
*/
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
- queue_delayed_work(kblockd_workqueue, &q->delay_work,
- msecs_to_jiffies(msecs));
+ if (likely(!blk_queue_dead(q)))
+ queue_delayed_work(kblockd_workqueue, &q->delay_work,
+ msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
@@ -260,7 +221,7 @@ EXPORT_SYMBOL(blk_start_queue);
**/
void blk_stop_queue(struct request_queue *q)
{
- __cancel_delayed_work(&q->delay_work);
+ cancel_delayed_work(&q->delay_work);
queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
@@ -280,17 +241,56 @@ EXPORT_SYMBOL(blk_stop_queue);
*
* This function does not cancel any asynchronous activity arising
* out of elevator or throttling code. That would require elevator_exit()
- * and blk_throtl_exit() to be called with queue lock initialized.
+ * and blkcg_exit_queue() to be called with queue lock initialized.
*
*/
void blk_sync_queue(struct request_queue *q)
{
del_timer_sync(&q->timeout);
- cancel_delayed_work_sync(&q->delay_work);
+
+ if (q->mq_ops) {
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ cancel_delayed_work_sync(&hctx->run_work);
+ cancel_delayed_work_sync(&hctx->delay_work);
+ }
+ } else {
+ cancel_delayed_work_sync(&q->delay_work);
+ }
}
EXPORT_SYMBOL(blk_sync_queue);
/**
+ * __blk_run_queue_uncond - run a queue whether or not it has been stopped
+ * @q: The queue to run
+ *
+ * Description:
+ * Invoke request handling on a queue if there are any pending requests.
+ * May be used to restart request handling after a request has completed.
+ * This variant runs the queue whether or not the queue has been
+ * stopped. Must be called with the queue lock held and interrupts
+ * disabled. See also @blk_run_queue.
+ */
+inline void __blk_run_queue_uncond(struct request_queue *q)
+{
+ if (unlikely(blk_queue_dead(q)))
+ return;
+
+ /*
+ * Some request_fn implementations, e.g. scsi_request_fn(), unlock
+ * the queue lock internally. As a result multiple threads may be
+ * running such a request function concurrently. Keep track of the
+ * number of active request_fn invocations such that blk_drain_queue()
+ * can wait until all these request_fn calls have finished.
+ */
+ q->request_fn_active++;
+ q->request_fn(q);
+ q->request_fn_active--;
+}
+
+/**
* __blk_run_queue - run a single device queue
* @q: The queue to run
*
@@ -303,7 +303,7 @@ void __blk_run_queue(struct request_queue *q)
if (unlikely(blk_queue_stopped(q)))
return;
- q->request_fn(q);
+ __blk_run_queue_uncond(q);
}
EXPORT_SYMBOL(__blk_run_queue);
@@ -313,14 +313,12 @@ EXPORT_SYMBOL(__blk_run_queue);
*
* Description:
* Tells kblockd to perform the equivalent of @blk_run_queue on behalf
- * of us.
+ * of us. The caller must hold the queue lock.
*/
void blk_run_queue_async(struct request_queue *q)
{
- if (likely(!blk_queue_stopped(q))) {
- __cancel_delayed_work(&q->delay_work);
- queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
- }
+ if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
+ mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);
@@ -349,7 +347,7 @@ void blk_put_queue(struct request_queue *q)
EXPORT_SYMBOL(blk_put_queue);
/**
- * blk_drain_queue - drain requests from request_queue
+ * __blk_drain_queue - drain requests from request_queue
* @q: queue to drain
* @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
*
@@ -357,28 +355,38 @@ EXPORT_SYMBOL(blk_put_queue);
* If not, only ELVPRIV requests are drained. The caller is responsible
* for ensuring that no new requests which need to be drained are queued.
*/
-void blk_drain_queue(struct request_queue *q, bool drain_all)
+static void __blk_drain_queue(struct request_queue *q, bool drain_all)
+ __releases(q->queue_lock)
+ __acquires(q->queue_lock)
{
+ int i;
+
+ lockdep_assert_held(q->queue_lock);
+
while (true) {
bool drain = false;
- int i;
- spin_lock_irq(q->queue_lock);
+ /*
+ * The caller might be trying to drain @q before its
+ * elevator is initialized.
+ */
+ if (q->elevator)
+ elv_drain_elevator(q);
- elv_drain_elevator(q);
- if (drain_all)
- blk_throtl_drain(q);
+ blkcg_drain_queue(q);
/*
* This function might be called on a queue which failed
- * driver init after queue creation. Some drivers
- * (e.g. fd) get unhappy in such cases. Kick queue iff
- * dispatch queue has something on it.
+ * driver init after queue creation or is not yet fully
+ * active. Some drivers (e.g. fd and loop) get unhappy
+ * in such cases. Kick queue iff dispatch queue has
+ * something on it and @q has request_fn set.
*/
- if (!list_empty(&q->queue_head))
+ if (!list_empty(&q->queue_head) && q->request_fn)
__blk_run_queue(q);
- drain |= q->rq.elvpriv;
+ drain |= q->nr_rqs_elvpriv;
+ drain |= q->request_fn_active;
/*
* Unfortunately, requests are queued at and tracked from
@@ -388,88 +396,174 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
if (drain_all) {
drain |= !list_empty(&q->queue_head);
for (i = 0; i < 2; i++) {
- drain |= q->rq.count[i];
+ drain |= q->nr_rqs[i];
drain |= q->in_flight[i];
drain |= !list_empty(&q->flush_queue[i]);
}
}
- spin_unlock_irq(q->queue_lock);
-
if (!drain)
break;
+
+ spin_unlock_irq(q->queue_lock);
+
msleep(10);
+
+ spin_lock_irq(q->queue_lock);
+ }
+
+ /*
+ * With queue marked dead, any woken up waiter will fail the
+ * allocation path, so the wakeup chaining is lost and we're
+ * left with hung waiters. We need to wake up those waiters.
+ */
+ if (q->request_fn) {
+ struct request_list *rl;
+
+ blk_queue_for_each_rl(rl, q)
+ for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
+ wake_up_all(&rl->wait[i]);
+ }
+}
+
+/**
+ * blk_queue_bypass_start - enter queue bypass mode
+ * @q: queue of interest
+ *
+ * In bypass mode, only the dispatch FIFO queue of @q is used. This
+ * function makes @q enter bypass mode and drains all requests which were
+ * throttled or issued before. On return, it's guaranteed that no request
+ * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
+ * inside queue or RCU read lock.
+ */
+void blk_queue_bypass_start(struct request_queue *q)
+{
+ bool drain;
+
+ spin_lock_irq(q->queue_lock);
+ drain = !q->bypass_depth++;
+ queue_flag_set(QUEUE_FLAG_BYPASS, q);
+ spin_unlock_irq(q->queue_lock);
+
+ if (drain) {
+ spin_lock_irq(q->queue_lock);
+ __blk_drain_queue(q, false);
+ spin_unlock_irq(q->queue_lock);
+
+ /* ensure blk_queue_bypass() is %true inside RCU read lock */
+ synchronize_rcu();
}
}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
+
+/**
+ * blk_queue_bypass_end - leave queue bypass mode
+ * @q: queue of interest
+ *
+ * Leave bypass mode and restore the normal queueing behavior.
+ */
+void blk_queue_bypass_end(struct request_queue *q)
+{
+ spin_lock_irq(q->queue_lock);
+ if (!--q->bypass_depth)
+ queue_flag_clear(QUEUE_FLAG_BYPASS, q);
+ WARN_ON_ONCE(q->bypass_depth < 0);
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
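The expected calling pattern for the bypass pair is to bracket a queue reconfiguration such as an elevator switch or blkcg policy activation. A sketch with a hypothetical caller, not part of this patch:

static void example_reconfigure(struct request_queue *q)
{
	blk_queue_bypass_start(q);	/* drains throttled/ELVPRIV requests */
	/* ... swap the elevator or activate a blkcg policy here ... */
	blk_queue_bypass_end(q);	/* restore normal queueing */
}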
/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
*
- * Mark @q DEAD, drain all pending requests, destroy and put it. All
- * future requests will be failed immediately with -ENODEV.
+ * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
+ * put it. All future requests will be failed immediately with -ENODEV.
*/
void blk_cleanup_queue(struct request_queue *q)
{
spinlock_t *lock = q->queue_lock;
- /* mark @q DEAD, no new request or merges will be allowed afterwards */
+ /* mark @q DYING, no new request or merges will be allowed afterwards */
mutex_lock(&q->sysfs_lock);
- queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
-
+ queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
spin_lock_irq(lock);
- queue_flag_set(QUEUE_FLAG_NOMERGES, q);
- queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
- queue_flag_set(QUEUE_FLAG_DEAD, q);
- if (q->queue_lock != &q->__queue_lock)
- q->queue_lock = &q->__queue_lock;
+ /*
+ * A dying queue is permanently in bypass mode till released. Note
+ * that, unlike blk_queue_bypass_start(), we aren't performing
+ * synchronize_rcu() after entering bypass mode to avoid the delay
+ * as some drivers create and destroy a lot of queues while
+ * probing. This is still safe because blk_release_queue() will be
+ * called only after the queue refcnt drops to zero and nothing,
+ * RCU or not, would be traversing the queue by then.
+ */
+ q->bypass_depth++;
+ queue_flag_set(QUEUE_FLAG_BYPASS, q);
+ queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+ queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+ queue_flag_set(QUEUE_FLAG_DYING, q);
spin_unlock_irq(lock);
mutex_unlock(&q->sysfs_lock);
/*
- * Drain all requests queued before DEAD marking. The caller might
- * be trying to tear down @q before its elevator is initialized, in
- * which case we don't want to call into draining.
+ * Drain all requests queued before DYING marking. Set DEAD flag to
+ * prevent that q->request_fn() gets invoked after draining finished.
*/
- if (q->elevator)
- blk_drain_queue(q, true);
+ if (q->mq_ops) {
+ blk_mq_drain_queue(q);
+ spin_lock_irq(lock);
+ } else {
+ spin_lock_irq(lock);
+ __blk_drain_queue(q, true);
+ }
+ queue_flag_set(QUEUE_FLAG_DEAD, q);
+ spin_unlock_irq(lock);
/* @q won't process any more request, flush async actions */
del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
blk_sync_queue(q);
+ spin_lock_irq(lock);
+ if (q->queue_lock != &q->__queue_lock)
+ q->queue_lock = &q->__queue_lock;
+ spin_unlock_irq(lock);
+
/* @q is and will stay empty, shutdown and put */
blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);
-static int blk_init_free_list(struct request_queue *q)
+int blk_init_rl(struct request_list *rl, struct request_queue *q,
+ gfp_t gfp_mask)
{
- struct request_list *rl = &q->rq;
-
if (unlikely(rl->rq_pool))
return 0;
+ rl->q = q;
rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
- rl->elvpriv = 0;
init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
- mempool_free_slab, request_cachep, q->node);
-
+ mempool_free_slab, request_cachep,
+ gfp_mask, q->node);
if (!rl->rq_pool)
return -ENOMEM;
return 0;
}
+void blk_exit_rl(struct request_list *rl)
+{
+ if (rl->rq_pool)
+ mempool_destroy(rl->rq_pool);
+}
+
struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
- return blk_alloc_queue_node(gfp_mask, -1);
+ return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);
@@ -483,7 +577,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (!q)
return NULL;
- q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
+ q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
if (q->id < 0)
goto fail_q;
@@ -498,14 +592,15 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (err)
goto fail_id;
- if (blk_throtl_init(q))
- goto fail_id;
-
setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
laptop_mode_timer_fn, (unsigned long) q);
setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+ INIT_LIST_HEAD(&q->queue_head);
INIT_LIST_HEAD(&q->timeout_list);
INIT_LIST_HEAD(&q->icq_list);
+#ifdef CONFIG_BLK_CGROUP
+ INIT_LIST_HEAD(&q->blkg_list);
+#endif
INIT_LIST_HEAD(&q->flush_queue[0]);
INIT_LIST_HEAD(&q->flush_queue[1]);
INIT_LIST_HEAD(&q->flush_data_in_flight);
@@ -522,8 +617,24 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
*/
q->queue_lock = &q->__queue_lock;
+ /*
+ * A queue starts its life with bypass turned on to avoid
+ * unnecessary bypass on/off overhead and nasty surprises during
+ * init. The initial bypass will be finished when the queue is
+ * registered by blk_register_queue().
+ */
+ q->bypass_depth = 1;
+ __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
+
+ init_waitqueue_head(&q->mq_freeze_wq);
+
+ if (blkcg_init_queue(q))
+ goto fail_bdi;
+
return q;
+fail_bdi:
+ bdi_destroy(&q->backing_dev_info);
fail_id:
ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
@@ -567,7 +678,7 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
- return blk_init_queue_node(rfn, lock, -1);
+ return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_init_queue);
@@ -595,13 +706,17 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
if (!q)
return NULL;
- if (blk_init_free_list(q))
+ q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+ if (!q->flush_rq)
return NULL;
+ if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
+ goto fail;
+
q->request_fn = rfn;
q->prep_rq_fn = NULL;
q->unprep_rq_fn = NULL;
- q->queue_flags = QUEUE_FLAG_DEFAULT;
+ q->queue_flags |= QUEUE_FLAG_DEFAULT;
/* Override internal queue lock with supplied lock pointer */
if (lock)
@@ -614,21 +729,28 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
q->sg_reserved_size = INT_MAX;
- /*
- * all done
- */
- if (!elevator_init(q, NULL)) {
- blk_queue_congestion_threshold(q);
- return q;
+ /* Protect q->elevator from elevator_change */
+ mutex_lock(&q->sysfs_lock);
+
+ /* init elevator */
+ if (elevator_init(q, NULL)) {
+ mutex_unlock(&q->sysfs_lock);
+ goto fail;
}
+ mutex_unlock(&q->sysfs_lock);
+
+ return q;
+
+fail:
+ kfree(q->flush_rq);
return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
bool blk_get_queue(struct request_queue *q)
{
- if (likely(!blk_queue_dead(q))) {
+ if (likely(!blk_queue_dying(q))) {
__blk_get_queue(q);
return true;
}
@@ -637,42 +759,15 @@ bool blk_get_queue(struct request_queue *q)
}
EXPORT_SYMBOL(blk_get_queue);
-static inline void blk_free_request(struct request_queue *q, struct request *rq)
+static inline void blk_free_request(struct request_list *rl, struct request *rq)
{
if (rq->cmd_flags & REQ_ELVPRIV) {
- elv_put_request(q, rq);
+ elv_put_request(rl->q, rq);
if (rq->elv.icq)
put_io_context(rq->elv.icq->ioc);
}
- mempool_free(rq, q->rq.rq_pool);
-}
-
-static struct request *
-blk_alloc_request(struct request_queue *q, struct io_cq *icq,
- unsigned int flags, gfp_t gfp_mask)
-{
- struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
-
- if (!rq)
- return NULL;
-
- blk_rq_init(q, rq);
-
- rq->cmd_flags = flags | REQ_ALLOCED;
-
- if (flags & REQ_ELVPRIV) {
- rq->elv.icq = icq;
- if (unlikely(elv_set_request(q, rq, gfp_mask))) {
- mempool_free(rq, q->rq.rq_pool);
- return NULL;
- }
- /* @rq->elv.icq holds on to io_context until @rq is freed */
- if (icq)
- get_io_context(icq->ioc);
- }
-
- return rq;
+ mempool_free(rq, rl->rq_pool);
}
/*
@@ -709,18 +804,23 @@ static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
ioc->last_waited = jiffies;
}
-static void __freed_request(struct request_queue *q, int sync)
+static void __freed_request(struct request_list *rl, int sync)
{
- struct request_list *rl = &q->rq;
+ struct request_queue *q = rl->q;
- if (rl->count[sync] < queue_congestion_off_threshold(q))
+ /*
+ * bdi isn't aware of blkcg yet. As all async IOs end up root
+ * blkcg anyway, just use root blkcg state.
+ */
+ if (rl == &q->root_rl &&
+ rl->count[sync] < queue_congestion_off_threshold(q))
blk_clear_queue_congested(q, sync);
if (rl->count[sync] + 1 <= q->nr_requests) {
if (waitqueue_active(&rl->wait[sync]))
wake_up(&rl->wait[sync]);
- blk_clear_queue_full(q, sync);
+ blk_clear_rl_full(rl, sync);
}
}
@@ -728,19 +828,61 @@ static void __freed_request(struct request_queue *q, int sync)
* A request has just been released. Account for it, update the full and
* congestion status, wake up any waiters. Called under q->queue_lock.
*/
-static void freed_request(struct request_queue *q, unsigned int flags)
+static void freed_request(struct request_list *rl, unsigned int flags)
{
- struct request_list *rl = &q->rq;
+ struct request_queue *q = rl->q;
int sync = rw_is_sync(flags);
+ q->nr_rqs[sync]--;
rl->count[sync]--;
if (flags & REQ_ELVPRIV)
- rl->elvpriv--;
+ q->nr_rqs_elvpriv--;
- __freed_request(q, sync);
+ __freed_request(rl, sync);
if (unlikely(rl->starved[sync ^ 1]))
- __freed_request(q, sync ^ 1);
+ __freed_request(rl, sync ^ 1);
+}
+
+int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
+{
+ struct request_list *rl;
+
+ spin_lock_irq(q->queue_lock);
+ q->nr_requests = nr;
+ blk_queue_congestion_threshold(q);
+
+ /* congestion isn't cgroup aware and follows root blkcg for now */
+ rl = &q->root_rl;
+
+ if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
+ blk_set_queue_congested(q, BLK_RW_SYNC);
+ else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
+ blk_clear_queue_congested(q, BLK_RW_SYNC);
+
+ if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
+ blk_set_queue_congested(q, BLK_RW_ASYNC);
+ else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
+ blk_clear_queue_congested(q, BLK_RW_ASYNC);
+
+ blk_queue_for_each_rl(rl, q) {
+ if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
+ blk_set_rl_full(rl, BLK_RW_SYNC);
+ } else {
+ blk_clear_rl_full(rl, BLK_RW_SYNC);
+ wake_up(&rl->wait[BLK_RW_SYNC]);
+ }
+
+ if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
+ blk_set_rl_full(rl, BLK_RW_ASYNC);
+ } else {
+ blk_clear_rl_full(rl, BLK_RW_ASYNC);
+ wake_up(&rl->wait[BLK_RW_ASYNC]);
+ }
+ }
+
+ spin_unlock_irq(q->queue_lock);
+ return 0;
}
/*
@@ -763,8 +905,24 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
}
/**
- * get_request - get a free request
- * @q: request_queue to allocate request from
+ * rq_ioc - determine io_context for request allocation
+ * @bio: request being allocated is for this bio (can be %NULL)
+ *
+ * Determine io_context to use for request allocation for @bio. May return
+ * %NULL if %current->io_context doesn't exist.
+ */
+static struct io_context *rq_ioc(struct bio *bio)
+{
+#ifdef CONFIG_BLK_CGROUP
+ if (bio && bio->bi_ioc)
+ return bio->bi_ioc;
+#endif
+ return current->io_context;
+}
+
+/**
+ * __get_request - get a free request
+ * @rl: request list to allocate from
* @rw_flags: RW and SYNC flags
* @bio: bio to allocate request for (can be %NULL)
* @gfp_mask: allocation mask
@@ -776,22 +934,18 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
* Returns %NULL on failure, with @q->queue_lock held.
* Returns !%NULL on success, with @q->queue_lock *not held*.
*/
-static struct request *get_request(struct request_queue *q, int rw_flags,
- struct bio *bio, gfp_t gfp_mask)
+static struct request *__get_request(struct request_list *rl, int rw_flags,
+ struct bio *bio, gfp_t gfp_mask)
{
- struct request *rq = NULL;
- struct request_list *rl = &q->rq;
- struct elevator_type *et;
- struct io_context *ioc;
+ struct request_queue *q = rl->q;
+ struct request *rq;
+ struct elevator_type *et = q->elevator->type;
+ struct io_context *ioc = rq_ioc(bio);
struct io_cq *icq = NULL;
const bool is_sync = rw_is_sync(rw_flags) != 0;
- bool retried = false;
int may_queue;
-retry:
- et = q->elevator->type;
- ioc = current->io_context;
- if (unlikely(blk_queue_dead(q)))
+ if (unlikely(blk_queue_dying(q)))
return NULL;
may_queue = elv_may_queue(q, rw_flags);
@@ -801,28 +955,14 @@ retry:
if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
if (rl->count[is_sync]+1 >= q->nr_requests) {
/*
- * We want ioc to record batching state. If it's
- * not already there, creating a new one requires
- * dropping queue_lock, which in turn requires
- * retesting conditions to avoid queue hang.
- */
- if (!ioc && !retried) {
- spin_unlock_irq(q->queue_lock);
- create_io_context(current, gfp_mask, q->node);
- spin_lock_irq(q->queue_lock);
- retried = true;
- goto retry;
- }
-
- /*
* The queue will fill after this allocation, so set
* it as full, and mark this process as "batching".
* This process will be allowed to complete a batch of
* requests, others will be blocked.
*/
- if (!blk_queue_full(q, is_sync)) {
+ if (!blk_rl_full(rl, is_sync)) {
ioc_set_batching(q, ioc);
- blk_set_queue_full(q, is_sync);
+ blk_set_rl_full(rl, is_sync);
} else {
if (may_queue != ELV_MQUEUE_MUST
&& !ioc_batching(q, ioc)) {
@@ -831,11 +971,16 @@ retry:
* process is not a "batcher", and not
* exempted by the IO scheduler
*/
- goto out;
+ return NULL;
}
}
}
- blk_set_queue_congested(q, is_sync);
+ /*
+ * bdi isn't aware of blkcg yet. As all async IOs end up
+ * root blkcg anyway, just use root blkcg state.
+ */
+ if (rl == &q->root_rl)
+ blk_set_queue_congested(q, is_sync);
}
/*
@@ -844,8 +989,9 @@ retry:
* allocated with any setting of ->nr_requests
*/
if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
- goto out;
+ return NULL;
+ q->nr_rqs[is_sync]++;
rl->count[is_sync]++;
rl->starved[is_sync] = 0;
@@ -859,10 +1005,9 @@ retry:
* Also, lookup icq while holding queue_lock. If it doesn't exist,
* it will be created after releasing queue_lock.
*/
- if (blk_rq_should_init_elevator(bio) &&
- !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
+ if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
rw_flags |= REQ_ELVPRIV;
- rl->elvpriv++;
+ q->nr_rqs_elvpriv++;
if (et->icq_cache && ioc)
icq = ioc_lookup_icq(ioc, q);
}
@@ -871,41 +1016,33 @@ retry:
rw_flags |= REQ_IO_STAT;
spin_unlock_irq(q->queue_lock);
- /* create icq if missing */
- if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
- icq = ioc_create_icq(q, gfp_mask);
- if (!icq)
- goto fail_icq;
- }
-
- rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
+ /* allocate and init request */
+ rq = mempool_alloc(rl->rq_pool, gfp_mask);
+ if (!rq)
+ goto fail_alloc;
-fail_icq:
- if (unlikely(!rq)) {
- /*
- * Allocation failed presumably due to memory. Undo anything
- * we might have messed up.
- *
- * Allocating task should really be put onto the front of the
- * wait queue, but this is pretty rare.
- */
- spin_lock_irq(q->queue_lock);
- freed_request(q, rw_flags);
+ blk_rq_init(q, rq);
+ blk_rq_set_rl(rq, rl);
+ rq->cmd_flags = rw_flags | REQ_ALLOCED;
+
+ /* init elvpriv */
+ if (rw_flags & REQ_ELVPRIV) {
+ if (unlikely(et->icq_cache && !icq)) {
+ if (ioc)
+ icq = ioc_create_icq(ioc, q, gfp_mask);
+ if (!icq)
+ goto fail_elvpriv;
+ }
- /*
- * in the very unlikely event that allocation failed and no
- * requests for this direction was pending, mark us starved
- * so that freeing of a request in the other direction will
- * notice us. another possible fix would be to split the
- * rq mempool into READ and WRITE
- */
-rq_starved:
- if (unlikely(rl->count[is_sync] == 0))
- rl->starved[is_sync] = 1;
+ rq->elv.icq = icq;
+ if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
+ goto fail_elvpriv;
- goto out;
+ /* @rq->elv.icq holds io_context until @rq is freed */
+ if (icq)
+ get_io_context(icq->ioc);
}
-
+out:
/*
* ioc may be NULL here, and ioc_batching will be false. That's
* OK, if the queue is under the request limit then requests need
@@ -916,80 +1053,131 @@ rq_starved:
ioc->nr_batch_requests--;
trace_block_getrq(q, bio, rw_flags & 1);
-out:
return rq;
+
+fail_elvpriv:
+ /*
+ * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
+ * and may fail indefinitely under memory pressure and thus
+ * shouldn't stall IO. Treat this request as !elvpriv. This will
+ * disturb iosched and blkcg but weird is better than dead.
+ */
+ printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n",
+ dev_name(q->backing_dev_info.dev));
+
+ rq->cmd_flags &= ~REQ_ELVPRIV;
+ rq->elv.icq = NULL;
+
+ spin_lock_irq(q->queue_lock);
+ q->nr_rqs_elvpriv--;
+ spin_unlock_irq(q->queue_lock);
+ goto out;
+
+fail_alloc:
+ /*
+ * Allocation failed presumably due to memory. Undo anything we
+ * might have messed up.
+ *
+ * Allocating task should really be put onto the front of the wait
+ * queue, but this is pretty rare.
+ */
+ spin_lock_irq(q->queue_lock);
+ freed_request(rl, rw_flags);
+
+ /*
+ * in the very unlikely event that allocation failed and no
+ * requests for this direction was pending, mark us starved so that
+ * freeing of a request in the other direction will notice
+ * us. another possible fix would be to split the rq mempool into
+ * READ and WRITE
+ */
+rq_starved:
+ if (unlikely(rl->count[is_sync] == 0))
+ rl->starved[is_sync] = 1;
+ return NULL;
}
/**
- * get_request_wait - get a free request with retry
+ * get_request - get a free request
* @q: request_queue to allocate request from
* @rw_flags: RW and SYNC flags
* @bio: bio to allocate request for (can be %NULL)
+ * @gfp_mask: allocation mask
*
- * Get a free request from @q. This function keeps retrying under memory
- * pressure and fails iff @q is dead.
+ * Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this
+ * function keeps retrying under memory pressure and fails iff @q is dead.
*
* Must be called with @q->queue_lock held and,
* Returns %NULL on failure, with @q->queue_lock held.
* Returns !%NULL on success, with @q->queue_lock *not held*.
*/
-static struct request *get_request_wait(struct request_queue *q, int rw_flags,
- struct bio *bio)
+static struct request *get_request(struct request_queue *q, int rw_flags,
+ struct bio *bio, gfp_t gfp_mask)
{
const bool is_sync = rw_is_sync(rw_flags) != 0;
+ DEFINE_WAIT(wait);
+ struct request_list *rl;
struct request *rq;
- rq = get_request(q, rw_flags, bio, GFP_NOIO);
- while (!rq) {
- DEFINE_WAIT(wait);
- struct request_list *rl = &q->rq;
-
- if (unlikely(blk_queue_dead(q)))
- return NULL;
+ rl = blk_get_rl(q, bio); /* transferred to @rq on success */
+retry:
+ rq = __get_request(rl, rw_flags, bio, gfp_mask);
+ if (rq)
+ return rq;
- prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
- TASK_UNINTERRUPTIBLE);
+ if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
+ blk_put_rl(rl);
+ return NULL;
+ }
- trace_block_sleeprq(q, bio, rw_flags & 1);
+ /* wait on @rl and retry */
+ prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
+ TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(q->queue_lock);
- io_schedule();
+ trace_block_sleeprq(q, bio, rw_flags & 1);
- /*
- * After sleeping, we become a "batching" process and
- * will be able to allocate at least one request, and
- * up to a big batch of them for a small period time.
- * See ioc_batching, ioc_set_batching
- */
- create_io_context(current, GFP_NOIO, q->node);
- ioc_set_batching(q, current->io_context);
+ spin_unlock_irq(q->queue_lock);
+ io_schedule();
- spin_lock_irq(q->queue_lock);
- finish_wait(&rl->wait[is_sync], &wait);
+ /*
+ * After sleeping, we become a "batching" process and will be able
+ * to allocate at least one request, and up to a big batch of them
+ * for a small period of time. See ioc_batching, ioc_set_batching
+ */
+ ioc_set_batching(q, current->io_context);
- rq = get_request(q, rw_flags, bio, GFP_NOIO);
- };
+ spin_lock_irq(q->queue_lock);
+ finish_wait(&rl->wait[is_sync], &wait);
- return rq;
+ goto retry;
}
-struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
+static struct request *blk_old_get_request(struct request_queue *q, int rw,
+ gfp_t gfp_mask)
{
struct request *rq;
BUG_ON(rw != READ && rw != WRITE);
+ /* create ioc upfront */
+ create_io_context(gfp_mask, q->node);
+
spin_lock_irq(q->queue_lock);
- if (gfp_mask & __GFP_WAIT)
- rq = get_request_wait(q, rw, NULL);
- else
- rq = get_request(q, rw, NULL, gfp_mask);
+ rq = get_request(q, rw, NULL, gfp_mask);
if (!rq)
spin_unlock_irq(q->queue_lock);
/* q->queue_lock is unlocked at this point */
return rq;
}
+
+struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
+{
+ if (q->mq_ops)
+ return blk_mq_alloc_request(q, rw, gfp_mask, false);
+ else
+ return blk_old_get_request(q, rw, gfp_mask);
+}
EXPORT_SYMBOL(blk_get_request);
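For reference, a sketch of how a legacy (non-mq) driver is expected to pair these calls; the helper name is hypothetical and error handling is abbreviated:

static int example_issue(struct request_queue *q)
{
	struct request *rq;

	/* may sleep because GFP_KERNEL includes __GFP_WAIT; NULL if @q is dying */
	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENODEV;

	/* ... fill in and issue rq, e.g. via blk_execute_rq() ... */

	blk_put_request(rq);
	return 0;
}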
/**
@@ -1031,6 +1219,8 @@ struct request *blk_make_request(struct request_queue *q, struct bio *bio,
if (unlikely(!rq))
return ERR_PTR(-ENOMEM);
+ blk_rq_set_block_pc(rq);
+
for_each_bio(bio) {
struct bio *bounce_bio = bio;
int ret;
@@ -1048,6 +1238,22 @@ struct request *blk_make_request(struct request_queue *q, struct bio *bio,
EXPORT_SYMBOL(blk_make_request);
/**
+ * blk_rq_set_block_pc - initialize a request to type BLOCK_PC
+ * @rq: request to be initialized
+ *
+ */
+void blk_rq_set_block_pc(struct request *rq)
+{
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+ rq->__data_len = 0;
+ rq->__sector = (sector_t) -1;
+ rq->bio = rq->biotail = NULL;
+ memset(rq->__cmd, 0, sizeof(rq->__cmd));
+ rq->cmd = rq->__cmd;
+}
+EXPORT_SYMBOL(blk_rq_set_block_pc);
+
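A hedged sketch of the intended pairing with blk_get_request() for a SCSI-style passthrough caller (cdb handling simplified, names hypothetical):

static struct request *example_alloc_pc(struct request_queue *q,
					const unsigned char *cdb, int cdb_len)
{
	struct request *rq;

	rq = blk_get_request(q, WRITE, GFP_KERNEL);
	if (!rq)
		return NULL;

	blk_rq_set_block_pc(rq);	/* switch rq to BLOCK_PC passthrough */
	if (cdb_len > BLK_MAX_CDB) {
		blk_put_request(rq);	/* sketch: real callers report -EINVAL */
		return NULL;
	}
	memcpy(rq->cmd, cdb, cdb_len);	/* rq->cmd points at rq->__cmd */
	rq->cmd_len = cdb_len;
	return rq;
}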
+/**
* blk_requeue_request - put a request back on queue
* @q: request queue where request should be inserted
* @rq: request to be inserted
@@ -1075,19 +1281,22 @@ EXPORT_SYMBOL(blk_requeue_request);
static void add_acct_request(struct request_queue *q, struct request *rq,
int where)
{
- drive_stat_acct(rq, 1);
+ blk_account_io_start(rq, true);
__elv_add_request(q, rq, where);
}
static void part_round_stats_single(int cpu, struct hd_struct *part,
unsigned long now)
{
+ int inflight;
+
if (now == part->stamp)
return;
- if (part_in_flight(part)) {
+ inflight = part_in_flight(part);
+ if (inflight) {
__part_stat_add(cpu, part, time_in_queue,
- part_in_flight(part) * (now - part->stamp));
+ inflight * (now - part->stamp));
__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
}
part->stamp = now;
@@ -1119,6 +1328,16 @@ void part_round_stats(int cpu, struct hd_struct *part)
}
EXPORT_SYMBOL_GPL(part_round_stats);
+#ifdef CONFIG_PM_RUNTIME
+static void blk_pm_put_request(struct request *rq)
+{
+ if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
+ pm_runtime_mark_last_busy(rq->q->dev);
+}
+#else
+static inline void blk_pm_put_request(struct request *rq) {}
+#endif
+
/*
* queue lock must be held
*/
@@ -1126,8 +1345,13 @@ void __blk_put_request(struct request_queue *q, struct request *req)
{
if (unlikely(!q))
return;
- if (unlikely(--req->ref_count))
+
+ if (q->mq_ops) {
+ blk_mq_free_request(req);
return;
+ }
+
+ blk_pm_put_request(req);
elv_completed_request(q, req);
@@ -1140,24 +1364,31 @@ void __blk_put_request(struct request_queue *q, struct request *req)
*/
if (req->cmd_flags & REQ_ALLOCED) {
unsigned int flags = req->cmd_flags;
+ struct request_list *rl = blk_rq_rl(req);
BUG_ON(!list_empty(&req->queuelist));
- BUG_ON(!hlist_unhashed(&req->hash));
+ BUG_ON(ELV_ON_HASH(req));
- blk_free_request(q, req);
- freed_request(q, flags);
+ blk_free_request(rl, req);
+ freed_request(rl, flags);
+ blk_put_rl(rl);
}
}
EXPORT_SYMBOL_GPL(__blk_put_request);
void blk_put_request(struct request *req)
{
- unsigned long flags;
struct request_queue *q = req->q;
- spin_lock_irqsave(q->queue_lock, flags);
- __blk_put_request(q, req);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ if (q->mq_ops)
+ blk_mq_free_request(req);
+ else {
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __blk_put_request(q, req);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
}
EXPORT_SYMBOL(blk_put_request);
@@ -1183,47 +1414,46 @@ void blk_add_request_payload(struct request *rq, struct page *page,
bio->bi_io_vec->bv_offset = 0;
bio->bi_io_vec->bv_len = len;
- bio->bi_size = len;
+ bio->bi_iter.bi_size = len;
bio->bi_vcnt = 1;
bio->bi_phys_segments = 1;
rq->__data_len = rq->resid_len = len;
rq->nr_phys_segments = 1;
- rq->buffer = bio_data(bio);
}
EXPORT_SYMBOL_GPL(blk_add_request_payload);
-static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
- struct bio *bio)
+bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
+ struct bio *bio)
{
const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
if (!ll_back_merge_fn(q, req, bio))
return false;
- trace_block_bio_backmerge(q, bio);
+ trace_block_bio_backmerge(q, req, bio);
if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
blk_rq_set_mixed_merge(req);
req->biotail->bi_next = bio;
req->biotail = bio;
- req->__data_len += bio->bi_size;
+ req->__data_len += bio->bi_iter.bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
- drive_stat_acct(req, 0);
+ blk_account_io_start(req, false);
return true;
}
-static bool bio_attempt_front_merge(struct request_queue *q,
- struct request *req, struct bio *bio)
+bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
+ struct bio *bio)
{
const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
if (!ll_front_merge_fn(q, req, bio))
return false;
- trace_block_bio_frontmerge(q, bio);
+ trace_block_bio_frontmerge(q, req, bio);
if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
blk_rq_set_mixed_merge(req);
@@ -1231,22 +1461,16 @@ static bool bio_attempt_front_merge(struct request_queue *q,
bio->bi_next = req->bio;
req->bio = bio;
- /*
- * may not be valid. if the low level driver said
- * it didn't need a bounce buffer then it better
- * not touch req->buffer either...
- */
- req->buffer = bio_data(bio);
- req->__sector = bio->bi_sector;
- req->__data_len += bio->bi_size;
+ req->__sector = bio->bi_iter.bi_sector;
+ req->__data_len += bio->bi_iter.bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
- drive_stat_acct(req, 0);
+ blk_account_io_start(req, false);
return true;
}
/**
- * attempt_plug_merge - try to merge with %current's plugged list
+ * blk_attempt_plug_merge - try to merge with %current's plugged list
* @q: request_queue new bio is being queued at
* @bio: new bio being queued
* @request_count: out parameter for number of traversed plugged requests
@@ -1261,23 +1485,32 @@ static bool bio_attempt_front_merge(struct request_queue *q,
* added on the elevator at this point. In addition, we don't have
* reliable access to the elevator outside queue lock. Only check basic
* merging parameters without querying the elevator.
+ *
+ * Caller must ensure !blk_queue_nomerges(q) beforehand.
*/
-static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
- unsigned int *request_count)
+bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
+ unsigned int *request_count)
{
struct blk_plug *plug;
struct request *rq;
bool ret = false;
+ struct list_head *plug_list;
plug = current->plug;
if (!plug)
goto out;
*request_count = 0;
- list_for_each_entry_reverse(rq, &plug->list, queuelist) {
+ if (q->mq_ops)
+ plug_list = &plug->mq_list;
+ else
+ plug_list = &plug->list;
+
+ list_for_each_entry_reverse(rq, plug_list, queuelist) {
int el_ret;
- (*request_count)++;
+ if (rq->q == q)
+ (*request_count)++;
if (rq->q != q || !blk_rq_merge_ok(rq, bio))
continue;
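The plugged list walked here is built up by the submitting task itself; a minimal sketch of that submitter-side pattern (hypothetical helper, assuming the existing blk_start_plug()/blk_finish_plug() API):

static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* subsequent requests queue on current->plug */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);
	blk_finish_plug(&plug);		/* flush the plug, merging where possible */
}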
@@ -1306,7 +1539,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
req->cmd_flags |= REQ_FAILFAST_MASK;
req->errors = 0;
- req->__sector = bio->bi_sector;
+ req->__sector = bio->bi_iter.bi_sector;
req->ioprio = bio_prio(bio);
blk_rq_bio_prep(req->q, req, bio);
}
@@ -1326,6 +1559,11 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
*/
blk_queue_bounce(q, &bio);
+ if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+ bio_endio(bio, -EIO);
+ return;
+ }
+
if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
spin_lock_irq(q->queue_lock);
where = ELEVATOR_INSERT_FLUSH;
@@ -1336,7 +1574,8 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
* Check if we can merge with the plugged list before grabbing
* any locks.
*/
- if (attempt_plug_merge(q, bio, &request_count))
+ if (!blk_queue_nomerges(q) &&
+ blk_attempt_plug_merge(q, bio, &request_count))
return;
spin_lock_irq(q->queue_lock);
@@ -1372,7 +1611,7 @@ get_rq:
* Grab a free request. This might sleep but cannot fail.
* Returns with the queue unlocked.
*/
- req = get_request_wait(q, rw_flags, bio);
+ req = get_request(q, rw_flags, bio, GFP_NOIO);
if (unlikely(!req)) {
bio_endio(bio, -ENODEV); /* @q is dead */
goto out_unlock;
@@ -1393,27 +1632,18 @@ get_rq:
if (plug) {
/*
* If this is the first request added after a plug, fire
- * of a plug trace. If others have been added before, check
- * if we have multiple devices in this plug. If so, make a
- * note to sort the list before dispatch.
+ * of a plug trace.
*/
- if (list_empty(&plug->list))
+ if (!request_count)
trace_block_plug(q);
else {
- if (!plug->should_sort) {
- struct request *__rq;
-
- __rq = list_entry_rq(plug->list.prev);
- if (__rq->q != q)
- plug->should_sort = 1;
- }
if (request_count >= BLK_MAX_REQUEST_COUNT) {
blk_flush_plug_list(plug, false);
trace_block_plug(q);
}
}
list_add_tail(&req->queuelist, &plug->list);
- drive_stat_acct(req, 1);
+ blk_account_io_start(req, true);
} else {
spin_lock_irq(q->queue_lock);
add_acct_request(q, req, where);
@@ -1434,12 +1664,12 @@ static inline void blk_partition_remap(struct bio *bio)
if (bio_sectors(bio) && bdev != bdev->bd_contains) {
struct hd_struct *p = bdev->bd_part;
- bio->bi_sector += p->start_sect;
+ bio->bi_iter.bi_sector += p->start_sect;
bio->bi_bdev = bdev->bd_contains;
trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
bdev->bd_dev,
- bio->bi_sector - p->start_sect);
+ bio->bi_iter.bi_sector - p->start_sect);
}
}
@@ -1451,7 +1681,7 @@ static void handle_bad_sector(struct bio *bio)
printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
bdevname(bio->bi_bdev, b),
bio->bi_rw,
- (unsigned long long)bio->bi_sector + bio_sectors(bio),
+ (unsigned long long)bio_end_sector(bio),
(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
set_bit(BIO_EOF, &bio->bi_flags);
@@ -1477,7 +1707,7 @@ static int __init fail_make_request_debugfs(void)
struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
NULL, &fail_make_request);
- return IS_ERR(dir) ? PTR_ERR(dir) : 0;
+ return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_make_request_debugfs);
@@ -1505,7 +1735,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
/* Test device or partition size, when known. */
maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
if (maxsector) {
- sector_t sector = bio->bi_sector;
+ sector_t sector = bio->bi_iter.bi_sector;
if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
/*
@@ -1541,12 +1771,12 @@ generic_make_request_checks(struct bio *bio)
"generic_make_request: Trying to access "
"nonexistent block-device %s (%Lu)\n",
bdevname(bio->bi_bdev, b),
- (long long) bio->bi_sector);
+ (long long) bio->bi_iter.bi_sector);
goto end_io;
}
- if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
- nr_sectors > queue_max_hw_sectors(q))) {
+ if (likely(bio_is_rw(bio) &&
+ nr_sectors > queue_max_hw_sectors(q))) {
printk(KERN_ERR "bio too big device %s (%u > %u)\n",
bdevname(bio->bi_bdev, b),
bio_sectors(bio),
@@ -1555,9 +1785,9 @@ generic_make_request_checks(struct bio *bio)
}
part = bio->bi_bdev->bd_part;
- if (should_fail_request(part, bio->bi_size) ||
+ if (should_fail_request(part, bio->bi_iter.bi_size) ||
should_fail_request(&part_to_disk(part)->part0,
- bio->bi_size))
+ bio->bi_iter.bi_size))
goto end_io;
/*
@@ -1566,9 +1796,6 @@ generic_make_request_checks(struct bio *bio)
*/
blk_partition_remap(bio);
- if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
- goto end_io;
-
if (bio_check_eod(bio, nr_sectors))
goto end_io;
@@ -1587,12 +1814,24 @@ generic_make_request_checks(struct bio *bio)
if ((bio->bi_rw & REQ_DISCARD) &&
(!blk_queue_discard(q) ||
- ((bio->bi_rw & REQ_SECURE) &&
- !blk_queue_secdiscard(q)))) {
+ ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
+ err = -EOPNOTSUPP;
+ goto end_io;
+ }
+
+ if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
err = -EOPNOTSUPP;
goto end_io;
}
+ /*
+ * Various block parts want %current->io_context and lazy ioc
+ * allocation ends up trading a lot of pain for a small amount of
+ * memory. Just allocate it upfront. This may fail and the
+ * block layer knows how to live with it.
+ */
+ create_io_context(GFP_ATOMIC, q->node);
+
if (blk_throtl_bio(q, bio))
return false; /* throttled, will be resubmitted later */
@@ -1690,19 +1929,24 @@ EXPORT_SYMBOL(generic_make_request);
*/
void submit_bio(int rw, struct bio *bio)
{
- int count = bio_sectors(bio);
-
bio->bi_rw |= rw;
/*
* If it's a regular read/write or a barrier with data attached,
* go through the normal accounting stuff before submission.
*/
- if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
+ if (bio_has_data(bio)) {
+ unsigned int count;
+
+ if (unlikely(rw & REQ_WRITE_SAME))
+ count = bdev_logical_block_size(bio->bi_bdev) >> 9;
+ else
+ count = bio_sectors(bio);
+
if (rw & WRITE) {
count_vm_events(PGPGOUT, count);
} else {
- task_io_account_read(bio->bi_size);
+ task_io_account_read(bio->bi_iter.bi_size);
count_vm_events(PGPGIN, count);
}
@@ -1711,7 +1955,7 @@ void submit_bio(int rw, struct bio *bio)
printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
current->comm, task_pid_nr(current),
(rw & WRITE) ? "WRITE" : "READ",
- (unsigned long long)bio->bi_sector,
+ (unsigned long long)bio->bi_iter.bi_sector,
bdevname(bio->bi_bdev, b),
count);
}
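The bi_iter conversion visible in this hunk also changes how submitters fill in a bio before calling submit_bio(); a sketch under that convention (names hypothetical, error handling abbreviated):

static void example_read_page(struct block_device *bdev, sector_t sector,
			      struct page *page, bio_end_io_t *done)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	if (!bio)
		return;

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;	/* formerly bio->bi_sector */
	bio->bi_end_io = done;
	bio_add_page(bio, page, PAGE_SIZE, 0);	/* grows bi_iter.bi_size */
	submit_bio(READ, bio);
}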
@@ -1737,18 +1981,17 @@ EXPORT_SYMBOL(submit_bio);
* in some cases below, so export this function.
* Request stacking drivers like request-based dm may change the queue
* limits while requests are in the queue (e.g. dm's table swapping).
- * Such request stacking drivers should check those requests agaist
+ * Such request stacking drivers should check those requests against
* the new queue limits again when they dispatch those requests,
* although such checkings are also done against the old queue limits
* when submitting requests.
*/
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
- if (rq->cmd_flags & REQ_DISCARD)
+ if (!rq_mergeable(rq))
return 0;
- if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
- blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
+ if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
printk(KERN_ERR "%s: over max size limit.\n", __func__);
return -EIO;
}
@@ -1787,7 +2030,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
return -EIO;
spin_lock_irqsave(q->queue_lock, flags);
- if (unlikely(blk_queue_dead(q))) {
+ if (unlikely(blk_queue_dying(q))) {
spin_unlock_irqrestore(q->queue_lock, flags);
return -ENODEV;
}
@@ -1845,7 +2088,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
for (bio = rq->bio; bio; bio = bio->bi_next) {
if ((bio->bi_rw & ff) != ff)
break;
- bytes += bio->bi_size;
+ bytes += bio->bi_iter.bi_size;
}
/* this could lead to infinite loop */
@@ -1854,7 +2097,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
-static void blk_account_io_completion(struct request *req, unsigned int bytes)
+void blk_account_io_completion(struct request *req, unsigned int bytes)
{
if (blk_do_io_stat(req)) {
const int rw = rq_data_dir(req);
@@ -1868,7 +2111,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
}
}
-static void blk_account_io_done(struct request *req)
+void blk_account_io_done(struct request *req)
{
/*
* Account IO completion. flush_rq isn't accounted as a
@@ -1894,6 +2137,64 @@ static void blk_account_io_done(struct request *req)
}
}
+#ifdef CONFIG_PM_RUNTIME
+/*
+ * Don't process normal requests when queue is suspended
+ * or in the process of suspending/resuming
+ */
+static struct request *blk_pm_peek_request(struct request_queue *q,
+ struct request *rq)
+{
+ if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
+ (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM))))
+ return NULL;
+ else
+ return rq;
+}
+#else
+static inline struct request *blk_pm_peek_request(struct request_queue *q,
+ struct request *rq)
+{
+ return rq;
+}
+#endif
+
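The runtime-PM gating above only engages once a driver has attached its device to the queue; a sketch of that setup, assuming blk_pm_runtime_init() from the same runtime-PM series (not shown in this hunk):

static void example_setup_rpm(struct request_queue *q, struct device *dev)
{
	blk_pm_runtime_init(q, dev);	/* sets q->dev so REQ_PM gating applies */
	pm_runtime_set_autosuspend_delay(dev, 5000);
	pm_runtime_use_autosuspend(dev);
}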
+void blk_account_io_start(struct request *rq, bool new_io)
+{
+ struct hd_struct *part;
+ int rw = rq_data_dir(rq);
+ int cpu;
+
+ if (!blk_do_io_stat(rq))
+ return;
+
+ cpu = part_stat_lock();
+
+ if (!new_io) {
+ part = rq->part;
+ part_stat_inc(cpu, part, merges[rw]);
+ } else {
+ part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
+ if (!hd_struct_try_get(part)) {
+ /*
+ * The partition is already being removed,
+ * the request will be accounted on the disk only
+ *
+ * We take a reference on disk->part0 although that
+ * partition will never be deleted, so we can treat
+ * it as any other partition.
+ */
+ part = &rq->rq_disk->part0;
+ hd_struct_get(part);
+ }
+ part_round_stats(cpu, part);
+ part_inc_in_flight(part, rw);
+ rq->part = part;
+ }
+
+ part_stat_unlock();
+}
+
/**
* blk_peek_request - peek at the top of a request queue
* @q: request queue to peek at
@@ -1916,6 +2217,11 @@ struct request *blk_peek_request(struct request_queue *q)
int ret;
while ((rq = __elv_next_request(q)) != NULL) {
+
+ rq = blk_pm_peek_request(q, rq);
+ if (!rq)
+ break;
+
if (!(rq->cmd_flags & REQ_STARTED)) {
/*
* This is the first time the device driver
@@ -2040,6 +2346,7 @@ void blk_start_request(struct request *req)
if (unlikely(blk_bidi_rq(req)))
req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
+ BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
blk_add_timer(req);
}
EXPORT_SYMBOL(blk_start_request);
@@ -2094,13 +2401,12 @@ EXPORT_SYMBOL(blk_fetch_request);
**/
bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
{
- int total_bytes, bio_nbytes, next_idx = 0;
- struct bio *bio;
+ int total_bytes;
if (!req->bio)
return false;
- trace_block_rq_complete(req->q, req);
+ trace_block_rq_complete(req->q, req, nr_bytes);
/*
* For fs requests, rq is just carrier of independent bio's
@@ -2127,68 +2433,44 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
case -EBADE:
error_type = "critical nexus";
break;
+ case -ETIMEDOUT:
+ error_type = "timeout";
+ break;
+ case -ENOSPC:
+ error_type = "critical space allocation";
+ break;
+ case -ENODATA:
+ error_type = "critical medium";
+ break;
case -EIO:
default:
error_type = "I/O";
break;
}
- printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
- error_type, req->rq_disk ? req->rq_disk->disk_name : "?",
- (unsigned long long)blk_rq_pos(req));
+ printk_ratelimited(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
+ error_type, req->rq_disk ?
+ req->rq_disk->disk_name : "?",
+ (unsigned long long)blk_rq_pos(req));
+
}
blk_account_io_completion(req, nr_bytes);
- total_bytes = bio_nbytes = 0;
- while ((bio = req->bio) != NULL) {
- int nbytes;
+ total_bytes = 0;
+ while (req->bio) {
+ struct bio *bio = req->bio;
+ unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
- if (nr_bytes >= bio->bi_size) {
+ if (bio_bytes == bio->bi_iter.bi_size)
req->bio = bio->bi_next;
- nbytes = bio->bi_size;
- req_bio_endio(req, bio, nbytes, error);
- next_idx = 0;
- bio_nbytes = 0;
- } else {
- int idx = bio->bi_idx + next_idx;
-
- if (unlikely(idx >= bio->bi_vcnt)) {
- blk_dump_rq_flags(req, "__end_that");
- printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
- __func__, idx, bio->bi_vcnt);
- break;
- }
-
- nbytes = bio_iovec_idx(bio, idx)->bv_len;
- BIO_BUG_ON(nbytes > bio->bi_size);
- /*
- * not a complete bvec done
- */
- if (unlikely(nbytes > nr_bytes)) {
- bio_nbytes += nr_bytes;
- total_bytes += nr_bytes;
- break;
- }
-
- /*
- * advance to the next vector
- */
- next_idx++;
- bio_nbytes += nbytes;
- }
+ req_bio_endio(req, bio, bio_bytes, error);
- total_bytes += nbytes;
- nr_bytes -= nbytes;
+ total_bytes += bio_bytes;
+ nr_bytes -= bio_bytes;
- bio = req->bio;
- if (bio) {
- /*
- * end more in this run, or just return 'not-done'
- */
- if (unlikely(nr_bytes <= 0))
- break;
- }
+ if (!nr_bytes)
+ break;
}
/*
@@ -2204,21 +2486,10 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
return false;
}
- /*
- * if the request wasn't completed, update state
- */
- if (bio_nbytes) {
- req_bio_endio(req, bio, bio_nbytes, error);
- bio->bi_idx += next_idx;
- bio_iovec(bio)->bv_offset += nr_bytes;
- bio_iovec(bio)->bv_len -= nr_bytes;
- }
-
req->__data_len -= total_bytes;
- req->buffer = bio_data(req->bio);
/* update sector only for requests with clear definition of sector */
- if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
+ if (req->cmd_type == REQ_TYPE_FS)
req->__sector += total_bytes >> 9;
/* mixed attributes always follow the first bio */
@@ -2284,7 +2555,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request);
/*
* queue lock must be held
*/
-static void blk_finish_request(struct request *req, int error)
+void blk_finish_request(struct request *req, int error)
{
if (blk_rq_tagged(req))
blk_queue_end_tag(req->q, req);
@@ -2299,7 +2570,6 @@ static void blk_finish_request(struct request *req, int error)
if (req->cmd_flags & REQ_DONTPREP)
blk_unprep_request(req);
-
blk_account_io_done(req);
if (req->end_io)
@@ -2311,6 +2581,7 @@ static void blk_finish_request(struct request *req, int error)
__blk_put_request(req->q, req);
}
}
+EXPORT_SYMBOL(blk_finish_request);
/**
* blk_end_bidi_request - Complete a bidi request
@@ -2534,11 +2805,10 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
- if (bio_has_data(bio)) {
+ if (bio_has_data(bio))
rq->nr_phys_segments = bio_phys_segments(q, bio);
- rq->buffer = bio_data(bio);
- }
- rq->__data_len = bio->bi_size;
+
+ rq->__data_len = bio->bi_iter.bi_size;
rq->bio = rq->biotail = bio;
if (bio->bi_bdev)
@@ -2556,10 +2826,10 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
void rq_flush_dcache_pages(struct request *rq)
{
struct req_iterator iter;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
rq_for_each_segment(bvec, rq, iter)
- flush_dcache_page(bvec->bv_page);
+ flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif
@@ -2613,7 +2883,7 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
/*
* Copy attributes of the original request to the clone request.
- * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
+ * The actual data parts (e.g. ->cmd, ->sense) are not copied.
*/
static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
@@ -2639,7 +2909,7 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
*
* Description:
* Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
- * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
+ * The actual data parts of @rq_src (e.g. ->cmd, ->sense)
* are not copied, and copying such parts is the caller's responsibility.
* Also, pages which the original bios are pointing to are not copied
* and the cloned bios just point same pages.
@@ -2659,16 +2929,10 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
blk_rq_init(NULL, rq);
__rq_for_each_bio(bio_src, rq_src) {
- bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
+ bio = bio_clone_bioset(bio_src, gfp_mask, bs);
if (!bio)
goto free_and_out;
- __bio_clone(bio, bio_src);
-
- if (bio_integrity(bio_src) &&
- bio_integrity_clone(bio, bio_src, gfp_mask, bs))
- goto free_and_out;
-
if (bio_ctr && bio_ctr(bio, bio_src, data))
goto free_and_out;
@@ -2685,27 +2949,32 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
free_and_out:
if (bio)
- bio_free(bio, bs);
+ bio_put(bio);
blk_rq_unprep_clone(rq);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
-int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
+int kblockd_schedule_work(struct work_struct *work)
{
return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);
-int kblockd_schedule_delayed_work(struct request_queue *q,
- struct delayed_work *dwork, unsigned long delay)
+int kblockd_schedule_delayed_work(struct delayed_work *dwork,
+ unsigned long delay)
{
return queue_delayed_work(kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-#define PLUG_MAGIC 0x91827364
+int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
+ unsigned long delay)
+{
+ return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
+}
+EXPORT_SYMBOL(kblockd_schedule_delayed_work_on);
/**
* blk_start_plug - initialize blk_plug and track it inside the task_struct
@@ -2725,10 +2994,9 @@ void blk_start_plug(struct blk_plug *plug)
{
struct task_struct *tsk = current;
- plug->magic = PLUG_MAGIC;
INIT_LIST_HEAD(&plug->list);
+ INIT_LIST_HEAD(&plug->mq_list);
INIT_LIST_HEAD(&plug->cb_list);
- plug->should_sort = 0;
/*
* If this is a nested plug, don't actually assign it. It will be
@@ -2749,7 +3017,8 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
struct request *rqa = container_of(a, struct request, queuelist);
struct request *rqb = container_of(b, struct request, queuelist);
- return !(rqa->q <= rqb->q);
+ return !(rqa->q < rqb->q ||
+ (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}
/*
@@ -2764,47 +3033,55 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
{
trace_block_unplug(q, depth, !from_schedule);
- /*
- * Don't mess with dead queue.
- */
- if (unlikely(blk_queue_dead(q))) {
- spin_unlock(q->queue_lock);
- return;
- }
-
- /*
- * If we are punting this to kblockd, then we can safely drop
- * the queue_lock before waking kblockd (which needs to take
- * this lock).
- */
- if (from_schedule) {
- spin_unlock(q->queue_lock);
+ if (from_schedule)
blk_run_queue_async(q);
- } else {
+ else
__blk_run_queue(q);
- spin_unlock(q->queue_lock);
- }
-
+ spin_unlock(q->queue_lock);
}
-static void flush_plug_callbacks(struct blk_plug *plug)
+static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
LIST_HEAD(callbacks);
- if (list_empty(&plug->cb_list))
- return;
-
- list_splice_init(&plug->cb_list, &callbacks);
+ while (!list_empty(&plug->cb_list)) {
+ list_splice_init(&plug->cb_list, &callbacks);
- while (!list_empty(&callbacks)) {
- struct blk_plug_cb *cb = list_first_entry(&callbacks,
+ while (!list_empty(&callbacks)) {
+ struct blk_plug_cb *cb = list_first_entry(&callbacks,
struct blk_plug_cb,
list);
- list_del(&cb->list);
- cb->callback(cb);
+ list_del(&cb->list);
+ cb->callback(cb, from_schedule);
+ }
}
}
+struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
+ int size)
+{
+ struct blk_plug *plug = current->plug;
+ struct blk_plug_cb *cb;
+
+ if (!plug)
+ return NULL;
+
+ list_for_each_entry(cb, &plug->cb_list, list)
+ if (cb->callback == unplug && cb->data == data)
+ return cb;
+
+ /* Not currently on the callback list */
+ BUG_ON(size < sizeof(*cb));
+ cb = kzalloc(size, GFP_ATOMIC);
+ if (cb) {
+ cb->data = data;
+ cb->callback = unplug;
+ list_add(&cb->list, &plug->cb_list);
+ }
+ return cb;
+}
+EXPORT_SYMBOL(blk_check_plugged);
+
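A note on the new helper above: blk_check_plugged() lets a stacking driver attach its own unplug callback to the current task's plug so that work can be batched until the plug is flushed. A minimal usage sketch (illustrative only, not part of this patch; my_unplug, my_device_kick and struct my_device are hypothetical):

    static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
    {
        struct my_device *dev = cb->data;

        my_device_kick(dev);   /* flush whatever was batched while plugged */
        kfree(cb);             /* cb was kzalloc'd by blk_check_plugged() */
    }

    static void my_defer_kick(struct my_device *dev)
    {
        /* defer the kick until the current plug is flushed, if one exists */
        if (!blk_check_plugged(my_unplug, dev, sizeof(struct blk_plug_cb)))
            my_device_kick(dev);   /* no plug active: kick immediately */
    }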
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request_queue *q;
@@ -2813,18 +3090,17 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
LIST_HEAD(list);
unsigned int depth;
- BUG_ON(plug->magic != PLUG_MAGIC);
+ flush_plug_callbacks(plug, from_schedule);
+
+ if (!list_empty(&plug->mq_list))
+ blk_mq_flush_plug_list(plug, from_schedule);
- flush_plug_callbacks(plug);
if (list_empty(&plug->list))
return;
list_splice_init(&plug->list, &list);
- if (plug->should_sort) {
- list_sort(NULL, &list, plug_rq_cmp);
- plug->should_sort = 0;
- }
+ list_sort(NULL, &list, plug_rq_cmp);
q = NULL;
depth = 0;
@@ -2852,7 +3128,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
/*
* Short-circuit if @q is dead
*/
- if (unlikely(blk_queue_dead(q))) {
+ if (unlikely(blk_queue_dying(q))) {
__blk_end_request_all(rq, -ENODEV);
continue;
}
@@ -2886,6 +3162,149 @@ void blk_finish_plug(struct blk_plug *plug)
}
EXPORT_SYMBOL(blk_finish_plug);
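The plug API itself is unchanged by this patch; for reference, the usual per-task batching pattern these functions implement looks roughly like the following minimal sketch (the bios[] array and nr_bios are hypothetical):

    struct blk_plug plug;
    int i;

    blk_start_plug(&plug);
    for (i = 0; i < nr_bios; i++)
        submit_bio(WRITE, bios[i]);   /* requests are batched per task */
    blk_finish_plug(&plug);           /* sorts the batch and flushes it */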
+#ifdef CONFIG_PM_RUNTIME
+/**
+ * blk_pm_runtime_init - Block layer runtime PM initialization routine
+ * @q: the queue of the device
+ * @dev: the device the queue belongs to
+ *
+ * Description:
+ * Initialize runtime-PM-related fields for @q and start auto suspend for
+ * @dev. Drivers that want to take advantage of request-based runtime PM
+ * should call this function after @dev has been initialized and its
+ * request queue @q has been allocated, and while runtime PM for it cannot
+ * yet happen (either because it is disabled/forbidden or because its
+ * usage_count is still > 0). In most cases, the driver should call this
+ * function before any I/O has taken place.
+ *
+ * This function takes care of setting up autosuspend for the device; the
+ * autosuspend delay is set to -1 to make runtime suspend impossible until an
+ * updated value is set either by the user or by the driver. Drivers do not
+ * need to touch other autosuspend settings.
+ *
+ * Block layer runtime PM is request based, so it only works for drivers
+ * that use requests as their I/O unit rather than drivers that submit
+ * bios directly.
+ */
+void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
+{
+ q->dev = dev;
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_set_autosuspend_delay(q->dev, -1);
+ pm_runtime_use_autosuspend(q->dev);
+}
+EXPORT_SYMBOL(blk_pm_runtime_init);
+
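A minimal sketch of how a driver might wire this up (illustrative only; mydev and its fields are hypothetical, and the sysfs commands are just one way to enable autosuspend afterwards):

    /* in the driver's probe path, once the request queue exists */
    blk_pm_runtime_init(mydev->queue, &mydev->dev);

    /* later, user space (or the driver) enables autosuspend, e.g.:
     *   echo 5000 > /sys/devices/.../power/autosuspend_delay_ms
     *   echo auto > /sys/devices/.../power/control
     */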
+/**
+ * blk_pre_runtime_suspend - Pre runtime suspend check
+ * @q: the queue of the device
+ *
+ * Description:
+ * This function will check if runtime suspend is allowed for the device
+ * by examining if there are any requests pending in the queue. If there
+ * are requests pending, the device can not be runtime suspended; otherwise,
+ * the queue's status will be updated to SUSPENDING and the driver can
+ * proceed to suspend the device.
+ *
+ * If suspend is not allowed, we mark the device as last busy so that the
+ * runtime PM core will try to autosuspend it again some time later.
+ *
+ * This function should be called near the start of the device's
+ * runtime_suspend callback.
+ *
+ * Return:
+ * 0 - OK to runtime suspend the device
+ * -EBUSY - Device should not be runtime suspended
+ */
+int blk_pre_runtime_suspend(struct request_queue *q)
+{
+ int ret = 0;
+
+ spin_lock_irq(q->queue_lock);
+ if (q->nr_pending) {
+ ret = -EBUSY;
+ pm_runtime_mark_last_busy(q->dev);
+ } else {
+ q->rpm_status = RPM_SUSPENDING;
+ }
+ spin_unlock_irq(q->queue_lock);
+ return ret;
+}
+EXPORT_SYMBOL(blk_pre_runtime_suspend);
+
+/**
+ * blk_post_runtime_suspend - Post runtime suspend processing
+ * @q: the queue of the device
+ * @err: return value of the device's runtime_suspend function
+ *
+ * Description:
+ * Update the queue's runtime status according to the return value of the
+ * device's runtime_suspend function and mark the device as last busy so
+ * that the PM core will try to autosuspend it at a later time.
+ *
+ * This function should be called near the end of the device's
+ * runtime_suspend callback.
+ */
+void blk_post_runtime_suspend(struct request_queue *q, int err)
+{
+ spin_lock_irq(q->queue_lock);
+ if (!err) {
+ q->rpm_status = RPM_SUSPENDED;
+ } else {
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_mark_last_busy(q->dev);
+ }
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_post_runtime_suspend);
+
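Taken together, the two suspend helpers are meant to bracket the driver's own suspend work; a hedged sketch of a runtime_suspend callback (my_device_power_down and struct my_device are hypothetical):

    static int my_runtime_suspend(struct device *dev)
    {
        struct my_device *mydev = dev_get_drvdata(dev);
        int err;

        err = blk_pre_runtime_suspend(mydev->queue);
        if (err)                              /* -EBUSY: requests pending */
            return err;

        err = my_device_power_down(mydev);    /* hypothetical HW suspend */
        blk_post_runtime_suspend(mydev->queue, err);
        return err;
    }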
+/**
+ * blk_pre_runtime_resume - Pre runtime resume processing
+ * @q: the queue of the device
+ *
+ * Description:
+ * Update the queue's runtime status to RESUMING in preparation for the
+ * runtime resume of the device.
+ *
+ * This function should be called near the start of the device's
+ * runtime_resume callback.
+ */
+void blk_pre_runtime_resume(struct request_queue *q)
+{
+ spin_lock_irq(q->queue_lock);
+ q->rpm_status = RPM_RESUMING;
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_pre_runtime_resume);
+
+/**
+ * blk_post_runtime_resume - Post runtime resume processing
+ * @q: the queue of the device
+ * @err: return value of the device's runtime_resume function
+ *
+ * Description:
+ * Update the queue's runtime status according to the return value of the
+ * device's runtime_resume function. If it resumed successfully, process
+ * the requests that were queued while the device was resuming, then mark
+ * it as last busy and initiate autosuspend for it.
+ *
+ * This function should be called near the end of the device's
+ * runtime_resume callback.
+ */
+void blk_post_runtime_resume(struct request_queue *q, int err)
+{
+ spin_lock_irq(q->queue_lock);
+ if (!err) {
+ q->rpm_status = RPM_ACTIVE;
+ __blk_run_queue(q);
+ pm_runtime_mark_last_busy(q->dev);
+ pm_request_autosuspend(q->dev);
+ } else {
+ q->rpm_status = RPM_SUSPENDED;
+ }
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_post_runtime_resume);
+#endif
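The resume helpers follow the same bracketing pattern; a corresponding runtime_resume sketch (my_device_power_up is hypothetical):

    static int my_runtime_resume(struct device *dev)
    {
        struct my_device *mydev = dev_get_drvdata(dev);
        int err;

        blk_pre_runtime_resume(mydev->queue);
        err = my_device_power_up(mydev);      /* hypothetical HW resume */
        blk_post_runtime_resume(mydev->queue, err);
        return err;
    }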
+
int __init blk_dev_init(void)
{
BUILD_BUG_ON(__REQ_NR_BITS > 8 *
diff --git a/block/blk-exec.c b/block/blk-exec.c
index fb2cbd55162..f4d27b12c90 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -5,6 +5,8 @@
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/sched/sysctl.h>
#include "blk.h"
@@ -23,7 +25,6 @@ static void blk_end_sync_rq(struct request *rq, int error)
struct completion *waiting = rq->end_io_data;
rq->end_io_data = NULL;
- __blk_put_request(rq->q, rq);
/*
* complete last, if this is a stack request the process (and thus
@@ -43,31 +44,52 @@ static void blk_end_sync_rq(struct request *rq, int error)
* Description:
* Insert a fully prepared request at the back of the I/O scheduler queue
* for execution. Don't wait for completion.
+ *
+ * Note:
+ * This function will invoke @done directly if the queue is dead.
*/
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
struct request *rq, int at_head,
rq_end_io_fn *done)
{
int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+ bool is_pm_resume;
WARN_ON(irqs_disabled());
+
+ rq->rq_disk = bd_disk;
+ rq->end_io = done;
+
+ /*
+ * Don't check the dying flag for MQ because the request won't
+ * be reused after the dying flag is set.
+ */
+ if (q->mq_ops) {
+ blk_mq_insert_request(rq, at_head, true, false);
+ return;
+ }
+
+ /*
+ * need to check this before __blk_run_queue(), because rq can
+ * be freed before that returns.
+ */
+ is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;
+
spin_lock_irq(q->queue_lock);
- if (unlikely(blk_queue_dead(q))) {
- spin_unlock_irq(q->queue_lock);
+ if (unlikely(blk_queue_dying(q))) {
+ rq->cmd_flags |= REQ_QUIET;
rq->errors = -ENXIO;
- if (rq->end_io)
- rq->end_io(rq, rq->errors);
+ __blk_end_request_all(rq, rq->errors);
+ spin_unlock_irq(q->queue_lock);
return;
}
- rq->rq_disk = bd_disk;
- rq->end_io = done;
__elv_add_request(q, rq, where);
__blk_run_queue(q);
/* the queue is stopped so it won't be run */
- if (rq->cmd_type == REQ_TYPE_PM_RESUME)
- q->request_fn(q);
+ if (is_pm_resume)
+ __blk_run_queue_uncond(q);
spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
@@ -91,12 +113,6 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
int err = 0;
unsigned long hang_check;
- /*
- * we need an extra reference to the request, so we can look at
- * it after io completion
- */
- rq->ref_count++;
-
if (!rq->sense) {
memset(sense, 0, sizeof(sense));
rq->sense = sense;
@@ -109,13 +125,18 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
/* Prevent hang_check timer from firing at us during very long I/O */
hang_check = sysctl_hung_task_timeout_secs;
if (hang_check)
- while (!wait_for_completion_timeout(&wait, hang_check * (HZ/2)));
+ while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
else
- wait_for_completion(&wait);
+ wait_for_completion_io(&wait);
if (rq->errors)
err = -EIO;
+ if (rq->sense == sense) {
+ rq->sense = NULL;
+ rq->sense_len = 0;
+ }
+
return err;
}
EXPORT_SYMBOL(blk_execute_rq);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 720ad607ff9..3cb5e9e7108 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -69,8 +69,10 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
+#include <linux/blk-mq.h>
#include "blk.h"
+#include "blk-mq.h"
/* FLUSH/FUA sequences */
enum {
@@ -124,6 +126,25 @@ static void blk_flush_restore_request(struct request *rq)
/* make @rq a normal request */
rq->cmd_flags &= ~REQ_FLUSH_SEQ;
rq->end_io = rq->flush.saved_end_io;
+
+ blk_clear_rq_complete(rq);
+}
+
+static bool blk_flush_queue_rq(struct request *rq, bool add_front)
+{
+ if (rq->q->mq_ops) {
+ struct request_queue *q = rq->q;
+
+ blk_mq_add_to_requeue_list(rq, add_front);
+ blk_mq_kick_requeue_list(q);
+ return false;
+ } else {
+ if (add_front)
+ list_add(&rq->queuelist, &rq->q->queue_head);
+ else
+ list_add_tail(&rq->queuelist, &rq->q->queue_head);
+ return true;
+ }
}
/**
@@ -136,7 +157,7 @@ static void blk_flush_restore_request(struct request *rq)
* completion and trigger the next step.
*
* CONTEXT:
- * spin_lock_irq(q->queue_lock)
+ * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
*
* RETURNS:
* %true if requests were added to the dispatch queue, %false otherwise.
@@ -146,7 +167,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
{
struct request_queue *q = rq->q;
struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
- bool queued = false;
+ bool queued = false, kicked;
BUG_ON(rq->flush.seq & seq);
rq->flush.seq |= seq;
@@ -167,8 +188,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
case REQ_FSEQ_DATA:
list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
- list_add(&rq->queuelist, &q->queue_head);
- queued = true;
+ queued = blk_flush_queue_rq(rq, true);
break;
case REQ_FSEQ_DONE:
@@ -181,28 +201,41 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
BUG_ON(!list_empty(&rq->queuelist));
list_del_init(&rq->flush.list);
blk_flush_restore_request(rq);
- __blk_end_request_all(rq, error);
+ if (q->mq_ops)
+ blk_mq_end_io(rq, error);
+ else
+ __blk_end_request_all(rq, error);
break;
default:
BUG();
}
- return blk_kick_flush(q) | queued;
+ kicked = blk_kick_flush(q);
+ return kicked | queued;
}
static void flush_end_io(struct request *flush_rq, int error)
{
struct request_queue *q = flush_rq->q;
- struct list_head *running = &q->flush_queue[q->flush_running_idx];
+ struct list_head *running;
bool queued = false;
struct request *rq, *n;
+ unsigned long flags = 0;
+ if (q->mq_ops) {
+ spin_lock_irqsave(&q->mq_flush_lock, flags);
+ q->flush_rq->tag = -1;
+ }
+
+ running = &q->flush_queue[q->flush_running_idx];
BUG_ON(q->flush_pending_idx == q->flush_running_idx);
/* account completion of the flush request */
q->flush_running_idx ^= 1;
- elv_completed_request(q, flush_rq);
+
+ if (!q->mq_ops)
+ elv_completed_request(q, flush_rq);
/* and push the waiting requests to the next stage */
list_for_each_entry_safe(rq, n, running, flush.list) {
@@ -223,9 +256,13 @@ static void flush_end_io(struct request *flush_rq, int error)
* directly into request_fn may confuse the driver. Always use
* kblockd.
*/
- if (queued || q->flush_queue_delayed)
+ if (queued || q->flush_queue_delayed) {
+ WARN_ON(q->mq_ops);
blk_run_queue_async(q);
+ }
q->flush_queue_delayed = 0;
+ if (q->mq_ops)
+ spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}
/**
@@ -236,7 +273,7 @@ static void flush_end_io(struct request *flush_rq, int error)
* Please read the comment at the top of this file for more info.
*
* CONTEXT:
- * spin_lock_irq(q->queue_lock)
+ * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
*
* RETURNS:
* %true if flush was issued, %false otherwise.
@@ -261,15 +298,18 @@ static bool blk_kick_flush(struct request_queue *q)
* Issue flush and toggle pending_idx. This makes pending_idx
* different from running_idx, which means flush is in flight.
*/
- blk_rq_init(q, &q->flush_rq);
- q->flush_rq.cmd_type = REQ_TYPE_FS;
- q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
- q->flush_rq.rq_disk = first_rq->rq_disk;
- q->flush_rq.end_io = flush_end_io;
-
q->flush_pending_idx ^= 1;
- list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
- return true;
+
+ blk_rq_init(q, q->flush_rq);
+ if (q->mq_ops)
+ blk_mq_clone_flush_request(q->flush_rq, first_rq);
+
+ q->flush_rq->cmd_type = REQ_TYPE_FS;
+ q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+ q->flush_rq->rq_disk = first_rq->rq_disk;
+ q->flush_rq->end_io = flush_end_io;
+
+ return blk_flush_queue_rq(q->flush_rq, false);
}
static void flush_data_end_io(struct request *rq, int error)
@@ -284,16 +324,37 @@ static void flush_data_end_io(struct request *rq, int error)
blk_run_queue_async(q);
}
+static void mq_flush_data_end_io(struct request *rq, int error)
+{
+ struct request_queue *q = rq->q;
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
+ unsigned long flags;
+
+ ctx = rq->mq_ctx;
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ /*
+ * After populating an empty queue, kick it to avoid stall. Read
+ * the comment in flush_end_io().
+ */
+ spin_lock_irqsave(&q->mq_flush_lock, flags);
+ if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
+ blk_mq_run_hw_queue(hctx, true);
+ spin_unlock_irqrestore(&q->mq_flush_lock, flags);
+}
+
/**
* blk_insert_flush - insert a new FLUSH/FUA request
* @rq: request to insert
*
* To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
+ * or from __blk_mq_run_hw_queue() when dispatching a request.
* @rq is being submitted. Analyze what needs to be done and put it on the
* right queue.
*
* CONTEXT:
- * spin_lock_irq(q->queue_lock)
+ * spin_lock_irq(q->queue_lock) in !mq case
*/
void blk_insert_flush(struct request *rq)
{
@@ -316,7 +377,10 @@ void blk_insert_flush(struct request *rq)
* complete the request.
*/
if (!policy) {
- __blk_end_bidi_request(rq, 0, 0, 0);
+ if (q->mq_ops)
+ blk_mq_end_io(rq, 0);
+ else
+ __blk_end_bidi_request(rq, 0, 0, 0);
return;
}
@@ -329,7 +393,10 @@ void blk_insert_flush(struct request *rq)
*/
if ((policy & REQ_FSEQ_DATA) &&
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
- list_add_tail(&rq->queuelist, &q->queue_head);
+ if (q->mq_ops) {
+ blk_mq_insert_request(rq, false, false, true);
+ } else
+ list_add_tail(&rq->queuelist, &q->queue_head);
return;
}
@@ -341,56 +408,17 @@ void blk_insert_flush(struct request *rq)
INIT_LIST_HEAD(&rq->flush.list);
rq->cmd_flags |= REQ_FLUSH_SEQ;
rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
- rq->end_io = flush_data_end_io;
+ if (q->mq_ops) {
+ rq->end_io = mq_flush_data_end_io;
- blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
-}
-
-/**
- * blk_abort_flushes - @q is being aborted, abort flush requests
- * @q: request_queue being aborted
- *
- * To be called from elv_abort_queue(). @q is being aborted. Prepare all
- * FLUSH/FUA requests for abortion.
- *
- * CONTEXT:
- * spin_lock_irq(q->queue_lock)
- */
-void blk_abort_flushes(struct request_queue *q)
-{
- struct request *rq, *n;
- int i;
-
- /*
- * Requests in flight for data are already owned by the dispatch
- * queue or the device driver. Just restore for normal completion.
- */
- list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
- list_del_init(&rq->flush.list);
- blk_flush_restore_request(rq);
- }
-
- /*
- * We need to give away requests on flush queues. Restore for
- * normal completion and put them on the dispatch queue.
- */
- for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
- list_for_each_entry_safe(rq, n, &q->flush_queue[i],
- flush.list) {
- list_del_init(&rq->flush.list);
- blk_flush_restore_request(rq);
- list_add_tail(&rq->queuelist, &q->queue_head);
- }
+ spin_lock_irq(&q->mq_flush_lock);
+ blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
+ spin_unlock_irq(&q->mq_flush_lock);
+ return;
}
-}
+ rq->end_io = flush_data_end_io;
-static void bio_end_flush(struct bio *bio, int err)
-{
- if (err)
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
- if (bio->bi_private)
- complete(bio->bi_private);
- bio_put(bio);
+ blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
}
/**
@@ -408,7 +436,6 @@ static void bio_end_flush(struct bio *bio, int err)
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
sector_t *error_sector)
{
- DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q;
struct bio *bio;
int ret = 0;
@@ -430,13 +457,9 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
return -ENXIO;
bio = bio_alloc(gfp_mask, 0);
- bio->bi_end_io = bio_end_flush;
bio->bi_bdev = bdev;
- bio->bi_private = &wait;
- bio_get(bio);
- submit_bio(WRITE_FLUSH, bio);
- wait_for_completion(&wait);
+ ret = submit_bio_wait(WRITE_FLUSH, bio);
/*
* The driver must store the error location in ->bi_sector, if
@@ -444,12 +467,14 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
* copied from blk_rq_pos(rq).
*/
if (error_sector)
- *error_sector = bio->bi_sector;
-
- if (!bio_flagged(bio, BIO_UPTODATE))
- ret = -EIO;
+ *error_sector = bio->bi_iter.bi_sector;
bio_put(bio);
return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
+
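For reference, a typical caller of blkdev_issue_flush() (for example a filesystem's fsync path, once data has been written back) would do something like this minimal sketch:

    /* ask the device to flush its volatile write cache */
    err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);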
+void blk_mq_init_flush(struct request_queue *q)
+{
+ spin_lock_init(&q->mq_flush_lock);
+}
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index da2a818c3a9..7fbab84399e 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -43,30 +43,32 @@ static const char *bi_unsupported_name = "unsupported";
*/
int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
{
- struct bio_vec *iv, *ivprv = NULL;
+ struct bio_vec iv, ivprv = { NULL };
unsigned int segments = 0;
unsigned int seg_size = 0;
- unsigned int i = 0;
+ struct bvec_iter iter;
+ int prev = 0;
- bio_for_each_integrity_vec(iv, bio, i) {
+ bio_for_each_integrity_vec(iv, bio, iter) {
- if (ivprv) {
- if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
+ if (prev) {
+ if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
+ if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
goto new_segment;
- if (seg_size + iv->bv_len > queue_max_segment_size(q))
+ if (seg_size + iv.bv_len > queue_max_segment_size(q))
goto new_segment;
- seg_size += iv->bv_len;
+ seg_size += iv.bv_len;
} else {
new_segment:
segments++;
- seg_size = iv->bv_len;
+ seg_size = iv.bv_len;
}
+ prev = 1;
ivprv = iv;
}
@@ -87,37 +89,39 @@ EXPORT_SYMBOL(blk_rq_count_integrity_sg);
int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
struct scatterlist *sglist)
{
- struct bio_vec *iv, *ivprv = NULL;
+ struct bio_vec iv, ivprv = { NULL };
struct scatterlist *sg = NULL;
unsigned int segments = 0;
- unsigned int i = 0;
+ struct bvec_iter iter;
+ int prev = 0;
- bio_for_each_integrity_vec(iv, bio, i) {
+ bio_for_each_integrity_vec(iv, bio, iter) {
- if (ivprv) {
- if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
+ if (prev) {
+ if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
+ if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
goto new_segment;
- if (sg->length + iv->bv_len > queue_max_segment_size(q))
+ if (sg->length + iv.bv_len > queue_max_segment_size(q))
goto new_segment;
- sg->length += iv->bv_len;
+ sg->length += iv.bv_len;
} else {
new_segment:
if (!sg)
sg = sglist;
else {
- sg->page_link &= ~0x02;
+ sg_unmark_end(sg);
sg = sg_next(sg);
}
- sg_set_page(sg, iv->bv_page, iv->bv_len, iv->bv_offset);
+ sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset);
segments++;
}
+ prev = 1;
ivprv = iv;
}
@@ -420,6 +424,8 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
} else
bi->name = bi_unsupported_name;
+ disk->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+
return 0;
}
EXPORT_SYMBOL(blk_integrity_register);
@@ -438,6 +444,8 @@ void blk_integrity_unregister(struct gendisk *disk)
if (!disk || !disk->integrity)
return;
+ disk->queue->backing_dev_info.capabilities &= ~BDI_CAP_STABLE_WRITES;
+
bi = disk->integrity;
kobject_uevent(&bi->kobj, KOBJ_REMOVE);
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index fb95dd2f889..1a27f45ec77 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -6,7 +6,6 @@
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
-#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
#include <linux/slab.h>
#include "blk.h"
@@ -69,7 +68,7 @@ static void ioc_destroy_icq(struct io_cq *icq)
* under queue_lock. If it's not pointing to @icq now, it never
* will. Hint assignment itself can race safely.
*/
- if (rcu_dereference_raw(ioc->icq_hint) == icq)
+ if (rcu_access_pointer(ioc->icq_hint) == icq)
rcu_assign_pointer(ioc->icq_hint, NULL);
ioc_exit_icq(icq);
@@ -144,7 +143,8 @@ void put_io_context(struct io_context *ioc)
if (atomic_long_dec_and_test(&ioc->refcount)) {
spin_lock_irqsave(&ioc->lock, flags);
if (!hlist_empty(&ioc->icq_list))
- schedule_work(&ioc->release_work);
+ queue_work(system_power_efficient_wq,
+ &ioc->release_work);
else
free_ioc = true;
spin_unlock_irqrestore(&ioc->lock, flags);
@@ -155,20 +155,19 @@ void put_io_context(struct io_context *ioc)
}
EXPORT_SYMBOL(put_io_context);
-/* Called by the exiting task */
-void exit_io_context(struct task_struct *task)
+/**
+ * put_io_context_active - put active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Undo get_io_context_active(). If the active reference reaches zero after
+ * this put, @ioc can never issue further I/O and the ioscheds are notified.
+ */
+void put_io_context_active(struct io_context *ioc)
{
- struct io_context *ioc;
- struct io_cq *icq;
- struct hlist_node *n;
unsigned long flags;
+ struct io_cq *icq;
- task_lock(task);
- ioc = task->io_context;
- task->io_context = NULL;
- task_unlock(task);
-
- if (!atomic_dec_and_test(&ioc->nr_tasks)) {
+ if (!atomic_dec_and_test(&ioc->active_ref)) {
put_io_context(ioc);
return;
}
@@ -180,7 +179,7 @@ void exit_io_context(struct task_struct *task)
*/
retry:
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
- hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
+ hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
if (icq->flags & ICQ_EXITED)
continue;
if (spin_trylock(icq->q->queue_lock)) {
@@ -197,6 +196,20 @@ retry:
put_io_context(ioc);
}
+/* Called by the exiting task */
+void exit_io_context(struct task_struct *task)
+{
+ struct io_context *ioc;
+
+ task_lock(task);
+ ioc = task->io_context;
+ task->io_context = NULL;
+ task_unlock(task);
+
+ atomic_dec(&ioc->nr_tasks);
+ put_io_context_active(ioc);
+}
+
/**
* ioc_clear_queue - break any ioc association with the specified queue
* @q: request_queue being cleared
@@ -218,19 +231,20 @@ void ioc_clear_queue(struct request_queue *q)
}
}
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
- int node)
+int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
struct io_context *ioc;
+ int ret;
ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
node);
if (unlikely(!ioc))
- return;
+ return -ENOMEM;
/* initialize */
atomic_long_set(&ioc->refcount, 1);
atomic_set(&ioc->nr_tasks, 1);
+ atomic_set(&ioc->active_ref, 1);
spin_lock_init(&ioc->lock);
INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
INIT_HLIST_HEAD(&ioc->icq_list);
@@ -249,7 +263,12 @@ void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
task->io_context = ioc;
else
kmem_cache_free(iocontext_cachep, ioc);
+
+ ret = task->io_context ? 0 : -EBUSY;
+
task_unlock(task);
+
+ return ret;
}
/**
@@ -281,7 +300,7 @@ struct io_context *get_task_io_context(struct task_struct *task,
return ioc;
}
task_unlock(task);
- } while (create_io_context(task, gfp_flags, node));
+ } while (!create_task_io_context(task, gfp_flags, node));
return NULL;
}
@@ -325,32 +344,29 @@ EXPORT_SYMBOL(ioc_lookup_icq);
/**
* ioc_create_icq - create and link io_cq
+ * @ioc: io_context of interest
* @q: request_queue of interest
* @gfp_mask: allocation mask
*
- * Make sure io_cq linking %current->io_context and @q exists. If either
- * io_context and/or icq don't exist, they will be created using @gfp_mask.
+ * Make sure an io_cq linking @ioc and @q exists. If the icq doesn't exist,
+ * it will be created using @gfp_mask.
*
* The caller is responsible for ensuring @ioc won't go away and @q is
* alive and will stay alive until this function returns.
*/
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+ gfp_t gfp_mask)
{
struct elevator_type *et = q->elevator->type;
- struct io_context *ioc;
struct io_cq *icq;
/* allocate stuff */
- ioc = create_io_context(current, gfp_mask, q->node);
- if (!ioc)
- return NULL;
-
icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
q->node);
if (!icq)
return NULL;
- if (radix_tree_preload(gfp_mask) < 0) {
+ if (radix_tree_maybe_preload(gfp_mask) < 0) {
kmem_cache_free(et->icq_cache, icq);
return NULL;
}
@@ -382,74 +398,6 @@ struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
return icq;
}
-void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
-{
- struct io_cq *icq;
- struct hlist_node *n;
-
- hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
- icq->flags |= flags;
-}
-
-/**
- * ioc_ioprio_changed - notify ioprio change
- * @ioc: io_context of interest
- * @ioprio: new ioprio
- *
- * @ioc's ioprio has changed to @ioprio. Set %ICQ_IOPRIO_CHANGED for all
- * icq's. iosched is responsible for checking the bit and applying it on
- * request issue path.
- */
-void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->lock, flags);
- ioc->ioprio = ioprio;
- ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
- spin_unlock_irqrestore(&ioc->lock, flags);
-}
-
-/**
- * ioc_cgroup_changed - notify cgroup change
- * @ioc: io_context of interest
- *
- * @ioc's cgroup has changed. Set %ICQ_CGROUP_CHANGED for all icq's.
- * iosched is responsible for checking the bit and applying it on request
- * issue path.
- */
-void ioc_cgroup_changed(struct io_context *ioc)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->lock, flags);
- ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
- spin_unlock_irqrestore(&ioc->lock, flags);
-}
-EXPORT_SYMBOL(ioc_cgroup_changed);
-
-/**
- * icq_get_changed - fetch and clear icq changed mask
- * @icq: icq of interest
- *
- * Fetch and clear ICQ_*_CHANGED bits from @icq. Grabs and releases
- * @icq->ioc->lock.
- */
-unsigned icq_get_changed(struct io_cq *icq)
-{
- unsigned int changed = 0;
- unsigned long flags;
-
- if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
- spin_lock_irqsave(&icq->ioc->lock, flags);
- changed = icq->flags & ICQ_CHANGED_MASK;
- icq->flags &= ~ICQ_CHANGED_MASK;
- spin_unlock_irqrestore(&icq->ioc->lock, flags);
- }
- return changed;
-}
-EXPORT_SYMBOL(icq_get_changed);
-
static int __init blk_ioc_init(void)
{
iocontext_cachep = kmem_cache_create("blkdev_ioc",
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index 58916afbbda..0736729d649 100644
--- a/block/blk-iopoll.c
+++ b/block/blk-iopoll.c
@@ -14,9 +14,6 @@
#include "blk.h"
-int blk_iopoll_enabled = 1;
-EXPORT_SYMBOL(blk_iopoll_enabled);
-
static unsigned int blk_iopoll_budget __read_mostly = 256;
static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
@@ -35,7 +32,7 @@ void blk_iopoll_sched(struct blk_iopoll *iop)
unsigned long flags;
local_irq_save(flags);
- list_add_tail(&iop->list, &__get_cpu_var(blk_cpu_iopoll));
+ list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
local_irq_restore(flags);
}
@@ -52,7 +49,7 @@ EXPORT_SYMBOL(blk_iopoll_sched);
void __blk_iopoll_complete(struct blk_iopoll *iop)
{
list_del(&iop->list);
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(__blk_iopoll_complete);
@@ -67,19 +64,19 @@ EXPORT_SYMBOL(__blk_iopoll_complete);
* iopoll handler will not be invoked again before blk_iopoll_sched_prep()
* is called.
**/
-void blk_iopoll_complete(struct blk_iopoll *iopoll)
+void blk_iopoll_complete(struct blk_iopoll *iop)
{
unsigned long flags;
local_irq_save(flags);
- __blk_iopoll_complete(iopoll);
+ __blk_iopoll_complete(iop);
local_irq_restore(flags);
}
EXPORT_SYMBOL(blk_iopoll_complete);
static void blk_iopoll_softirq(struct softirq_action *h)
{
- struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
+ struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
int rearm = 0, budget = blk_iopoll_budget;
unsigned long start_time = jiffies;
@@ -164,7 +161,7 @@ EXPORT_SYMBOL(blk_iopoll_disable);
void blk_iopoll_enable(struct blk_iopoll *iop)
{
BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state));
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(blk_iopoll_enable);
@@ -189,8 +186,8 @@ void blk_iopoll_init(struct blk_iopoll *iop, int weight, blk_iopoll_fn *poll_fn)
}
EXPORT_SYMBOL(blk_iopoll_init);
-static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int blk_iopoll_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
{
/*
* If a CPU goes away, splice its entries to the current CPU
@@ -201,7 +198,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
local_irq_disable();
list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
- &__get_cpu_var(blk_cpu_iopoll));
+ this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
local_irq_enable();
}
@@ -209,7 +206,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
+static struct notifier_block blk_iopoll_cpu_notifier = {
.notifier_call = blk_iopoll_cpu_notify,
};
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 2b461b496a7..8411be3c19d 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -43,10 +43,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q = bdev_get_queue(bdev);
int type = REQ_WRITE | REQ_DISCARD;
- unsigned int max_discard_sectors;
+ unsigned int max_discard_sectors, granularity;
+ int alignment;
struct bio_batch bb;
struct bio *bio;
int ret = 0;
+ struct blk_plug plug;
if (!q)
return -ENXIO;
@@ -54,18 +56,19 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
if (!blk_queue_discard(q))
return -EOPNOTSUPP;
+ /* Zero-sector (unknown) and one-sector granularities are the same. */
+ granularity = max(q->limits.discard_granularity >> 9, 1U);
+ alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
+
/*
* Ensure that max_discard_sectors is of the proper
- * granularity
+ * granularity, so that requests stay aligned after a split.
*/
max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+ max_discard_sectors -= max_discard_sectors % granularity;
if (unlikely(!max_discard_sectors)) {
/* Avoid infinite loop below. Being cautious never hurts. */
return -EOPNOTSUPP;
- } else if (q->limits.discard_granularity) {
- unsigned int disc_sects = q->limits.discard_granularity >> 9;
-
- max_discard_sectors &= ~(disc_sects - 1);
}
if (flags & BLKDEV_DISCARD_SECURE) {
@@ -78,34 +81,58 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
bb.flags = 1 << BIO_UPTODATE;
bb.wait = &wait;
+ blk_start_plug(&plug);
while (nr_sects) {
+ unsigned int req_sects;
+ sector_t end_sect, tmp;
+
bio = bio_alloc(gfp_mask, 1);
if (!bio) {
ret = -ENOMEM;
break;
}
- bio->bi_sector = sector;
+ req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
+
+ /*
+ * If splitting a request, and the next starting sector would be
+ * misaligned, stop the discard at the previous aligned sector.
+ */
+ end_sect = sector + req_sects;
+ tmp = end_sect;
+ if (req_sects < nr_sects &&
+ sector_div(tmp, granularity) != alignment) {
+ end_sect = end_sect - alignment;
+ sector_div(end_sect, granularity);
+ end_sect = end_sect * granularity + alignment;
+ req_sects = end_sect - sector;
+ }
+
+ bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_batch_end_io;
bio->bi_bdev = bdev;
bio->bi_private = &bb;
- if (nr_sects > max_discard_sectors) {
- bio->bi_size = max_discard_sectors << 9;
- nr_sects -= max_discard_sectors;
- sector += max_discard_sectors;
- } else {
- bio->bi_size = nr_sects << 9;
- nr_sects = 0;
- }
+ bio->bi_iter.bi_size = req_sects << 9;
+ nr_sects -= req_sects;
+ sector = end_sect;
atomic_inc(&bb.done);
submit_bio(type, bio);
+
+ /*
+ * We can loop for a long time in here, if someone does
+ * full device discards (like mkfs). Be nice and allow
+ * us to schedule out to avoid softlocking if preempt
+ * is disabled.
+ */
+ cond_resched();
}
+ blk_finish_plug(&plug);
/* Wait for bios in-flight */
if (!atomic_dec_and_test(&bb.done))
- wait_for_completion(&wait);
+ wait_for_completion_io(&wait);
if (!test_bit(BIO_UPTODATE, &bb.flags))
ret = -EIO;
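A worked example of the alignment logic added above (all numbers purely illustrative): with granularity = 8 sectors and alignment = 2, a split whose end_sect would land at sector 27 fails the check because 27 % 8 == 3 != 2; the code therefore rounds it down to ((27 - 2) / 8) * 8 + 2 = 26, so the current bio ends at sector 26 and the next chunk starts there, which again satisfies 26 % 8 == 2.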
@@ -115,6 +142,80 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
EXPORT_SYMBOL(blkdev_issue_discard);
/**
+ * blkdev_issue_write_same - queue a write same operation
+ * @bdev: target blockdev
+ * @sector: start sector
+ * @nr_sects: number of sectors to write
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ * @page: page containing data to write
+ *
+ * Description:
+ * Issue a write same request for the sectors in question.
+ */
+int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask,
+ struct page *page)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct request_queue *q = bdev_get_queue(bdev);
+ unsigned int max_write_same_sectors;
+ struct bio_batch bb;
+ struct bio *bio;
+ int ret = 0;
+
+ if (!q)
+ return -ENXIO;
+
+ max_write_same_sectors = q->limits.max_write_same_sectors;
+
+ if (max_write_same_sectors == 0)
+ return -EOPNOTSUPP;
+
+ atomic_set(&bb.done, 1);
+ bb.flags = 1 << BIO_UPTODATE;
+ bb.wait = &wait;
+
+ while (nr_sects) {
+ bio = bio_alloc(gfp_mask, 1);
+ if (!bio) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_end_io = bio_batch_end_io;
+ bio->bi_bdev = bdev;
+ bio->bi_private = &bb;
+ bio->bi_vcnt = 1;
+ bio->bi_io_vec->bv_page = page;
+ bio->bi_io_vec->bv_offset = 0;
+ bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
+
+ if (nr_sects > max_write_same_sectors) {
+ bio->bi_iter.bi_size = max_write_same_sectors << 9;
+ nr_sects -= max_write_same_sectors;
+ sector += max_write_same_sectors;
+ } else {
+ bio->bi_iter.bi_size = nr_sects << 9;
+ nr_sects = 0;
+ }
+
+ atomic_inc(&bb.done);
+ submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
+ }
+
+ /* Wait for bios in-flight */
+ if (!atomic_dec_and_test(&bb.done))
+ wait_for_completion_io(&wait);
+
+ if (!test_bit(BIO_UPTODATE, &bb.flags))
+ ret = -ENOTSUPP;
+
+ return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_write_same);
+
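A minimal caller sketch for the new helper (illustrative only; bdev, start and nr_sects are hypothetical):

    /* repeat the contents of one page across the whole range using the
     * device's WRITE SAME support; ZERO_PAGE(0) gives a zero-fill */
    err = blkdev_issue_write_same(bdev, start, nr_sects, GFP_KERNEL,
                                  ZERO_PAGE(0));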
+/**
 * blkdev_issue_zeroout - generate a number of zero-filled write bios
* @bdev: blockdev to issue
* @sector: start sector
@@ -125,8 +226,8 @@ EXPORT_SYMBOL(blkdev_issue_discard);
 * Generate and issue a number of bios with zero-filled pages.
*/
-int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask)
+static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask)
{
int ret;
struct bio *bio;
@@ -147,7 +248,7 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
break;
}
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev;
bio->bi_end_io = bio_batch_end_io;
bio->bi_private = &bb;
@@ -167,7 +268,7 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
/* Wait for bios in-flight */
if (!atomic_dec_and_test(&bb.done))
- wait_for_completion(&wait);
+ wait_for_completion_io(&wait);
if (!test_bit(BIO_UPTODATE, &bb.flags))
/* One of bios in the batch was completed with error.*/
@@ -175,4 +276,32 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
return ret;
}
+
+/**
+ * blkdev_issue_zeroout - zero-fill a block range
+ * @bdev: blockdev to write
+ * @sector: start sector
+ * @nr_sects: number of sectors to write
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ *
+ * Description:
+ * Generate and issue a number of bios with zero-filled pages.
+ */
+
+int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask)
+{
+ if (bdev_write_same(bdev)) {
+ unsigned char bdn[BDEVNAME_SIZE];
+
+ if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
+ ZERO_PAGE(0)))
+ return 0;
+
+ bdevname(bdev, bdn);
+ pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
+ }
+
+ return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
+}
EXPORT_SYMBOL(blkdev_issue_zeroout);
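Callers that just want a zeroed range keep using the same entry point as before; a one-line sketch (bdev, sector and nr_sects are hypothetical):

    /* tries WRITE SAME first, falls back to zero-filled write bios */
    err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_NOFS);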
diff --git a/block/blk-map.c b/block/blk-map.c
index 623e1cd4cff..f890d4345b0 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -20,7 +20,7 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
rq->biotail->bi_next = bio;
rq->biotail = bio;
- rq->__data_len += bio->bi_size;
+ rq->__data_len += bio->bi_iter.bi_size;
}
return 0;
}
@@ -76,7 +76,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
ret = blk_rq_append_bio(q, rq, bio);
if (!ret)
- return bio->bi_size;
+ return bio->bi_iter.bi_size;
 /* if it was bounced we must call the end io function */
bio_endio(bio, 0);
@@ -155,7 +155,6 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
if (!bio_flagged(bio, BIO_USER_MAPPED))
rq->cmd_flags |= REQ_COPY_USER;
- rq->buffer = NULL;
return 0;
unmap_rq:
blk_rq_unmap_user(bio);
@@ -188,7 +187,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
* unmapping.
*/
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
- struct rq_map_data *map_data, struct sg_iovec *iov,
+ struct rq_map_data *map_data, const struct sg_iovec *iov,
int iov_count, unsigned int len, gfp_t gfp_mask)
{
struct bio *bio;
@@ -220,7 +219,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
if (IS_ERR(bio))
return PTR_ERR(bio);
- if (bio->bi_size != len) {
+ if (bio->bi_iter.bi_size != len) {
/*
* Grab an extra reference to this bio, as bio_unmap_user()
* expects to be able to drop it twice as it happens on the
@@ -238,7 +237,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
blk_queue_bounce(q, &bio);
bio_get(bio);
blk_rq_bio_prep(q, rq, bio);
- rq->buffer = NULL;
return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
@@ -285,7 +283,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
*
* Description:
* Data will be mapped directly if possible. Otherwise a bounce
- * buffer is used. Can be called multple times to append multple
+ * buffer is used. Can be called multiple times to append multiple
* buffers.
*/
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
@@ -325,7 +323,6 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
}
blk_queue_bounce(q, &rq->bio);
- rq->buffer = NULL;
return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 160035f5488..54535831f1e 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -12,38 +12,56 @@
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
struct bio *bio)
{
- struct bio_vec *bv, *bvprv = NULL;
- int cluster, i, high, highprv = 1;
+ struct bio_vec bv, bvprv = { NULL };
+ int cluster, high, highprv = 1, no_sg_merge;
unsigned int seg_size, nr_phys_segs;
struct bio *fbio, *bbio;
+ struct bvec_iter iter;
if (!bio)
return 0;
+ /*
+ * This should probably be returning 0, but blk_add_request_payload()
+ * (Christoph!!!!)
+ */
+ if (bio->bi_rw & REQ_DISCARD)
+ return 1;
+
+ if (bio->bi_rw & REQ_WRITE_SAME)
+ return 1;
+
fbio = bio;
cluster = blk_queue_cluster(q);
seg_size = 0;
nr_phys_segs = 0;
+ no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
+ high = 0;
for_each_bio(bio) {
- bio_for_each_segment(bv, bio, i) {
+ bio_for_each_segment(bv, bio, iter) {
/*
- * the trick here is making sure that a high page is
- * never considered part of another segment, since that
- * might change with the bounce page.
+ * If SG merging is disabled, each bio vector is
+ * a segment
*/
- high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
- if (high || highprv)
+ if (no_sg_merge)
goto new_segment;
- if (cluster) {
- if (seg_size + bv->bv_len
+
+ /*
+ * the trick here is making sure that a high page is
+ * never considered part of another segment, since
+ * that might change with the bounce page.
+ */
+ high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
+ if (!high && !highprv && cluster) {
+ if (seg_size + bv.bv_len
> queue_max_segment_size(q))
goto new_segment;
- if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+ if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+ if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
goto new_segment;
- seg_size += bv->bv_len;
+ seg_size += bv.bv_len;
bvprv = bv;
continue;
}
@@ -54,7 +72,7 @@ new_segment:
nr_phys_segs++;
bvprv = bv;
- seg_size = bv->bv_len;
+ seg_size = bv.bv_len;
highprv = high;
}
bbio = bio;
@@ -75,11 +93,16 @@ void blk_recalc_rq_segments(struct request *rq)
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
- struct bio *nxt = bio->bi_next;
+ if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags))
+ bio->bi_phys_segments = bio->bi_vcnt;
+ else {
+ struct bio *nxt = bio->bi_next;
+
+ bio->bi_next = NULL;
+ bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
+ bio->bi_next = nxt;
+ }
- bio->bi_next = NULL;
- bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
- bio->bi_next = nxt;
bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);
@@ -87,6 +110,9 @@ EXPORT_SYMBOL(blk_recount_segments);
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
{
+ struct bio_vec end_bv = { NULL }, nxt_bv;
+ struct bvec_iter iter;
+
if (!blk_queue_cluster(q))
return 0;
@@ -97,77 +123,122 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
if (!bio_has_data(bio))
return 1;
- if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+ bio_for_each_segment(end_bv, bio, iter)
+ if (end_bv.bv_len == iter.bi_size)
+ break;
+
+ nxt_bv = bio_iovec(nxt);
+
+ if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
return 0;
/*
* bio and nxt are contiguous in memory; check if the queue allows
* these two to be merged into one
*/
- if (BIO_SEG_BOUNDARY(q, bio, nxt))
+ if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
return 1;
return 0;
}
-/*
- * map a request to scatterlist, return number of sg entries setup. Caller
- * must make sure sg can hold rq->nr_phys_segments entries
- */
-int blk_rq_map_sg(struct request_queue *q, struct request *rq,
- struct scatterlist *sglist)
+static inline void
+__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
+ struct scatterlist *sglist, struct bio_vec *bvprv,
+ struct scatterlist **sg, int *nsegs, int *cluster)
+{
+
+ int nbytes = bvec->bv_len;
+
+ if (*sg && *cluster) {
+ if ((*sg)->length + nbytes > queue_max_segment_size(q))
+ goto new_segment;
+
+ if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
+ goto new_segment;
+ if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
+ goto new_segment;
+
+ (*sg)->length += nbytes;
+ } else {
+new_segment:
+ if (!*sg)
+ *sg = sglist;
+ else {
+ /*
+ * If the driver previously mapped a shorter
+ * list, we could see a termination bit
+ * prematurely unless it fully inits the sg
+ * table on each mapping. We KNOW that there
+ * must be more entries here or the driver
+ * would be buggy, so force clear the
+ * termination bit to avoid doing a full
+ * sg_init_table() in drivers for each command.
+ */
+ sg_unmark_end(*sg);
+ *sg = sg_next(*sg);
+ }
+
+ sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
+ (*nsegs)++;
+ }
+ *bvprv = *bvec;
+}
+
+static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
+ struct scatterlist *sglist,
+ struct scatterlist **sg)
{
- struct bio_vec *bvec, *bvprv;
- struct req_iterator iter;
- struct scatterlist *sg;
+ struct bio_vec bvec, bvprv = { NULL };
+ struct bvec_iter iter;
int nsegs, cluster;
nsegs = 0;
cluster = blk_queue_cluster(q);
- /*
- * for each bio in rq
- */
- bvprv = NULL;
- sg = NULL;
- rq_for_each_segment(bvec, rq, iter) {
- int nbytes = bvec->bv_len;
+ if (bio->bi_rw & REQ_DISCARD) {
+ /*
+ * This is a hack - drivers should be neither modifying the
+ * biovec, nor relying on bi_vcnt - but because of
+ * blk_add_request_payload(), a discard bio may or may not have
+ * a payload we need to set up here (thank you Christoph) and
+ * bi_vcnt is really the only way of telling if we need to.
+ */
- if (bvprv && cluster) {
- if (sg->length + nbytes > queue_max_segment_size(q))
- goto new_segment;
+ if (bio->bi_vcnt)
+ goto single_segment;
- if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
- goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
- goto new_segment;
+ return 0;
+ }
- sg->length += nbytes;
- } else {
-new_segment:
- if (!sg)
- sg = sglist;
- else {
- /*
- * If the driver previously mapped a shorter
- * list, we could see a termination bit
- * prematurely unless it fully inits the sg
- * table on each mapping. We KNOW that there
- * must be more entries here or the driver
- * would be buggy, so force clear the
- * termination bit to avoid doing a full
- * sg_init_table() in drivers for each command.
- */
- sg->page_link &= ~0x02;
- sg = sg_next(sg);
- }
+ if (bio->bi_rw & REQ_WRITE_SAME) {
+single_segment:
+ *sg = sglist;
+ bvec = bio_iovec(bio);
+ sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
+ return 1;
+ }
- sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
- nsegs++;
- }
- bvprv = bvec;
- } /* segments in rq */
+ for_each_bio(bio)
+ bio_for_each_segment(bvec, bio, iter)
+ __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
+ &nsegs, &cluster);
+
+ return nsegs;
+}
+
+/*
+ * map a request to scatterlist, return number of sg entries setup. Caller
+ * must make sure sg can hold rq->nr_phys_segments entries
+ */
+int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ struct scatterlist *sglist)
+{
+ struct scatterlist *sg = NULL;
+ int nsegs = 0;
+ if (rq->bio)
+ nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
(blk_rq_bytes(rq) & q->dma_pad_mask)) {
@@ -199,6 +270,35 @@ new_segment:
}
EXPORT_SYMBOL(blk_rq_map_sg);
+/**
+ * blk_bio_map_sg - map a bio to a scatterlist
+ * @q: request_queue in question
+ * @bio: bio being mapped
+ * @sglist: scatterlist being mapped
+ *
+ * Note:
+ * Caller must make sure sg can hold bio->bi_phys_segments entries
+ *
+ * Will return the number of sg entries set up
+ */
+int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
+ struct scatterlist *sglist)
+{
+ struct scatterlist *sg = NULL;
+ int nsegs;
+ struct bio *next = bio->bi_next;
+ bio->bi_next = NULL;
+
+ nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
+ bio->bi_next = next;
+ if (sg)
+ sg_mark_end(sg);
+
+ BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
+ return nsegs;
+}
+EXPORT_SYMBOL(blk_bio_map_sg);
+
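A minimal sketch of a bio-based driver using the new helper (illustrative only; MY_MAX_SEGMENTS and the DMA hand-off are hypothetical):

    struct scatterlist sgl[MY_MAX_SEGMENTS];
    int nents;

    sg_init_table(sgl, MY_MAX_SEGMENTS);
    nents = blk_bio_map_sg(q, bio, sgl);   /* also marks the last entry */
    /* hand sgl[0..nents) to the DMA engine */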
static inline int ll_new_hw_segment(struct request_queue *q,
struct request *req,
struct bio *bio)
@@ -228,14 +328,8 @@ no_merge:
int ll_back_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio)
{
- unsigned short max_sectors;
-
- if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
- max_sectors = queue_max_hw_sectors(q);
- else
- max_sectors = queue_max_sectors(q);
-
- if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
+ if (blk_rq_sectors(req) + bio_sectors(bio) >
+ blk_rq_get_max_sectors(req)) {
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
@@ -252,15 +346,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
int ll_front_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio)
{
- unsigned short max_sectors;
-
- if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
- max_sectors = queue_max_hw_sectors(q);
- else
- max_sectors = queue_max_sectors(q);
-
-
- if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
+ if (blk_rq_sectors(req) + bio_sectors(bio) >
+ blk_rq_get_max_sectors(req)) {
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
@@ -274,6 +361,17 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
return ll_new_hw_segment(q, req, bio);
}
+/*
+ * blk-mq uses req->special to carry normal driver per-request payload, it
+ * does not indicate a prepared command that we cannot merge with.
+ */
+static bool req_no_special_merge(struct request *req)
+{
+ struct request_queue *q = req->q;
+
+ return !q->mq_ops && req->special;
+}
+
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
struct request *next)
{
@@ -285,13 +383,14 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
* First check if the either of the requests are re-queued
* requests. Can't merge them if they are.
*/
- if (req->special || next->special)
+ if (req_no_special_merge(req) || req_no_special_merge(next))
return 0;
/*
* Will it become too large?
*/
- if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
+ if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
+ blk_rq_get_max_sectors(req))
return 0;
total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -370,16 +469,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
if (!rq_mergeable(req) || !rq_mergeable(next))
return 0;
- /*
- * Don't merge file system requests and discard requests
- */
- if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
- return 0;
-
- /*
- * Don't merge discard requests and secure discard requests
- */
- if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
+ if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
return 0;
/*
@@ -390,7 +480,11 @@ static int attempt_merge(struct request_queue *q, struct request *req,
if (rq_data_dir(req) != rq_data_dir(next)
|| req->rq_disk != next->rq_disk
- || next->special)
+ || req_no_special_merge(next))
+ return 0;
+
+ if (req->cmd_flags & REQ_WRITE_SAME &&
+ !blk_write_same_mergeable(req->bio, next->bio))
return 0;
/*
@@ -474,15 +568,12 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
- if (!rq_mergeable(rq))
- return false;
+ struct request_queue *q = rq->q;
- /* don't merge file system requests and discard requests */
- if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
+ if (!rq_mergeable(rq) || !bio_mergeable(bio))
return false;
- /* don't merge discard requests and secure discard requests */
- if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
+ if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
return false;
/* different data direction or already started, don't merge */
@@ -490,21 +581,34 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
return false;
/* must be same device and not a special request */
- if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+ if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
return false;
/* only merge integrity protected bio into ditto rq */
if (bio_integrity(bio) != blk_integrity_rq(rq))
return false;
+ /* must be using the same buffer */
+ if (rq->cmd_flags & REQ_WRITE_SAME &&
+ !blk_write_same_mergeable(rq->bio, bio))
+ return false;
+
+ if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
+ struct bio_vec *bprev;
+
+ bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];
+ if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
+ return false;
+ }
+
return true;
}
int blk_try_merge(struct request *rq, struct bio *bio)
{
- if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+ if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
return ELEVATOR_BACK_MERGE;
- else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+ else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
return ELEVATOR_FRONT_MERGE;
return ELEVATOR_NO_MERGE;
}
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
new file mode 100644
index 00000000000..bb3ed488f7b
--- /dev/null
+++ b/block/blk-mq-cpu.c
@@ -0,0 +1,67 @@
+/*
+ * CPU notifier helper code for blk-mq
+ *
+ * Copyright (C) 2013-2014 Jens Axboe
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/list.h>
+#include <linux/llist.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+
+#include <linux/blk-mq.h>
+#include "blk-mq.h"
+
+static LIST_HEAD(blk_mq_cpu_notify_list);
+static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
+
+static int blk_mq_main_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long) hcpu;
+ struct blk_mq_cpu_notifier *notify;
+ int ret = NOTIFY_OK;
+
+ raw_spin_lock(&blk_mq_cpu_notify_lock);
+
+ list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) {
+ ret = notify->notify(notify->data, action, cpu);
+ if (ret != NOTIFY_OK)
+ break;
+ }
+
+ raw_spin_unlock(&blk_mq_cpu_notify_lock);
+ return ret;
+}
+
+void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
+{
+ BUG_ON(!notifier->notify);
+
+ raw_spin_lock(&blk_mq_cpu_notify_lock);
+ list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
+ raw_spin_unlock(&blk_mq_cpu_notify_lock);
+}
+
+void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
+{
+ raw_spin_lock(&blk_mq_cpu_notify_lock);
+ list_del(&notifier->list);
+ raw_spin_unlock(&blk_mq_cpu_notify_lock);
+}
+
+void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
+ int (*fn)(void *, unsigned long, unsigned int),
+ void *data)
+{
+ notifier->notify = fn;
+ notifier->data = data;
+}
+
+void __init blk_mq_cpu_init(void)
+{
+ hotcpu_notifier(blk_mq_main_cpu_notify, 0);
+}
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
new file mode 100644
index 00000000000..1065d7c65fa
--- /dev/null
+++ b/block/blk-mq-cpumap.c
@@ -0,0 +1,119 @@
+/*
+ * CPU <-> hardware queue mapping helpers
+ *
+ * Copyright (C) 2013-2014 Jens Axboe
+ */
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+
+#include <linux/blk-mq.h>
+#include "blk.h"
+#include "blk-mq.h"
+
+static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
+ const int cpu)
+{
+ return cpu / ((nr_cpus + nr_queues - 1) / nr_queues);
+}
+
+static int get_first_sibling(unsigned int cpu)
+{
+ unsigned int ret;
+
+ ret = cpumask_first(topology_thread_cpumask(cpu));
+ if (ret < nr_cpu_ids)
+ return ret;
+
+ return cpu;
+}
+
+int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
+{
+ unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
+ cpumask_var_t cpus;
+
+ if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
+ return 1;
+
+ cpumask_clear(cpus);
+ nr_cpus = nr_uniq_cpus = 0;
+ for_each_online_cpu(i) {
+ nr_cpus++;
+ first_sibling = get_first_sibling(i);
+ if (!cpumask_test_cpu(first_sibling, cpus))
+ nr_uniq_cpus++;
+ cpumask_set_cpu(i, cpus);
+ }
+
+ queue = 0;
+ for_each_possible_cpu(i) {
+ if (!cpu_online(i)) {
+ map[i] = 0;
+ continue;
+ }
+
+ /*
+ * Easy case - we have at least as many hardware queues as CPUs, or
+ * there are no thread siblings to take into account. Do a
+ * 1:1 mapping if enough queues, or a sequential mapping if fewer.
+ */
+ if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) {
+ map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue);
+ queue++;
+ continue;
+ }
+
+ /*
+ * Fewer than nr_cpus queues, and we have some number of
+ * threads per core. Map sibling threads to the same
+ * queue.
+ */
+ first_sibling = get_first_sibling(i);
+ if (first_sibling == i) {
+ map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues,
+ queue);
+ queue++;
+ } else
+ map[i] = map[first_sibling];
+ }
+
+ free_cpumask_var(cpus);
+ return 0;
+}
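
To make the grouping rule above concrete, here is a minimal userspace sketch (not kernel code) of the same arithmetic: with nr_queues >= nr_cpus the assignment is effectively 1:1, otherwise CPUs are grouped sequentially by cpu / ceil(nr_cpus / nr_queues). The example counts are illustrative only.

#include <stdio.h>

/* Same arithmetic as cpu_to_queue_index(): group CPUs sequentially. */
static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
			      unsigned int cpu)
{
	return cpu / ((nr_cpus + nr_queues - 1) / nr_queues);
}

int main(void)
{
	unsigned int nr_cpus = 8, nr_queues = 3, cpu;

	/* Prints queues 0 0 0 1 1 1 2 2: eight CPUs spread over three queues. */
	for (cpu = 0; cpu < nr_cpus; cpu++)
		printf("cpu %u -> queue %d\n", cpu,
		       cpu_to_queue_index(nr_cpus, nr_queues, cpu));
	return 0;
}
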
+
+unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
+{
+ unsigned int *map;
+
+ /* If cpus are offline, map them to first hctx */
+ map = kzalloc_node(sizeof(*map) * num_possible_cpus(), GFP_KERNEL,
+ set->numa_node);
+ if (!map)
+ return NULL;
+
+ if (!blk_mq_update_queue_map(map, set->nr_hw_queues))
+ return map;
+
+ kfree(map);
+ return NULL;
+}
+
+/*
+ * We have no quick way of doing reverse lookups. This is only used at
+ * queue init time, so runtime isn't important.
+ */
+int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ if (index == mq_map[i])
+ return cpu_to_node(i);
+ }
+
+ return NUMA_NO_NODE;
+}
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
new file mode 100644
index 00000000000..ed521786755
--- /dev/null
+++ b/block/blk-mq-sysfs.c
@@ -0,0 +1,456 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/backing-dev.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/smp.h>
+
+#include <linux/blk-mq.h>
+#include "blk-mq.h"
+#include "blk-mq-tag.h"
+
+static void blk_mq_sysfs_release(struct kobject *kobj)
+{
+}
+
+struct blk_mq_ctx_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct blk_mq_ctx *, char *);
+ ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
+};
+
+struct blk_mq_hw_ctx_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
+ ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
+};
+
+static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
+ char *page)
+{
+ struct blk_mq_ctx_sysfs_entry *entry;
+ struct blk_mq_ctx *ctx;
+ struct request_queue *q;
+ ssize_t res;
+
+ entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
+ ctx = container_of(kobj, struct blk_mq_ctx, kobj);
+ q = ctx->queue;
+
+ if (!entry->show)
+ return -EIO;
+
+ res = -ENOENT;
+ mutex_lock(&q->sysfs_lock);
+ if (!blk_queue_dying(q))
+ res = entry->show(ctx, page);
+ mutex_unlock(&q->sysfs_lock);
+ return res;
+}
+
+static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
+ const char *page, size_t length)
+{
+ struct blk_mq_ctx_sysfs_entry *entry;
+ struct blk_mq_ctx *ctx;
+ struct request_queue *q;
+ ssize_t res;
+
+ entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
+ ctx = container_of(kobj, struct blk_mq_ctx, kobj);
+ q = ctx->queue;
+
+ if (!entry->store)
+ return -EIO;
+
+ res = -ENOENT;
+ mutex_lock(&q->sysfs_lock);
+ if (!blk_queue_dying(q))
+ res = entry->store(ctx, page, length);
+ mutex_unlock(&q->sysfs_lock);
+ return res;
+}
+
+static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
+ struct attribute *attr, char *page)
+{
+ struct blk_mq_hw_ctx_sysfs_entry *entry;
+ struct blk_mq_hw_ctx *hctx;
+ struct request_queue *q;
+ ssize_t res;
+
+ entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
+ hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
+ q = hctx->queue;
+
+ if (!entry->show)
+ return -EIO;
+
+ res = -ENOENT;
+ mutex_lock(&q->sysfs_lock);
+ if (!blk_queue_dying(q))
+ res = entry->show(hctx, page);
+ mutex_unlock(&q->sysfs_lock);
+ return res;
+}
+
+static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
+ struct attribute *attr, const char *page,
+ size_t length)
+{
+ struct blk_mq_hw_ctx_sysfs_entry *entry;
+ struct blk_mq_hw_ctx *hctx;
+ struct request_queue *q;
+ ssize_t res;
+
+ entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
+ hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
+ q = hctx->queue;
+
+ if (!entry->store)
+ return -EIO;
+
+ res = -ENOENT;
+ mutex_lock(&q->sysfs_lock);
+ if (!blk_queue_dying(q))
+ res = entry->store(hctx, page, length);
+ mutex_unlock(&q->sysfs_lock);
+ return res;
+}
+
+static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
+{
+ return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
+ ctx->rq_dispatched[0]);
+}
+
+static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
+{
+ return sprintf(page, "%lu\n", ctx->rq_merged);
+}
+
+static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
+{
+ return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
+ ctx->rq_completed[0]);
+}
+
+static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
+{
+ char *start_page = page;
+ struct request *rq;
+
+ page += sprintf(page, "%s:\n", msg);
+
+ list_for_each_entry(rq, list, queuelist)
+ page += sprintf(page, "\t%p\n", rq);
+
+ return page - start_page;
+}
+
+static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
+{
+ ssize_t ret;
+
+ spin_lock(&ctx->lock);
+ ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
+ spin_unlock(&ctx->lock);
+
+ return ret;
+}
+
+static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
+ char *page)
+{
+ return sprintf(page, "%lu\n", hctx->queued);
+}
+
+static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
+{
+ return sprintf(page, "%lu\n", hctx->run);
+}
+
+static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
+ char *page)
+{
+ char *start_page = page;
+ int i;
+
+ page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);
+
+ for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
+ unsigned long d = 1U << (i - 1);
+
+ page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
+ }
+
+ return page - start_page;
+}
+
+static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
+ char *page)
+{
+ ssize_t ret;
+
+ spin_lock(&hctx->lock);
+ ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
+ spin_unlock(&hctx->lock);
+
+ return ret;
+}
+
+static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
+{
+ return blk_mq_tag_sysfs_show(hctx->tags, page);
+}
+
+static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
+{
+ return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
+}
+
+static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
+{
+ unsigned int i, first = 1;
+ ssize_t ret = 0;
+
+ blk_mq_disable_hotplug();
+
+ for_each_cpu(i, hctx->cpumask) {
+ if (first)
+ ret += sprintf(ret + page, "%u", i);
+ else
+ ret += sprintf(ret + page, ", %u", i);
+
+ first = 0;
+ }
+
+ blk_mq_enable_hotplug();
+
+ ret += sprintf(ret + page, "\n");
+ return ret;
+}
+
+static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
+ .attr = {.name = "dispatched", .mode = S_IRUGO },
+ .show = blk_mq_sysfs_dispatched_show,
+};
+static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
+ .attr = {.name = "merged", .mode = S_IRUGO },
+ .show = blk_mq_sysfs_merged_show,
+};
+static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
+ .attr = {.name = "completed", .mode = S_IRUGO },
+ .show = blk_mq_sysfs_completed_show,
+};
+static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
+ .attr = {.name = "rq_list", .mode = S_IRUGO },
+ .show = blk_mq_sysfs_rq_list_show,
+};
+
+static struct attribute *default_ctx_attrs[] = {
+ &blk_mq_sysfs_dispatched.attr,
+ &blk_mq_sysfs_merged.attr,
+ &blk_mq_sysfs_completed.attr,
+ &blk_mq_sysfs_rq_list.attr,
+ NULL,
+};
+
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
+ .attr = {.name = "queued", .mode = S_IRUGO },
+ .show = blk_mq_hw_sysfs_queued_show,
+};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
+ .attr = {.name = "run", .mode = S_IRUGO },
+ .show = blk_mq_hw_sysfs_run_show,
+};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
+ .attr = {.name = "dispatched", .mode = S_IRUGO },
+ .show = blk_mq_hw_sysfs_dispatched_show,
+};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
+ .attr = {.name = "active", .mode = S_IRUGO },
+ .show = blk_mq_hw_sysfs_active_show,
+};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
+ .attr = {.name = "pending", .mode = S_IRUGO },
+ .show = blk_mq_hw_sysfs_rq_list_show,
+};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
+ .attr = {.name = "tags", .mode = S_IRUGO },
+ .show = blk_mq_hw_sysfs_tags_show,
+};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
+ .attr = {.name = "cpu_list", .mode = S_IRUGO },
+ .show = blk_mq_hw_sysfs_cpus_show,
+};
+
+static struct attribute *default_hw_ctx_attrs[] = {
+ &blk_mq_hw_sysfs_queued.attr,
+ &blk_mq_hw_sysfs_run.attr,
+ &blk_mq_hw_sysfs_dispatched.attr,
+ &blk_mq_hw_sysfs_pending.attr,
+ &blk_mq_hw_sysfs_tags.attr,
+ &blk_mq_hw_sysfs_cpus.attr,
+ &blk_mq_hw_sysfs_active.attr,
+ NULL,
+};
+
+static const struct sysfs_ops blk_mq_sysfs_ops = {
+ .show = blk_mq_sysfs_show,
+ .store = blk_mq_sysfs_store,
+};
+
+static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
+ .show = blk_mq_hw_sysfs_show,
+ .store = blk_mq_hw_sysfs_store,
+};
+
+static struct kobj_type blk_mq_ktype = {
+ .sysfs_ops = &blk_mq_sysfs_ops,
+ .release = blk_mq_sysfs_release,
+};
+
+static struct kobj_type blk_mq_ctx_ktype = {
+ .sysfs_ops = &blk_mq_sysfs_ops,
+ .default_attrs = default_ctx_attrs,
+ .release = blk_mq_sysfs_release,
+};
+
+static struct kobj_type blk_mq_hw_ktype = {
+ .sysfs_ops = &blk_mq_hw_sysfs_ops,
+ .default_attrs = default_hw_ctx_attrs,
+ .release = blk_mq_sysfs_release,
+};
+
+static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
+{
+ struct blk_mq_ctx *ctx;
+ int i;
+
+ if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
+ return;
+
+ hctx_for_each_ctx(hctx, ctx, i)
+ kobject_del(&ctx->kobj);
+
+ kobject_del(&hctx->kobj);
+}
+
+static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
+{
+ struct request_queue *q = hctx->queue;
+ struct blk_mq_ctx *ctx;
+ int i, ret;
+
+ if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
+ return 0;
+
+ ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
+ if (ret)
+ return ret;
+
+ hctx_for_each_ctx(hctx, ctx, i) {
+ ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+void blk_mq_unregister_disk(struct gendisk *disk)
+{
+ struct request_queue *q = disk->queue;
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
+ int i, j;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ blk_mq_unregister_hctx(hctx);
+
+ hctx_for_each_ctx(hctx, ctx, j)
+ kobject_put(&ctx->kobj);
+
+ kobject_put(&hctx->kobj);
+ }
+
+ kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
+ kobject_del(&q->mq_kobj);
+ kobject_put(&q->mq_kobj);
+
+ kobject_put(&disk_to_dev(disk)->kobj);
+}
+
+static void blk_mq_sysfs_init(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
+ int i, j;
+
+ kobject_init(&q->mq_kobj, &blk_mq_ktype);
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
+
+ hctx_for_each_ctx(hctx, ctx, j)
+ kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
+ }
+}
+
+int blk_mq_register_disk(struct gendisk *disk)
+{
+ struct device *dev = disk_to_dev(disk);
+ struct request_queue *q = disk->queue;
+ struct blk_mq_hw_ctx *hctx;
+ int ret, i;
+
+ blk_mq_sysfs_init(q);
+
+ ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
+ if (ret < 0)
+ return ret;
+
+ kobject_uevent(&q->mq_kobj, KOBJ_ADD);
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ hctx->flags |= BLK_MQ_F_SYSFS_UP;
+ ret = blk_mq_register_hctx(hctx);
+ if (ret)
+ break;
+ }
+
+ if (ret) {
+ blk_mq_unregister_disk(disk);
+ return ret;
+ }
+
+ return 0;
+}
+
+void blk_mq_sysfs_unregister(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_unregister_hctx(hctx);
+}
+
+int blk_mq_sysfs_register(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i, ret = 0;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ ret = blk_mq_register_hctx(hctx);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
new file mode 100644
index 00000000000..c1b92426c95
--- /dev/null
+++ b/block/blk-mq-tag.c
@@ -0,0 +1,618 @@
+/*
+ * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
+ * over multiple cachelines to avoid ping-pong between multiple submitters
+ * or submitter and completer. Uses rolling wakeups to avoid falling off
+ * the scaling cliff when we run out of tags and have to start putting
+ * submitters to sleep.
+ *
+ * Uses active queue tracking to support fairer distribution of tags
+ * between multiple submitters when a shared tag map is used.
+ *
+ * Copyright (C) 2013-2014 Jens Axboe
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+
+#include <linux/blk-mq.h>
+#include "blk.h"
+#include "blk-mq.h"
+#include "blk-mq-tag.h"
+
+static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
+{
+ int i;
+
+ for (i = 0; i < bt->map_nr; i++) {
+ struct blk_align_bitmap *bm = &bt->map[i];
+ int ret;
+
+ ret = find_first_zero_bit(&bm->word, bm->depth);
+ if (ret < bm->depth)
+ return true;
+ }
+
+ return false;
+}
+
+bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
+{
+ if (!tags)
+ return true;
+
+ return bt_has_free_tags(&tags->bitmap_tags);
+}
+
+static inline int bt_index_inc(int index)
+{
+ return (index + 1) & (BT_WAIT_QUEUES - 1);
+}
+
+static inline void bt_index_atomic_inc(atomic_t *index)
+{
+ int old = atomic_read(index);
+ int new = bt_index_inc(old);
+ atomic_cmpxchg(index, old, new);
+}
+
+/*
+ * If a previously inactive queue goes active, bump the active user count.
+ */
+bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+{
+ if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
+ !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+ atomic_inc(&hctx->tags->active_queues);
+
+ return true;
+}
+
+/*
+ * Wake up all waiters potentially sleeping on normal (non-reserved) tags
+ */
+static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
+{
+ struct blk_mq_bitmap_tags *bt;
+ int i, wake_index;
+
+ bt = &tags->bitmap_tags;
+ wake_index = atomic_read(&bt->wake_index);
+ for (i = 0; i < BT_WAIT_QUEUES; i++) {
+ struct bt_wait_state *bs = &bt->bs[wake_index];
+
+ if (waitqueue_active(&bs->wait))
+ wake_up(&bs->wait);
+
+ wake_index = bt_index_inc(wake_index);
+ }
+}
+
+/*
+ * If a previously busy queue goes inactive, potential waiters could now
+ * be allowed to queue. Wake them up and check.
+ */
+void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
+{
+ struct blk_mq_tags *tags = hctx->tags;
+
+ if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+ return;
+
+ atomic_dec(&tags->active_queues);
+
+ blk_mq_tag_wakeup_all(tags);
+}
+
+/*
+ * For shared tag users, we track the number of currently active users
+ * and attempt to provide a fair share of the tag depth for each of them.
+ */
+static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_bitmap_tags *bt)
+{
+ unsigned int depth, users;
+
+ if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
+ return true;
+ if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+ return true;
+
+ /*
+ * Don't try dividing an ant
+ */
+ if (bt->depth == 1)
+ return true;
+
+ users = atomic_read(&hctx->tags->active_queues);
+ if (!users)
+ return true;
+
+ /*
+ * Allow at least some tags
+ */
+ depth = max((bt->depth + users - 1) / users, 4U);
+ return atomic_read(&hctx->nr_active) < depth;
+}
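
The fair-share check above amounts to giving each active queue roughly ceil(depth / users) tags, floored at four. A standalone sketch of just that arithmetic (not kernel code; numbers chosen for illustration):

#include <stdio.h>

/* Rough per-queue tag budget: ceil(depth / users), floored at 4. */
static unsigned int fair_share(unsigned int depth, unsigned int users)
{
	unsigned int share = (depth + users - 1) / users;

	return share > 4 ? share : 4;
}

int main(void)
{
	/* 128 tags shared by 1, 2, 8 and 64 queues -> 128, 64, 16, 4. */
	unsigned int users[] = { 1, 2, 8, 64 };

	for (int i = 0; i < 4; i++)
		printf("%u users -> %u tags each\n", users[i],
		       fair_share(128, users[i]));
	return 0;
}
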
+
+static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
+{
+ int tag, org_last_tag, end;
+
+ org_last_tag = last_tag;
+ end = bm->depth;
+ do {
+restart:
+ tag = find_next_zero_bit(&bm->word, end, last_tag);
+ if (unlikely(tag >= end)) {
+ /*
+ * We started with an offset, start from 0 to
+ * exhaust the map.
+ */
+ if (org_last_tag && last_tag) {
+ end = last_tag;
+ last_tag = 0;
+ goto restart;
+ }
+ return -1;
+ }
+ last_tag = tag + 1;
+ } while (test_and_set_bit_lock(tag, &bm->word));
+
+ return tag;
+}
+
+/*
+ * Straightforward bitmap tag implementation, where each bit is a tag
+ * (cleared == free, and set == busy). The small twist is using per-cpu
+ * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
+ * contexts. This enables us to drastically limit the space searched,
+ * without dirtying an extra shared cacheline like we would if we stored
+ * the cache value inside the shared blk_mq_bitmap_tags structure. On top
+ * of that, each word of tags is in a separate cacheline. This means that
+ * multiple users will tend to stick to different cachelines, at least
+ * until the map is exhausted.
+ */
+static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
+ unsigned int *tag_cache)
+{
+ unsigned int last_tag, org_last_tag;
+ int index, i, tag;
+
+ if (!hctx_may_queue(hctx, bt))
+ return -1;
+
+ last_tag = org_last_tag = *tag_cache;
+ index = TAG_TO_INDEX(bt, last_tag);
+
+ for (i = 0; i < bt->map_nr; i++) {
+ tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
+ if (tag != -1) {
+ tag += (index << bt->bits_per_word);
+ goto done;
+ }
+
+ last_tag = 0;
+ if (++index >= bt->map_nr)
+ index = 0;
+ }
+
+ *tag_cache = 0;
+ return -1;
+
+ /*
+ * Only update the cache from the allocation path, if we ended
+ * up using the specific cached tag.
+ */
+done:
+ if (tag == org_last_tag) {
+ last_tag = tag + 1;
+ if (last_tag >= bt->depth - 1)
+ last_tag = 0;
+
+ *tag_cache = last_tag;
+ }
+
+ return tag;
+}
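
The hinted search above can be pictured with this self-contained sketch (plain C, no locking or atomics, and a simple linear wrap instead of the per-word find_next_zero_bit dance): start at a cached hint, claim the first free bit found, wrap around until the whole map has been visited, and move the hint forward for the next caller. Sizes and names are illustrative.

#include <stdio.h>
#include <limits.h>

#define WORDS		4
#define BITS_PER_WORD	(sizeof(unsigned long) * CHAR_BIT)

static unsigned long map[WORDS];	/* 0 = free, 1 = busy */

/* Find and claim a free bit, starting the scan at *hint. */
static int get_tag(unsigned int *hint)
{
	unsigned int start = *hint, i;

	for (i = 0; i < WORDS * BITS_PER_WORD; i++) {
		unsigned int tag = (start + i) % (WORDS * BITS_PER_WORD);
		unsigned int word = tag / BITS_PER_WORD;
		unsigned int bit = tag % BITS_PER_WORD;

		if (!(map[word] & (1UL << bit))) {
			map[word] |= 1UL << bit;
			*hint = (tag + 1) % (WORDS * BITS_PER_WORD);
			return tag;
		}
	}
	return -1;			/* map exhausted */
}

int main(void)
{
	unsigned int hint = 0;

	/* Consecutive callers sharing a hint walk forward through the map. */
	for (int i = 0; i < 5; i++)
		printf("got tag %d\n", get_tag(&hint));
	return 0;
}
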
+
+static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
+ struct blk_mq_hw_ctx *hctx)
+{
+ struct bt_wait_state *bs;
+ int wait_index;
+
+ if (!hctx)
+ return &bt->bs[0];
+
+ wait_index = atomic_read(&hctx->wait_index);
+ bs = &bt->bs[wait_index];
+ bt_index_atomic_inc(&hctx->wait_index);
+ return bs;
+}
+
+static int bt_get(struct blk_mq_alloc_data *data,
+ struct blk_mq_bitmap_tags *bt,
+ struct blk_mq_hw_ctx *hctx,
+ unsigned int *last_tag)
+{
+ struct bt_wait_state *bs;
+ DEFINE_WAIT(wait);
+ int tag;
+
+ tag = __bt_get(hctx, bt, last_tag);
+ if (tag != -1)
+ return tag;
+
+ if (!(data->gfp & __GFP_WAIT))
+ return -1;
+
+ bs = bt_wait_ptr(bt, hctx);
+ do {
+ prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
+
+ tag = __bt_get(hctx, bt, last_tag);
+ if (tag != -1)
+ break;
+
+ blk_mq_put_ctx(data->ctx);
+
+ io_schedule();
+
+ data->ctx = blk_mq_get_ctx(data->q);
+ data->hctx = data->q->mq_ops->map_queue(data->q,
+ data->ctx->cpu);
+ if (data->reserved) {
+ bt = &data->hctx->tags->breserved_tags;
+ } else {
+ last_tag = &data->ctx->last_tag;
+ hctx = data->hctx;
+ bt = &hctx->tags->bitmap_tags;
+ }
+ finish_wait(&bs->wait, &wait);
+ bs = bt_wait_ptr(bt, hctx);
+ } while (1);
+
+ finish_wait(&bs->wait, &wait);
+ return tag;
+}
+
+static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
+{
+ int tag;
+
+ tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
+ &data->ctx->last_tag);
+ if (tag >= 0)
+ return tag + data->hctx->tags->nr_reserved_tags;
+
+ return BLK_MQ_TAG_FAIL;
+}
+
+static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
+{
+ int tag, zero = 0;
+
+ if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
+ WARN_ON_ONCE(1);
+ return BLK_MQ_TAG_FAIL;
+ }
+
+ tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
+ if (tag < 0)
+ return BLK_MQ_TAG_FAIL;
+
+ return tag;
+}
+
+unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
+{
+ if (!data->reserved)
+ return __blk_mq_get_tag(data);
+
+ return __blk_mq_get_reserved_tag(data);
+}
+
+static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
+{
+ int i, wake_index;
+
+ wake_index = atomic_read(&bt->wake_index);
+ for (i = 0; i < BT_WAIT_QUEUES; i++) {
+ struct bt_wait_state *bs = &bt->bs[wake_index];
+
+ if (waitqueue_active(&bs->wait)) {
+ int o = atomic_read(&bt->wake_index);
+ if (wake_index != o)
+ atomic_cmpxchg(&bt->wake_index, o, wake_index);
+
+ return bs;
+ }
+
+ wake_index = bt_index_inc(wake_index);
+ }
+
+ return NULL;
+}
+
+static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
+{
+ const int index = TAG_TO_INDEX(bt, tag);
+ struct bt_wait_state *bs;
+ int wait_cnt;
+
+ /*
+ * The unlock memory barrier needs to order access to req in the free
+ * path against clearing the tag bit
+ */
+ clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);
+
+ bs = bt_wake_ptr(bt);
+ if (!bs)
+ return;
+
+ wait_cnt = atomic_dec_return(&bs->wait_cnt);
+ if (wait_cnt == 0) {
+wake:
+ atomic_add(bt->wake_cnt, &bs->wait_cnt);
+ bt_index_atomic_inc(&bt->wake_index);
+ wake_up(&bs->wait);
+ } else if (wait_cnt < 0) {
+ wait_cnt = atomic_inc_return(&bs->wait_cnt);
+ if (!wait_cnt)
+ goto wake;
+ }
+}
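
The rolling-wakeup batching in the function above boils down to: only wake sleepers once per wake_cnt freed tags, and rotate to the next wait queue each time. A hedged single-threaded sketch of just that counter logic, with no real wait queues or atomics (constants are illustrative):

#include <stdio.h>

#define BT_WAIT_QUEUES	8

static int wait_cnt = 4;		/* tags to free before next wakeup */
static int wake_cnt = 4;		/* batch size, cf. bt->wake_cnt */
static int wake_index;			/* which wait queue to wake next */

/* Called for every freed tag; wakes a batch of waiters occasionally. */
static void tag_freed(void)
{
	if (--wait_cnt == 0) {
		wait_cnt += wake_cnt;	/* re-arm for the next batch */
		printf("wake wait queue %d\n", wake_index);
		wake_index = (wake_index + 1) % BT_WAIT_QUEUES;
	}
}

int main(void)
{
	/* Freeing 12 tags with a batch of 4 wakes queues 0, 1 and 2. */
	for (int i = 0; i < 12; i++)
		tag_freed();
	return 0;
}
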
+
+static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
+{
+ BUG_ON(tag >= tags->nr_tags);
+
+ bt_clear_tag(&tags->bitmap_tags, tag);
+}
+
+static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
+ unsigned int tag)
+{
+ BUG_ON(tag >= tags->nr_reserved_tags);
+
+ bt_clear_tag(&tags->breserved_tags, tag);
+}
+
+void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
+ unsigned int *last_tag)
+{
+ struct blk_mq_tags *tags = hctx->tags;
+
+ if (tag >= tags->nr_reserved_tags) {
+ const int real_tag = tag - tags->nr_reserved_tags;
+
+ __blk_mq_put_tag(tags, real_tag);
+ *last_tag = real_tag;
+ } else
+ __blk_mq_put_reserved_tag(tags, tag);
+}
+
+static void bt_for_each_free(struct blk_mq_bitmap_tags *bt,
+ unsigned long *free_map, unsigned int off)
+{
+ int i;
+
+ for (i = 0; i < bt->map_nr; i++) {
+ struct blk_align_bitmap *bm = &bt->map[i];
+ int bit = 0;
+
+ do {
+ bit = find_next_zero_bit(&bm->word, bm->depth, bit);
+ if (bit >= bm->depth)
+ break;
+
+ __set_bit(bit + off, free_map);
+ bit++;
+ } while (1);
+
+ off += (1 << bt->bits_per_word);
+ }
+}
+
+void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
+ void (*fn)(void *, unsigned long *), void *data)
+{
+ unsigned long *tag_map;
+ size_t map_size;
+
+ map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
+ tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
+ if (!tag_map)
+ return;
+
+ bt_for_each_free(&tags->bitmap_tags, tag_map, tags->nr_reserved_tags);
+ if (tags->nr_reserved_tags)
+ bt_for_each_free(&tags->breserved_tags, tag_map, 0);
+
+ fn(data, tag_map);
+ kfree(tag_map);
+}
+EXPORT_SYMBOL(blk_mq_tag_busy_iter);
+
+static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
+{
+ unsigned int i, used;
+
+ for (i = 0, used = 0; i < bt->map_nr; i++) {
+ struct blk_align_bitmap *bm = &bt->map[i];
+
+ used += bitmap_weight(&bm->word, bm->depth);
+ }
+
+ return bt->depth - used;
+}
+
+static void bt_update_count(struct blk_mq_bitmap_tags *bt,
+ unsigned int depth)
+{
+ unsigned int tags_per_word = 1U << bt->bits_per_word;
+ unsigned int map_depth = depth;
+
+ if (depth) {
+ int i;
+
+ for (i = 0; i < bt->map_nr; i++) {
+ bt->map[i].depth = min(map_depth, tags_per_word);
+ map_depth -= bt->map[i].depth;
+ }
+ }
+
+ bt->wake_cnt = BT_WAIT_BATCH;
+ if (bt->wake_cnt > depth / 4)
+ bt->wake_cnt = max(1U, depth / 4);
+
+ bt->depth = depth;
+}
+
+static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
+ int node, bool reserved)
+{
+ int i;
+
+ bt->bits_per_word = ilog2(BITS_PER_LONG);
+
+ /*
+ * Depth can be zero for reserved tags, that's not a failure
+ * condition.
+ */
+ if (depth) {
+ unsigned int nr, tags_per_word;
+
+ tags_per_word = (1 << bt->bits_per_word);
+
+ /*
+ * If the tag space is small, shrink the number of tags
+ * per word so we spread over a few cachelines, at least.
+ * If less than 4 tags, just forget about it, it's not
+ * going to work optimally anyway.
+ */
+ if (depth >= 4) {
+ while (tags_per_word * 4 > depth) {
+ bt->bits_per_word--;
+ tags_per_word = (1 << bt->bits_per_word);
+ }
+ }
+
+ nr = ALIGN(depth, tags_per_word) / tags_per_word;
+ bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
+ GFP_KERNEL, node);
+ if (!bt->map)
+ return -ENOMEM;
+
+ bt->map_nr = nr;
+ }
+
+ bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
+ if (!bt->bs) {
+ kfree(bt->map);
+ return -ENOMEM;
+ }
+
+ bt_update_count(bt, depth);
+
+ for (i = 0; i < BT_WAIT_QUEUES; i++) {
+ init_waitqueue_head(&bt->bs[i].wait);
+ atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
+ }
+
+ return 0;
+}
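
The sizing heuristic in bt_alloc() can be reproduced in isolation: start from one full long per word and shrink the word size until at least four words cover the depth, so even a small tag space spans several cachelines. A standalone sketch, assuming a 64-bit long and illustrative depths:

#include <stdio.h>

/* Mirror the bt_alloc() heuristic for choosing bits_per_word/map_nr. */
static void size_map(unsigned int depth)
{
	unsigned int bits_per_word = 6;	/* log2(64), i.e. ilog2(BITS_PER_LONG) on 64-bit */
	unsigned int tags_per_word = 1U << bits_per_word;
	unsigned int nr;

	if (depth >= 4) {
		while (tags_per_word * 4 > depth) {
			bits_per_word--;
			tags_per_word = 1U << bits_per_word;
		}
	}

	nr = (depth + tags_per_word - 1) / tags_per_word;	/* ceil(depth / tags_per_word) */
	printf("depth %4u -> %2u tags per word, %2u words\n",
	       depth, tags_per_word, nr);
}

int main(void)
{
	unsigned int depths[] = { 2, 16, 64, 256 };

	for (int i = 0; i < 4; i++)
		size_map(depths[i]);
	return 0;
}
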
+
+static void bt_free(struct blk_mq_bitmap_tags *bt)
+{
+ kfree(bt->map);
+ kfree(bt->bs);
+}
+
+static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
+ int node)
+{
+ unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
+
+ if (bt_alloc(&tags->bitmap_tags, depth, node, false))
+ goto enomem;
+ if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
+ goto enomem;
+
+ return tags;
+enomem:
+ bt_free(&tags->bitmap_tags);
+ kfree(tags);
+ return NULL;
+}
+
+struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
+ unsigned int reserved_tags, int node)
+{
+ struct blk_mq_tags *tags;
+
+ if (total_tags > BLK_MQ_TAG_MAX) {
+ pr_err("blk-mq: tag depth too large\n");
+ return NULL;
+ }
+
+ tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
+ if (!tags)
+ return NULL;
+
+ tags->nr_tags = total_tags;
+ tags->nr_reserved_tags = reserved_tags;
+
+ return blk_mq_init_bitmap_tags(tags, node);
+}
+
+void blk_mq_free_tags(struct blk_mq_tags *tags)
+{
+ bt_free(&tags->bitmap_tags);
+ bt_free(&tags->breserved_tags);
+ kfree(tags);
+}
+
+void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
+{
+ unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
+
+ *tag = prandom_u32() % depth;
+}
+
+int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
+{
+ tdepth -= tags->nr_reserved_tags;
+ if (tdepth > tags->nr_tags)
+ return -EINVAL;
+
+ /*
+ * We don't need to (and can't) update reserved tags here; they remain
+ * static and should never need resizing.
+ */
+ bt_update_count(&tags->bitmap_tags, tdepth);
+ blk_mq_tag_wakeup_all(tags);
+ return 0;
+}
+
+ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
+{
+ char *orig_page = page;
+ unsigned int free, res;
+
+ if (!tags)
+ return 0;
+
+ page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
+ "bits_per_word=%u\n",
+ tags->nr_tags, tags->nr_reserved_tags,
+ tags->bitmap_tags.bits_per_word);
+
+ free = bt_unused_tags(&tags->bitmap_tags);
+ res = bt_unused_tags(&tags->breserved_tags);
+
+ page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
+ page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));
+
+ return page - orig_page;
+}
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
new file mode 100644
index 00000000000..6206ed17ef7
--- /dev/null
+++ b/block/blk-mq-tag.h
@@ -0,0 +1,88 @@
+#ifndef INT_BLK_MQ_TAG_H
+#define INT_BLK_MQ_TAG_H
+
+#include "blk-mq.h"
+
+enum {
+ BT_WAIT_QUEUES = 8,
+ BT_WAIT_BATCH = 8,
+};
+
+struct bt_wait_state {
+ atomic_t wait_cnt;
+ wait_queue_head_t wait;
+} ____cacheline_aligned_in_smp;
+
+#define TAG_TO_INDEX(bt, tag) ((tag) >> (bt)->bits_per_word)
+#define TAG_TO_BIT(bt, tag) ((tag) & ((1 << (bt)->bits_per_word) - 1))
+
+struct blk_mq_bitmap_tags {
+ unsigned int depth;
+ unsigned int wake_cnt;
+ unsigned int bits_per_word;
+
+ unsigned int map_nr;
+ struct blk_align_bitmap *map;
+
+ atomic_t wake_index;
+ struct bt_wait_state *bs;
+};
+
+/*
+ * Tag address space map.
+ */
+struct blk_mq_tags {
+ unsigned int nr_tags;
+ unsigned int nr_reserved_tags;
+
+ atomic_t active_queues;
+
+ struct blk_mq_bitmap_tags bitmap_tags;
+ struct blk_mq_bitmap_tags breserved_tags;
+
+ struct request **rqs;
+ struct list_head page_list;
+};
+
+
+extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node);
+extern void blk_mq_free_tags(struct blk_mq_tags *tags);
+
+extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
+extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag);
+extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
+extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
+extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
+extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
+
+enum {
+ BLK_MQ_TAG_CACHE_MIN = 1,
+ BLK_MQ_TAG_CACHE_MAX = 64,
+};
+
+enum {
+ BLK_MQ_TAG_FAIL = -1U,
+ BLK_MQ_TAG_MIN = BLK_MQ_TAG_CACHE_MIN,
+ BLK_MQ_TAG_MAX = BLK_MQ_TAG_FAIL - 1,
+};
+
+extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
+extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
+
+static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+{
+ if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
+ return false;
+
+ return __blk_mq_tag_busy(hctx);
+}
+
+static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
+{
+ if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
+ return;
+
+ __blk_mq_tag_idle(hctx);
+}
+
+#endif
diff --git a/block/blk-mq.c b/block/blk-mq.c
new file mode 100644
index 00000000000..ad69ef657e8
--- /dev/null
+++ b/block/blk-mq.c
@@ -0,0 +1,2058 @@
+/*
+ * Block multiqueue core code
+ *
+ * Copyright (C) 2013-2014 Jens Axboe
+ * Copyright (C) 2013-2014 Christoph Hellwig
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/backing-dev.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/smp.h>
+#include <linux/llist.h>
+#include <linux/list_sort.h>
+#include <linux/cpu.h>
+#include <linux/cache.h>
+#include <linux/sched/sysctl.h>
+#include <linux/delay.h>
+
+#include <trace/events/block.h>
+
+#include <linux/blk-mq.h>
+#include "blk.h"
+#include "blk-mq.h"
+#include "blk-mq-tag.h"
+
+static DEFINE_MUTEX(all_q_mutex);
+static LIST_HEAD(all_q_list);
+
+static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
+
+/*
+ * Check if any of the ctx's have pending work in this hardware queue
+ */
+static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
+{
+ unsigned int i;
+
+ for (i = 0; i < hctx->ctx_map.map_size; i++)
+ if (hctx->ctx_map.map[i].word)
+ return true;
+
+ return false;
+}
+
+static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx)
+{
+ return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
+}
+
+#define CTX_TO_BIT(hctx, ctx) \
+ ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
+
+/*
+ * Mark this ctx as having pending work in this hardware queue
+ */
+static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx)
+{
+ struct blk_align_bitmap *bm = get_bm(hctx, ctx);
+
+ if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
+ set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
+}
+
+static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx)
+{
+ struct blk_align_bitmap *bm = get_bm(hctx, ctx);
+
+ clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
+}
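
The pending map is two-level: ctx->index_hw selects a word (index_hw / bits_per_word) and a bit within it (index_hw & (bits_per_word - 1)). A small sketch of mark/clear/test against a flat array of words, without the atomics and with illustrative sizes:

#include <stdio.h>
#include <stdbool.h>

#define BITS_PER_WORD	32U		/* stand-in for ctx_map.bits_per_word */
#define NR_WORDS	4U

static unsigned long pending[NR_WORDS];

static void mark_pending(unsigned int index_hw)
{
	pending[index_hw / BITS_PER_WORD] |= 1UL << (index_hw & (BITS_PER_WORD - 1));
}

static void clear_pending(unsigned int index_hw)
{
	pending[index_hw / BITS_PER_WORD] &= ~(1UL << (index_hw & (BITS_PER_WORD - 1)));
}

/* Like blk_mq_hctx_has_pending(): any non-zero word means work to do. */
static bool has_pending(void)
{
	for (unsigned int i = 0; i < NR_WORDS; i++)
		if (pending[i])
			return true;
	return false;
}

int main(void)
{
	mark_pending(37);				/* word 1, bit 5 */
	printf("pending: %d\n", has_pending());	/* 1 */
	clear_pending(37);
	printf("pending: %d\n", has_pending());	/* 0 */
	return 0;
}
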
+
+static int blk_mq_queue_enter(struct request_queue *q)
+{
+ int ret;
+
+ __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
+ smp_wmb();
+
+ /* we have problems freezing the queue if it's initializing */
+ if (!blk_queue_dying(q) &&
+ (!blk_queue_bypass(q) || !blk_queue_init_done(q)))
+ return 0;
+
+ __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
+
+ spin_lock_irq(q->queue_lock);
+ ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
+ !blk_queue_bypass(q) || blk_queue_dying(q),
+ *q->queue_lock);
+ /* inc usage with the lock held to avoid a concurrent freeze_queue here */
+ if (!ret && !blk_queue_dying(q))
+ __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
+ else if (blk_queue_dying(q))
+ ret = -ENODEV;
+ spin_unlock_irq(q->queue_lock);
+
+ return ret;
+}
+
+static void blk_mq_queue_exit(struct request_queue *q)
+{
+ __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
+}
+
+void blk_mq_drain_queue(struct request_queue *q)
+{
+ while (true) {
+ s64 count;
+
+ spin_lock_irq(q->queue_lock);
+ count = percpu_counter_sum(&q->mq_usage_counter);
+ spin_unlock_irq(q->queue_lock);
+
+ if (count == 0)
+ break;
+ blk_mq_start_hw_queues(q);
+ msleep(10);
+ }
+}
+
+/*
+ * Guarantee no request is in use, so we can change any data structure of
+ * the queue afterward.
+ */
+static void blk_mq_freeze_queue(struct request_queue *q)
+{
+ bool drain;
+
+ spin_lock_irq(q->queue_lock);
+ drain = !q->bypass_depth++;
+ queue_flag_set(QUEUE_FLAG_BYPASS, q);
+ spin_unlock_irq(q->queue_lock);
+
+ if (drain)
+ blk_mq_drain_queue(q);
+}
+
+static void blk_mq_unfreeze_queue(struct request_queue *q)
+{
+ bool wake = false;
+
+ spin_lock_irq(q->queue_lock);
+ if (!--q->bypass_depth) {
+ queue_flag_clear(QUEUE_FLAG_BYPASS, q);
+ wake = true;
+ }
+ WARN_ON_ONCE(q->bypass_depth < 0);
+ spin_unlock_irq(q->queue_lock);
+ if (wake)
+ wake_up_all(&q->mq_freeze_wq);
+}
+
+bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
+{
+ return blk_mq_has_free_tags(hctx->tags);
+}
+EXPORT_SYMBOL(blk_mq_can_queue);
+
+static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
+ struct request *rq, unsigned int rw_flags)
+{
+ if (blk_queue_io_stat(q))
+ rw_flags |= REQ_IO_STAT;
+
+ INIT_LIST_HEAD(&rq->queuelist);
+ /* csd/requeue_work/fifo_time is initialized before use */
+ rq->q = q;
+ rq->mq_ctx = ctx;
+ rq->cmd_flags |= rw_flags;
+ /* do not touch atomic flags, it needs atomic ops against the timer */
+ rq->cpu = -1;
+ INIT_HLIST_NODE(&rq->hash);
+ RB_CLEAR_NODE(&rq->rb_node);
+ rq->rq_disk = NULL;
+ rq->part = NULL;
+ rq->start_time = jiffies;
+#ifdef CONFIG_BLK_CGROUP
+ rq->rl = NULL;
+ set_start_time_ns(rq);
+ rq->io_start_time_ns = 0;
+#endif
+ rq->nr_phys_segments = 0;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+ rq->nr_integrity_segments = 0;
+#endif
+ rq->special = NULL;
+ /* tag was already set */
+ rq->errors = 0;
+
+ rq->extra_len = 0;
+ rq->sense_len = 0;
+ rq->resid_len = 0;
+ rq->sense = NULL;
+
+ INIT_LIST_HEAD(&rq->timeout_list);
+ rq->timeout = 0;
+
+ rq->end_io = NULL;
+ rq->end_io_data = NULL;
+ rq->next_rq = NULL;
+
+ ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
+}
+
+static struct request *
+__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
+{
+ struct request *rq;
+ unsigned int tag;
+
+ tag = blk_mq_get_tag(data);
+ if (tag != BLK_MQ_TAG_FAIL) {
+ rq = data->hctx->tags->rqs[tag];
+
+ rq->cmd_flags = 0;
+ if (blk_mq_tag_busy(data->hctx)) {
+ rq->cmd_flags = REQ_MQ_INFLIGHT;
+ atomic_inc(&data->hctx->nr_active);
+ }
+
+ rq->tag = tag;
+ blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
+ return rq;
+ }
+
+ return NULL;
+}
+
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
+ bool reserved)
+{
+ struct blk_mq_ctx *ctx;
+ struct blk_mq_hw_ctx *hctx;
+ struct request *rq;
+ struct blk_mq_alloc_data alloc_data;
+
+ if (blk_mq_queue_enter(q))
+ return NULL;
+
+ ctx = blk_mq_get_ctx(q);
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
+ reserved, ctx, hctx);
+
+ rq = __blk_mq_alloc_request(&alloc_data, rw);
+ if (!rq && (gfp & __GFP_WAIT)) {
+ __blk_mq_run_hw_queue(hctx);
+ blk_mq_put_ctx(ctx);
+
+ ctx = blk_mq_get_ctx(q);
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
+ hctx);
+ rq = __blk_mq_alloc_request(&alloc_data, rw);
+ ctx = alloc_data.ctx;
+ }
+ blk_mq_put_ctx(ctx);
+ return rq;
+}
+EXPORT_SYMBOL(blk_mq_alloc_request);
+
+static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx, struct request *rq)
+{
+ const int tag = rq->tag;
+ struct request_queue *q = rq->q;
+
+ if (rq->cmd_flags & REQ_MQ_INFLIGHT)
+ atomic_dec(&hctx->nr_active);
+
+ clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+ blk_mq_put_tag(hctx, tag, &ctx->last_tag);
+ blk_mq_queue_exit(q);
+}
+
+void blk_mq_free_request(struct request *rq)
+{
+ struct blk_mq_ctx *ctx = rq->mq_ctx;
+ struct blk_mq_hw_ctx *hctx;
+ struct request_queue *q = rq->q;
+
+ ctx->rq_completed[rq_is_sync(rq)]++;
+
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ __blk_mq_free_request(hctx, ctx, rq);
+}
+
+/*
+ * Clone all relevant state from a request that has been put on hold in
+ * the flush state machine into the preallocated flush request that hangs
+ * off the request queue.
+ *
+ * For a driver the flush request should be invisible; that's why we are
+ * impersonating the original request here.
+ */
+void blk_mq_clone_flush_request(struct request *flush_rq,
+ struct request *orig_rq)
+{
+ struct blk_mq_hw_ctx *hctx =
+ orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu);
+
+ flush_rq->mq_ctx = orig_rq->mq_ctx;
+ flush_rq->tag = orig_rq->tag;
+ memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq),
+ hctx->cmd_size);
+}
+
+inline void __blk_mq_end_io(struct request *rq, int error)
+{
+ blk_account_io_done(rq);
+
+ if (rq->end_io) {
+ rq->end_io(rq, error);
+ } else {
+ if (unlikely(blk_bidi_rq(rq)))
+ blk_mq_free_request(rq->next_rq);
+ blk_mq_free_request(rq);
+ }
+}
+EXPORT_SYMBOL(__blk_mq_end_io);
+
+void blk_mq_end_io(struct request *rq, int error)
+{
+ if (blk_update_request(rq, error, blk_rq_bytes(rq)))
+ BUG();
+ __blk_mq_end_io(rq, error);
+}
+EXPORT_SYMBOL(blk_mq_end_io);
+
+static void __blk_mq_complete_request_remote(void *data)
+{
+ struct request *rq = data;
+
+ rq->q->softirq_done_fn(rq);
+}
+
+static void blk_mq_ipi_complete_request(struct request *rq)
+{
+ struct blk_mq_ctx *ctx = rq->mq_ctx;
+ bool shared = false;
+ int cpu;
+
+ if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
+ rq->q->softirq_done_fn(rq);
+ return;
+ }
+
+ cpu = get_cpu();
+ if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
+ shared = cpus_share_cache(cpu, ctx->cpu);
+
+ if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
+ rq->csd.func = __blk_mq_complete_request_remote;
+ rq->csd.info = rq;
+ rq->csd.flags = 0;
+ smp_call_function_single_async(ctx->cpu, &rq->csd);
+ } else {
+ rq->q->softirq_done_fn(rq);
+ }
+ put_cpu();
+}
+
+void __blk_mq_complete_request(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+
+ if (!q->softirq_done_fn)
+ blk_mq_end_io(rq, rq->errors);
+ else
+ blk_mq_ipi_complete_request(rq);
+}
+
+/**
+ * blk_mq_complete_request - end I/O on a request
+ * @rq: the request being processed
+ *
+ * Description:
+ * Ends all I/O on a request. It does not handle partial completions.
+ * The actual completion happens out-of-order, through an IPI handler.
+ **/
+void blk_mq_complete_request(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+
+ if (unlikely(blk_should_fake_timeout(q)))
+ return;
+ if (!blk_mark_rq_complete(rq))
+ __blk_mq_complete_request(rq);
+}
+EXPORT_SYMBOL(blk_mq_complete_request);
+
+static void blk_mq_start_request(struct request *rq, bool last)
+{
+ struct request_queue *q = rq->q;
+
+ trace_block_rq_issue(q, rq);
+
+ rq->resid_len = blk_rq_bytes(rq);
+ if (unlikely(blk_bidi_rq(rq)))
+ rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
+
+ blk_add_timer(rq);
+
+ /*
+ * Mark us as started and clear complete. Complete might have been
+ * set if requeue raced with timeout, which then marked it as
+ * complete. So be sure to clear complete again when we start
+ * the request, otherwise we'll ignore the completion event.
+ */
+ if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+ set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+ if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
+ clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+
+ if (q->dma_drain_size && blk_rq_bytes(rq)) {
+ /*
+ * Make sure space for the drain appears. We know we can do
+ * this because max_hw_segments has been adjusted to be one
+ * fewer than the device can handle.
+ */
+ rq->nr_phys_segments++;
+ }
+
+ /*
+ * Flag the last request in the series so that drivers know when IO
+ * should be kicked off, if they don't do it on a per-request basis.
+ *
+ * Note: the flag isn't the only condition on which drivers should kick
+ * off IO. If the drive is busy, the last request might not have the bit set.
+ */
+ if (last)
+ rq->cmd_flags |= REQ_END;
+}
+
+static void __blk_mq_requeue_request(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+
+ trace_block_rq_requeue(q, rq);
+ clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+ rq->cmd_flags &= ~REQ_END;
+
+ if (q->dma_drain_size && blk_rq_bytes(rq))
+ rq->nr_phys_segments--;
+}
+
+void blk_mq_requeue_request(struct request *rq)
+{
+ __blk_mq_requeue_request(rq);
+ blk_clear_rq_complete(rq);
+
+ BUG_ON(blk_queued_rq(rq));
+ blk_mq_add_to_requeue_list(rq, true);
+}
+EXPORT_SYMBOL(blk_mq_requeue_request);
+
+static void blk_mq_requeue_work(struct work_struct *work)
+{
+ struct request_queue *q =
+ container_of(work, struct request_queue, requeue_work);
+ LIST_HEAD(rq_list);
+ struct request *rq, *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->requeue_lock, flags);
+ list_splice_init(&q->requeue_list, &rq_list);
+ spin_unlock_irqrestore(&q->requeue_lock, flags);
+
+ list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
+ if (!(rq->cmd_flags & REQ_SOFTBARRIER))
+ continue;
+
+ rq->cmd_flags &= ~REQ_SOFTBARRIER;
+ list_del_init(&rq->queuelist);
+ blk_mq_insert_request(rq, true, false, false);
+ }
+
+ while (!list_empty(&rq_list)) {
+ rq = list_entry(rq_list.next, struct request, queuelist);
+ list_del_init(&rq->queuelist);
+ blk_mq_insert_request(rq, false, false, false);
+ }
+
+ blk_mq_run_queues(q, false);
+}
+
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
+{
+ struct request_queue *q = rq->q;
+ unsigned long flags;
+
+ /*
+ * We abuse this flag that is otherwise used by the I/O scheduler to
+ * request head insertion from the workqueue.
+ */
+ BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
+
+ spin_lock_irqsave(&q->requeue_lock, flags);
+ if (at_head) {
+ rq->cmd_flags |= REQ_SOFTBARRIER;
+ list_add(&rq->queuelist, &q->requeue_list);
+ } else {
+ list_add_tail(&rq->queuelist, &q->requeue_list);
+ }
+ spin_unlock_irqrestore(&q->requeue_lock, flags);
+}
+EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
+
+void blk_mq_kick_requeue_list(struct request_queue *q)
+{
+ kblockd_schedule_work(&q->requeue_work);
+}
+EXPORT_SYMBOL(blk_mq_kick_requeue_list);
+
+static inline bool is_flush_request(struct request *rq, unsigned int tag)
+{
+ return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
+ rq->q->flush_rq->tag == tag);
+}
+
+struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
+{
+ struct request *rq = tags->rqs[tag];
+
+ if (!is_flush_request(rq, tag))
+ return rq;
+
+ return rq->q->flush_rq;
+}
+EXPORT_SYMBOL(blk_mq_tag_to_rq);
+
+struct blk_mq_timeout_data {
+ struct blk_mq_hw_ctx *hctx;
+ unsigned long *next;
+ unsigned int *next_set;
+};
+
+static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
+{
+ struct blk_mq_timeout_data *data = __data;
+ struct blk_mq_hw_ctx *hctx = data->hctx;
+ unsigned int tag;
+
+ /* It may not be in flight yet (this is where
+ * the REQ_ATOM_STARTED flag comes in). The requests are
+ * statically allocated, so we know it's always safe to access the
+ * memory associated with a bit offset into ->rqs[].
+ */
+ tag = 0;
+ do {
+ struct request *rq;
+
+ tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag);
+ if (tag >= hctx->tags->nr_tags)
+ break;
+
+ rq = blk_mq_tag_to_rq(hctx->tags, tag++);
+ if (rq->q != hctx->queue)
+ continue;
+ if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+ continue;
+
+ blk_rq_check_expired(rq, data->next, data->next_set);
+ } while (1);
+}
+
+static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
+ unsigned long *next,
+ unsigned int *next_set)
+{
+ struct blk_mq_timeout_data data = {
+ .hctx = hctx,
+ .next = next,
+ .next_set = next_set,
+ };
+
+ /*
+ * Ask the tagging code to iterate busy requests, so we can
+ * check them for timeout.
+ */
+ blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
+}
+
+static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+
+ /*
+ * We know that complete is set at this point. If STARTED isn't set
+ * anymore, then the request isn't active and the "timeout" should
+ * just be ignored. This can happen due to the bitflag ordering.
+ * Timeout first checks if STARTED is set, and if it is, assumes
+ * the request is active. But if we race with completion, then
+ * both flags will get cleared. So check here again, and ignore
+ * a timeout event with a request that isn't active.
+ */
+ if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+ return BLK_EH_NOT_HANDLED;
+
+ if (!q->mq_ops->timeout)
+ return BLK_EH_RESET_TIMER;
+
+ return q->mq_ops->timeout(rq);
+}
+
+static void blk_mq_rq_timer(unsigned long data)
+{
+ struct request_queue *q = (struct request_queue *) data;
+ struct blk_mq_hw_ctx *hctx;
+ unsigned long next = 0;
+ int i, next_set = 0;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ /*
+ * If no software queues are currently mapped to this
+ * hardware queue, there's nothing to check
+ */
+ if (!hctx->nr_ctx || !hctx->tags)
+ continue;
+
+ blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
+ }
+
+ if (next_set) {
+ next = blk_rq_timeout(round_jiffies_up(next));
+ mod_timer(&q->timeout, next);
+ } else {
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_tag_idle(hctx);
+ }
+}
+
+/*
+ * Reverse check our software queue for entries that we could potentially
+ * merge with. Currently includes a hand-wavy stop count of 8, to not spend
+ * too much time checking for merges.
+ */
+static bool blk_mq_attempt_merge(struct request_queue *q,
+ struct blk_mq_ctx *ctx, struct bio *bio)
+{
+ struct request *rq;
+ int checked = 8;
+
+ list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
+ int el_ret;
+
+ if (!checked--)
+ break;
+
+ if (!blk_rq_merge_ok(rq, bio))
+ continue;
+
+ el_ret = blk_try_merge(rq, bio);
+ if (el_ret == ELEVATOR_BACK_MERGE) {
+ if (bio_attempt_back_merge(q, rq, bio)) {
+ ctx->rq_merged++;
+ return true;
+ }
+ break;
+ } else if (el_ret == ELEVATOR_FRONT_MERGE) {
+ if (bio_attempt_front_merge(q, rq, bio)) {
+ ctx->rq_merged++;
+ return true;
+ }
+ break;
+ }
+ }
+
+ return false;
+}
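
The bounded reverse scan above (newest requests first, give up after eight checks) can be sketched on its own; this hedged example uses a plain array scanned backwards and only the back-merge test, with made-up sector numbers:

#include <stdio.h>

struct fake_rq {
	unsigned long long pos;		/* start sector */
	unsigned int sectors;		/* length in sectors */
};

/* Walk the newest entries first, give up after 'budget' checks. */
static int find_back_merge(struct fake_rq *q, int nr, unsigned long long bio_pos)
{
	int budget = 8;

	for (int i = nr - 1; i >= 0 && budget--; i--)
		if (q[i].pos + q[i].sectors == bio_pos)
			return i;	/* bio starts right after this rq */
	return -1;
}

int main(void)
{
	struct fake_rq q[] = {
		{ 0, 8 }, { 100, 8 }, { 200, 8 },
	};

	/* A bio at sector 108 back-merges with the request at sector 100. */
	printf("merge candidate: %d\n", find_back_merge(q, 3, 108));
	return 0;
}
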
+
+/*
+ * Process software queues that have been marked busy, splicing them
+ * to the for-dispatch list
+ */
+static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
+{
+ struct blk_mq_ctx *ctx;
+ int i;
+
+ for (i = 0; i < hctx->ctx_map.map_size; i++) {
+ struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
+ unsigned int off, bit;
+
+ if (!bm->word)
+ continue;
+
+ bit = 0;
+ off = i * hctx->ctx_map.bits_per_word;
+ do {
+ bit = find_next_bit(&bm->word, bm->depth, bit);
+ if (bit >= bm->depth)
+ break;
+
+ ctx = hctx->ctxs[bit + off];
+ clear_bit(bit, &bm->word);
+ spin_lock(&ctx->lock);
+ list_splice_tail_init(&ctx->rq_list, list);
+ spin_unlock(&ctx->lock);
+
+ bit++;
+ } while (1);
+ }
+}
+
+/*
+ * Run this hardware queue, pulling any software queues mapped to it in.
+ * Note that this function currently has various problems around ordering
+ * of IO. In particular, we'd like FIFO behaviour on handling existing
+ * items on the hctx->dispatch list. Ignore that for now.
+ */
+static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
+{
+ struct request_queue *q = hctx->queue;
+ struct request *rq;
+ LIST_HEAD(rq_list);
+ int queued;
+
+ WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
+
+ if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
+ return;
+
+ hctx->run++;
+
+ /*
+ * Touch any software queue that has pending entries.
+ */
+ flush_busy_ctxs(hctx, &rq_list);
+
+ /*
+ * If we have previous entries on our dispatch list, grab them
+ * and stuff them at the front for more fair dispatch.
+ */
+ if (!list_empty_careful(&hctx->dispatch)) {
+ spin_lock(&hctx->lock);
+ if (!list_empty(&hctx->dispatch))
+ list_splice_init(&hctx->dispatch, &rq_list);
+ spin_unlock(&hctx->lock);
+ }
+
+ /*
+ * Now process all the entries, sending them to the driver.
+ */
+ queued = 0;
+ while (!list_empty(&rq_list)) {
+ int ret;
+
+ rq = list_first_entry(&rq_list, struct request, queuelist);
+ list_del_init(&rq->queuelist);
+
+ blk_mq_start_request(rq, list_empty(&rq_list));
+
+ ret = q->mq_ops->queue_rq(hctx, rq);
+ switch (ret) {
+ case BLK_MQ_RQ_QUEUE_OK:
+ queued++;
+ continue;
+ case BLK_MQ_RQ_QUEUE_BUSY:
+ list_add(&rq->queuelist, &rq_list);
+ __blk_mq_requeue_request(rq);
+ break;
+ default:
+ pr_err("blk-mq: bad return on queue: %d\n", ret);
+ case BLK_MQ_RQ_QUEUE_ERROR:
+ rq->errors = -EIO;
+ blk_mq_end_io(rq, rq->errors);
+ break;
+ }
+
+ if (ret == BLK_MQ_RQ_QUEUE_BUSY)
+ break;
+ }
+
+ if (!queued)
+ hctx->dispatched[0]++;
+ else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
+ hctx->dispatched[ilog2(queued) + 1]++;
+
+ /*
+ * Any items that need requeuing? Stuff them into hctx->dispatch,
+ * that is where we will continue on next queue run.
+ */
+ if (!list_empty(&rq_list)) {
+ spin_lock(&hctx->lock);
+ list_splice(&rq_list, &hctx->dispatch);
+ spin_unlock(&hctx->lock);
+ }
+}
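
The dispatch loop pattern above (issue until the driver reports busy, then hold the unissued remainder for the next run) in a hedged standalone form; the return codes and the fake driver are illustrative and not the blk-mq driver API:

#include <stdio.h>

enum { QUEUE_OK, QUEUE_BUSY, QUEUE_ERROR };

/* Fake driver: accepts the first 'capacity' requests, then reports busy. */
static int capacity = 2;

static int driver_queue_rq(int rq)
{
	if (capacity-- > 0) {
		printf("issued rq %d\n", rq);
		return QUEUE_OK;
	}
	return QUEUE_BUSY;
}

int main(void)
{
	int rqs[] = { 10, 11, 12, 13 };
	int nr = 4, i;

	for (i = 0; i < nr; i++) {
		int ret = driver_queue_rq(rqs[i]);

		if (ret == QUEUE_BUSY) {
			/* Stop; rqs[i..nr-1] stay queued for the next run. */
			printf("busy, requeueing %d request(s)\n", nr - i);
			break;
		}
	}
	return 0;
}
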
+
+/*
+ * It'd be great if the workqueue API had a way to pass
+ * in a mask and had some smarts for more clever placement.
+ * For now we just round-robin here, switching for every
+ * BLK_MQ_CPU_WORK_BATCH queued items.
+ */
+static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
+{
+ int cpu = hctx->next_cpu;
+
+ if (--hctx->next_cpu_batch <= 0) {
+ int next_cpu;
+
+ next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
+ if (next_cpu >= nr_cpu_ids)
+ next_cpu = cpumask_first(hctx->cpumask);
+
+ hctx->next_cpu = next_cpu;
+ hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
+ }
+
+ return cpu;
+}
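For example, with hctx->cpumask = {2, 5, 9}, scheduled queue runs are placed on CPU 2 for BLK_MQ_CPU_WORK_BATCH consecutive runs, then on CPU 5, then on CPU 9, and then wrap back around to CPU 2.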
+
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+{
+ if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
+ return;
+
+ if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
+ __blk_mq_run_hw_queue(hctx);
+ else if (hctx->queue->nr_hw_queues == 1)
+ kblockd_schedule_delayed_work(&hctx->run_work, 0);
+ else {
+ unsigned int cpu;
+
+ cpu = blk_mq_hctx_next_cpu(hctx);
+ kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
+ }
+}
+
+void blk_mq_run_queues(struct request_queue *q, bool async)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if ((!blk_mq_hctx_has_pending(hctx) &&
+ list_empty_careful(&hctx->dispatch)) ||
+ test_bit(BLK_MQ_S_STOPPED, &hctx->state))
+ continue;
+
+ preempt_disable();
+ blk_mq_run_hw_queue(hctx, async);
+ preempt_enable();
+ }
+}
+EXPORT_SYMBOL(blk_mq_run_queues);
+
+void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
+{
+ cancel_delayed_work(&hctx->run_work);
+ cancel_delayed_work(&hctx->delay_work);
+ set_bit(BLK_MQ_S_STOPPED, &hctx->state);
+}
+EXPORT_SYMBOL(blk_mq_stop_hw_queue);
+
+void blk_mq_stop_hw_queues(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_stop_hw_queue(hctx);
+}
+EXPORT_SYMBOL(blk_mq_stop_hw_queues);
+
+void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
+{
+ clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
+
+ preempt_disable();
+ blk_mq_run_hw_queue(hctx, false);
+ preempt_enable();
+}
+EXPORT_SYMBOL(blk_mq_start_hw_queue);
+
+void blk_mq_start_hw_queues(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_start_hw_queue(hctx);
+}
+EXPORT_SYMBOL(blk_mq_start_hw_queues);
+
+
+void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
+ continue;
+
+ clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
+ preempt_disable();
+ blk_mq_run_hw_queue(hctx, async);
+ preempt_enable();
+ }
+}
+EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
+
+static void blk_mq_run_work_fn(struct work_struct *work)
+{
+ struct blk_mq_hw_ctx *hctx;
+
+ hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
+
+ __blk_mq_run_hw_queue(hctx);
+}
+
+static void blk_mq_delay_work_fn(struct work_struct *work)
+{
+ struct blk_mq_hw_ctx *hctx;
+
+ hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
+
+ if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
+ __blk_mq_run_hw_queue(hctx);
+}
+
+void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
+{
+ unsigned long tmo = msecs_to_jiffies(msecs);
+
+ if (hctx->queue->nr_hw_queues == 1)
+ kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
+ else {
+ unsigned int cpu;
+
+ cpu = blk_mq_hctx_next_cpu(hctx);
+ kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
+ }
+}
+EXPORT_SYMBOL(blk_mq_delay_queue);
+
+static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
+ struct request *rq, bool at_head)
+{
+ struct blk_mq_ctx *ctx = rq->mq_ctx;
+
+ trace_block_rq_insert(hctx->queue, rq);
+
+ if (at_head)
+ list_add(&rq->queuelist, &ctx->rq_list);
+ else
+ list_add_tail(&rq->queuelist, &ctx->rq_list);
+
+ blk_mq_hctx_mark_pending(hctx, ctx);
+}
+
+void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
+ bool async)
+{
+ struct request_queue *q = rq->q;
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
+
+ current_ctx = blk_mq_get_ctx(q);
+ if (!cpu_online(ctx->cpu))
+ rq->mq_ctx = ctx = current_ctx;
+
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
+ !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
+ blk_insert_flush(rq);
+ } else {
+ spin_lock(&ctx->lock);
+ __blk_mq_insert_request(hctx, rq, at_head);
+ spin_unlock(&ctx->lock);
+ }
+
+ if (run_queue)
+ blk_mq_run_hw_queue(hctx, async);
+
+ blk_mq_put_ctx(current_ctx);
+}
+
+static void blk_mq_insert_requests(struct request_queue *q,
+ struct blk_mq_ctx *ctx,
+ struct list_head *list,
+ int depth,
+ bool from_schedule)
+
+{
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *current_ctx;
+
+ trace_block_unplug(q, depth, !from_schedule);
+
+ current_ctx = blk_mq_get_ctx(q);
+
+ if (!cpu_online(ctx->cpu))
+ ctx = current_ctx;
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ /*
+ * preemption doesn't flush plug list, so it's possible ctx->cpu is
+ * offline now
+ */
+ spin_lock(&ctx->lock);
+ while (!list_empty(list)) {
+ struct request *rq;
+
+ rq = list_first_entry(list, struct request, queuelist);
+ list_del_init(&rq->queuelist);
+ rq->mq_ctx = ctx;
+ __blk_mq_insert_request(hctx, rq, false);
+ }
+ spin_unlock(&ctx->lock);
+
+ blk_mq_run_hw_queue(hctx, from_schedule);
+ blk_mq_put_ctx(current_ctx);
+}
+
+static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct request *rqa = container_of(a, struct request, queuelist);
+ struct request *rqb = container_of(b, struct request, queuelist);
+
+ return !(rqa->mq_ctx < rqb->mq_ctx ||
+ (rqa->mq_ctx == rqb->mq_ctx &&
+ blk_rq_pos(rqa) < blk_rq_pos(rqb)));
+}
+
+void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+{
+ struct blk_mq_ctx *this_ctx;
+ struct request_queue *this_q;
+ struct request *rq;
+ LIST_HEAD(list);
+ LIST_HEAD(ctx_list);
+ unsigned int depth;
+
+ list_splice_init(&plug->mq_list, &list);
+
+ list_sort(NULL, &list, plug_ctx_cmp);
+
+ this_q = NULL;
+ this_ctx = NULL;
+ depth = 0;
+
+ while (!list_empty(&list)) {
+ rq = list_entry_rq(list.next);
+ list_del_init(&rq->queuelist);
+ BUG_ON(!rq->q);
+ if (rq->mq_ctx != this_ctx) {
+ if (this_ctx) {
+ blk_mq_insert_requests(this_q, this_ctx,
+ &ctx_list, depth,
+ from_schedule);
+ }
+
+ this_ctx = rq->mq_ctx;
+ this_q = rq->q;
+ depth = 0;
+ }
+
+ depth++;
+ list_add_tail(&rq->queuelist, &ctx_list);
+ }
+
+ /*
+ * If 'this_ctx' is set, we know we have entries to complete
+ * on 'ctx_list'. Do those.
+ */
+ if (this_ctx) {
+ blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
+ from_schedule);
+ }
+}
+
+static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
+{
+ init_request_from_bio(rq, bio);
+
+ if (blk_do_io_stat(rq))
+ blk_account_io_start(rq, 1);
+}
+
+static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx,
+ struct request *rq, struct bio *bio)
+{
+ struct request_queue *q = hctx->queue;
+
+ if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
+ blk_mq_bio_to_request(rq, bio);
+ spin_lock(&ctx->lock);
+insert_rq:
+ __blk_mq_insert_request(hctx, rq, false);
+ spin_unlock(&ctx->lock);
+ return false;
+ } else {
+ spin_lock(&ctx->lock);
+ if (!blk_mq_attempt_merge(q, ctx, bio)) {
+ blk_mq_bio_to_request(rq, bio);
+ goto insert_rq;
+ }
+
+ spin_unlock(&ctx->lock);
+ __blk_mq_free_request(hctx, ctx, rq);
+ return true;
+ }
+}
+
+struct blk_map_ctx {
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
+};
+
+static struct request *blk_mq_map_request(struct request_queue *q,
+ struct bio *bio,
+ struct blk_map_ctx *data)
+{
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
+ struct request *rq;
+ int rw = bio_data_dir(bio);
+ struct blk_mq_alloc_data alloc_data;
+
+ if (unlikely(blk_mq_queue_enter(q))) {
+ bio_endio(bio, -EIO);
+ return NULL;
+ }
+
+ ctx = blk_mq_get_ctx(q);
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ if (rw_is_sync(bio->bi_rw))
+ rw |= REQ_SYNC;
+
+ trace_block_getrq(q, bio, rw);
+ blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
+ hctx);
+ rq = __blk_mq_alloc_request(&alloc_data, rw);
+ if (unlikely(!rq)) {
+ __blk_mq_run_hw_queue(hctx);
+ blk_mq_put_ctx(ctx);
+ trace_block_sleeprq(q, bio, rw);
+
+ ctx = blk_mq_get_ctx(q);
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ blk_mq_set_alloc_data(&alloc_data, q,
+ __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
+ rq = __blk_mq_alloc_request(&alloc_data, rw);
+ ctx = alloc_data.ctx;
+ hctx = alloc_data.hctx;
+ }
+
+ hctx->queued++;
+ data->hctx = hctx;
+ data->ctx = ctx;
+ return rq;
+}
+
+/*
+ * Multiple hardware queue variant. This will not use per-process plugs,
+ * but will attempt to bypass the hctx queueing if we can go straight to
+ * hardware for SYNC IO.
+ */
+static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
+{
+ const int is_sync = rw_is_sync(bio->bi_rw);
+ const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+ struct blk_map_ctx data;
+ struct request *rq;
+
+ blk_queue_bounce(q, &bio);
+
+ if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+ bio_endio(bio, -EIO);
+ return;
+ }
+
+ rq = blk_mq_map_request(q, bio, &data);
+ if (unlikely(!rq))
+ return;
+
+ if (unlikely(is_flush_fua)) {
+ blk_mq_bio_to_request(rq, bio);
+ blk_insert_flush(rq);
+ goto run_queue;
+ }
+
+ if (is_sync) {
+ int ret;
+
+ blk_mq_bio_to_request(rq, bio);
+ blk_mq_start_request(rq, true);
+
+ /*
+ * If the driver accepted the request, we are done. On a hard
+ * error, end the request. If the driver is busy, fall back to
+ * queueing it as we previously would have done.
+ */
+ ret = q->mq_ops->queue_rq(data.hctx, rq);
+ if (ret == BLK_MQ_RQ_QUEUE_OK)
+ goto done;
+ else {
+ __blk_mq_requeue_request(rq);
+
+ if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
+ rq->errors = -EIO;
+ blk_mq_end_io(rq, rq->errors);
+ goto done;
+ }
+ }
+ }
+
+ if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
+ /*
+ * For a SYNC request, send it to the hardware immediately. For
+ * an ASYNC request, just ensure that we run it later on. The
+ * latter allows for merging opportunities and more efficient
+ * dispatching.
+ */
+run_queue:
+ blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
+ }
+done:
+ blk_mq_put_ctx(data.ctx);
+}
+
+/*
+ * Single hardware queue variant. This will attempt to use any per-process
+ * plug for merging and IO deferral.
+ */
+static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
+{
+ const int is_sync = rw_is_sync(bio->bi_rw);
+ const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+ unsigned int use_plug, request_count = 0;
+ struct blk_map_ctx data;
+ struct request *rq;
+
+ /*
+ * Only use the per-process plug for async, non-flush IO; sync and
+ * flush/FUA IO bypass the plug.
+ */
+ use_plug = !is_flush_fua && !is_sync;
+
+ blk_queue_bounce(q, &bio);
+
+ if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+ bio_endio(bio, -EIO);
+ return;
+ }
+
+ if (use_plug && !blk_queue_nomerges(q) &&
+ blk_attempt_plug_merge(q, bio, &request_count))
+ return;
+
+ rq = blk_mq_map_request(q, bio, &data);
+ if (unlikely(!rq))
+ return;
+
+ if (unlikely(is_flush_fua)) {
+ blk_mq_bio_to_request(rq, bio);
+ blk_insert_flush(rq);
+ goto run_queue;
+ }
+
+ /*
+ * If the task has a plug, use it. Since plugging is completely
+ * lockless, the plug can temporarily store requests until the task
+ * is either done or scheduled away.
+ */
+ if (use_plug) {
+ struct blk_plug *plug = current->plug;
+
+ if (plug) {
+ blk_mq_bio_to_request(rq, bio);
+ if (list_empty(&plug->mq_list))
+ trace_block_plug(q);
+ else if (request_count >= BLK_MAX_REQUEST_COUNT) {
+ blk_flush_plug_list(plug, false);
+ trace_block_plug(q);
+ }
+ list_add_tail(&rq->queuelist, &plug->mq_list);
+ blk_mq_put_ctx(data.ctx);
+ return;
+ }
+ }
+
+ if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
+ /*
+ * For a SYNC request, send it to the hardware immediately. For
+ * an ASYNC request, just ensure that we run it later on. The
+ * latter allows for merging opportunities and more efficient
+ * dispatching.
+ */
+run_queue:
+ blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
+ }
+
+ blk_mq_put_ctx(data.ctx);
+}
+
+/*
+ * Default mapping to a software queue, since we use one per CPU.
+ */
+struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
+{
+ return q->queue_hw_ctx[q->mq_map[cpu]];
+}
+EXPORT_SYMBOL(blk_mq_map_queue);
+
+static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
+ struct blk_mq_tags *tags, unsigned int hctx_idx)
+{
+ struct page *page;
+
+ if (tags->rqs && set->ops->exit_request) {
+ int i;
+
+ for (i = 0; i < tags->nr_tags; i++) {
+ if (!tags->rqs[i])
+ continue;
+ set->ops->exit_request(set->driver_data, tags->rqs[i],
+ hctx_idx, i);
+ }
+ }
+
+ while (!list_empty(&tags->page_list)) {
+ page = list_first_entry(&tags->page_list, struct page, lru);
+ list_del_init(&page->lru);
+ __free_pages(page, page->private);
+ }
+
+ kfree(tags->rqs);
+
+ blk_mq_free_tags(tags);
+}
+
+static size_t order_to_size(unsigned int order)
+{
+ return (size_t)PAGE_SIZE << order;
+}
+
+static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
+ unsigned int hctx_idx)
+{
+ struct blk_mq_tags *tags;
+ unsigned int i, j, entries_per_page, max_order = 4;
+ size_t rq_size, left;
+
+ tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
+ set->numa_node);
+ if (!tags)
+ return NULL;
+
+ INIT_LIST_HEAD(&tags->page_list);
+
+ tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
+ GFP_KERNEL, set->numa_node);
+ if (!tags->rqs) {
+ blk_mq_free_tags(tags);
+ return NULL;
+ }
+
+ /*
+ * rq_size is the size of the request plus driver payload, rounded
+ * to the cacheline size
+ */
+ rq_size = round_up(sizeof(struct request) + set->cmd_size,
+ cache_line_size());
+ left = rq_size * set->queue_depth;
+
+ for (i = 0; i < set->queue_depth; ) {
+ int this_order = max_order;
+ struct page *page;
+ int to_do;
+ void *p;
+
+ while (left < order_to_size(this_order - 1) && this_order)
+ this_order--;
+
+ do {
+ page = alloc_pages_node(set->numa_node, GFP_KERNEL,
+ this_order);
+ if (page)
+ break;
+ if (!this_order--)
+ break;
+ if (order_to_size(this_order) < rq_size)
+ break;
+ } while (1);
+
+ if (!page)
+ goto fail;
+
+ page->private = this_order;
+ list_add_tail(&page->lru, &tags->page_list);
+
+ p = page_address(page);
+ entries_per_page = order_to_size(this_order) / rq_size;
+ to_do = min(entries_per_page, set->queue_depth - i);
+ left -= to_do * rq_size;
+ for (j = 0; j < to_do; j++) {
+ tags->rqs[i] = p;
+ if (set->ops->init_request) {
+ if (set->ops->init_request(set->driver_data,
+ tags->rqs[i], hctx_idx, i,
+ set->numa_node))
+ goto fail;
+ }
+
+ p += rq_size;
+ i++;
+ }
+ }
+
+ return tags;
+
+fail:
+ pr_warn("%s: failed to allocate requests\n", __func__);
+ blk_mq_free_rq_map(set, tags, hctx_idx);
+ return NULL;
+}
+
+static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
+{
+ kfree(bitmap->map);
+}
+
+static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
+{
+ unsigned int bpw = 8, total, num_maps, i;
+
+ bitmap->bits_per_word = bpw;
+
+ num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
+ bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
+ GFP_KERNEL, node);
+ if (!bitmap->map)
+ return -ENOMEM;
+
+ bitmap->map_size = num_maps;
+
+ total = nr_cpu_ids;
+ for (i = 0; i < num_maps; i++) {
+ bitmap->map[i].depth = min(total, bitmap->bits_per_word);
+ total -= bitmap->map[i].depth;
+ }
+
+ return 0;
+}
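As a concrete sizing example for the allocation above: with nr_cpu_ids = 20 and bits_per_word = 8, num_maps = ALIGN(20, 8) / 8 = 3, and the final loop hands out per-word depths of 8, 8 and 4, so exactly 20 usable bits are spread across three cacheline-aligned words.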
+
+static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
+{
+ struct request_queue *q = hctx->queue;
+ struct blk_mq_ctx *ctx;
+ LIST_HEAD(tmp);
+
+ /*
+ * Move ctx entries to new CPU, if this one is going away.
+ */
+ ctx = __blk_mq_get_ctx(q, cpu);
+
+ spin_lock(&ctx->lock);
+ if (!list_empty(&ctx->rq_list)) {
+ list_splice_init(&ctx->rq_list, &tmp);
+ blk_mq_hctx_clear_pending(hctx, ctx);
+ }
+ spin_unlock(&ctx->lock);
+
+ if (list_empty(&tmp))
+ return NOTIFY_OK;
+
+ ctx = blk_mq_get_ctx(q);
+ spin_lock(&ctx->lock);
+
+ while (!list_empty(&tmp)) {
+ struct request *rq;
+
+ rq = list_first_entry(&tmp, struct request, queuelist);
+ rq->mq_ctx = ctx;
+ list_move_tail(&rq->queuelist, &ctx->rq_list);
+ }
+
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ blk_mq_hctx_mark_pending(hctx, ctx);
+
+ spin_unlock(&ctx->lock);
+
+ blk_mq_run_hw_queue(hctx, true);
+ blk_mq_put_ctx(ctx);
+ return NOTIFY_OK;
+}
+
+static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
+{
+ struct request_queue *q = hctx->queue;
+ struct blk_mq_tag_set *set = q->tag_set;
+
+ if (set->tags[hctx->queue_num])
+ return NOTIFY_OK;
+
+ set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
+ if (!set->tags[hctx->queue_num])
+ return NOTIFY_STOP;
+
+ hctx->tags = set->tags[hctx->queue_num];
+ return NOTIFY_OK;
+}
+
+static int blk_mq_hctx_notify(void *data, unsigned long action,
+ unsigned int cpu)
+{
+ struct blk_mq_hw_ctx *hctx = data;
+
+ if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
+ return blk_mq_hctx_cpu_offline(hctx, cpu);
+ else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
+ return blk_mq_hctx_cpu_online(hctx, cpu);
+
+ return NOTIFY_OK;
+}
+
+static void blk_mq_exit_hw_queues(struct request_queue *q,
+ struct blk_mq_tag_set *set, int nr_queue)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (i == nr_queue)
+ break;
+
+ blk_mq_tag_idle(hctx);
+
+ if (set->ops->exit_hctx)
+ set->ops->exit_hctx(hctx, i);
+
+ blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+ kfree(hctx->ctxs);
+ blk_mq_free_bitmap(&hctx->ctx_map);
+ }
+
+}
+
+static void blk_mq_free_hw_queues(struct request_queue *q,
+ struct blk_mq_tag_set *set)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ free_cpumask_var(hctx->cpumask);
+ kfree(hctx);
+ }
+}
+
+static int blk_mq_init_hw_queues(struct request_queue *q,
+ struct blk_mq_tag_set *set)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i;
+
+ /*
+ * Initialize hardware queues
+ */
+ queue_for_each_hw_ctx(q, hctx, i) {
+ int node;
+
+ node = hctx->numa_node;
+ if (node == NUMA_NO_NODE)
+ node = hctx->numa_node = set->numa_node;
+
+ INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
+ INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
+ spin_lock_init(&hctx->lock);
+ INIT_LIST_HEAD(&hctx->dispatch);
+ hctx->queue = q;
+ hctx->queue_num = i;
+ hctx->flags = set->flags;
+ hctx->cmd_size = set->cmd_size;
+
+ blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
+ blk_mq_hctx_notify, hctx);
+ blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
+
+ hctx->tags = set->tags[i];
+
+ /*
+ * Allocate space for all possible CPUs to avoid allocation at
+ * runtime.
+ */
+ hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
+ GFP_KERNEL, node);
+ if (!hctx->ctxs)
+ break;
+
+ if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
+ break;
+
+ hctx->nr_ctx = 0;
+
+ if (set->ops->init_hctx &&
+ set->ops->init_hctx(hctx, set->driver_data, i))
+ break;
+ }
+
+ if (i == q->nr_hw_queues)
+ return 0;
+
+ /*
+ * Init failed
+ */
+ blk_mq_exit_hw_queues(q, set, i);
+
+ return 1;
+}
+
+static void blk_mq_init_cpu_queues(struct request_queue *q,
+ unsigned int nr_hw_queues)
+{
+ unsigned int i;
+
+ for_each_possible_cpu(i) {
+ struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
+ struct blk_mq_hw_ctx *hctx;
+
+ memset(__ctx, 0, sizeof(*__ctx));
+ __ctx->cpu = i;
+ spin_lock_init(&__ctx->lock);
+ INIT_LIST_HEAD(&__ctx->rq_list);
+ __ctx->queue = q;
+
+ /* If the cpu isn't online, the cpu is mapped to first hctx */
+ if (!cpu_online(i))
+ continue;
+
+ hctx = q->mq_ops->map_queue(q, i);
+ cpumask_set_cpu(i, hctx->cpumask);
+ hctx->nr_ctx++;
+
+ /*
+ * Set local node, IFF we have more than one hw queue. If
+ * not, we remain on the home node of the device
+ */
+ if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
+ hctx->numa_node = cpu_to_node(i);
+ }
+}
+
+static void blk_mq_map_swqueue(struct request_queue *q)
+{
+ unsigned int i;
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ cpumask_clear(hctx->cpumask);
+ hctx->nr_ctx = 0;
+ }
+
+ /*
+ * Map software to hardware queues
+ */
+ queue_for_each_ctx(q, ctx, i) {
+ /* If the cpu isn't online, the cpu is mapped to first hctx */
+ if (!cpu_online(i))
+ continue;
+
+ hctx = q->mq_ops->map_queue(q, i);
+ cpumask_set_cpu(i, hctx->cpumask);
+ ctx->index_hw = hctx->nr_ctx;
+ hctx->ctxs[hctx->nr_ctx++] = ctx;
+ }
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ /*
+ * If no software queues are mapped to this hardware queue,
+ * disable it and free the request entries.
+ */
+ if (!hctx->nr_ctx) {
+ struct blk_mq_tag_set *set = q->tag_set;
+
+ if (set->tags[i]) {
+ blk_mq_free_rq_map(set, set->tags[i], i);
+ set->tags[i] = NULL;
+ hctx->tags = NULL;
+ }
+ continue;
+ }
+
+ /*
+ * Initialize batch round-robin counts
+ */
+ hctx->next_cpu = cpumask_first(hctx->cpumask);
+ hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
+ }
+}
+
+static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
+{
+ struct blk_mq_hw_ctx *hctx;
+ struct request_queue *q;
+ bool shared;
+ int i;
+
+ if (set->tag_list.next == set->tag_list.prev)
+ shared = false;
+ else
+ shared = true;
+
+ list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ blk_mq_freeze_queue(q);
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (shared)
+ hctx->flags |= BLK_MQ_F_TAG_SHARED;
+ else
+ hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
+ }
+ blk_mq_unfreeze_queue(q);
+ }
+}
+
+static void blk_mq_del_queue_tag_set(struct request_queue *q)
+{
+ struct blk_mq_tag_set *set = q->tag_set;
+
+ blk_mq_freeze_queue(q);
+
+ mutex_lock(&set->tag_list_lock);
+ list_del_init(&q->tag_set_list);
+ blk_mq_update_tag_set_depth(set);
+ mutex_unlock(&set->tag_list_lock);
+
+ blk_mq_unfreeze_queue(q);
+}
+
+static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
+ struct request_queue *q)
+{
+ q->tag_set = set;
+
+ mutex_lock(&set->tag_list_lock);
+ list_add_tail(&q->tag_set_list, &set->tag_list);
+ blk_mq_update_tag_set_depth(set);
+ mutex_unlock(&set->tag_list_lock);
+}
+
+struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
+{
+ struct blk_mq_hw_ctx **hctxs;
+ struct blk_mq_ctx __percpu *ctx;
+ struct request_queue *q;
+ unsigned int *map;
+ int i;
+
+ ctx = alloc_percpu(struct blk_mq_ctx);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
+ set->numa_node);
+
+ if (!hctxs)
+ goto err_percpu;
+
+ map = blk_mq_make_queue_map(set);
+ if (!map)
+ goto err_map;
+
+ for (i = 0; i < set->nr_hw_queues; i++) {
+ int node = blk_mq_hw_queue_to_node(map, i);
+
+ hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
+ GFP_KERNEL, node);
+ if (!hctxs[i])
+ goto err_hctxs;
+
+ if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL))
+ goto err_hctxs;
+
+ atomic_set(&hctxs[i]->nr_active, 0);
+ hctxs[i]->numa_node = node;
+ hctxs[i]->queue_num = i;
+ }
+
+ q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
+ if (!q)
+ goto err_hctxs;
+
+ if (percpu_counter_init(&q->mq_usage_counter, 0))
+ goto err_map;
+
+ setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
+ blk_queue_rq_timeout(q, 30000);
+
+ q->nr_queues = nr_cpu_ids;
+ q->nr_hw_queues = set->nr_hw_queues;
+ q->mq_map = map;
+
+ q->queue_ctx = ctx;
+ q->queue_hw_ctx = hctxs;
+
+ q->mq_ops = set->ops;
+ q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
+
+ if (!(set->flags & BLK_MQ_F_SG_MERGE))
+ q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
+
+ q->sg_reserved_size = INT_MAX;
+
+ INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
+ INIT_LIST_HEAD(&q->requeue_list);
+ spin_lock_init(&q->requeue_lock);
+
+ if (q->nr_hw_queues > 1)
+ blk_queue_make_request(q, blk_mq_make_request);
+ else
+ blk_queue_make_request(q, blk_sq_make_request);
+
+ blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
+ if (set->timeout)
+ blk_queue_rq_timeout(q, set->timeout);
+
+ /*
+ * Do this after blk_queue_make_request() overrides it...
+ */
+ q->nr_requests = set->queue_depth;
+
+ if (set->ops->complete)
+ blk_queue_softirq_done(q, set->ops->complete);
+
+ blk_mq_init_flush(q);
+ blk_mq_init_cpu_queues(q, set->nr_hw_queues);
+
+ q->flush_rq = kzalloc(round_up(sizeof(struct request) +
+ set->cmd_size, cache_line_size()),
+ GFP_KERNEL);
+ if (!q->flush_rq)
+ goto err_hw;
+
+ if (blk_mq_init_hw_queues(q, set))
+ goto err_flush_rq;
+
+ mutex_lock(&all_q_mutex);
+ list_add_tail(&q->all_q_node, &all_q_list);
+ mutex_unlock(&all_q_mutex);
+
+ blk_mq_add_queue_tag_set(set, q);
+
+ blk_mq_map_swqueue(q);
+
+ return q;
+
+err_flush_rq:
+ kfree(q->flush_rq);
+err_hw:
+ blk_cleanup_queue(q);
+err_hctxs:
+ kfree(map);
+ for (i = 0; i < set->nr_hw_queues; i++) {
+ if (!hctxs[i])
+ break;
+ free_cpumask_var(hctxs[i]->cpumask);
+ kfree(hctxs[i]);
+ }
+err_map:
+ kfree(hctxs);
+err_percpu:
+ free_percpu(ctx);
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(blk_mq_init_queue);
+
+void blk_mq_free_queue(struct request_queue *q)
+{
+ struct blk_mq_tag_set *set = q->tag_set;
+
+ blk_mq_del_queue_tag_set(q);
+
+ blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
+ blk_mq_free_hw_queues(q, set);
+
+ percpu_counter_destroy(&q->mq_usage_counter);
+
+ free_percpu(q->queue_ctx);
+ kfree(q->queue_hw_ctx);
+ kfree(q->mq_map);
+
+ q->queue_ctx = NULL;
+ q->queue_hw_ctx = NULL;
+ q->mq_map = NULL;
+
+ mutex_lock(&all_q_mutex);
+ list_del_init(&q->all_q_node);
+ mutex_unlock(&all_q_mutex);
+}
+
+/* Basically redo blk_mq_init_queue with queue frozen */
+static void blk_mq_queue_reinit(struct request_queue *q)
+{
+ blk_mq_freeze_queue(q);
+
+ blk_mq_sysfs_unregister(q);
+
+ blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
+
+ /*
+ * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
+ * we should change hctx->numa_node according to the new topology (this
+ * involves freeing and re-allocating memory; is it worth doing?).
+ */
+
+ blk_mq_map_swqueue(q);
+
+ blk_mq_sysfs_register(q);
+
+ blk_mq_unfreeze_queue(q);
+}
+
+static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
+ unsigned long action, void *hcpu)
+{
+ struct request_queue *q;
+
+ /*
+ * Before new mappings are established, a hot-added CPU might already
+ * have started handling requests. This doesn't break anything, as we
+ * map offline CPUs to the first hardware queue. We re-init the queues
+ * below to get optimal settings.
+ */
+ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
+ action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
+ return NOTIFY_OK;
+
+ mutex_lock(&all_q_mutex);
+ list_for_each_entry(q, &all_q_list, all_q_node)
+ blk_mq_queue_reinit(q);
+ mutex_unlock(&all_q_mutex);
+ return NOTIFY_OK;
+}
+
+/*
+ * Alloc a tag set to be associated with one or more request queues.
+ * May fail with -EINVAL for various error conditions. May adjust the
+ * requested depth down if it is too large. In that case, the adjusted
+ * value will be stored in set->queue_depth.
+ */
+int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
+{
+ int i;
+
+ if (!set->nr_hw_queues)
+ return -EINVAL;
+ if (!set->queue_depth)
+ return -EINVAL;
+ if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
+ return -EINVAL;
+
+ if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
+ return -EINVAL;
+
+ if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
+ pr_info("blk-mq: reduced tag depth to %u\n",
+ BLK_MQ_MAX_DEPTH);
+ set->queue_depth = BLK_MQ_MAX_DEPTH;
+ }
+
+ set->tags = kmalloc_node(set->nr_hw_queues *
+ sizeof(struct blk_mq_tags *),
+ GFP_KERNEL, set->numa_node);
+ if (!set->tags)
+ goto out;
+
+ for (i = 0; i < set->nr_hw_queues; i++) {
+ set->tags[i] = blk_mq_init_rq_map(set, i);
+ if (!set->tags[i])
+ goto out_unwind;
+ }
+
+ mutex_init(&set->tag_list_lock);
+ INIT_LIST_HEAD(&set->tag_list);
+
+ return 0;
+
+out_unwind:
+ while (--i >= 0)
+ blk_mq_free_rq_map(set, set->tags[i], i);
+out:
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(blk_mq_alloc_tag_set);
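To make the intended call sequence concrete, here is a rough, hedged sketch of how a driver might wire up a single hardware queue with the API exported above; struct my_dev, struct my_cmd, my_queue_rq and my_mq_ops are illustrative placeholders and do not come from this patch:

static struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,		/* see the driver-side sketch earlier */
	.map_queue	= blk_mq_map_queue,	/* default CPU -> hw queue mapping */
};

static int my_dev_init_queue(struct my_dev *dev)
{
	struct request_queue *q;
	int ret;

	memset(&dev->tag_set, 0, sizeof(dev->tag_set));
	dev->tag_set.ops		= &my_mq_ops;
	dev->tag_set.nr_hw_queues	= 1;
	dev->tag_set.queue_depth	= 64;
	dev->tag_set.numa_node		= NUMA_NO_NODE;
	dev->tag_set.cmd_size		= sizeof(struct my_cmd);	/* per-request driver payload */
	dev->tag_set.flags		= BLK_MQ_F_SHOULD_MERGE;
	dev->tag_set.driver_data	= dev;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret)
		return ret;

	q = blk_mq_init_queue(&dev->tag_set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(&dev->tag_set);
		return PTR_ERR(q);
	}

	dev->queue = q;
	return 0;
}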
+
+void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
+{
+ int i;
+
+ for (i = 0; i < set->nr_hw_queues; i++) {
+ if (set->tags[i])
+ blk_mq_free_rq_map(set, set->tags[i], i);
+ }
+
+ kfree(set->tags);
+}
+EXPORT_SYMBOL(blk_mq_free_tag_set);
+
+int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
+{
+ struct blk_mq_tag_set *set = q->tag_set;
+ struct blk_mq_hw_ctx *hctx;
+ int i, ret;
+
+ if (!set || nr > set->queue_depth)
+ return -EINVAL;
+
+ ret = 0;
+ queue_for_each_hw_ctx(q, hctx, i) {
+ ret = blk_mq_tag_update_depth(hctx->tags, nr);
+ if (ret)
+ break;
+ }
+
+ if (!ret)
+ q->nr_requests = nr;
+
+ return ret;
+}
+
+void blk_mq_disable_hotplug(void)
+{
+ mutex_lock(&all_q_mutex);
+}
+
+void blk_mq_enable_hotplug(void)
+{
+ mutex_unlock(&all_q_mutex);
+}
+
+static int __init blk_mq_init(void)
+{
+ blk_mq_cpu_init();
+
+ /* Must be called after percpu_counter_hotcpu_callback() */
+ hotcpu_notifier(blk_mq_queue_reinit_notify, -10);
+
+ return 0;
+}
+subsys_initcall(blk_mq_init);
diff --git a/block/blk-mq.h b/block/blk-mq.h
new file mode 100644
index 00000000000..26460884c6c
--- /dev/null
+++ b/block/blk-mq.h
@@ -0,0 +1,117 @@
+#ifndef INT_BLK_MQ_H
+#define INT_BLK_MQ_H
+
+struct blk_mq_tag_set;
+
+struct blk_mq_ctx {
+ struct {
+ spinlock_t lock;
+ struct list_head rq_list;
+ } ____cacheline_aligned_in_smp;
+
+ unsigned int cpu;
+ unsigned int index_hw;
+
+ unsigned int last_tag ____cacheline_aligned_in_smp;
+
+ /* incremented at dispatch time */
+ unsigned long rq_dispatched[2];
+ unsigned long rq_merged;
+
+ /* incremented at completion time */
+ unsigned long ____cacheline_aligned_in_smp rq_completed[2];
+
+ struct request_queue *queue;
+ struct kobject kobj;
+} ____cacheline_aligned_in_smp;
+
+void __blk_mq_complete_request(struct request *rq);
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
+void blk_mq_init_flush(struct request_queue *q);
+void blk_mq_drain_queue(struct request_queue *q);
+void blk_mq_free_queue(struct request_queue *q);
+void blk_mq_clone_flush_request(struct request *flush_rq,
+ struct request *orig_rq);
+int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
+
+/*
+ * CPU hotplug helpers
+ */
+struct blk_mq_cpu_notifier;
+void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
+ int (*fn)(void *, unsigned long, unsigned int),
+ void *data);
+void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
+void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
+void blk_mq_cpu_init(void);
+void blk_mq_enable_hotplug(void);
+void blk_mq_disable_hotplug(void);
+
+/*
+ * CPU -> queue mappings
+ */
+extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
+extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
+extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
+
+/*
+ * sysfs helpers
+ */
+extern int blk_mq_sysfs_register(struct request_queue *q);
+extern void blk_mq_sysfs_unregister(struct request_queue *q);
+
+/*
+ * Basic implementation of a sparse bitmap, allowing the user to spread
+ * the bits over more cachelines.
+ */
+struct blk_align_bitmap {
+ unsigned long word;
+ unsigned long depth;
+} ____cacheline_aligned_in_smp;
+
+static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
+ unsigned int cpu)
+{
+ return per_cpu_ptr(q->queue_ctx, cpu);
+}
+
+/*
+ * This assumes per-cpu software queues. They could be per-node
+ * as well, for instance. For now this is hardcoded as-is. Note that we don't
+ * care about preemption, since we know the ctx's are persistent. This does
+ * mean that we can't rely on ctx always matching the currently running CPU.
+ */
+static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
+{
+ return __blk_mq_get_ctx(q, get_cpu());
+}
+
+static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
+{
+ put_cpu();
+}
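In other words, these helpers follow the usual get_cpu()/put_cpu() pairing; a minimal sketch of the expected usage, mirroring the callers elsewhere in this patch (example_touch_ctx is an illustrative name, not part of the patch):

static inline void example_touch_ctx(struct request_queue *q)
{
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);	/* pins the CPU (get_cpu()) */
	struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/* ... queue, merge or account work against ctx/hctx ... */
	(void)hctx;

	blk_mq_put_ctx(ctx);				/* put_cpu(): preemption re-enabled */
}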
+
+struct blk_mq_alloc_data {
+ /* input parameter */
+ struct request_queue *q;
+ gfp_t gfp;
+ bool reserved;
+
+ /* input & output parameter */
+ struct blk_mq_ctx *ctx;
+ struct blk_mq_hw_ctx *hctx;
+};
+
+static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
+ struct request_queue *q, gfp_t gfp, bool reserved,
+ struct blk_mq_ctx *ctx,
+ struct blk_mq_hw_ctx *hctx)
+{
+ data->q = q;
+ data->gfp = gfp;
+ data->reserved = reserved;
+ data->ctx = ctx;
+ data->hctx = hctx;
+}
+
+#endif
diff --git a/block/blk-settings.c b/block/blk-settings.c
index d3234fc494a..f1a1795a568 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -113,6 +113,8 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+ lim->chunk_sectors = 0;
+ lim->max_write_same_sectors = 0;
lim->max_discard_sectors = 0;
lim->discard_granularity = 0;
lim->discard_alignment = 0;
@@ -143,8 +145,9 @@ void blk_set_stacking_limits(struct queue_limits *lim)
lim->discard_zeroes_data = 1;
lim->max_segments = USHRT_MAX;
lim->max_hw_sectors = UINT_MAX;
-
- lim->max_sectors = BLK_DEF_MAX_SECTORS;
+ lim->max_segment_size = UINT_MAX;
+ lim->max_sectors = UINT_MAX;
+ lim->max_write_same_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
@@ -194,17 +197,17 @@ EXPORT_SYMBOL(blk_queue_make_request);
/**
* blk_queue_bounce_limit - set bounce buffer limit for queue
* @q: the request queue for the device
- * @dma_mask: the maximum address the device can handle
+ * @max_addr: the maximum address the device can handle
*
* Description:
* Different hardware can have different requirements as to what pages
* it can do I/O directly to. A low level driver can call
* blk_queue_bounce_limit to have lower memory pages allocated as bounce
- * buffers for doing I/O to pages residing above @dma_mask.
+ * buffers for doing I/O to pages residing above @max_addr.
**/
-void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
+void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
- unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
+ unsigned long b_pfn = max_addr >> PAGE_SHIFT;
int dma = 0;
q->bounce_gfp = GFP_NOIO;
@@ -275,6 +278,26 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
/**
+ * blk_queue_chunk_sectors - set size of the chunk for this queue
+ * @q: the request queue for the device
+ * @chunk_sectors: chunk sectors in the usual 512b unit
+ *
+ * Description:
+ * If a driver doesn't want IOs to cross a given chunk size, it can set
+ * this limit and prevent merging across chunks. Note that the chunk size
+ * must currently be a power-of-2 in sectors. Also note that the block
+ * layer must accept a page worth of data at any offset. So if the
+ * crossing of chunks is a hard limitation in the driver, it must still be
+ * prepared to split single page bios.
+ **/
+void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
+{
+ BUG_ON(!is_power_of_2(chunk_sectors));
+ q->limits.chunk_sectors = chunk_sectors;
+}
+EXPORT_SYMBOL(blk_queue_chunk_sectors);
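As a small illustrative use (not taken from this patch), a driver whose device cannot handle IOs crossing 128 KiB boundaries would register that limit as:

	/* 128 KiB = 256 sectors of 512 B; chunk_sectors must be a power of two */
	blk_queue_chunk_sectors(q, 256);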
+
+/**
* blk_queue_max_discard_sectors - set max sectors for a single discard
* @q: the request queue for the device
* @max_discard_sectors: maximum number of sectors to discard
@@ -287,6 +310,18 @@ void blk_queue_max_discard_sectors(struct request_queue *q,
EXPORT_SYMBOL(blk_queue_max_discard_sectors);
/**
+ * blk_queue_max_write_same_sectors - set max sectors for a single write same
+ * @q: the request queue for the device
+ * @max_write_same_sectors: maximum number of sectors to write per command
+ **/
+void blk_queue_max_write_same_sectors(struct request_queue *q,
+ unsigned int max_write_same_sectors)
+{
+ q->limits.max_write_same_sectors = max_write_same_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
+
+/**
* blk_queue_max_segments - set max hw segments for a request for this queue
* @q: the request queue for the device
* @max_segments: max number of segments
@@ -511,6 +546,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+ t->max_write_same_sectors = min(t->max_write_same_sectors,
+ b->max_write_same_sectors);
t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
@@ -576,6 +613,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
ret = -1;
}
+ t->raid_partial_stripes_expensive =
+ max(t->raid_partial_stripes_expensive,
+ b->raid_partial_stripes_expensive);
+
/* Find lowest common alignment_offset */
t->alignment_offset = lcm(t->alignment_offset, alignment)
& (max(t->physical_block_size, t->io_min) - 1);
@@ -596,7 +637,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
bottom = b->discard_granularity + alignment;
/* Verify that top and bottom intervals line up */
- if (max(top, bottom) & (min(top, bottom) - 1))
+ if ((max(top, bottom) % min(top, bottom)) != 0)
t->discard_misaligned = 1;
}
@@ -604,8 +645,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->max_discard_sectors);
t->discard_granularity = max(t->discard_granularity,
b->discard_granularity);
- t->discard_alignment = lcm(t->discard_alignment, alignment) &
- (t->discard_granularity - 1);
+ t->discard_alignment = lcm(t->discard_alignment, alignment) %
+ t->discard_granularity;
}
return ret;
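To see why the switch from a bitmask test to a modulo test matters: with discard granularities that are not powers of two, say top = 3072 and bottom = 1536, the old check computed 3072 & 1535 = 1024 and wrongly flagged a misalignment even though 3072 % 1536 == 0; the modulo form only sets discard_misaligned when the intervals genuinely fail to line up.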
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 1366a89d8e6..53b1737e978 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -8,6 +8,7 @@
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
+#include <linux/sched.h>
#include "blk.h"
@@ -22,20 +23,20 @@ static void blk_done_softirq(struct softirq_action *h)
struct list_head *cpu_list, local_list;
local_irq_disable();
- cpu_list = &__get_cpu_var(blk_cpu_done);
+ cpu_list = this_cpu_ptr(&blk_cpu_done);
list_replace_init(cpu_list, &local_list);
local_irq_enable();
while (!list_empty(&local_list)) {
struct request *rq;
- rq = list_entry(local_list.next, struct request, csd.list);
- list_del_init(&rq->csd.list);
+ rq = list_entry(local_list.next, struct request, ipi_list);
+ list_del_init(&rq->ipi_list);
rq->q->softirq_done_fn(rq);
}
}
-#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+#ifdef CONFIG_SMP
static void trigger_softirq(void *data)
{
struct request *rq = data;
@@ -43,10 +44,10 @@ static void trigger_softirq(void *data)
struct list_head *list;
local_irq_save(flags);
- list = &__get_cpu_var(blk_cpu_done);
- list_add_tail(&rq->csd.list, list);
+ list = this_cpu_ptr(&blk_cpu_done);
+ list_add_tail(&rq->ipi_list, list);
- if (list->next == &rq->csd.list)
+ if (list->next == &rq->ipi_list)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_restore(flags);
@@ -64,21 +65,21 @@ static int raise_blk_irq(int cpu, struct request *rq)
data->info = rq;
data->flags = 0;
- __smp_call_function_single(cpu, data, 0);
+ smp_call_function_single_async(cpu, data);
return 0;
}
return 1;
}
-#else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
+#else /* CONFIG_SMP */
static int raise_blk_irq(int cpu, struct request *rq)
{
return 1;
}
#endif
-static int __cpuinit blk_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
+ void *hcpu)
{
/*
* If a CPU goes away, splice its entries to the current CPU
@@ -89,7 +90,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
local_irq_disable();
list_splice_init(&per_cpu(blk_cpu_done, cpu),
- &__get_cpu_var(blk_cpu_done));
+ this_cpu_ptr(&blk_cpu_done));
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_enable();
}
@@ -97,15 +98,16 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
+static struct notifier_block blk_cpu_notifier = {
.notifier_call = blk_cpu_notify,
};
void __blk_complete_request(struct request *req)
{
- int ccpu, cpu, group_cpu = NR_CPUS;
+ int ccpu, cpu;
struct request_queue *q = req->q;
unsigned long flags;
+ bool shared = false;
BUG_ON(!q->softirq_done_fn);
@@ -117,26 +119,24 @@ void __blk_complete_request(struct request *req)
*/
if (req->cpu != -1) {
ccpu = req->cpu;
- if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) {
- ccpu = blk_cpu_to_group(ccpu);
- group_cpu = blk_cpu_to_group(cpu);
- }
+ if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
+ shared = cpus_share_cache(cpu, ccpu);
} else
ccpu = cpu;
/*
- * If current CPU and requested CPU are in the same group, running
- * softirq in current CPU. One might concern this is just like
+ * If current CPU and requested CPU share a cache, run the softirq on
+ * the current CPU. One might worry that this is just like
* QUEUE_FLAG_SAME_FORCE, but it is not: blk_complete_request() runs in
* interrupt context, and the I/O controller currently doesn't support
* multiple interrupts, so the current CPU is effectively unique. This
* avoids sending an IPI from the current CPU to the first CPU of a group.
*/
- if (ccpu == cpu || ccpu == group_cpu) {
+ if (ccpu == cpu || shared) {
struct list_head *list;
do_local:
- list = &__get_cpu_var(blk_cpu_done);
- list_add_tail(&req->csd.list, list);
+ list = this_cpu_ptr(&blk_cpu_done);
+ list_add_tail(&req->ipi_list, list);
/*
* if the list only contains our just added request,
@@ -144,7 +144,7 @@ do_local:
* entries there, someone already raised the irq but it
* hasn't run yet.
*/
- if (list->next == &req->csd.list)
+ if (list->next == &req->ipi_list)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
} else if (raise_blk_irq(ccpu, req))
goto do_local;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index cf150011d80..23321fbab29 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -7,8 +7,11 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
+#include <linux/blk-mq.h>
#include "blk.h"
+#include "blk-cgroup.h"
+#include "blk-mq.h"
struct queue_sysfs_entry {
struct attribute attr;
@@ -25,9 +28,15 @@ queue_var_show(unsigned long var, char *page)
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
- char *p = (char *) page;
+ int err;
+ unsigned long v;
+
+ err = kstrtoul(page, 10, &v);
+ if (err || v > UINT_MAX)
+ return -EINVAL;
+
+ *var = v;
- *var = simple_strtoul(p, &p, 10);
return count;
}
@@ -39,45 +48,27 @@ static ssize_t queue_requests_show(struct request_queue *q, char *page)
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
- struct request_list *rl = &q->rq;
unsigned long nr;
- int ret;
+ int ret, err;
- if (!q->request_fn)
+ if (!q->request_fn && !q->mq_ops)
return -EINVAL;
ret = queue_var_store(&nr, page, count);
+ if (ret < 0)
+ return ret;
+
if (nr < BLKDEV_MIN_RQ)
nr = BLKDEV_MIN_RQ;
- spin_lock_irq(q->queue_lock);
- q->nr_requests = nr;
- blk_queue_congestion_threshold(q);
-
- if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
- blk_set_queue_congested(q, BLK_RW_SYNC);
- else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
- blk_clear_queue_congested(q, BLK_RW_SYNC);
-
- if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
- blk_set_queue_congested(q, BLK_RW_ASYNC);
- else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
- blk_clear_queue_congested(q, BLK_RW_ASYNC);
-
- if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
- blk_set_queue_full(q, BLK_RW_SYNC);
- } else {
- blk_clear_queue_full(q, BLK_RW_SYNC);
- wake_up(&rl->wait[BLK_RW_SYNC]);
- }
+ if (q->request_fn)
+ err = blk_update_nr_requests(q, nr);
+ else
+ err = blk_mq_update_nr_requests(q, nr);
+
+ if (err)
+ return err;
- if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
- blk_set_queue_full(q, BLK_RW_ASYNC);
- } else {
- blk_clear_queue_full(q, BLK_RW_ASYNC);
- wake_up(&rl->wait[BLK_RW_ASYNC]);
- }
- spin_unlock_irq(q->queue_lock);
return ret;
}
@@ -95,6 +86,9 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
unsigned long ra_kb;
ssize_t ret = queue_var_store(&ra_kb, page, count);
+ if (ret < 0)
+ return ret;
+
q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
return ret;
@@ -161,6 +155,13 @@ static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *pag
return queue_var_show(queue_discard_zeroes_data(q), page);
}
+static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
+{
+ return sprintf(page, "%llu\n",
+ (unsigned long long)q->limits.max_write_same_sectors << 9);
+}
+
+
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
@@ -169,6 +170,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
+ if (ret < 0)
+ return ret;
+
if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
return -EINVAL;
@@ -200,6 +204,8 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \
unsigned long val; \
ssize_t ret; \
ret = queue_var_store(&val, page, count); \
+ if (ret < 0) \
+ return ret; \
if (neg) \
val = !val; \
\
@@ -229,6 +235,9 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
unsigned long nm;
ssize_t ret = queue_var_store(&nm, page, count);
+ if (ret < 0)
+ return ret;
+
spin_lock_irq(q->queue_lock);
queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
@@ -253,10 +262,13 @@ static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
ssize_t ret = -EINVAL;
-#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+#ifdef CONFIG_SMP
unsigned long val;
ret = queue_var_store(&val, page, count);
+ if (ret < 0)
+ return ret;
+
spin_lock_irq(q->queue_lock);
if (val == 2) {
queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
@@ -357,6 +369,11 @@ static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
.show = queue_discard_zeroes_data_show,
};
+static struct queue_sysfs_entry queue_write_same_max_entry = {
+ .attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
+ .show = queue_write_same_max_show,
+};
+
static struct queue_sysfs_entry queue_nonrot_entry = {
.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
.show = queue_show_nonrot,
@@ -404,6 +421,7 @@ static struct attribute *default_attrs[] = {
&queue_discard_granularity_entry.attr,
&queue_discard_max_entry.attr,
&queue_discard_zeroes_data_entry.attr,
+ &queue_write_same_max_entry.attr,
&queue_nonrot_entry.attr,
&queue_nomerges_entry.attr,
&queue_rq_affinity_entry.attr,
@@ -425,7 +443,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
if (!entry->show)
return -EIO;
mutex_lock(&q->sysfs_lock);
- if (blk_queue_dead(q)) {
+ if (blk_queue_dying(q)) {
mutex_unlock(&q->sysfs_lock);
return -ENOENT;
}
@@ -447,7 +465,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
q = container_of(kobj, struct request_queue, kobj);
mutex_lock(&q->sysfs_lock);
- if (blk_queue_dead(q)) {
+ if (blk_queue_dying(q)) {
mutex_unlock(&q->sysfs_lock);
return -ENOENT;
}
@@ -456,6 +474,13 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
return res;
}
+static void blk_free_queue_rcu(struct rcu_head *rcu_head)
+{
+ struct request_queue *q = container_of(rcu_head, struct request_queue,
+ rcu_head);
+ kmem_cache_free(blk_requestq_cachep, q);
+}
+
/**
* blk_release_queue: - release a &struct request_queue when it is no longer needed
* @kobj: the kobj belonging to the request queue to be released
@@ -475,10 +500,11 @@ static void blk_release_queue(struct kobject *kobj)
{
struct request_queue *q =
container_of(kobj, struct request_queue, kobj);
- struct request_list *rl = &q->rq;
blk_sync_queue(q);
+ blkcg_exit_queue(q);
+
if (q->elevator) {
spin_lock_irq(q->queue_lock);
ioc_clear_queue(q);
@@ -486,21 +512,22 @@ static void blk_release_queue(struct kobject *kobj)
elevator_exit(q->elevator);
}
- blk_throtl_exit(q);
-
- if (rl->rq_pool)
- mempool_destroy(rl->rq_pool);
+ blk_exit_rl(&q->root_rl);
if (q->queue_tags)
__blk_queue_free_tags(q);
- blk_throtl_release(q);
+ if (q->mq_ops)
+ blk_mq_free_queue(q);
+
+ kfree(q->flush_rq);
+
blk_trace_shutdown(q);
bdi_destroy(&q->backing_dev_info);
ida_simple_remove(&blk_queue_ida, q->id);
- kmem_cache_free(blk_requestq_cachep, q);
+ call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
static const struct sysfs_ops queue_sysfs_ops = {
@@ -523,6 +550,13 @@ int blk_register_queue(struct gendisk *disk)
if (WARN_ON(!q))
return -ENXIO;
+ /*
+ * Initialization must be complete by now. Finish the initial
+ * bypass from queue allocation.
+ */
+ blk_queue_bypass_end(q);
+ queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+
ret = blk_trace_init_sysfs(dev);
if (ret)
return ret;
@@ -535,6 +569,9 @@ int blk_register_queue(struct gendisk *disk)
kobject_uevent(&q->kobj, KOBJ_ADD);
+ if (q->mq_ops)
+ blk_mq_register_disk(disk);
+
if (!q->request_fn)
return 0;
@@ -557,6 +594,9 @@ void blk_unregister_queue(struct gendisk *disk)
if (WARN_ON(!q))
return;
+ if (q->mq_ops)
+ blk_mq_unregister_disk(disk);
+
if (q->request_fn)
elv_unregister_queue(q);
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 4af6f5cc116..a185b86741e 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
EXPORT_SYMBOL(blk_queue_find_tag);
/**
- * __blk_free_tags - release a given set of tag maintenance info
+ * blk_free_tags - release a given set of tag maintenance info
* @bqt: the tag map to free
*
- * Tries to free the specified @bqt. Returns true if it was
- * actually freed and false if there are still references using it
+ * Drops the reference count on @bqt and frees it when the last reference
+ * is dropped.
*/
-static int __blk_free_tags(struct blk_queue_tag *bqt)
+void blk_free_tags(struct blk_queue_tag *bqt)
{
- int retval;
-
- retval = atomic_dec_and_test(&bqt->refcnt);
- if (retval) {
+ if (atomic_dec_and_test(&bqt->refcnt)) {
BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
bqt->max_depth);
@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
kfree(bqt);
}
-
- return retval;
}
+EXPORT_SYMBOL(blk_free_tags);
/**
* __blk_queue_free_tags - release tag maintenance info
@@ -69,28 +65,13 @@ void __blk_queue_free_tags(struct request_queue *q)
if (!bqt)
return;
- __blk_free_tags(bqt);
+ blk_free_tags(bqt);
q->queue_tags = NULL;
queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
/**
- * blk_free_tags - release a given set of tag maintenance info
- * @bqt: the tag map to free
- *
- * For externally managed @bqt frees the map. Callers of this
- * function must guarantee to have released all the queues that
- * might have been using this tag map.
- */
-void blk_free_tags(struct blk_queue_tag *bqt)
-{
- if (unlikely(!__blk_free_tags(bqt)))
- BUG();
-}
-EXPORT_SYMBOL(blk_free_tags);
-
-/**
* blk_queue_free_tags - release tag maintenance info
* @q: the request queue for the device
*
@@ -186,7 +167,8 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
tags = __blk_queue_init_tags(q, depth);
if (!tags)
- goto fail;
+ return -ENOMEM;
+
} else if (q->queue_tags) {
rc = blk_queue_resize_tags(q, depth);
if (rc)
@@ -203,9 +185,6 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
INIT_LIST_HEAD(&q->tag_busy_list);
return 0;
-fail:
- kfree(tags);
- return -ENOMEM;
}
EXPORT_SYMBOL(blk_queue_init_tags);
@@ -350,9 +329,16 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
*/
max_depth = bqt->max_depth;
if (!rq_is_sync(rq) && max_depth > 1) {
- max_depth -= 2;
- if (!max_depth)
+ switch (max_depth) {
+ case 2:
max_depth = 1;
+ break;
+ case 3:
+ max_depth = 2;
+ break;
+ default:
+ max_depth -= 2;
+ }
if (q->in_flight[BLK_RW_ASYNC] > max_depth)
return 1;
}
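The effect of the switch statement above is easiest to see at small depths: with a tag depth of 3 the old code capped async requests at 3 - 2 = 1 tag, while the new code allows 2; a depth of 2 still maps to a cap of 1, and larger depths keep reserving two tags for sync IO as before.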
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5eed6a76721..3fdb21a390c 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -21,30 +21,100 @@ static int throtl_quantum = 32;
/* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10; /* 100 ms */
+static struct blkcg_policy blkcg_policy_throtl;
+
/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
-static void throtl_schedule_delayed_work(struct throtl_data *td,
- unsigned long delay);
-
-struct throtl_rb_root {
- struct rb_root rb;
- struct rb_node *left;
- unsigned int count;
- unsigned long min_disptime;
+
+/*
+ * To implement hierarchical throttling, throtl_grps form a tree and bios
+ * are dispatched upwards level by level until they reach the top and get
+ * issued. When dispatching bios from the children and the local group at
+ * each level, if they all went into a single bio_list, a local or child
+ * group that can queue many bios at once could fill up the list and
+ * starve the others.
+ *
+ * To avoid such starvation, dispatched bios are queued separately
+ * according to where they came from. When they are again dispatched to
+ * the parent, they're popped in round-robin order so that no single source
+ * hogs the dispatch window.
+ *
+ * throtl_qnode is used to keep the queued bios separated by their sources.
+ * Bios are queued to throtl_qnode which in turn is queued to
+ * throtl_service_queue and then dispatched in round-robin order.
+ *
+ * It's also used to track the reference counts on blkg's. A qnode always
+ * belongs to a throtl_grp and gets queued on itself or the parent, so
+ * incrementing the reference of the associated throtl_grp when a qnode is
+ * queued and decrementing when dequeued is enough to keep the whole blkg
+ * tree pinned while bios are in flight.
+ */
+struct throtl_qnode {
+ struct list_head node; /* service_queue->queued[] */
+ struct bio_list bios; /* queued bios */
+ struct throtl_grp *tg; /* tg this qnode belongs to */
};
-#define THROTL_RB_ROOT (struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
- .count = 0, .min_disptime = 0}
+struct throtl_service_queue {
+ struct throtl_service_queue *parent_sq; /* the parent service_queue */
+
+ /*
+ * Bios queued directly to this service_queue or dispatched from
+ * children throtl_grp's.
+ */
+ struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */
+ unsigned int nr_queued[2]; /* number of queued bios */
+
+ /*
+ * RB tree of active children throtl_grp's, which are sorted by
+ * their ->disptime.
+ */
+ struct rb_root pending_tree; /* RB tree of active tgs */
+ struct rb_node *first_pending; /* first node in the tree */
+ unsigned int nr_pending; /* # queued in the tree */
+ unsigned long first_pending_disptime; /* disptime of the first tg */
+ struct timer_list pending_timer; /* fires on first_pending_disptime */
+};
+
+enum tg_state_flags {
+ THROTL_TG_PENDING = 1 << 0, /* on parent's pending tree */
+ THROTL_TG_WAS_EMPTY = 1 << 1, /* bio_lists[] became non-empty */
+};
#define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
+/* Per-cpu group stats */
+struct tg_stats_cpu {
+ /* total bytes transferred */
+ struct blkg_rwstat service_bytes;
+ /* total IOs serviced, post merge */
+ struct blkg_rwstat serviced;
+};
+
struct throtl_grp {
- /* List of throtl groups on the request queue*/
- struct hlist_node tg_node;
+ /* must be the first member */
+ struct blkg_policy_data pd;
- /* active throtl group service_tree member */
+ /* active throtl group service_queue member */
struct rb_node rb_node;
+ /* throtl_data this group belongs to */
+ struct throtl_data *td;
+
+ /* this group's service queue */
+ struct throtl_service_queue service_queue;
+
+ /*
+ * qnode_on_self is used when bios are directly queued to this
+ * throtl_grp so that local bios compete fairly with bios
+ * dispatched from children. qnode_on_parent is used when bios are
+ * dispatched from this throtl_grp into its parent and will compete
+ * with the sibling qnode_on_parents and the parent's
+ * qnode_on_self.
+ */
+ struct throtl_qnode qnode_on_self[2];
+ struct throtl_qnode qnode_on_parent[2];
+
/*
* Dispatch time in jiffies. This is the estimated time when group
* will unthrottle and is ready to dispatch more bio. It is used as
@@ -52,15 +122,10 @@ struct throtl_grp {
*/
unsigned long disptime;
- struct blkio_group blkg;
- atomic_t ref;
unsigned int flags;
- /* Two lists for READ and WRITE */
- struct bio_list bio_lists[2];
-
- /* Number of queued bios on READ and WRITE lists */
- unsigned int nr_queued[2];
+ /* are there any throtl rules between this group and td? */
+ bool has_rules[2];
/* bytes per second rate limits */
uint64_t bps[2];
@@ -77,21 +142,18 @@ struct throtl_grp {
unsigned long slice_start[2];
unsigned long slice_end[2];
- /* Some throttle limits got updated for the group */
- int limits_changed;
+ /* Per cpu stats pointer */
+ struct tg_stats_cpu __percpu *stats_cpu;
- struct rcu_head rcu_head;
+ /* List of tgs waiting for per cpu stats memory to be allocated */
+ struct list_head stats_alloc_node;
};
struct throtl_data
{
- /* List of throtl groups */
- struct hlist_head tg_list;
-
/* service tree for active throtl groups */
- struct throtl_rb_root tg_service_tree;
+ struct throtl_service_queue service_queue;
- struct throtl_grp *root_tg;
struct request_queue *queue;
/* Total Number of queued bios on READ and WRITE lists */
@@ -103,283 +165,402 @@ struct throtl_data
unsigned int nr_undestroyed_grps;
/* Work for dispatching throttled bios */
- struct delayed_work throtl_work;
-
- int limits_changed;
+ struct work_struct dispatch_work;
};
-enum tg_state_flags {
- THROTL_TG_FLAG_on_rr = 0, /* on round-robin busy list */
-};
+/* list and work item to allocate percpu group stats */
+static DEFINE_SPINLOCK(tg_stats_alloc_lock);
+static LIST_HEAD(tg_stats_alloc_list);
-#define THROTL_TG_FNS(name) \
-static inline void throtl_mark_tg_##name(struct throtl_grp *tg) \
-{ \
- (tg)->flags |= (1 << THROTL_TG_FLAG_##name); \
-} \
-static inline void throtl_clear_tg_##name(struct throtl_grp *tg) \
-{ \
- (tg)->flags &= ~(1 << THROTL_TG_FLAG_##name); \
-} \
-static inline int throtl_tg_##name(const struct throtl_grp *tg) \
-{ \
- return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0; \
-}
+static void tg_stats_alloc_fn(struct work_struct *);
+static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
-THROTL_TG_FNS(on_rr);
+static void throtl_pending_timer_fn(unsigned long arg);
-#define throtl_log_tg(td, tg, fmt, args...) \
- blk_add_trace_msg((td)->queue, "throtl %s " fmt, \
- blkg_path(&(tg)->blkg), ##args); \
-
-#define throtl_log(td, fmt, args...) \
- blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
+static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
+{
+ return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
+}
-static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
+static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
- if (blkg)
- return container_of(blkg, struct throtl_grp, blkg);
+ return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
+}
- return NULL;
+static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
+{
+ return pd_to_blkg(&tg->pd);
}
-static inline unsigned int total_nr_queued(struct throtl_data *td)
+static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
{
- return td->nr_queued[0] + td->nr_queued[1];
+ return blkg_to_tg(td->queue->root_blkg);
}
-static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
+/**
+ * sq_to_tg - return the throtl_grp the specified service queue belongs to
+ * @sq: the throtl_service_queue of interest
+ *
+ * Return the throtl_grp @sq belongs to. If @sq is the top-level one
+ * embedded in throtl_data, %NULL is returned.
+ */
+static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
- atomic_inc(&tg->ref);
- return tg;
+ if (sq && sq->parent_sq)
+ return container_of(sq, struct throtl_grp, service_queue);
+ else
+ return NULL;
}
-static void throtl_free_tg(struct rcu_head *head)
+/**
+ * sq_to_td - return throtl_data the specified service queue belongs to
+ * @sq: the throtl_service_queue of interest
+ *
+ * A service_queue can be embedded in either a throtl_grp or throtl_data.
+ * Determine the associated throtl_data accordingly and return it.
+ */
+static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
- struct throtl_grp *tg;
+ struct throtl_grp *tg = sq_to_tg(sq);
- tg = container_of(head, struct throtl_grp, rcu_head);
- free_percpu(tg->blkg.stats_cpu);
- kfree(tg);
+ if (tg)
+ return tg->td;
+ else
+ return container_of(sq, struct throtl_data, service_queue);
}
-static void throtl_put_tg(struct throtl_grp *tg)
+/**
+ * throtl_log - log debug message via blktrace
+ * @sq: the service_queue being reported
+ * @fmt: printf format string
+ * @args: printf args
+ *
+ * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
+ * throtl_grp; otherwise, just "throtl".
+ *
+ * TODO: this should be made a function and name formatting should happen
+ * after testing whether blktrace is enabled.
+ */
+#define throtl_log(sq, fmt, args...) do { \
+ struct throtl_grp *__tg = sq_to_tg((sq)); \
+ struct throtl_data *__td = sq_to_td((sq)); \
+ \
+ (void)__td; \
+ if ((__tg)) { \
+ char __pbuf[128]; \
+ \
+ blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf)); \
+ blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
+ } else { \
+ blk_add_trace_msg(__td->queue, "throtl " fmt, ##args); \
+ } \
+} while (0)
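A brief usage sketch, with illustrative arguments only (tg/td stand for any live throtl_grp/throtl_data): the same macro covers both levels because sq_to_tg() returns %NULL for the service_queue embedded in throtl_data, so only the group case gets the blkg path prefix.

	throtl_log(&tg->service_queue, "dispatch nr_queued=%u",
		   tg->service_queue.nr_queued[READ]);	/* "throtl <cgroup path> dispatch ..." */
	throtl_log(&td->service_queue, "limits changed");	/* "throtl limits changed" */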
+
+static void tg_stats_init(struct tg_stats_cpu *tg_stats)
{
- BUG_ON(atomic_read(&tg->ref) <= 0);
- if (!atomic_dec_and_test(&tg->ref))
- return;
-
- /*
- * A group is freed in rcu manner. But having an rcu lock does not
- * mean that one can access all the fields of blkg and assume these
- * are valid. For example, don't try to follow throtl_data and
- * request queue links.
- *
- * Having a reference to blkg under an rcu allows acess to only
- * values local to groups like group stats and group rate limits
- */
- call_rcu(&tg->rcu_head, throtl_free_tg);
+ blkg_rwstat_init(&tg_stats->service_bytes);
+ blkg_rwstat_init(&tg_stats->serviced);
}
-static void throtl_init_group(struct throtl_grp *tg)
+/*
+ * Worker for allocating per cpu stat for tgs. This is scheduled on the
+ * system_wq once there are some groups on the alloc_list waiting for
+ * allocation.
+ */
+static void tg_stats_alloc_fn(struct work_struct *work)
{
- INIT_HLIST_NODE(&tg->tg_node);
- RB_CLEAR_NODE(&tg->rb_node);
- bio_list_init(&tg->bio_lists[0]);
- bio_list_init(&tg->bio_lists[1]);
- tg->limits_changed = false;
+ static struct tg_stats_cpu *stats_cpu; /* this fn is non-reentrant */
+ struct delayed_work *dwork = to_delayed_work(work);
+ bool empty = false;
+
+alloc_stats:
+ if (!stats_cpu) {
+ int cpu;
+
+ stats_cpu = alloc_percpu(struct tg_stats_cpu);
+ if (!stats_cpu) {
+ /* allocation failed, try again after some time */
+ schedule_delayed_work(dwork, msecs_to_jiffies(10));
+ return;
+ }
+ for_each_possible_cpu(cpu)
+ tg_stats_init(per_cpu_ptr(stats_cpu, cpu));
+ }
- /* Practically unlimited BW */
- tg->bps[0] = tg->bps[1] = -1;
- tg->iops[0] = tg->iops[1] = -1;
+ spin_lock_irq(&tg_stats_alloc_lock);
- /*
- * Take the initial reference that will be released on destroy
- * This can be thought of a joint reference by cgroup and
- * request queue which will be dropped by either request queue
- * exit or cgroup deletion path depending on who is exiting first.
- */
- atomic_set(&tg->ref, 1);
+ if (!list_empty(&tg_stats_alloc_list)) {
+ struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
+ struct throtl_grp,
+ stats_alloc_node);
+ swap(tg->stats_cpu, stats_cpu);
+ list_del_init(&tg->stats_alloc_node);
+ }
+
+ empty = list_empty(&tg_stats_alloc_list);
+ spin_unlock_irq(&tg_stats_alloc_lock);
+ if (!empty)
+ goto alloc_stats;
}
-/* Should be called with rcu read lock held (needed for blkcg) */
-static void
-throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
+static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
- hlist_add_head(&tg->tg_node, &td->tg_list);
- td->nr_undestroyed_grps++;
+ INIT_LIST_HEAD(&qn->node);
+ bio_list_init(&qn->bios);
+ qn->tg = tg;
}
-static void
-__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
+/**
+ * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
+ * @bio: bio being added
+ * @qn: qnode to add bio to
+ * @queued: the service_queue->queued[] list @qn belongs to
+ *
+ * Add @bio to @qn and put @qn on @queued if it's not already on the list.
+ * @qn->tg's reference count is bumped when @qn is activated. See the
+ * comment on top of throtl_qnode definition for details.
+ */
+static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
+ struct list_head *queued)
{
- struct backing_dev_info *bdi = &td->queue->backing_dev_info;
- unsigned int major, minor;
-
- if (!tg || tg->blkg.dev)
- return;
-
- /*
- * Fill in device details for a group which might not have been
- * filled at group creation time as queue was being instantiated
- * and driver had not attached a device yet
- */
- if (bdi->dev && dev_name(bdi->dev)) {
- sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
- tg->blkg.dev = MKDEV(major, minor);
+ bio_list_add(&qn->bios, bio);
+ if (list_empty(&qn->node)) {
+ list_add_tail(&qn->node, queued);
+ blkg_get(tg_to_blkg(qn->tg));
}
}
-/*
- * Should be called with without queue lock held. Here queue lock will be
- * taken rarely. It will be taken only once during life time of a group
- * if need be
+/**
+ * throtl_peek_queued - peek the first bio on a qnode list
+ * @queued: the qnode list to peek
*/
-static void
-throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
+static struct bio *throtl_peek_queued(struct list_head *queued)
{
- if (!tg || tg->blkg.dev)
- return;
+ struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
+ struct bio *bio;
+
+ if (list_empty(queued))
+ return NULL;
- spin_lock_irq(td->queue->queue_lock);
- __throtl_tg_fill_dev_details(td, tg);
- spin_unlock_irq(td->queue->queue_lock);
+ bio = bio_list_peek(&qn->bios);
+ WARN_ON_ONCE(!bio);
+ return bio;
}
-static void throtl_init_add_tg_lists(struct throtl_data *td,
- struct throtl_grp *tg, struct blkio_cgroup *blkcg)
+/**
+ * throtl_pop_queued - pop the first bio from a qnode list
+ * @queued: the qnode list to pop a bio from
+ * @tg_to_put: optional out argument for throtl_grp to put
+ *
+ * Pop the first bio from the qnode list @queued. After popping, the first
+ * qnode is removed from @queued if empty or moved to the end of @queued so
+ * that the popping order is round-robin.
+ *
+ * When the first qnode is removed, its associated throtl_grp should be put
+ * too. If @tg_to_put is NULL, this function automatically puts it;
+ * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
+ * responsible for putting it.
+ */
+static struct bio *throtl_pop_queued(struct list_head *queued,
+ struct throtl_grp **tg_to_put)
{
- __throtl_tg_fill_dev_details(td, tg);
+ struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
+ struct bio *bio;
- /* Add group onto cgroup list */
- blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
- tg->blkg.dev, BLKIO_POLICY_THROTL);
+ if (list_empty(queued))
+ return NULL;
- tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
- tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
- tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
- tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);
+ bio = bio_list_pop(&qn->bios);
+ WARN_ON_ONCE(!bio);
+
+ if (bio_list_empty(&qn->bios)) {
+ list_del_init(&qn->node);
+ if (tg_to_put)
+ *tg_to_put = qn->tg;
+ else
+ blkg_put(tg_to_blkg(qn->tg));
+ } else {
+ list_move_tail(&qn->node, queued);
+ }
- throtl_add_group_to_td_list(td, tg);
+ return bio;
}
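A minimal sketch of the resulting round-robin order, using hypothetical groups tg_a/tg_b and bios bio_a1/bio_a2/bio_b1 (none of these names are from the patch): two qnodes feeding one queued[] list are serviced one bio at a time.

	struct list_head queued;
	struct throtl_qnode qA, qB;

	INIT_LIST_HEAD(&queued);
	throtl_qnode_init(&qA, tg_a);
	throtl_qnode_init(&qB, tg_b);

	throtl_qnode_add_bio(bio_a1, &qA, &queued);	/* qA added, tg_a ref bumped */
	throtl_qnode_add_bio(bio_a2, &qA, &queued);
	throtl_qnode_add_bio(bio_b1, &qB, &queued);	/* qB added, tg_b ref bumped */

	throtl_pop_queued(&queued, NULL);	/* bio_a1; qA still has a bio, moved to tail */
	throtl_pop_queued(&queued, NULL);	/* bio_b1; qB empty, removed and tg_b put */
	throtl_pop_queued(&queued, NULL);	/* bio_a2; qA empty, removed and tg_a put */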
-/* Should be called without queue lock and outside of rcu period */
-static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
+/* init a service_queue, assumes the caller zeroed it */
+static void throtl_service_queue_init(struct throtl_service_queue *sq,
+ struct throtl_service_queue *parent_sq)
{
- struct throtl_grp *tg = NULL;
- int ret;
+ INIT_LIST_HEAD(&sq->queued[0]);
+ INIT_LIST_HEAD(&sq->queued[1]);
+ sq->pending_tree = RB_ROOT;
+ sq->parent_sq = parent_sq;
+ setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
+ (unsigned long)sq);
+}
- tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
- if (!tg)
- return NULL;
+static void throtl_service_queue_exit(struct throtl_service_queue *sq)
+{
+ del_timer_sync(&sq->pending_timer);
+}
- ret = blkio_alloc_blkg_stats(&tg->blkg);
+static void throtl_pd_init(struct blkcg_gq *blkg)
+{
+ struct throtl_grp *tg = blkg_to_tg(blkg);
+ struct throtl_data *td = blkg->q->td;
+ struct throtl_service_queue *parent_sq;
+ unsigned long flags;
+ int rw;
- if (ret) {
- kfree(tg);
- return NULL;
+ /*
+ * If sane_hierarchy is enabled, we switch to properly hierarchical
+ * behavior where limits on a given throtl_grp are applied to the
+ * whole subtree rather than just the group itself. e.g. If 16M
+ * read_bps limit is set on the root group, the whole system can't
+ * exceed 16M for the device.
+ *
+ * If sane_hierarchy is not enabled, the broken flat hierarchy
+ * behavior is retained where all throtl_grps are treated as if
+ * they're all separate root groups right below throtl_data.
+ * Limits of a group don't interact with limits of other groups
+ * regardless of the position of the group in the hierarchy.
+ */
+ parent_sq = &td->service_queue;
+
+ if (cgroup_sane_behavior(blkg->blkcg->css.cgroup) && blkg->parent)
+ parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
+
+ throtl_service_queue_init(&tg->service_queue, parent_sq);
+
+ for (rw = READ; rw <= WRITE; rw++) {
+ throtl_qnode_init(&tg->qnode_on_self[rw], tg);
+ throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
}
- throtl_init_group(tg);
- return tg;
+ RB_CLEAR_NODE(&tg->rb_node);
+ tg->td = td;
+
+ tg->bps[READ] = -1;
+ tg->bps[WRITE] = -1;
+ tg->iops[READ] = -1;
+ tg->iops[WRITE] = -1;
+
+ /*
+ * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
+ * but percpu allocator can't be called from IO path. Queue tg on
+ * tg_stats_alloc_list and allocate from work item.
+ */
+ spin_lock_irqsave(&tg_stats_alloc_lock, flags);
+ list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
+ schedule_delayed_work(&tg_stats_alloc_work, 0);
+ spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
}
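A short illustration of the two modes, assuming a hypothetical cgroup chain root -> A -> B:

	/*
	 * sane_behavior enabled : B's parent_sq is A's service_queue and A's
	 *			   parent_sq is the root tg's, so limits nest
	 *			   along the cgroup hierarchy.
	 * sane_behavior disabled: both A's and B's parent_sq point at
	 *			   &td->service_queue, i.e. every group acts
	 *			   as its own top-level group (flat).
	 */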
-static struct
-throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
+/*
+ * Set has_rules[] if @tg or any of its parents have limits configured.
+ * This doesn't require walking up to the top of the hierarchy as the
+ * parent's has_rules[] is guaranteed to be correct.
+ */
+static void tg_update_has_rules(struct throtl_grp *tg)
{
- struct throtl_grp *tg = NULL;
- void *key = td;
+ struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
+ int rw;
- /*
- * This is the common case when there are no blkio cgroups.
- * Avoid lookup in this case
- */
- if (blkcg == &blkio_root_cgroup)
- tg = td->root_tg;
- else
- tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
+ for (rw = READ; rw <= WRITE; rw++)
+ tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
+ (tg->bps[rw] != -1 || tg->iops[rw] != -1);
+}
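A worked example under hypothetical limits, where the parent has bps[READ] = 1M configured and the child has no limits of its own:

	/*
	 *   parent: has_rules[READ] = false || (1M != -1)          -> true
	 *   child : has_rules[READ] = parent->has_rules[READ] || .. -> true
	 *
	 * blk_throtl_bio() therefore won't bypass the child even though its
	 * own limits are -1; the child's bios still climb to the parent and
	 * are throttled there.
	 */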
- __throtl_tg_fill_dev_details(td, tg);
- return tg;
+static void throtl_pd_online(struct blkcg_gq *blkg)
+{
+ /*
+	 * We don't want new groups to escape the limits of their ancestors.
+ * Update has_rules[] after a new group is brought online.
+ */
+ tg_update_has_rules(blkg_to_tg(blkg));
}
-static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
+static void throtl_pd_exit(struct blkcg_gq *blkg)
{
- struct throtl_grp *tg = NULL, *__tg = NULL;
- struct blkio_cgroup *blkcg;
- struct request_queue *q = td->queue;
+ struct throtl_grp *tg = blkg_to_tg(blkg);
+ unsigned long flags;
- /* no throttling for dead queue */
- if (unlikely(blk_queue_dead(q)))
- return NULL;
+ spin_lock_irqsave(&tg_stats_alloc_lock, flags);
+ list_del_init(&tg->stats_alloc_node);
+ spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
- rcu_read_lock();
- blkcg = task_blkio_cgroup(current);
- tg = throtl_find_tg(td, blkcg);
- if (tg) {
- rcu_read_unlock();
- return tg;
- }
+ free_percpu(tg->stats_cpu);
- /*
- * Need to allocate a group. Allocation of group also needs allocation
- * of per cpu stats which in-turn takes a mutex() and can block. Hence
- * we need to drop rcu lock and queue_lock before we call alloc.
- */
- rcu_read_unlock();
- spin_unlock_irq(q->queue_lock);
+ throtl_service_queue_exit(&tg->service_queue);
+}
- tg = throtl_alloc_tg(td);
+static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
+{
+ struct throtl_grp *tg = blkg_to_tg(blkg);
+ int cpu;
- /* Group allocated and queue is still alive. take the lock */
- spin_lock_irq(q->queue_lock);
+ if (tg->stats_cpu == NULL)
+ return;
- /* Make sure @q is still alive */
- if (unlikely(blk_queue_dead(q))) {
- kfree(tg);
- return NULL;
+ for_each_possible_cpu(cpu) {
+ struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
+
+ blkg_rwstat_reset(&sc->service_bytes);
+ blkg_rwstat_reset(&sc->serviced);
}
+}
+static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
+ struct blkcg *blkcg)
+{
/*
- * Initialize the new group. After sleeping, read the blkcg again.
+ * This is the common case when there are no blkcgs. Avoid lookup
+ * in this case
*/
- rcu_read_lock();
- blkcg = task_blkio_cgroup(current);
+ if (blkcg == &blkcg_root)
+ return td_root_tg(td);
- /*
- * If some other thread already allocated the group while we were
- * not holding queue lock, free up the group
- */
- __tg = throtl_find_tg(td, blkcg);
+ return blkg_to_tg(blkg_lookup(blkcg, td->queue));
+}
- if (__tg) {
- kfree(tg);
- rcu_read_unlock();
- return __tg;
- }
+static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
+ struct blkcg *blkcg)
+{
+ struct request_queue *q = td->queue;
+ struct throtl_grp *tg = NULL;
- /* Group allocation failed. Account the IO to root group */
- if (!tg) {
- tg = td->root_tg;
- return tg;
+ /*
+ * This is the common case when there are no blkcgs. Avoid lookup
+ * in this case
+ */
+ if (blkcg == &blkcg_root) {
+ tg = td_root_tg(td);
+ } else {
+ struct blkcg_gq *blkg;
+
+ blkg = blkg_lookup_create(blkcg, q);
+
+ /* if %NULL and @q is alive, fall back to root_tg */
+ if (!IS_ERR(blkg))
+ tg = blkg_to_tg(blkg);
+ else if (!blk_queue_dying(q))
+ tg = td_root_tg(td);
}
- throtl_init_add_tg_lists(td, tg, blkcg);
- rcu_read_unlock();
return tg;
}
-static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
+static struct throtl_grp *
+throtl_rb_first(struct throtl_service_queue *parent_sq)
{
/* Service tree is empty */
- if (!root->count)
+ if (!parent_sq->nr_pending)
return NULL;
- if (!root->left)
- root->left = rb_first(&root->rb);
+ if (!parent_sq->first_pending)
+ parent_sq->first_pending = rb_first(&parent_sq->pending_tree);
- if (root->left)
- return rb_entry_tg(root->left);
+ if (parent_sq->first_pending)
+ return rb_entry_tg(parent_sq->first_pending);
return NULL;
}
@@ -390,29 +571,30 @@ static void rb_erase_init(struct rb_node *n, struct rb_root *root)
RB_CLEAR_NODE(n);
}
-static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
+static void throtl_rb_erase(struct rb_node *n,
+ struct throtl_service_queue *parent_sq)
{
- if (root->left == n)
- root->left = NULL;
- rb_erase_init(n, &root->rb);
- --root->count;
+ if (parent_sq->first_pending == n)
+ parent_sq->first_pending = NULL;
+ rb_erase_init(n, &parent_sq->pending_tree);
+ --parent_sq->nr_pending;
}
-static void update_min_dispatch_time(struct throtl_rb_root *st)
+static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
struct throtl_grp *tg;
- tg = throtl_rb_first(st);
+ tg = throtl_rb_first(parent_sq);
if (!tg)
return;
- st->min_disptime = tg->disptime;
+ parent_sq->first_pending_disptime = tg->disptime;
}
-static void
-tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
+static void tg_service_queue_add(struct throtl_grp *tg)
{
- struct rb_node **node = &st->rb.rb_node;
+ struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
+ struct rb_node **node = &parent_sq->pending_tree.rb_node;
struct rb_node *parent = NULL;
struct throtl_grp *__tg;
unsigned long key = tg->disptime;
@@ -431,99 +613,144 @@ tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
}
if (left)
- st->left = &tg->rb_node;
+ parent_sq->first_pending = &tg->rb_node;
rb_link_node(&tg->rb_node, parent, node);
- rb_insert_color(&tg->rb_node, &st->rb);
+ rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}
-static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
+static void __throtl_enqueue_tg(struct throtl_grp *tg)
{
- struct throtl_rb_root *st = &td->tg_service_tree;
+ tg_service_queue_add(tg);
+ tg->flags |= THROTL_TG_PENDING;
+ tg->service_queue.parent_sq->nr_pending++;
+}
- tg_service_tree_add(st, tg);
- throtl_mark_tg_on_rr(tg);
- st->count++;
+static void throtl_enqueue_tg(struct throtl_grp *tg)
+{
+ if (!(tg->flags & THROTL_TG_PENDING))
+ __throtl_enqueue_tg(tg);
}
-static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
+static void __throtl_dequeue_tg(struct throtl_grp *tg)
{
- if (!throtl_tg_on_rr(tg))
- __throtl_enqueue_tg(td, tg);
+ throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
+ tg->flags &= ~THROTL_TG_PENDING;
}
-static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
+static void throtl_dequeue_tg(struct throtl_grp *tg)
{
- throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
- throtl_clear_tg_on_rr(tg);
+ if (tg->flags & THROTL_TG_PENDING)
+ __throtl_dequeue_tg(tg);
}
-static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
+/* Call with queue lock held */
+static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
+ unsigned long expires)
{
- if (throtl_tg_on_rr(tg))
- __throtl_dequeue_tg(td, tg);
+ mod_timer(&sq->pending_timer, expires);
+ throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
+ expires - jiffies, jiffies);
}
-static void throtl_schedule_next_dispatch(struct throtl_data *td)
+/**
+ * throtl_schedule_next_dispatch - schedule the next dispatch cycle
+ * @sq: the service_queue to schedule dispatch for
+ * @force: force scheduling
+ *
+ * Arm @sq->pending_timer so that the next dispatch cycle starts on the
+ * dispatch time of the first pending child. Returns %true if either timer
+ * is armed or there's no pending child left. %false if the current
+ * dispatch window is still open and the caller should continue
+ * dispatching.
+ *
+ * If @force is %true, the dispatch timer is always scheduled and this
+ * function is guaranteed to return %true. This is to be used when the
+ * caller can't dispatch itself and needs to invoke pending_timer
+ * unconditionally. Note that forced scheduling is likely to induce a short
+ * delay before dispatch starts even if @sq->first_pending_disptime is not
+ * in the future and thus shouldn't be used in hot paths.
+ */
+static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
+ bool force)
{
- struct throtl_rb_root *st = &td->tg_service_tree;
+ /* any pending children left? */
+ if (!sq->nr_pending)
+ return true;
- /*
- * If there are more bios pending, schedule more work.
- */
- if (!total_nr_queued(td))
- return;
+ update_min_dispatch_time(sq);
- BUG_ON(!st->count);
+ /* is the next dispatch time in the future? */
+ if (force || time_after(sq->first_pending_disptime, jiffies)) {
+ throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
+ return true;
+ }
- update_min_dispatch_time(st);
+ /* tell the caller to continue dispatching */
+ return false;
+}
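A condensed sketch of the intended caller pattern (the complete version lives in throtl_pending_timer_fn() further down):

	while (true) {
		/* dispatch whatever the current slices allow */
		throtl_select_dispatch(sq);

		/* either arms the timer or reports nothing pending */
		if (throtl_schedule_next_dispatch(sq, false))
			break;

		/* dispatch window still open: relax briefly and repeat */
	}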
- if (time_before_eq(st->min_disptime, jiffies))
- throtl_schedule_delayed_work(td, 0);
- else
- throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
+static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
+ bool rw, unsigned long start)
+{
+ tg->bytes_disp[rw] = 0;
+ tg->io_disp[rw] = 0;
+
+ /*
+ * Previous slice has expired. We must have trimmed it after last
+ * bio dispatch. That means since start of last slice, we never used
+ * that bandwidth. Do try to make use of that bandwidth while giving
+ * credit.
+ */
+ if (time_after_eq(start, tg->slice_start[rw]))
+ tg->slice_start[rw] = start;
+
+ tg->slice_end[rw] = jiffies + throtl_slice;
+ throtl_log(&tg->service_queue,
+ "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
+ rw == READ ? 'R' : 'W', tg->slice_start[rw],
+ tg->slice_end[rw], jiffies);
}
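A worked example with hypothetical jiffies values and a 100-jiffy throtl_slice: a child started its slice at jiffies 1000 and its bio climbs to the parent at jiffies 1080, after the parent's slice (started before 1000) has expired.

	/*
	 *   throtl_start_new_slice_with_credit(parent, rw, 1000);
	 *   parent->slice_start[rw] = 1000	(not 1080)
	 *   parent->slice_end[rw]   = 1080 + 100 = 1180
	 *
	 * The 80 jiffies the bio already waited in the child therefore count
	 * as bandwidth credit at the parent level as well.
	 */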
-static inline void
-throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
+static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
tg->bytes_disp[rw] = 0;
tg->io_disp[rw] = 0;
tg->slice_start[rw] = jiffies;
tg->slice_end[rw] = jiffies + throtl_slice;
- throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
- rw == READ ? 'R' : 'W', tg->slice_start[rw],
- tg->slice_end[rw], jiffies);
+ throtl_log(&tg->service_queue,
+ "[%c] new slice start=%lu end=%lu jiffies=%lu",
+ rw == READ ? 'R' : 'W', tg->slice_start[rw],
+ tg->slice_end[rw], jiffies);
}
-static inline void throtl_set_slice_end(struct throtl_data *td,
- struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
+static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
+ unsigned long jiffy_end)
{
tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}
-static inline void throtl_extend_slice(struct throtl_data *td,
- struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
+static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
+ unsigned long jiffy_end)
{
tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
- throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
- rw == READ ? 'R' : 'W', tg->slice_start[rw],
- tg->slice_end[rw], jiffies);
+ throtl_log(&tg->service_queue,
+ "[%c] extend slice start=%lu end=%lu jiffies=%lu",
+ rw == READ ? 'R' : 'W', tg->slice_start[rw],
+ tg->slice_end[rw], jiffies);
}
/* Determine if previously allocated or extended slice is complete or not */
-static bool
-throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
+static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
- return 0;
+ return false;
return 1;
}
/* Trim the used slices and adjust slice start accordingly */
-static inline void
-throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
+static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
unsigned long nr_slices, time_elapsed, io_trim;
u64 bytes_trim, tmp;
@@ -535,7 +762,7 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
* renewed. Don't try to trim the slice if slice is used. A new
* slice will start when appropriate.
*/
- if (throtl_slice_used(td, tg, rw))
+ if (throtl_slice_used(tg, rw))
return;
/*
@@ -546,7 +773,7 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
* is bad because it does not allow new slice to start.
*/
- throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);
+ throtl_set_slice_end(tg, rw, jiffies + throtl_slice);
time_elapsed = jiffies - tg->slice_start[rw];
@@ -575,14 +802,14 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
tg->slice_start[rw] += nr_slices * throtl_slice;
- throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
- " start=%lu end=%lu jiffies=%lu",
- rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
- tg->slice_start[rw], tg->slice_end[rw], jiffies);
+ throtl_log(&tg->service_queue,
+ "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
+ rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
+ tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
-static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
- struct bio *bio, unsigned long *wait)
+static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
+ unsigned long *wait)
{
bool rw = bio_data_dir(bio);
unsigned int io_allowed;
@@ -615,7 +842,7 @@ static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
if (tg->io_disp[rw] + 1 <= io_allowed) {
if (wait)
*wait = 0;
- return 1;
+ return true;
}
/* Calc approx time to dispatch */
@@ -631,8 +858,8 @@ static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
return 0;
}
-static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
- struct bio *bio, unsigned long *wait)
+static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
+ unsigned long *wait)
{
bool rw = bio_data_dir(bio);
u64 bytes_allowed, extra_bytes, tmp;
@@ -650,14 +877,14 @@ static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
do_div(tmp, HZ);
bytes_allowed = tmp;
- if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
+ if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
if (wait)
*wait = 0;
- return 1;
+ return true;
}
/* Calc approx time to dispatch */
- extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
+ extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
if (!jiffy_wait)
@@ -673,18 +900,12 @@ static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
return 0;
}
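A worked example with hypothetical numbers, HZ = 1000, a 100-jiffy slice elapsed, bps[READ] = 1 MiB/s and nothing dispatched yet, ignoring the final slice-rounding adjustment elided above:

	/*
	 *   bytes_allowed = 1048576 * 100 / 1000      = 104857
	 *   bio size      = 262144 (256 KiB)          > bytes_allowed
	 *   extra_bytes   = 262144 - 104857           = 157287
	 *   jiffy_wait    = 157287 * 1000 / 1048576   = 150
	 *
	 * so the caller is told to wait roughly 150ms before this bio fits
	 * within the configured rate.
	 */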
-static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
- if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
- return 1;
- return 0;
-}
-
/*
* Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is within its IO rate and can be dispatched
*/
-static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
- struct bio *bio, unsigned long *wait)
+static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
+ unsigned long *wait)
{
bool rw = bio_data_dir(bio);
unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
@@ -695,13 +916,14 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
* this function with a different bio if there are other bios
* queued.
*/
- BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));
+ BUG_ON(tg->service_queue.nr_queued[rw] &&
+ bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
/* If tg->bps = -1, then BW is unlimited */
if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
if (wait)
*wait = 0;
- return 1;
+ return true;
}
/*
@@ -709,15 +931,15 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
* existing slice to make sure it is at least throtl_slice interval
* long since now.
*/
- if (throtl_slice_used(td, tg, rw))
- throtl_start_new_slice(td, tg, rw);
+ if (throtl_slice_used(tg, rw))
+ throtl_start_new_slice(tg, rw);
else {
if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
- throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
+ throtl_extend_slice(tg, rw, jiffies + throtl_slice);
}
- if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
- && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
+ if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
+ tg_with_in_iops_limit(tg, bio, &iops_wait)) {
if (wait)
*wait = 0;
return 1;
@@ -729,79 +951,175 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
*wait = max_wait;
if (time_before(tg->slice_end[rw], jiffies + max_wait))
- throtl_extend_slice(td, tg, rw, jiffies + max_wait);
+ throtl_extend_slice(tg, rw, jiffies + max_wait);
return 0;
}
+static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
+ int rw)
+{
+ struct throtl_grp *tg = blkg_to_tg(blkg);
+ struct tg_stats_cpu *stats_cpu;
+ unsigned long flags;
+
+ /* If per cpu stats are not allocated yet, don't do any accounting. */
+ if (tg->stats_cpu == NULL)
+ return;
+
+ /*
+ * Disabling interrupts to provide mutual exclusion between two
+ * writes on same cpu. It probably is not needed for 64bit. Not
+ * optimizing that case yet.
+ */
+ local_irq_save(flags);
+
+ stats_cpu = this_cpu_ptr(tg->stats_cpu);
+
+ blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
+ blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
+
+ local_irq_restore(flags);
+}
+
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
bool rw = bio_data_dir(bio);
- bool sync = rw_is_sync(bio->bi_rw);
/* Charge the bio to the group */
- tg->bytes_disp[rw] += bio->bi_size;
+ tg->bytes_disp[rw] += bio->bi_iter.bi_size;
tg->io_disp[rw]++;
- blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
+ /*
+	 * REQ_THROTTLED is used to prevent the same bio from being throttled
+ * more than once as a throttled bio will go through blk-throtl the
+ * second time when it eventually gets issued. Set it when a bio
+ * is being charged to a tg.
+ *
+ * Dispatch stats aren't recursive and each @bio should only be
+ * accounted by the @tg it was originally associated with. Let's
+ * update the stats when setting REQ_THROTTLED for the first time
+ * which is guaranteed to be for the @bio's original tg.
+ */
+ if (!(bio->bi_rw & REQ_THROTTLED)) {
+ bio->bi_rw |= REQ_THROTTLED;
+ throtl_update_dispatch_stats(tg_to_blkg(tg),
+ bio->bi_iter.bi_size, bio->bi_rw);
+ }
}
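A short trace of the flag's lifecycle as wired up in this patch:

	/*
	 * 1. blk_throtl_bio() sees a bio without REQ_THROTTLED; if it ends up
	 *    charged here, the flag is set and dispatch stats are updated
	 *    exactly once, against the bio's original tg.
	 * 2. When the throttled bio is eventually re-issued it re-enters
	 *    blk_throtl_bio(), which now sees the flag and lets it through.
	 * 3. If the bio leaves blk_throtl_bio() unthrottled, the flag is
	 *    cleared on the way out so stacked blk-throttle instances see a
	 *    clean bio.
	 */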
-static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
- struct bio *bio)
+/**
+ * throtl_add_bio_tg - add a bio to the specified throtl_grp
+ * @bio: bio to add
+ * @qn: qnode to use
+ * @tg: the target throtl_grp
+ *
+ * Add @bio to @tg's service_queue using @qn. If @qn is not specified,
+ * tg->qnode_on_self[] is used.
+ */
+static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
+ struct throtl_grp *tg)
{
+ struct throtl_service_queue *sq = &tg->service_queue;
bool rw = bio_data_dir(bio);
- bio_list_add(&tg->bio_lists[rw], bio);
- /* Take a bio reference on tg */
- throtl_ref_get_tg(tg);
- tg->nr_queued[rw]++;
- td->nr_queued[rw]++;
- throtl_enqueue_tg(td, tg);
+ if (!qn)
+ qn = &tg->qnode_on_self[rw];
+
+ /*
+ * If @tg doesn't currently have any bios queued in the same
+ * direction, queueing @bio can change when @tg should be
+ * dispatched. Mark that @tg was empty. This is automatically
+	 * cleared on the next tg_update_disptime().
+ */
+ if (!sq->nr_queued[rw])
+ tg->flags |= THROTL_TG_WAS_EMPTY;
+
+ throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
+
+ sq->nr_queued[rw]++;
+ throtl_enqueue_tg(tg);
}
-static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
+static void tg_update_disptime(struct throtl_grp *tg)
{
+ struct throtl_service_queue *sq = &tg->service_queue;
unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
struct bio *bio;
- if ((bio = bio_list_peek(&tg->bio_lists[READ])))
- tg_may_dispatch(td, tg, bio, &read_wait);
+ if ((bio = throtl_peek_queued(&sq->queued[READ])))
+ tg_may_dispatch(tg, bio, &read_wait);
- if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
- tg_may_dispatch(td, tg, bio, &write_wait);
+ if ((bio = throtl_peek_queued(&sq->queued[WRITE])))
+ tg_may_dispatch(tg, bio, &write_wait);
min_wait = min(read_wait, write_wait);
disptime = jiffies + min_wait;
/* Update dispatch time */
- throtl_dequeue_tg(td, tg);
+ throtl_dequeue_tg(tg);
tg->disptime = disptime;
- throtl_enqueue_tg(td, tg);
+ throtl_enqueue_tg(tg);
+
+ /* see throtl_add_bio_tg() */
+ tg->flags &= ~THROTL_TG_WAS_EMPTY;
}
-static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
- bool rw, struct bio_list *bl)
+static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
+ struct throtl_grp *parent_tg, bool rw)
{
- struct bio *bio;
+ if (throtl_slice_used(parent_tg, rw)) {
+ throtl_start_new_slice_with_credit(parent_tg, rw,
+ child_tg->slice_start[rw]);
+ }
- bio = bio_list_pop(&tg->bio_lists[rw]);
- tg->nr_queued[rw]--;
- /* Drop bio reference on tg */
- throtl_put_tg(tg);
+}
- BUG_ON(td->nr_queued[rw] <= 0);
- td->nr_queued[rw]--;
+static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
+{
+ struct throtl_service_queue *sq = &tg->service_queue;
+ struct throtl_service_queue *parent_sq = sq->parent_sq;
+ struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
+ struct throtl_grp *tg_to_put = NULL;
+ struct bio *bio;
+
+ /*
+ * @bio is being transferred from @tg to @parent_sq. Popping a bio
+ * from @tg may put its reference and @parent_sq might end up
+ * getting released prematurely. Remember the tg to put and put it
+ * after @bio is transferred to @parent_sq.
+ */
+ bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
+ sq->nr_queued[rw]--;
throtl_charge_bio(tg, bio);
- bio_list_add(bl, bio);
- bio->bi_rw |= REQ_THROTTLED;
- throtl_trim_slice(td, tg, rw);
+ /*
+ * If our parent is another tg, we just need to transfer @bio to
+ * the parent using throtl_add_bio_tg(). If our parent is
+ * @td->service_queue, @bio is ready to be issued. Put it on its
+	 * queued[] list and decrease the total number queued. The caller is
+ * responsible for issuing these bios.
+ */
+ if (parent_tg) {
+ throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
+ start_parent_slice_with_credit(tg, parent_tg, rw);
+ } else {
+ throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
+ &parent_sq->queued[rw]);
+ BUG_ON(tg->td->nr_queued[rw] <= 0);
+ tg->td->nr_queued[rw]--;
+ }
+
+ throtl_trim_slice(tg, rw);
+
+ if (tg_to_put)
+ blkg_put(tg_to_blkg(tg_to_put));
}
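An illustration with a hypothetical two-level chain td <- A <- B:

	/*
	 * A bio dispatched from B is moved onto A's service_queue through
	 * B->qnode_on_parent[rw], and A's slice may be restarted with credit.
	 * When A dispatches it in turn, the bio lands on &td->service_queue
	 * through A->qnode_on_parent[rw], td->nr_queued[rw] is decremented
	 * and the dispatch work can finally issue it.
	 */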
-static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
- struct bio_list *bl)
+static int throtl_dispatch_tg(struct throtl_grp *tg)
{
+ struct throtl_service_queue *sq = &tg->service_queue;
unsigned int nr_reads = 0, nr_writes = 0;
unsigned int max_nr_reads = throtl_grp_quantum*3/4;
unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
@@ -809,20 +1127,20 @@ static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
/* Try to dispatch 75% READS and 25% WRITES */
- while ((bio = bio_list_peek(&tg->bio_lists[READ]))
- && tg_may_dispatch(td, tg, bio, NULL)) {
+ while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
+ tg_may_dispatch(tg, bio, NULL)) {
- tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
+ tg_dispatch_one_bio(tg, bio_data_dir(bio));
nr_reads++;
if (nr_reads >= max_nr_reads)
break;
}
- while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
- && tg_may_dispatch(td, tg, bio, NULL)) {
+ while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
+ tg_may_dispatch(tg, bio, NULL)) {
- tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
+ tg_dispatch_one_bio(tg, bio_data_dir(bio));
nr_writes++;
if (nr_writes >= max_nr_writes)
@@ -832,14 +1150,13 @@ static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
return nr_reads + nr_writes;
}
-static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
+static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
unsigned int nr_disp = 0;
- struct throtl_grp *tg;
- struct throtl_rb_root *st = &td->tg_service_tree;
while (1) {
- tg = throtl_rb_first(st);
+ struct throtl_grp *tg = throtl_rb_first(parent_sq);
+ struct throtl_service_queue *sq = &tg->service_queue;
if (!tg)
break;
@@ -847,14 +1164,12 @@ static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
if (time_before(jiffies, tg->disptime))
break;
- throtl_dequeue_tg(td, tg);
+ throtl_dequeue_tg(tg);
- nr_disp += throtl_dispatch_tg(td, tg, bl);
+ nr_disp += throtl_dispatch_tg(tg);
- if (tg->nr_queued[0] || tg->nr_queued[1]) {
- tg_update_disptime(td, tg);
- throtl_enqueue_tg(td, tg);
- }
+ if (sq->nr_queued[0] || sq->nr_queued[1])
+ tg_update_disptime(tg);
if (nr_disp >= throtl_quantum)
break;
@@ -863,307 +1178,353 @@ static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
return nr_disp;
}
-static void throtl_process_limit_change(struct throtl_data *td)
+/**
+ * throtl_pending_timer_fn - timer function for service_queue->pending_timer
+ * @arg: the throtl_service_queue being serviced
+ *
+ * This timer is armed when a child throtl_grp with active bios becomes
+ * pending and queued on the service_queue's pending_tree and expires when
+ * the first child throtl_grp should be dispatched. This function
+ * dispatches bios from the children throtl_grps to the parent
+ * service_queue.
+ *
+ * If the parent's parent is another throtl_grp, dispatching is propagated
+ * by either arming its pending_timer or repeating dispatch directly. If
+ * the top-level service_queue is reached, throtl_data->dispatch_work is
+ * kicked so that the ready bios are issued.
+ */
+static void throtl_pending_timer_fn(unsigned long arg)
{
- struct throtl_grp *tg;
- struct hlist_node *pos, *n;
-
- if (!td->limits_changed)
- return;
-
- xchg(&td->limits_changed, false);
-
- throtl_log(td, "limits changed");
+ struct throtl_service_queue *sq = (void *)arg;
+ struct throtl_grp *tg = sq_to_tg(sq);
+ struct throtl_data *td = sq_to_td(sq);
+ struct request_queue *q = td->queue;
+ struct throtl_service_queue *parent_sq;
+ bool dispatched;
+ int ret;
- hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
- if (!tg->limits_changed)
- continue;
+ spin_lock_irq(q->queue_lock);
+again:
+ parent_sq = sq->parent_sq;
+ dispatched = false;
+
+ while (true) {
+ throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
+ sq->nr_queued[READ] + sq->nr_queued[WRITE],
+ sq->nr_queued[READ], sq->nr_queued[WRITE]);
+
+ ret = throtl_select_dispatch(sq);
+ if (ret) {
+ throtl_log(sq, "bios disp=%u", ret);
+ dispatched = true;
+ }
- if (!xchg(&tg->limits_changed, false))
- continue;
+ if (throtl_schedule_next_dispatch(sq, false))
+ break;
- throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
- " riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
- tg->iops[READ], tg->iops[WRITE]);
+ /* this dispatch windows is still open, relax and repeat */
+ spin_unlock_irq(q->queue_lock);
+ cpu_relax();
+ spin_lock_irq(q->queue_lock);
+ }
- /*
- * Restart the slices for both READ and WRITES. It
- * might happen that a group's limit are dropped
- * suddenly and we don't want to account recently
- * dispatched IO with new low rate
- */
- throtl_start_new_slice(td, tg, 0);
- throtl_start_new_slice(td, tg, 1);
+ if (!dispatched)
+ goto out_unlock;
- if (throtl_tg_on_rr(tg))
- tg_update_disptime(td, tg);
+ if (parent_sq) {
+		/* @parent_sq is another throtl_grp, propagate dispatch */
+ if (tg->flags & THROTL_TG_WAS_EMPTY) {
+ tg_update_disptime(tg);
+ if (!throtl_schedule_next_dispatch(parent_sq, false)) {
+ /* window is already open, repeat dispatching */
+ sq = parent_sq;
+ tg = sq_to_tg(sq);
+ goto again;
+ }
+ }
+ } else {
+		/* reached the top-level, queue issuing */
+ queue_work(kthrotld_workqueue, &td->dispatch_work);
}
+out_unlock:
+ spin_unlock_irq(q->queue_lock);
}
-/* Dispatch throttled bios. Should be called without queue lock held. */
-static int throtl_dispatch(struct request_queue *q)
+/**
+ * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
+ * @work: work item being executed
+ *
+ * This function is queued for execution when bios reach the queued[] lists
+ * of throtl_data->service_queue. Those bios are ready and issued by this
+ * function.
+ */
+static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
- struct throtl_data *td = q->td;
- unsigned int nr_disp = 0;
+ struct throtl_data *td = container_of(work, struct throtl_data,
+ dispatch_work);
+ struct throtl_service_queue *td_sq = &td->service_queue;
+ struct request_queue *q = td->queue;
struct bio_list bio_list_on_stack;
struct bio *bio;
struct blk_plug plug;
-
- spin_lock_irq(q->queue_lock);
-
- throtl_process_limit_change(td);
-
- if (!total_nr_queued(td))
- goto out;
+ int rw;
bio_list_init(&bio_list_on_stack);
- throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
- total_nr_queued(td), td->nr_queued[READ],
- td->nr_queued[WRITE]);
-
- nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);
-
- if (nr_disp)
- throtl_log(td, "bios disp=%u", nr_disp);
-
- throtl_schedule_next_dispatch(td);
-out:
+ spin_lock_irq(q->queue_lock);
+ for (rw = READ; rw <= WRITE; rw++)
+ while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
+ bio_list_add(&bio_list_on_stack, bio);
spin_unlock_irq(q->queue_lock);
- /*
- * If we dispatched some requests, unplug the queue to make sure
- * immediate dispatch
- */
- if (nr_disp) {
+ if (!bio_list_empty(&bio_list_on_stack)) {
blk_start_plug(&plug);
while((bio = bio_list_pop(&bio_list_on_stack)))
generic_make_request(bio);
blk_finish_plug(&plug);
}
- return nr_disp;
}
-void blk_throtl_work(struct work_struct *work)
-{
- struct throtl_data *td = container_of(work, struct throtl_data,
- throtl_work.work);
- struct request_queue *q = td->queue;
-
- throtl_dispatch(q);
-}
-
-/* Call with queue lock held */
-static void
-throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
+static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
{
+ struct throtl_grp *tg = pd_to_tg(pd);
+ struct blkg_rwstat rwstat = { }, tmp;
+ int i, cpu;
- struct delayed_work *dwork = &td->throtl_work;
+ for_each_possible_cpu(cpu) {
+ struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
- /* schedule work if limits changed even if no bio is queued */
- if (total_nr_queued(td) || td->limits_changed) {
- /*
- * We might have a work scheduled to be executed in future.
- * Cancel that and schedule a new one.
- */
- __cancel_delayed_work(dwork);
- queue_delayed_work(kthrotld_workqueue, dwork, delay);
- throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
- delay, jiffies);
+ tmp = blkg_rwstat_read((void *)sc + off);
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ rwstat.cnt[i] += tmp.cnt[i];
}
+
+ return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
-static void
-throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
+static int tg_print_cpu_rwstat(struct seq_file *sf, void *v)
{
- /* Something wrong if we are trying to remove same group twice */
- BUG_ON(hlist_unhashed(&tg->tg_node));
-
- hlist_del_init(&tg->tg_node);
-
- /*
- * Put the reference taken at the time of creation so that when all
- * queues are gone, group can be destroyed.
- */
- throtl_put_tg(tg);
- td->nr_undestroyed_grps--;
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_cpu_rwstat,
+ &blkcg_policy_throtl, seq_cft(sf)->private, true);
+ return 0;
}
-static void throtl_release_tgs(struct throtl_data *td)
+static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
{
- struct hlist_node *pos, *n;
- struct throtl_grp *tg;
+ struct throtl_grp *tg = pd_to_tg(pd);
+ u64 v = *(u64 *)((void *)tg + off);
- hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
- /*
- * If cgroup removal path got to blk_group first and removed
- * it from cgroup list, then it will take care of destroying
- * cfqg also.
- */
- if (!blkiocg_del_blkio_group(&tg->blkg))
- throtl_destroy_tg(td, tg);
- }
+ if (v == -1)
+ return 0;
+ return __blkg_prfill_u64(sf, pd, v);
}
-/*
- * Blk cgroup controller notification saying that blkio_group object is being
- * delinked as associated cgroup object is going away. That also means that
- * no new IO will come in this group. So get rid of this group as soon as
- * any pending IO in the group is finished.
- *
- * This function is called under rcu_read_lock(). key is the rcu protected
- * pointer. That means "key" is a valid throtl_data pointer as long as we are
- * rcu read lock.
- *
- * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
- * it should not be NULL as even if queue was going away, cgroup deltion
- * path got to it first.
- */
-void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
+static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
{
- unsigned long flags;
- struct throtl_data *td = key;
+ struct throtl_grp *tg = pd_to_tg(pd);
+ unsigned int v = *(unsigned int *)((void *)tg + off);
- spin_lock_irqsave(td->queue->queue_lock, flags);
- throtl_destroy_tg(td, tg_of_blkg(blkg));
- spin_unlock_irqrestore(td->queue->queue_lock, flags);
+ if (v == -1)
+ return 0;
+ return __blkg_prfill_u64(sf, pd, v);
}
-static void throtl_update_blkio_group_common(struct throtl_data *td,
- struct throtl_grp *tg)
+static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
- xchg(&tg->limits_changed, true);
- xchg(&td->limits_changed, true);
- /* Schedule a work now to process the limit change */
- throtl_schedule_delayed_work(td, 0);
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
+ &blkcg_policy_throtl, seq_cft(sf)->private, false);
+ return 0;
}
-/*
- * For all update functions, key should be a valid pointer because these
- * update functions are called under blkcg_lock, that means, blkg is
- * valid and in turn key is valid. queue exit path can not race because
- * of blkcg_lock
- *
- * Can not take queue lock in update functions as queue lock under blkcg_lock
- * is not allowed. Under other paths we take blkcg_lock under queue_lock.
- */
-static void throtl_update_blkio_group_read_bps(void *key,
- struct blkio_group *blkg, u64 read_bps)
+static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
- struct throtl_data *td = key;
- struct throtl_grp *tg = tg_of_blkg(blkg);
-
- tg->bps[READ] = read_bps;
- throtl_update_blkio_group_common(td, tg);
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
+ &blkcg_policy_throtl, seq_cft(sf)->private, false);
+ return 0;
}
-static void throtl_update_blkio_group_write_bps(void *key,
- struct blkio_group *blkg, u64 write_bps)
+static ssize_t tg_set_conf(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off, bool is_u64)
{
- struct throtl_data *td = key;
- struct throtl_grp *tg = tg_of_blkg(blkg);
+ struct blkcg *blkcg = css_to_blkcg(of_css(of));
+ struct blkg_conf_ctx ctx;
+ struct throtl_grp *tg;
+ struct throtl_service_queue *sq;
+ struct blkcg_gq *blkg;
+ struct cgroup_subsys_state *pos_css;
+ int ret;
- tg->bps[WRITE] = write_bps;
- throtl_update_blkio_group_common(td, tg);
-}
+ ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
+ if (ret)
+ return ret;
-static void throtl_update_blkio_group_read_iops(void *key,
- struct blkio_group *blkg, unsigned int read_iops)
-{
- struct throtl_data *td = key;
- struct throtl_grp *tg = tg_of_blkg(blkg);
+ tg = blkg_to_tg(ctx.blkg);
+ sq = &tg->service_queue;
- tg->iops[READ] = read_iops;
- throtl_update_blkio_group_common(td, tg);
+ if (!ctx.v)
+ ctx.v = -1;
+
+ if (is_u64)
+ *(u64 *)((void *)tg + of_cft(of)->private) = ctx.v;
+ else
+ *(unsigned int *)((void *)tg + of_cft(of)->private) = ctx.v;
+
+ throtl_log(&tg->service_queue,
+ "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
+ tg->bps[READ], tg->bps[WRITE],
+ tg->iops[READ], tg->iops[WRITE]);
+
+ /*
+ * Update has_rules[] flags for the updated tg's subtree. A tg is
+ * considered to have rules if either the tg itself or any of its
+ * ancestors has rules. This identifies groups without any
+ * restrictions in the whole hierarchy and allows them to bypass
+ * blk-throttle.
+ */
+ blkg_for_each_descendant_pre(blkg, pos_css, ctx.blkg)
+ tg_update_has_rules(blkg_to_tg(blkg));
+
+ /*
+ * We're already holding queue_lock and know @tg is valid. Let's
+ * apply the new config directly.
+ *
+ * Restart the slices for both READ and WRITES. It might happen
+	 * that a group's limits are dropped suddenly and we don't want to
+ * account recently dispatched IO with new low rate.
+ */
+ throtl_start_new_slice(tg, 0);
+ throtl_start_new_slice(tg, 1);
+
+ if (tg->flags & THROTL_TG_PENDING) {
+ tg_update_disptime(tg);
+ throtl_schedule_next_dispatch(sq->parent_sq, true);
+ }
+
+ blkg_conf_finish(&ctx);
+ return nbytes;
}
-static void throtl_update_blkio_group_write_iops(void *key,
- struct blkio_group *blkg, unsigned int write_iops)
+static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
{
- struct throtl_data *td = key;
- struct throtl_grp *tg = tg_of_blkg(blkg);
+ return tg_set_conf(of, buf, nbytes, off, true);
+}
- tg->iops[WRITE] = write_iops;
- throtl_update_blkio_group_common(td, tg);
+static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ return tg_set_conf(of, buf, nbytes, off, false);
}
+static struct cftype throtl_files[] = {
+ {
+ .name = "throttle.read_bps_device",
+ .private = offsetof(struct throtl_grp, bps[READ]),
+ .seq_show = tg_print_conf_u64,
+ .write = tg_set_conf_u64,
+ },
+ {
+ .name = "throttle.write_bps_device",
+ .private = offsetof(struct throtl_grp, bps[WRITE]),
+ .seq_show = tg_print_conf_u64,
+ .write = tg_set_conf_u64,
+ },
+ {
+ .name = "throttle.read_iops_device",
+ .private = offsetof(struct throtl_grp, iops[READ]),
+ .seq_show = tg_print_conf_uint,
+ .write = tg_set_conf_uint,
+ },
+ {
+ .name = "throttle.write_iops_device",
+ .private = offsetof(struct throtl_grp, iops[WRITE]),
+ .seq_show = tg_print_conf_uint,
+ .write = tg_set_conf_uint,
+ },
+ {
+ .name = "throttle.io_service_bytes",
+ .private = offsetof(struct tg_stats_cpu, service_bytes),
+ .seq_show = tg_print_cpu_rwstat,
+ },
+ {
+ .name = "throttle.io_serviced",
+ .private = offsetof(struct tg_stats_cpu, serviced),
+ .seq_show = tg_print_cpu_rwstat,
+ },
+ { } /* terminate */
+};
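An illustrative userspace view, assuming a hypothetical device 8:16 (the "MAJ:MIN value" format follows Documentation/cgroups/blkio-controller.txt):

	/*
	 *   # echo "8:16 1048576" > blkio.throttle.read_bps_device
	 *
	 * arrives in tg_set_conf_u64() with of_cft(of)->private equal to
	 * offsetof(struct throtl_grp, bps[READ]), i.e. it sets tg->bps[READ]
	 * to 1 MiB/s; writing "8:16 0" clears the limit (stored as -1).
	 */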
+
static void throtl_shutdown_wq(struct request_queue *q)
{
struct throtl_data *td = q->td;
- cancel_delayed_work_sync(&td->throtl_work);
+ cancel_work_sync(&td->dispatch_work);
}
-static struct blkio_policy_type blkio_policy_throtl = {
- .ops = {
- .blkio_unlink_group_fn = throtl_unlink_blkio_group,
- .blkio_update_group_read_bps_fn =
- throtl_update_blkio_group_read_bps,
- .blkio_update_group_write_bps_fn =
- throtl_update_blkio_group_write_bps,
- .blkio_update_group_read_iops_fn =
- throtl_update_blkio_group_read_iops,
- .blkio_update_group_write_iops_fn =
- throtl_update_blkio_group_write_iops,
- },
- .plid = BLKIO_POLICY_THROTL,
+static struct blkcg_policy blkcg_policy_throtl = {
+ .pd_size = sizeof(struct throtl_grp),
+ .cftypes = throtl_files,
+
+ .pd_init_fn = throtl_pd_init,
+ .pd_online_fn = throtl_pd_online,
+ .pd_exit_fn = throtl_pd_exit,
+ .pd_reset_stats_fn = throtl_pd_reset_stats,
};
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
struct throtl_data *td = q->td;
+ struct throtl_qnode *qn = NULL;
struct throtl_grp *tg;
- bool rw = bio_data_dir(bio), update_disptime = true;
- struct blkio_cgroup *blkcg;
+ struct throtl_service_queue *sq;
+ bool rw = bio_data_dir(bio);
+ struct blkcg *blkcg;
bool throttled = false;
- if (bio->bi_rw & REQ_THROTTLED) {
- bio->bi_rw &= ~REQ_THROTTLED;
+ /* see throtl_charge_bio() */
+ if (bio->bi_rw & REQ_THROTTLED)
goto out;
- }
/*
* A throtl_grp pointer retrieved under rcu can be used to access
* basic fields like stats and io rates. If a group has no rules,
* just update the dispatch stats in lockless manner and return.
*/
-
rcu_read_lock();
- blkcg = task_blkio_cgroup(current);
- tg = throtl_find_tg(td, blkcg);
+ blkcg = bio_blkcg(bio);
+ tg = throtl_lookup_tg(td, blkcg);
if (tg) {
- throtl_tg_fill_dev_details(td, tg);
-
- if (tg_no_rule_group(tg, rw)) {
- blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
- rw, rw_is_sync(bio->bi_rw));
- rcu_read_unlock();
- goto out;
+ if (!tg->has_rules[rw]) {
+ throtl_update_dispatch_stats(tg_to_blkg(tg),
+ bio->bi_iter.bi_size, bio->bi_rw);
+ goto out_unlock_rcu;
}
}
- rcu_read_unlock();
/*
* Either group has not been allocated yet or it is not an unlimited
* IO group
*/
spin_lock_irq(q->queue_lock);
- tg = throtl_get_tg(td);
+ tg = throtl_lookup_create_tg(td, blkcg);
if (unlikely(!tg))
goto out_unlock;
- if (tg->nr_queued[rw]) {
- /*
- * There is already another bio queued in same dir. No
- * need to update dispatch time.
- */
- update_disptime = false;
- goto queue_bio;
+ sq = &tg->service_queue;
- }
+ while (true) {
+ /* throtl is FIFO - if bios are already queued, should queue */
+ if (sq->nr_queued[rw])
+ break;
+
+ /* if above limits, break to queue */
+ if (!tg_may_dispatch(tg, bio, NULL))
+ break;
- /* Bio is with-in rate limit of group */
- if (tg_may_dispatch(td, tg, bio, NULL)) {
+ /* within limits, let's charge and dispatch directly */
throtl_charge_bio(tg, bio);
/*
@@ -1177,32 +1538,80 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
*
* So keep on trimming slice even if bio is not queued.
*/
- throtl_trim_slice(td, tg, rw);
- goto out_unlock;
+ throtl_trim_slice(tg, rw);
+
+ /*
+ * @bio passed through this layer without being throttled.
+		 * Climb up the ladder. If we're already at the top, it
+ * can be executed directly.
+ */
+ qn = &tg->qnode_on_parent[rw];
+ sq = sq->parent_sq;
+ tg = sq_to_tg(sq);
+ if (!tg)
+ goto out_unlock;
}
-queue_bio:
- throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
- " iodisp=%u iops=%u queued=%d/%d",
- rw == READ ? 'R' : 'W',
- tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
- tg->io_disp[rw], tg->iops[rw],
- tg->nr_queued[READ], tg->nr_queued[WRITE]);
+ /* out-of-limit, queue to @tg */
+ throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
+ rw == READ ? 'R' : 'W',
+ tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw],
+ tg->io_disp[rw], tg->iops[rw],
+ sq->nr_queued[READ], sq->nr_queued[WRITE]);
- throtl_add_bio_tg(q->td, tg, bio);
+ bio_associate_current(bio);
+ tg->td->nr_queued[rw]++;
+ throtl_add_bio_tg(bio, qn, tg);
throttled = true;
- if (update_disptime) {
- tg_update_disptime(td, tg);
- throtl_schedule_next_dispatch(td);
+ /*
+ * Update @tg's dispatch time and force schedule dispatch if @tg
+ * was empty before @bio. The forced scheduling isn't likely to
+ * cause undue delay as @bio is likely to be dispatched directly if
+ * its @tg's disptime is not in the future.
+ */
+ if (tg->flags & THROTL_TG_WAS_EMPTY) {
+ tg_update_disptime(tg);
+ throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
}
out_unlock:
spin_unlock_irq(q->queue_lock);
+out_unlock_rcu:
+ rcu_read_unlock();
out:
+ /*
+ * As multiple blk-throtls may stack in the same issue path, we
+ * don't want bios to leave with the flag set. Clear the flag if
+ * being issued.
+ */
+ if (!throttled)
+ bio->bi_rw &= ~REQ_THROTTLED;
return throttled;
}
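
A minimal userspace model of the dispatch-or-queue walk above: the names (tg_model, budget) are invented, and a single byte budget stands in for tg_may_dispatch()/throtl_charge_bio(); it only mirrors the shape of the loop, not the kernel API.

#include <stdio.h>

/* toy group: hypothetical fields, not struct throtl_grp */
struct tg_model {
	struct tg_model *parent;
	long budget;		/* bytes still allowed in this window */
	int nr_queued;		/* bios already waiting at this level */
};

/* returns the level the bio must queue at, or NULL if it may be issued */
static struct tg_model *classify(struct tg_model *tg, long bio_bytes)
{
	while (tg) {
		if (tg->nr_queued)		/* FIFO: queue behind earlier bios */
			return tg;
		if (bio_bytes > tg->budget)	/* over limit at this level */
			return tg;
		tg->budget -= bio_bytes;	/* charge and climb one level */
		tg = tg->parent;
	}
	return NULL;				/* passed every level: dispatch */
}

int main(void)
{
	struct tg_model root = { NULL, 100, 0 };
	struct tg_model child = { &root, 50, 0 };

	printf("%s\n", classify(&child, 40) ? "queued" : "dispatched");
	printf("%s\n", classify(&child, 40) ? "queued" : "dispatched"); /* child budget exhausted */
	return 0;
}
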
+/*
+ * Dispatch all bios from all children tg's queued on @parent_sq. On
+ * return, @parent_sq is guaranteed to not have any active children tg's
+ * and all bios from previously active tg's are on @parent_sq->bio_lists[].
+ */
+static void tg_drain_bios(struct throtl_service_queue *parent_sq)
+{
+ struct throtl_grp *tg;
+
+ while ((tg = throtl_rb_first(parent_sq))) {
+ struct throtl_service_queue *sq = &tg->service_queue;
+ struct bio *bio;
+
+ throtl_dequeue_tg(tg);
+
+ while ((bio = throtl_peek_queued(&sq->queued[READ])))
+ tg_dispatch_one_bio(tg, bio_data_dir(bio));
+ while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
+ tg_dispatch_one_bio(tg, bio_data_dir(bio));
+ }
+}
+
/**
* blk_throtl_drain - drain throttled bios
* @q: request_queue to drain throttled bios for
@@ -1213,27 +1622,34 @@ void blk_throtl_drain(struct request_queue *q)
__releases(q->queue_lock) __acquires(q->queue_lock)
{
struct throtl_data *td = q->td;
- struct throtl_rb_root *st = &td->tg_service_tree;
- struct throtl_grp *tg;
- struct bio_list bl;
+ struct blkcg_gq *blkg;
+ struct cgroup_subsys_state *pos_css;
struct bio *bio;
+ int rw;
- WARN_ON_ONCE(!queue_is_locked(q));
+ queue_lockdep_assert_held(q);
+ rcu_read_lock();
- bio_list_init(&bl);
+ /*
+ * Drain each tg while doing post-order walk on the blkg tree, so
+ * that all bios are propagated to td->service_queue. It'd be
+ * better to walk service_queue tree directly but blkg walk is
+ * easier.
+ */
+ blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
+ tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
- while ((tg = throtl_rb_first(st))) {
- throtl_dequeue_tg(td, tg);
+ /* finally, transfer bios from top-level tg's into the td */
+ tg_drain_bios(&td->service_queue);
- while ((bio = bio_list_peek(&tg->bio_lists[READ])))
- tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
- while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
- tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
- }
+ rcu_read_unlock();
spin_unlock_irq(q->queue_lock);
- while ((bio = bio_list_pop(&bl)))
- generic_make_request(bio);
+ /* all bios now should be in td->service_queue, issue them */
+ for (rw = READ; rw <= WRITE; rw++)
+ while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
+ NULL)))
+ generic_make_request(bio);
spin_lock_irq(q->queue_lock);
}
@@ -1241,79 +1657,30 @@ void blk_throtl_drain(struct request_queue *q)
int blk_throtl_init(struct request_queue *q)
{
struct throtl_data *td;
- struct throtl_grp *tg;
+ int ret;
td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
if (!td)
return -ENOMEM;
- INIT_HLIST_HEAD(&td->tg_list);
- td->tg_service_tree = THROTL_RB_ROOT;
- td->limits_changed = false;
- INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
+ INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
+ throtl_service_queue_init(&td->service_queue, NULL);
- /* alloc and Init root group. */
+ q->td = td;
td->queue = q;
- tg = throtl_alloc_tg(td);
- if (!tg) {
+ /* activate policy */
+ ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
+ if (ret)
kfree(td);
- return -ENOMEM;
- }
-
- td->root_tg = tg;
-
- rcu_read_lock();
- throtl_init_add_tg_lists(td, tg, &blkio_root_cgroup);
- rcu_read_unlock();
-
- /* Attach throtl data to request queue */
- q->td = td;
- return 0;
+ return ret;
}
void blk_throtl_exit(struct request_queue *q)
{
- struct throtl_data *td = q->td;
- bool wait = false;
-
- BUG_ON(!td);
-
- throtl_shutdown_wq(q);
-
- spin_lock_irq(q->queue_lock);
- throtl_release_tgs(td);
-
- /* If there are other groups */
- if (td->nr_undestroyed_grps > 0)
- wait = true;
-
- spin_unlock_irq(q->queue_lock);
-
- /*
- * Wait for tg->blkg->key accessors to exit their grace periods.
- * Do this wait only if there are other undestroyed groups out
- * there (other than root group). This can happen if cgroup deletion
- * path claimed the responsibility of cleaning up a group before
- * queue cleanup code get to the group.
- *
- * Do not call synchronize_rcu() unconditionally as there are drivers
- * which create/delete request queue hundreds of times during scan/boot
- * and synchronize_rcu() can take significant time and slow down boot.
- */
- if (wait)
- synchronize_rcu();
-
- /*
- * Just being safe to make sure after previous flush if some body did
- * update limits through cgroup and another work got queued, cancel
- * it.
- */
+ BUG_ON(!q->td);
throtl_shutdown_wq(q);
-}
-
-void blk_throtl_release(struct request_queue *q)
-{
+ blkcg_deactivate_policy(q, &blkcg_policy_throtl);
kfree(q->td);
}
@@ -1323,8 +1690,7 @@ static int __init throtl_init(void)
if (!kthrotld_workqueue)
panic("Failed to create kthrotld\n");
- blkio_policy_register(&blkio_policy_throtl);
- return 0;
+ return blkcg_policy_register(&blkcg_policy_throtl);
}
module_init(throtl_init);
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 78035488895..95a09590ccf 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -7,6 +7,7 @@
#include <linux/fault-inject.h>
#include "blk.h"
+#include "blk-mq.h"
#ifdef CONFIG_FAIL_IO_TIMEOUT
@@ -31,7 +32,7 @@ static int __init fail_io_timeout_debugfs(void)
struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
NULL, &fail_io_timeout);
- return IS_ERR(dir) ? PTR_ERR(dir) : 0;
+ return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_io_timeout_debugfs);
@@ -82,16 +83,21 @@ void blk_delete_timer(struct request *req)
static void blk_rq_timed_out(struct request *req)
{
struct request_queue *q = req->q;
- enum blk_eh_timer_return ret;
+ enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
- ret = q->rq_timed_out_fn(req);
+ if (q->rq_timed_out_fn)
+ ret = q->rq_timed_out_fn(req);
switch (ret) {
case BLK_EH_HANDLED:
- __blk_complete_request(req);
+ /* Can we use req->errors here? */
+ if (q->mq_ops)
+ __blk_mq_complete_request(req);
+ else
+ __blk_complete_request(req);
break;
case BLK_EH_RESET_TIMER:
- blk_clear_rq_complete(req);
blk_add_timer(req);
+ blk_clear_rq_complete(req);
break;
case BLK_EH_NOT_HANDLED:
/*
@@ -107,6 +113,23 @@ static void blk_rq_timed_out(struct request *req)
}
}
+void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
+ unsigned int *next_set)
+{
+ if (time_after_eq(jiffies, rq->deadline)) {
+ list_del_init(&rq->timeout_list);
+
+ /*
+ * Check if we raced with end io completion
+ */
+ if (!blk_mark_rq_complete(rq))
+ blk_rq_timed_out(rq);
+ } else if (!*next_set || time_after(*next_timeout, rq->deadline)) {
+ *next_timeout = rq->deadline;
+ *next_set = 1;
+ }
+}
+
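
blk_rq_check_expired() above resolves the race between the timeout path and normal completion with an atomic mark: whichever side sets the complete bit first owns the request. A tiny C11 sketch of that handshake, using model types rather than the kernel's REQ_ATOM_COMPLETE machinery:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct req_model {
	atomic_bool complete;	/* stands in for REQ_ATOM_COMPLETE */
};

/* both the timeout and the completion path call this; only the first
 * caller sees the flag clear and gets to finish the request */
static bool own_request(struct req_model *rq)
{
	return !atomic_exchange(&rq->complete, true);
}

int main(void)
{
	struct req_model rq = { false };

	if (own_request(&rq))
		printf("timeout path owns the request\n");
	if (own_request(&rq))
		printf("completion path owns the request\n");	/* not reached */
	return 0;
}
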
void blk_rq_timed_out_timer(unsigned long data)
{
struct request_queue *q = (struct request_queue *) data;
@@ -116,21 +139,8 @@ void blk_rq_timed_out_timer(unsigned long data)
spin_lock_irqsave(q->queue_lock, flags);
- list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
- if (time_after_eq(jiffies, rq->deadline)) {
- list_del_init(&rq->timeout_list);
-
- /*
- * Check if we raced with end io completion
- */
- if (blk_mark_rq_complete(rq))
- continue;
- blk_rq_timed_out(rq);
- } else if (!next_set || time_after(next, rq->deadline)) {
- next = rq->deadline;
- next_set = 1;
- }
- }
+ list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
+ blk_rq_check_expired(rq, &next, &next_set);
if (next_set)
mod_timer(&q->timeout, round_jiffies_up(next));
@@ -156,6 +166,17 @@ void blk_abort_request(struct request *req)
}
EXPORT_SYMBOL_GPL(blk_abort_request);
+unsigned long blk_rq_timeout(unsigned long timeout)
+{
+ unsigned long maxt;
+
+ maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT);
+ if (time_after(timeout, maxt))
+ timeout = maxt;
+
+ return timeout;
+}
+
/**
* blk_add_timer - Start timeout timer for a single request
* @req: request that is about to start running.
@@ -173,7 +194,6 @@ void blk_add_timer(struct request *req)
return;
BUG_ON(!list_empty(&req->timeout_list));
- BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
/*
* Some LLDs, like scsi, peek at the timeout to prevent a
@@ -183,58 +203,29 @@ void blk_add_timer(struct request *req)
req->timeout = q->rq_timeout;
req->deadline = jiffies + req->timeout;
- list_add_tail(&req->timeout_list, &q->timeout_list);
+ if (!q->mq_ops)
+ list_add_tail(&req->timeout_list, &req->q->timeout_list);
/*
* If the timer isn't already pending or this timeout is earlier
* than an existing one, modify the timer. Round up to next nearest
* second.
*/
- expiry = round_jiffies_up(req->deadline);
+ expiry = blk_rq_timeout(round_jiffies_up(req->deadline));
if (!timer_pending(&q->timeout) ||
- time_before(expiry, q->timeout.expires))
- mod_timer(&q->timeout, expiry);
-}
-
-/**
- * blk_abort_queue -- Abort all request on given queue
- * @queue: pointer to queue
- *
- */
-void blk_abort_queue(struct request_queue *q)
-{
- unsigned long flags;
- struct request *rq, *tmp;
- LIST_HEAD(list);
-
- /*
- * Not a request based block device, nothing to abort
- */
- if (!q->request_fn)
- return;
-
- spin_lock_irqsave(q->queue_lock, flags);
-
- elv_abort_queue(q);
+ time_before(expiry, q->timeout.expires)) {
+ unsigned long diff = q->timeout.expires - expiry;
- /*
- * Splice entries to local list, to avoid deadlocking if entries
- * get readded to the timeout list by error handling
- */
- list_splice_init(&q->timeout_list, &list);
-
- list_for_each_entry_safe(rq, tmp, &list, timeout_list)
- blk_abort_request(rq);
-
- /*
- * Occasionally, blk_abort_request() will return without
- * deleting the element from the list. Make sure we add those back
- * instead of leaving them on the local stack list.
- */
- list_splice(&list, &q->timeout_list);
-
- spin_unlock_irqrestore(q->queue_lock, flags);
+ /*
+ * Due to added timer slack to group timers, the timer
+ * will often be a little in front of what we asked for.
+ * So apply some tolerance here too, otherwise we keep
+ * modifying the timer because expires for value X
+ * will be X + something.
+ */
+ if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
+ mod_timer(&q->timeout, expiry);
+ }
}
-EXPORT_SYMBOL_GPL(blk_abort_queue);
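
The blk_add_timer() change above arms the queue timer at most BLK_MAX_TIMEOUT into the future and only re-arms it when the new expiry is at least HZ/2 earlier than the pending one, so timer slack cannot trigger a mod_timer() call per request. A standalone sketch of that policy; HZ and the 5-second cap are hard-coded assumptions here, not values taken from kernel headers:

#include <stdbool.h>
#include <stdio.h>

#define HZ		1000UL
#define MAX_TIMEOUT	(5 * HZ)	/* mirrors BLK_MAX_TIMEOUT */

/* never arm the timer more than MAX_TIMEOUT past "now" */
static unsigned long clamp_expiry(unsigned long now, unsigned long expiry)
{
	unsigned long maxt = now + MAX_TIMEOUT;

	return expiry > maxt ? maxt : expiry;
}

/* re-arm only when nothing is pending or the new expiry is meaningfully
 * (>= HZ/2) earlier; the caller only asks when new_expiry <= cur, so
 * timer slack does not cause a re-arm on every request */
static bool should_rearm(bool pending, unsigned long cur, unsigned long new_expiry)
{
	if (!pending)
		return true;
	return cur - new_expiry >= HZ / 2;
}

int main(void)
{
	unsigned long now = 10000;
	unsigned long expiry = clamp_expiry(now, now + 30 * HZ);

	printf("expiry=%lu (clamped to now+5s)\n", expiry);
	printf("re-arm: %d\n", should_rearm(true, expiry, expiry - HZ / 4));
	return 0;
}
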
diff --git a/block/blk.h b/block/blk.h
index 9c12f80882b..6748c4f8d7a 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -9,7 +9,11 @@
/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ 32
+/* Max future timer expiry for timeouts */
+#define BLK_MAX_TIMEOUT (5 * HZ)
+
extern struct kmem_cache *blk_requestq_cachep;
+extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;
@@ -18,27 +22,46 @@ static inline void __blk_get_queue(struct request_queue *q)
kobject_get(&q->kobj);
}
+int blk_init_rl(struct request_list *rl, struct request_queue *q,
+ gfp_t gfp_mask);
+void blk_exit_rl(struct request_list *rl);
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
struct bio *bio);
-void blk_drain_queue(struct request_queue *q, bool drain_all);
+void blk_queue_bypass_start(struct request_queue *q);
+void blk_queue_bypass_end(struct request_queue *q);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
unsigned int nr_bytes, unsigned int bidi_bytes);
void blk_rq_timed_out_timer(unsigned long data);
+void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
+ unsigned int *next_set);
+unsigned long blk_rq_timeout(unsigned long timeout);
+void blk_add_timer(struct request *req);
void blk_delete_timer(struct request *);
-void blk_add_timer(struct request *);
-void __generic_unplug_device(struct request_queue *);
+
+
+bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
+ struct bio *bio);
+bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
+ struct bio *bio);
+bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
+ unsigned int *request_count);
+
+void blk_account_io_start(struct request *req, bool new_io);
+void blk_account_io_completion(struct request *req, unsigned int bytes);
+void blk_account_io_done(struct request *req);
/*
* Internal atomic flags for request handling
*/
enum rq_atomic_flags {
REQ_ATOM_COMPLETE = 0,
+ REQ_ATOM_STARTED,
};
/*
@@ -58,10 +81,9 @@ static inline void blk_clear_rq_complete(struct request *rq)
/*
* Internal elevator interface
*/
-#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
+#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)
void blk_insert_flush(struct request *rq);
-void blk_abort_flushes(struct request_queue *q);
static inline struct request *__elv_next_request(struct request_queue *q)
{
@@ -93,7 +115,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
q->flush_queue_delayed = 1;
return NULL;
}
- if (unlikely(blk_queue_dead(q)) ||
+ if (unlikely(blk_queue_bypass(q)) ||
!q->elevator->type->ops.elevator_dispatch_fn(q, 0))
return NULL;
}
@@ -142,10 +164,9 @@ int blk_try_merge(struct request *rq, struct bio *bio);
void blk_queue_congestion_threshold(struct request_queue *q);
-int blk_dev_init(void);
+void __blk_run_queue_uncond(struct request_queue *q);
-void elv_quiesce_start(struct request_queue *q);
-void elv_quiesce_end(struct request_queue *q);
+int blk_dev_init(void);
/*
@@ -166,35 +187,20 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
return q->nr_congestion_off;
}
-static inline int blk_cpu_to_group(int cpu)
-{
- int group = NR_CPUS;
-#ifdef CONFIG_SCHED_MC
- const struct cpumask *mask = cpu_coregroup_mask(cpu);
- group = cpumask_first(mask);
-#elif defined(CONFIG_SCHED_SMT)
- group = cpumask_first(topology_thread_cpumask(cpu));
-#else
- return cpu;
-#endif
- if (likely(group < NR_CPUS))
- return group;
- return cpu;
-}
+extern int blk_update_nr_requests(struct request_queue *, unsigned int);
/*
* Contribute to IO statistics IFF:
*
* a) it's attached to a gendisk, and
* b) the queue had IO stats enabled when this request was started, and
- * c) it's a file system request or a discard request
+ * c) it's a file system request
*/
static inline int blk_do_io_stat(struct request *rq)
{
return rq->rq_disk &&
(rq->cmd_flags & REQ_IO_STAT) &&
- (rq->cmd_type == REQ_TYPE_FS ||
- (rq->cmd_flags & REQ_DISCARD));
+ (rq->cmd_type == REQ_TYPE_FS);
}
/*
@@ -202,32 +208,30 @@ static inline int blk_do_io_stat(struct request *rq)
*/
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+ gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
- int node);
+int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
/**
* create_io_context - try to create task->io_context
- * @task: target task
* @gfp_mask: allocation mask
* @node: allocation node
*
- * If @task->io_context is %NULL, allocate a new io_context and install it.
- * Returns the current @task->io_context which may be %NULL if allocation
- * failed.
+ * If %current->io_context is %NULL, allocate a new io_context and install
+ * it. Returns the current %current->io_context which may be %NULL if
+ * allocation failed.
*
* Note that this function can't be called with IRQ disabled because
- * task_lock which protects @task->io_context is IRQ-unsafe.
+ * task_lock which protects %current->io_context is IRQ-unsafe.
*/
-static inline struct io_context *create_io_context(struct task_struct *task,
- gfp_t gfp_mask, int node)
+static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
WARN_ON_ONCE(irqs_disabled());
- if (unlikely(!task->io_context))
- create_io_context_slowpath(task, gfp_mask, node);
- return task->io_context;
+ if (unlikely(!current->io_context))
+ create_task_io_context(current, gfp_mask, node);
+ return current->io_context;
}
/*
@@ -238,7 +242,6 @@ extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
-extern void blk_throtl_release(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
@@ -247,7 +250,6 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
-static inline void blk_throtl_release(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#endif /* BLK_INTERNAL_H */
diff --git a/block/bounce.c b/block/bounce.c
new file mode 100644
index 00000000000..ab21ba203d5
--- /dev/null
+++ b/block/bounce.c
@@ -0,0 +1,290 @@
+/* bounce buffer handling for block devices
+ *
+ * - Split from highmem.c
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm.h>
+#include <linux/export.h>
+#include <linux/swap.h>
+#include <linux/gfp.h>
+#include <linux/bio.h>
+#include <linux/pagemap.h>
+#include <linux/mempool.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/hash.h>
+#include <linux/highmem.h>
+#include <linux/bootmem.h>
+#include <linux/printk.h>
+#include <asm/tlbflush.h>
+
+#include <trace/events/block.h>
+
+#define POOL_SIZE 64
+#define ISA_POOL_SIZE 16
+
+static mempool_t *page_pool, *isa_page_pool;
+
+#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
+static __init int init_emergency_pool(void)
+{
+#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
+ if (max_pfn <= max_low_pfn)
+ return 0;
+#endif
+
+ page_pool = mempool_create_page_pool(POOL_SIZE, 0);
+ BUG_ON(!page_pool);
+ pr_info("pool size: %d pages\n", POOL_SIZE);
+
+ return 0;
+}
+
+__initcall(init_emergency_pool);
+#endif
+
+#ifdef CONFIG_HIGHMEM
+/*
+ * highmem version, map in to vec
+ */
+static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
+{
+ unsigned long flags;
+ unsigned char *vto;
+
+ local_irq_save(flags);
+ vto = kmap_atomic(to->bv_page);
+ memcpy(vto + to->bv_offset, vfrom, to->bv_len);
+ kunmap_atomic(vto);
+ local_irq_restore(flags);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+#define bounce_copy_vec(to, vfrom) \
+ memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
+
+#endif /* CONFIG_HIGHMEM */
+
+/*
+ * allocate pages in the DMA region for the ISA pool
+ */
+static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
+{
+ return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
+}
+
+/*
+ * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
+ * as the max address, so check if the pool has already been created.
+ */
+int init_emergency_isa_pool(void)
+{
+ if (isa_page_pool)
+ return 0;
+
+ isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
+ mempool_free_pages, (void *) 0);
+ BUG_ON(!isa_page_pool);
+
+ pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
+ return 0;
+}
+
+/*
+ * Simple bounce buffer support for highmem pages. Depending on the
+ * queue gfp mask set, *to may or may not be a highmem page. kmap it
+ * always, it will do the Right Thing
+ */
+static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
+{
+ unsigned char *vfrom;
+ struct bio_vec tovec, *fromvec = from->bi_io_vec;
+ struct bvec_iter iter;
+
+ bio_for_each_segment(tovec, to, iter) {
+ if (tovec.bv_page != fromvec->bv_page) {
+ /*
+ * fromvec->bv_offset and fromvec->bv_len might have
+ * been modified by the block layer, so use the original
+ * copy, bounce_copy_vec already uses tovec->bv_len
+ */
+ vfrom = page_address(fromvec->bv_page) +
+ tovec.bv_offset;
+
+ bounce_copy_vec(&tovec, vfrom);
+ flush_dcache_page(tovec.bv_page);
+ }
+
+ fromvec++;
+ }
+}
+
+static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
+{
+ struct bio *bio_orig = bio->bi_private;
+ struct bio_vec *bvec, *org_vec;
+ int i;
+
+ if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
+ set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);
+
+ /*
+ * free up bounce indirect pages used
+ */
+ bio_for_each_segment_all(bvec, bio, i) {
+ org_vec = bio_orig->bi_io_vec + i;
+ if (bvec->bv_page == org_vec->bv_page)
+ continue;
+
+ dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
+ mempool_free(bvec->bv_page, pool);
+ }
+
+ bio_endio(bio_orig, err);
+ bio_put(bio);
+}
+
+static void bounce_end_io_write(struct bio *bio, int err)
+{
+ bounce_end_io(bio, page_pool, err);
+}
+
+static void bounce_end_io_write_isa(struct bio *bio, int err)
+{
+
+ bounce_end_io(bio, isa_page_pool, err);
+}
+
+static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
+{
+ struct bio *bio_orig = bio->bi_private;
+
+ if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+ copy_to_high_bio_irq(bio_orig, bio);
+
+ bounce_end_io(bio, pool, err);
+}
+
+static void bounce_end_io_read(struct bio *bio, int err)
+{
+ __bounce_end_io_read(bio, page_pool, err);
+}
+
+static void bounce_end_io_read_isa(struct bio *bio, int err)
+{
+ __bounce_end_io_read(bio, isa_page_pool, err);
+}
+
+#ifdef CONFIG_NEED_BOUNCE_POOL
+static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
+{
+ if (bio_data_dir(bio) != WRITE)
+ return 0;
+
+ if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
+ return 0;
+
+ return test_bit(BIO_SNAP_STABLE, &bio->bi_flags);
+}
+#else
+static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
+{
+ return 0;
+}
+#endif /* CONFIG_NEED_BOUNCE_POOL */
+
+static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
+ mempool_t *pool, int force)
+{
+ struct bio *bio;
+ int rw = bio_data_dir(*bio_orig);
+ struct bio_vec *to, from;
+ struct bvec_iter iter;
+ unsigned i;
+
+ if (force)
+ goto bounce;
+ bio_for_each_segment(from, *bio_orig, iter)
+ if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
+ goto bounce;
+
+ return;
+bounce:
+ bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);
+
+ bio_for_each_segment_all(to, bio, i) {
+ struct page *page = to->bv_page;
+
+ if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
+ continue;
+
+ inc_zone_page_state(to->bv_page, NR_BOUNCE);
+ to->bv_page = mempool_alloc(pool, q->bounce_gfp);
+
+ if (rw == WRITE) {
+ char *vto, *vfrom;
+
+ flush_dcache_page(page);
+
+ vto = page_address(to->bv_page) + to->bv_offset;
+ vfrom = kmap_atomic(page) + to->bv_offset;
+ memcpy(vto, vfrom, to->bv_len);
+ kunmap_atomic(vfrom);
+ }
+ }
+
+ trace_block_bio_bounce(q, *bio_orig);
+
+ bio->bi_flags |= (1 << BIO_BOUNCED);
+
+ if (pool == page_pool) {
+ bio->bi_end_io = bounce_end_io_write;
+ if (rw == READ)
+ bio->bi_end_io = bounce_end_io_read;
+ } else {
+ bio->bi_end_io = bounce_end_io_write_isa;
+ if (rw == READ)
+ bio->bi_end_io = bounce_end_io_read_isa;
+ }
+
+ bio->bi_private = *bio_orig;
+ *bio_orig = bio;
+}
+
+void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
+{
+ int must_bounce;
+ mempool_t *pool;
+
+ /*
+ * Data-less bio, nothing to bounce
+ */
+ if (!bio_has_data(*bio_orig))
+ return;
+
+ must_bounce = must_snapshot_stable_pages(q, *bio_orig);
+
+ /*
+ * for non-isa bounce case, just check if the bounce pfn is equal
+ * to or bigger than the highest pfn in the system -- in that case,
+ * don't waste time iterating over bio segments
+ */
+ if (!(q->bounce_gfp & GFP_DMA)) {
+ if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce)
+ return;
+ pool = page_pool;
+ } else {
+ BUG_ON(!isa_page_pool);
+ pool = isa_page_pool;
+ }
+
+ /*
+ * slow path
+ */
+ __blk_queue_bounce(q, bio_orig, pool, must_bounce);
+}
+
+EXPORT_SYMBOL(blk_queue_bounce);
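
blk_queue_bounce() above only drops into the slow path when a copy can actually be required: data-less bios are skipped, stable-page writes are forced to bounce, and non-ISA queues whose bounce pfn already covers all of memory return early without walking the segments. A condensed userspace model of that decision; MAX_PFN and the struct fields are invented for the example and do not correspond to the kernel interfaces:

#include <stdbool.h>
#include <stdio.h>

#define MAX_PFN 1048576UL		/* assumed highest pfn in the system */

struct queue_model {
	bool isa_dma;			/* bounce_gfp would include GFP_DMA */
	unsigned long bounce_pfn;	/* highest pfn the device can address */
};

static bool needs_bounce(const struct queue_model *q, bool stable_write,
			 unsigned long highest_bio_pfn)
{
	if (stable_write)
		return true;		/* must snapshot pages under writeback */
	if (!q->isa_dma && q->bounce_pfn >= MAX_PFN)
		return false;		/* device reaches all memory: fast exit */
	return highest_bio_pfn > q->bounce_pfn;	/* copy only unreachable pages */
}

int main(void)
{
	struct queue_model q = { false, 262144 };	/* ~1GB addressing limit */

	printf("%d\n", needs_bounce(&q, false, 100000));	/* reachable: 0 */
	printf("%d\n", needs_bounce(&q, false, 500000));	/* too high:  1 */
	return 0;
}
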
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 7ad49c88f6b..650f427d915 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -151,19 +151,6 @@ failjob_rls_job:
return -ENOMEM;
}
-/*
- * bsg_goose_queue - restart queue in case it was stopped
- * @q: request q to be restarted
- */
-void bsg_goose_queue(struct request_queue *q)
-{
- if (!q)
- return;
-
- blk_run_queue_async(q);
-}
-EXPORT_SYMBOL_GPL(bsg_goose_queue);
-
/**
* bsg_request_fn - generic handler for bsg requests
* @q: request queue to manage
@@ -243,56 +230,3 @@ int bsg_setup_queue(struct device *dev, struct request_queue *q,
return 0;
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);
-
-/**
- * bsg_remove_queue - Deletes the bsg dev from the q
- * @q: the request_queue that is to be torn down.
- *
- * Notes:
- * Before unregistering the queue empty any requests that are blocked
- */
-void bsg_remove_queue(struct request_queue *q)
-{
- struct request *req; /* block request */
- int counts; /* totals for request_list count and starved */
-
- if (!q)
- return;
-
- /* Stop taking in new requests */
- spin_lock_irq(q->queue_lock);
- blk_stop_queue(q);
-
- /* drain all requests in the queue */
- while (1) {
- /* need the lock to fetch a request
- * this may fetch the same request as the previous pass
- */
- req = blk_fetch_request(q);
- /* save requests in use and starved */
- counts = q->rq.count[0] + q->rq.count[1] +
- q->rq.starved[0] + q->rq.starved[1];
- spin_unlock_irq(q->queue_lock);
- /* any requests still outstanding? */
- if (counts == 0)
- break;
-
- /* This may be the same req as the previous iteration,
- * always send the blk_end_request_all after a prefetch.
- * It is not okay to not end the request because the
- * prefetch started the request.
- */
- if (req) {
- /* return -ENXIO to indicate that this queue is
- * going away
- */
- req->errors = -ENXIO;
- blk_end_request_all(req, -ENXIO);
- }
-
- msleep(200); /* allow bsg to possibly finish */
- spin_lock_irq(q->queue_lock);
- }
- bsg_unregister_queue(q);
-}
-EXPORT_SYMBOL_GPL(bsg_remove_queue);
diff --git a/block/bsg.c b/block/bsg.c
index ff64ae3bace..ff46addde5d 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -196,7 +196,6 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
* fill in request structure
*/
rq->cmd_len = hdr->request_len;
- rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->timeout = msecs_to_jiffies(hdr->timeout);
if (!rq->timeout)
@@ -273,6 +272,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
rq = blk_get_request(q, rw, GFP_KERNEL);
if (!rq)
return ERR_PTR(-ENOMEM);
+ blk_rq_set_block_pc(rq);
+
ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
if (ret)
goto out;
@@ -800,11 +801,10 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
struct bsg_device *bd;
- struct hlist_node *entry;
mutex_lock(&bsg_mutex);
- hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
+ hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
if (bd->queue == q) {
atomic_inc(&bd->ref_count);
goto found;
@@ -997,7 +997,7 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
{
struct bsg_class_device *bcd;
dev_t dev;
- int ret, minor;
+ int ret;
struct device *class_dev = NULL;
const char *devname;
@@ -1009,7 +1009,7 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
/*
* we need a proper transport to send commands, not a stacked device
*/
- if (!q->request_fn)
+ if (!queue_is_rq_based(q))
return 0;
bcd = &q->bsg_dev;
@@ -1017,23 +1017,16 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
mutex_lock(&bsg_mutex);
- ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
- if (!ret) {
- ret = -ENOMEM;
- goto unlock;
- }
-
- ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
- if (ret < 0)
+ ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
+ if (ret < 0) {
+ if (ret == -ENOSPC) {
+ printk(KERN_ERR "bsg: too many bsg devices\n");
+ ret = -EINVAL;
+ }
goto unlock;
-
- if (minor >= BSG_MAX_DEVS) {
- printk(KERN_ERR "bsg: too many bsg devices\n");
- ret = -EINVAL;
- goto remove_idr;
}
- bcd->minor = minor;
+ bcd->minor = ret;
bcd->queue = q;
bcd->parent = get_device(parent);
bcd->release = release;
@@ -1059,8 +1052,7 @@ unregister_class_dev:
device_unregister(class_dev);
put_dev:
put_device(parent);
-remove_idr:
- idr_remove(&bsg_minor_idr, minor);
+ idr_remove(&bsg_minor_idr, bcd->minor);
unlock:
mutex_unlock(&bsg_mutex);
return ret;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 45729525356..cadc3784174 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -15,7 +15,7 @@
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
-#include "cfq.h"
+#include "blk-cgroup.h"
/*
* tunables
@@ -85,7 +85,6 @@ struct cfq_rb_root {
struct rb_root rb;
struct rb_node *left;
unsigned count;
- unsigned total_weight;
u64 min_vdisktime;
struct cfq_ttime ttime;
};
@@ -155,7 +154,7 @@ struct cfq_queue {
* First index in the service_trees.
* IDLE is handled separately, so it has negative index
*/
-enum wl_prio_t {
+enum wl_class_t {
BE_WORKLOAD = 0,
RT_WORKLOAD = 1,
IDLE_WORKLOAD = 2,
@@ -171,16 +170,96 @@ enum wl_type_t {
SYNC_WORKLOAD = 2
};
+struct cfqg_stats {
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ /* total bytes transferred */
+ struct blkg_rwstat service_bytes;
+ /* total IOs serviced, post merge */
+ struct blkg_rwstat serviced;
+ /* number of ios merged */
+ struct blkg_rwstat merged;
+ /* total time spent on device in ns, may not be accurate w/ queueing */
+ struct blkg_rwstat service_time;
+ /* total time spent waiting in scheduler queue in ns */
+ struct blkg_rwstat wait_time;
+ /* number of IOs queued up */
+ struct blkg_rwstat queued;
+ /* total sectors transferred */
+ struct blkg_stat sectors;
+ /* total disk time and nr sectors dispatched by this group */
+ struct blkg_stat time;
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ /* time not charged to this cgroup */
+ struct blkg_stat unaccounted_time;
+ /* sum of number of ios queued across all samples */
+ struct blkg_stat avg_queue_size_sum;
+ /* count of samples taken for average */
+ struct blkg_stat avg_queue_size_samples;
+ /* how many times this group has been removed from service tree */
+ struct blkg_stat dequeue;
+ /* total time spent waiting for it to be assigned a timeslice. */
+ struct blkg_stat group_wait_time;
+ /* time spent idling for this blkcg_gq */
+ struct blkg_stat idle_time;
+ /* total time with empty current active q with other requests queued */
+ struct blkg_stat empty_time;
+ /* fields after this shouldn't be cleared on stat reset */
+ uint64_t start_group_wait_time;
+ uint64_t start_idle_time;
+ uint64_t start_empty_time;
+ uint16_t flags;
+#endif /* CONFIG_DEBUG_BLK_CGROUP */
+#endif /* CONFIG_CFQ_GROUP_IOSCHED */
+};
+
/* This is per cgroup per device grouping structure */
struct cfq_group {
+ /* must be the first member */
+ struct blkg_policy_data pd;
+
/* group service_tree member */
struct rb_node rb_node;
/* group service_tree key */
u64 vdisktime;
+
+ /*
+ * The number of active cfqgs and sum of their weights under this
+ * cfqg. This covers this cfqg's leaf_weight and all children's
+ * weights, but does not cover weights of further descendants.
+ *
+ * If a cfqg is on the service tree, it's active. An active cfqg
+ * also activates its parent and contributes to the children_weight
+ * of the parent.
+ */
+ int nr_active;
+ unsigned int children_weight;
+
+ /*
+ * vfraction is the fraction of vdisktime that the tasks in this
+ * cfqg are entitled to. This is determined by compounding the
+ * ratios walking up from this cfqg to the root.
+ *
+ * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
+ * vfractions on a service tree is approximately 1. The sum may
+ * deviate a bit due to rounding errors and fluctuations caused by
+ * cfqgs entering and leaving the service tree.
+ */
+ unsigned int vfraction;
+
+ /*
+ * There are two weights - (internal) weight is the weight of this
+	 * cfqg against the sibling cfqgs.  leaf_weight is the weight of
+ * this cfqg against the child cfqgs. For the root cfqg, both
+ * weights are kept in sync for backward compatibility.
+ */
unsigned int weight;
unsigned int new_weight;
- bool needs_update;
+ unsigned int dev_weight;
+
+ unsigned int leaf_weight;
+ unsigned int new_leaf_weight;
+ unsigned int dev_leaf_weight;
/* number of cfqq currently on this group */
int nr_cfqq;
@@ -203,23 +282,25 @@ struct cfq_group {
struct cfq_rb_root service_trees[2][3];
struct cfq_rb_root service_tree_idle;
- unsigned long saved_workload_slice;
- enum wl_type_t saved_workload;
- enum wl_prio_t saved_serving_prio;
- struct blkio_group blkg;
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- struct hlist_node cfqd_node;
- int ref;
-#endif
+ unsigned long saved_wl_slice;
+ enum wl_type_t saved_wl_type;
+ enum wl_class_t saved_wl_class;
+
/* number of requests that are on the dispatch list or inside driver */
int dispatched;
struct cfq_ttime ttime;
+ struct cfqg_stats stats; /* stats for this cfqg */
+ struct cfqg_stats dead_stats; /* stats pushed from dead children */
};
struct cfq_io_cq {
struct io_cq icq; /* must be the first member */
struct cfq_queue *cfqq[2];
struct cfq_ttime ttime;
+ int ioprio; /* the current ioprio */
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ uint64_t blkcg_id; /* the current blkcg ID */
+#endif
};
/*
@@ -229,13 +310,13 @@ struct cfq_data {
struct request_queue *queue;
/* Root service tree for cfq_groups */
struct cfq_rb_root grp_service_tree;
- struct cfq_group root_group;
+ struct cfq_group *root_group;
/*
* The priority currently being served
*/
- enum wl_prio_t serving_prio;
- enum wl_type_t serving_type;
+ enum wl_class_t serving_wl_class;
+ enum wl_type_t serving_wl_type;
unsigned long workload_expires;
struct cfq_group *serving_group;
@@ -295,6 +376,7 @@ struct cfq_data {
unsigned int cfq_slice_idle;
unsigned int cfq_group_idle;
unsigned int cfq_latency;
+ unsigned int cfq_target_latency;
/*
* Fallback dummy cfqq for extreme OOM conditions
@@ -302,27 +384,21 @@ struct cfq_data {
struct cfq_queue oom_cfqq;
unsigned long last_delayed_sync;
-
- /* List of cfq groups being managed on this device*/
- struct hlist_head cfqg_list;
-
- /* Number of groups which are on blkcg->blkg_list */
- unsigned int nr_blkcg_linked_grps;
};
static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
-static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
- enum wl_prio_t prio,
+static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
+ enum wl_class_t class,
enum wl_type_t type)
{
if (!cfqg)
return NULL;
- if (prio == IDLE_WORKLOAD)
+ if (class == IDLE_WORKLOAD)
return &cfqg->service_tree_idle;
- return &cfqg->service_trees[prio][type];
+ return &cfqg->service_trees[class][type];
}
enum cfqq_state_flags {
@@ -370,21 +446,337 @@ CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
+static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
+{
+ return pd ? container_of(pd, struct cfq_group, pd) : NULL;
+}
+
+static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
+{
+ return pd_to_blkg(&cfqg->pd);
+}
+
+#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+
+/* cfqg stats flags */
+enum cfqg_stats_flags {
+ CFQG_stats_waiting = 0,
+ CFQG_stats_idling,
+ CFQG_stats_empty,
+};
+
+#define CFQG_FLAG_FNS(name) \
+static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats) \
+{ \
+ stats->flags |= (1 << CFQG_stats_##name); \
+} \
+static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats) \
+{ \
+ stats->flags &= ~(1 << CFQG_stats_##name); \
+} \
+static inline int cfqg_stats_##name(struct cfqg_stats *stats) \
+{ \
+ return (stats->flags & (1 << CFQG_stats_##name)) != 0; \
+} \
+
+CFQG_FLAG_FNS(waiting)
+CFQG_FLAG_FNS(idling)
+CFQG_FLAG_FNS(empty)
+#undef CFQG_FLAG_FNS
+
+/* This should be called with the queue_lock held. */
+static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
+{
+ unsigned long long now;
+
+ if (!cfqg_stats_waiting(stats))
+ return;
+
+ now = sched_clock();
+ if (time_after64(now, stats->start_group_wait_time))
+ blkg_stat_add(&stats->group_wait_time,
+ now - stats->start_group_wait_time);
+ cfqg_stats_clear_waiting(stats);
+}
+
+/* This should be called with the queue_lock held. */
+static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
+ struct cfq_group *curr_cfqg)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ if (cfqg_stats_waiting(stats))
+ return;
+ if (cfqg == curr_cfqg)
+ return;
+ stats->start_group_wait_time = sched_clock();
+ cfqg_stats_mark_waiting(stats);
+}
+
+/* This should be called with the queue_lock held. */
+static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
+{
+ unsigned long long now;
+
+ if (!cfqg_stats_empty(stats))
+ return;
+
+ now = sched_clock();
+ if (time_after64(now, stats->start_empty_time))
+ blkg_stat_add(&stats->empty_time,
+ now - stats->start_empty_time);
+ cfqg_stats_clear_empty(stats);
+}
+
+static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
+{
+ blkg_stat_add(&cfqg->stats.dequeue, 1);
+}
+
+static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ if (blkg_rwstat_total(&stats->queued))
+ return;
+
+ /*
+ * group is already marked empty. This can happen if cfqq got new
+ * request in parent group and moved to this group while being added
+ * to service tree. Just ignore the event and move on.
+ */
+ if (cfqg_stats_empty(stats))
+ return;
+
+ stats->start_empty_time = sched_clock();
+ cfqg_stats_mark_empty(stats);
+}
+
+static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ if (cfqg_stats_idling(stats)) {
+ unsigned long long now = sched_clock();
+
+ if (time_after64(now, stats->start_idle_time))
+ blkg_stat_add(&stats->idle_time,
+ now - stats->start_idle_time);
+ cfqg_stats_clear_idling(stats);
+ }
+}
+
+static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ BUG_ON(cfqg_stats_idling(stats));
+
+ stats->start_idle_time = sched_clock();
+ cfqg_stats_mark_idling(stats);
+}
+
+static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ blkg_stat_add(&stats->avg_queue_size_sum,
+ blkg_rwstat_total(&stats->queued));
+ blkg_stat_add(&stats->avg_queue_size_samples, 1);
+ cfqg_stats_update_group_wait_time(stats);
+}
+
+#else /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+
+static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
+static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
+static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
+
+#endif /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+
#ifdef CONFIG_CFQ_GROUP_IOSCHED
-#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
- blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
- cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
- blkg_path(&(cfqq)->cfqg->blkg), ##args)
-#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \
- blk_add_trace_msg((cfqd)->queue, "%s " fmt, \
- blkg_path(&(cfqg)->blkg), ##args) \
+static struct blkcg_policy blkcg_policy_cfq;
+
+static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
+{
+ return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
+}
+
+static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
+{
+ struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;
+
+ return pblkg ? blkg_to_cfqg(pblkg) : NULL;
+}
+
+static inline void cfqg_get(struct cfq_group *cfqg)
+{
+ return blkg_get(cfqg_to_blkg(cfqg));
+}
+
+static inline void cfqg_put(struct cfq_group *cfqg)
+{
+ return blkg_put(cfqg_to_blkg(cfqg));
+}
+
+#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) do { \
+ char __pbuf[128]; \
+ \
+ blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf)); \
+ blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
+ cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
+ cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
+ __pbuf, ##args); \
+} while (0)
+
+#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do { \
+ char __pbuf[128]; \
+ \
+ blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf)); \
+ blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args); \
+} while (0)
+
+static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
+ struct cfq_group *curr_cfqg, int rw)
+{
+ blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
+ cfqg_stats_end_empty_time(&cfqg->stats);
+ cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
+}
+
+static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
+ unsigned long time, unsigned long unaccounted_time)
+{
+ blkg_stat_add(&cfqg->stats.time, time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
+#endif
+}
+
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
+{
+ blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
+}
+
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
+{
+ blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
+}
+
+static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
+ uint64_t bytes, int rw)
+{
+ blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
+ blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
+ blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
+}
+
+static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
+ uint64_t start_time, uint64_t io_start_time, int rw)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+ unsigned long long now = sched_clock();
+
+ if (time_after64(now, io_start_time))
+ blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
+ if (time_after64(io_start_time, start_time))
+ blkg_rwstat_add(&stats->wait_time, rw,
+ io_start_time - start_time);
+}
+
+/* @stats = 0 */
+static void cfqg_stats_reset(struct cfqg_stats *stats)
+{
+ /* queued stats shouldn't be cleared */
+ blkg_rwstat_reset(&stats->service_bytes);
+ blkg_rwstat_reset(&stats->serviced);
+ blkg_rwstat_reset(&stats->merged);
+ blkg_rwstat_reset(&stats->service_time);
+ blkg_rwstat_reset(&stats->wait_time);
+ blkg_stat_reset(&stats->time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ blkg_stat_reset(&stats->unaccounted_time);
+ blkg_stat_reset(&stats->avg_queue_size_sum);
+ blkg_stat_reset(&stats->avg_queue_size_samples);
+ blkg_stat_reset(&stats->dequeue);
+ blkg_stat_reset(&stats->group_wait_time);
+ blkg_stat_reset(&stats->idle_time);
+ blkg_stat_reset(&stats->empty_time);
+#endif
+}
+
+/* @to += @from */
+static void cfqg_stats_merge(struct cfqg_stats *to, struct cfqg_stats *from)
+{
+ /* queued stats shouldn't be cleared */
+ blkg_rwstat_merge(&to->service_bytes, &from->service_bytes);
+ blkg_rwstat_merge(&to->serviced, &from->serviced);
+ blkg_rwstat_merge(&to->merged, &from->merged);
+ blkg_rwstat_merge(&to->service_time, &from->service_time);
+ blkg_rwstat_merge(&to->wait_time, &from->wait_time);
+	blkg_stat_merge(&to->time, &from->time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ blkg_stat_merge(&to->unaccounted_time, &from->unaccounted_time);
+ blkg_stat_merge(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
+ blkg_stat_merge(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
+ blkg_stat_merge(&to->dequeue, &from->dequeue);
+ blkg_stat_merge(&to->group_wait_time, &from->group_wait_time);
+ blkg_stat_merge(&to->idle_time, &from->idle_time);
+ blkg_stat_merge(&to->empty_time, &from->empty_time);
+#endif
+}
+
+/*
+ * Transfer @cfqg's stats to its parent's dead_stats so that the ancestors'
+ * recursive stats can still account for the amount used by this cfqg after
+ * it's gone.
+ */
+static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
+{
+ struct cfq_group *parent = cfqg_parent(cfqg);
+
+ lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);
+
+ if (unlikely(!parent))
+ return;
+
+ cfqg_stats_merge(&parent->dead_stats, &cfqg->stats);
+ cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats);
+ cfqg_stats_reset(&cfqg->stats);
+ cfqg_stats_reset(&cfqg->dead_stats);
+}
+
+#else /* CONFIG_CFQ_GROUP_IOSCHED */
+
+static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
+static inline void cfqg_get(struct cfq_group *cfqg) { }
+static inline void cfqg_put(struct cfq_group *cfqg) { }
-#else
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
- blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
+ blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \
+ cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
+ cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
+ ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
-#endif
+
+static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
+ struct cfq_group *curr_cfqg, int rw) { }
+static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
+ unsigned long time, unsigned long unaccounted_time) { }
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
+static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
+ uint64_t bytes, int rw) { }
+static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
+ uint64_t start_time, uint64_t io_start_time, int rw) { }
+
+#endif /* CONFIG_CFQ_GROUP_IOSCHED */
+
#define cfq_log(cfqd, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
@@ -426,7 +818,7 @@ static inline bool iops_mode(struct cfq_data *cfqd)
return false;
}
-static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
+static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
{
if (cfq_class_idle(cfqq))
return IDLE_WORKLOAD;
@@ -445,28 +837,29 @@ static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
return SYNC_WORKLOAD;
}
-static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
+static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
struct cfq_data *cfqd,
struct cfq_group *cfqg)
{
- if (wl == IDLE_WORKLOAD)
+ if (wl_class == IDLE_WORKLOAD)
return cfqg->service_tree_idle.count;
- return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
- + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
- + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
+ return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
+ cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
+ cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
}
static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
struct cfq_group *cfqg)
{
- return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
- + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
+ return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
+ cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}
static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
- struct io_context *, gfp_t);
+static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
+ struct cfq_io_cq *cic, struct bio *bio,
+ gfp_t gfp_mask);
static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
@@ -515,7 +908,7 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
if (cfqd->busy_queues) {
cfq_log(cfqd, "schedule dispatch");
- kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
+ kblockd_schedule_work(&cfqd->unplug_work);
}
}
@@ -540,13 +933,27 @@ cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}
-static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
+/**
+ * cfqg_scale_charge - scale disk time charge according to cfqg weight
+ * @charge: disk time being charged
+ * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
+ *
+ * Scale @charge according to @vfraction, which is in range (0, 1]. The
+ * scaling is inversely proportional.
+ *
+ * scaled = charge / vfraction
+ *
+ * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
+ */
+static inline u64 cfqg_scale_charge(unsigned long charge,
+ unsigned int vfraction)
{
- u64 d = delta << CFQ_SERVICE_SHIFT;
+ u64 c = charge << CFQ_SERVICE_SHIFT; /* make it fixed point */
- d = d * BLKIO_WEIGHT_DEFAULT;
- do_div(d, cfqg->weight);
- return d;
+ /* charge / vfraction */
+ c <<= CFQ_SERVICE_SHIFT;
+ do_div(c, vfraction);
+ return c;
}
static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
@@ -602,9 +1009,7 @@ static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
- struct cfq_rb_root *st = &cfqd->grp_service_tree;
-
- return cfq_target_latency * cfqg->weight / st->total_weight;
+ return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
}
static inline unsigned
@@ -871,20 +1276,61 @@ static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
- if (cfqg->needs_update) {
+
+ if (cfqg->new_weight) {
cfqg->weight = cfqg->new_weight;
- cfqg->needs_update = false;
+ cfqg->new_weight = 0;
+ }
+
+ if (cfqg->new_leaf_weight) {
+ cfqg->leaf_weight = cfqg->new_leaf_weight;
+ cfqg->new_leaf_weight = 0;
}
}
static void
cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
+ unsigned int vfr = 1 << CFQ_SERVICE_SHIFT; /* start with 1 */
+ struct cfq_group *pos = cfqg;
+ struct cfq_group *parent;
+ bool propagate;
+
+ /* add to the service tree */
BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
cfq_update_group_weight(cfqg);
__cfq_group_service_tree_add(st, cfqg);
- st->total_weight += cfqg->weight;
+
+ /*
+ * Activate @cfqg and calculate the portion of vfraction @cfqg is
+ * entitled to. vfraction is calculated by walking the tree
+ * towards the root calculating the fraction it has at each level.
+ * The compounded ratio is how much vfraction @cfqg owns.
+ *
+ * Start with the proportion tasks in this cfqg has against active
+ * children cfqgs - its leaf_weight against children_weight.
+ */
+ propagate = !pos->nr_active++;
+ pos->children_weight += pos->leaf_weight;
+ vfr = vfr * pos->leaf_weight / pos->children_weight;
+
+ /*
+ * Compound ->weight walking up the tree. Both activation and
+ * vfraction calculation are done in the same loop. Propagation
+ * stops once an already activated node is met. vfraction
+ * calculation should always continue to the root.
+ */
+ while ((parent = cfqg_parent(pos))) {
+ if (propagate) {
+ propagate = !parent->nr_active++;
+ parent->children_weight += pos->weight;
+ }
+ vfr = vfr * pos->weight / parent->children_weight;
+ pos = parent;
+ }
+
+ cfqg->vfraction = max_t(unsigned, vfr, 1);
}
static void
@@ -915,7 +1361,32 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
- st->total_weight -= cfqg->weight;
+ struct cfq_group *pos = cfqg;
+ bool propagate;
+
+ /*
+ * Undo activation from cfq_group_service_tree_add(). Deactivate
+ * @cfqg and propagate deactivation upwards.
+ */
+ propagate = !--pos->nr_active;
+ pos->children_weight -= pos->leaf_weight;
+
+ while (propagate) {
+ struct cfq_group *parent = cfqg_parent(pos);
+
+ /* @pos has 0 nr_active at this point */
+ WARN_ON_ONCE(pos->children_weight);
+ pos->vfraction = 0;
+
+ if (!parent)
+ break;
+
+ propagate = !--parent->nr_active;
+ parent->children_weight -= pos->weight;
+ pos = parent;
+ }
+
+ /* remove from the service tree */
if (!RB_EMPTY_NODE(&cfqg->rb_node))
cfq_rb_erase(&cfqg->rb_node, st);
}
@@ -934,8 +1405,8 @@ cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
cfq_group_service_tree_del(st, cfqg);
- cfqg->saved_workload_slice = 0;
- cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
+ cfqg->saved_wl_slice = 0;
+ cfqg_stats_update_dequeue(cfqg);
}
static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
@@ -977,6 +1448,7 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
unsigned int used_sl, charge, unaccounted_sl = 0;
int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
- cfqg->service_tree_idle.count;
+ unsigned int vfr;
BUG_ON(nr_sync < 0);
used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
@@ -986,20 +1458,25 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
charge = cfqq->allocated_slice;
- /* Can't update vdisktime while group is on service tree */
+ /*
+ * Can't update vdisktime while on service tree and cfqg->vfraction
+ * is valid only while on it. Cache vfr, leave the service tree,
+ * update vdisktime and go back on. The re-addition to the tree
+ * will also update the weights as necessary.
+ */
+ vfr = cfqg->vfraction;
cfq_group_service_tree_del(st, cfqg);
- cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
- /* If a new weight was requested, update now, off tree */
+ cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
cfq_group_service_tree_add(st, cfqg);
/* This group is being expired. Save the context */
if (time_after(cfqd->workload_expires, jiffies)) {
- cfqg->saved_workload_slice = cfqd->workload_expires
+ cfqg->saved_wl_slice = cfqd->workload_expires
- jiffies;
- cfqg->saved_workload = cfqd->serving_type;
- cfqg->saved_serving_prio = cfqd->serving_prio;
+ cfqg->saved_wl_type = cfqd->serving_wl_type;
+ cfqg->saved_wl_class = cfqd->serving_wl_class;
} else
- cfqg->saved_workload_slice = 0;
+ cfqg->saved_wl_slice = 0;
cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
st->min_vdisktime);
@@ -1007,273 +1484,514 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
"sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
used_sl, cfqq->slice_dispatch, charge,
iops_mode(cfqd), cfqq->nr_sectors);
- cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
- unaccounted_sl);
- cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
+ cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
+ cfqg_stats_set_start_empty_time(cfqg);
}
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
+/**
+ * cfq_init_cfqg_base - initialize base part of a cfq_group
+ * @cfqg: cfq_group to initialize
+ *
+ * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
+ * is enabled or not.
+ */
+static void cfq_init_cfqg_base(struct cfq_group *cfqg)
{
- if (blkg)
- return container_of(blkg, struct cfq_group, blkg);
- return NULL;
+ struct cfq_rb_root *st;
+ int i, j;
+
+ for_each_cfqg_st(cfqg, i, j, st)
+ *st = CFQ_RB_ROOT;
+ RB_CLEAR_NODE(&cfqg->rb_node);
+
+ cfqg->ttime.last_end_request = jiffies;
}
-static void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
- unsigned int weight)
-{
- struct cfq_group *cfqg = cfqg_of_blkg(blkg);
- cfqg->new_weight = weight;
- cfqg->needs_update = true;
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+static void cfqg_stats_init(struct cfqg_stats *stats)
+{
+ blkg_rwstat_init(&stats->service_bytes);
+ blkg_rwstat_init(&stats->serviced);
+ blkg_rwstat_init(&stats->merged);
+ blkg_rwstat_init(&stats->service_time);
+ blkg_rwstat_init(&stats->wait_time);
+ blkg_rwstat_init(&stats->queued);
+
+ blkg_stat_init(&stats->sectors);
+ blkg_stat_init(&stats->time);
+
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ blkg_stat_init(&stats->unaccounted_time);
+ blkg_stat_init(&stats->avg_queue_size_sum);
+ blkg_stat_init(&stats->avg_queue_size_samples);
+ blkg_stat_init(&stats->dequeue);
+ blkg_stat_init(&stats->group_wait_time);
+ blkg_stat_init(&stats->idle_time);
+ blkg_stat_init(&stats->empty_time);
+#endif
}
-static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
- struct cfq_group *cfqg, struct blkio_cgroup *blkcg)
+static void cfq_pd_init(struct blkcg_gq *blkg)
{
- struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
- unsigned int major, minor;
+ struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+
+ cfq_init_cfqg_base(cfqg);
+ cfqg->weight = blkg->blkcg->cfq_weight;
+ cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight;
+ cfqg_stats_init(&cfqg->stats);
+ cfqg_stats_init(&cfqg->dead_stats);
+}
+static void cfq_pd_offline(struct blkcg_gq *blkg)
+{
/*
- * Add group onto cgroup list. It might happen that bdi->dev is
- * not initialized yet. Initialize this new group without major
- * and minor info and this info will be filled in once a new thread
- * comes for IO.
+ * @blkg is going offline and will be ignored by
+ * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
+ * that they don't get lost. If IOs complete after this point, the
+ * stats for them will be lost. Oh well...
*/
- if (bdi->dev) {
- sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
- cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
- (void *)cfqd, MKDEV(major, minor));
- } else
- cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
- (void *)cfqd, 0);
+ cfqg_stats_xfer_dead(blkg_to_cfqg(blkg));
+}
- cfqd->nr_blkcg_linked_grps++;
- cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
+/* offset delta from cfqg->stats to cfqg->dead_stats */
+static const int dead_stats_off_delta = offsetof(struct cfq_group, dead_stats) -
+ offsetof(struct cfq_group, stats);
- /* Add group on cfqd list */
- hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
+/* to be used by recursive prfill, sums live and dead stats recursively */
+static u64 cfqg_stat_pd_recursive_sum(struct blkg_policy_data *pd, int off)
+{
+ u64 sum = 0;
+
+ sum += blkg_stat_recursive_sum(pd, off);
+ sum += blkg_stat_recursive_sum(pd, off + dead_stats_off_delta);
+ return sum;
}
-/*
- * Should be called from sleepable context. No request queue lock as per
- * cpu stats are allocated dynamically and alloc_percpu needs to be called
- * from sleepable context.
- */
-static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
+/* to be used by recursive prfill, sums live and dead rwstats recursively */
+static struct blkg_rwstat cfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd,
+ int off)
{
- struct cfq_group *cfqg = NULL;
- int i, j, ret;
- struct cfq_rb_root *st;
+ struct blkg_rwstat a, b;
- cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
- if (!cfqg)
- return NULL;
+ a = blkg_rwstat_recursive_sum(pd, off);
+ b = blkg_rwstat_recursive_sum(pd, off + dead_stats_off_delta);
+ blkg_rwstat_merge(&a, &b);
+ return a;
+}
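The dead_stats_off_delta arithmetic above works because offsetof() yields compile-time byte offsets: adding the delta to an offset within ->stats gives the offset of the matching field within ->dead_stats, so one prfill callback can sum both copies. A minimal standalone sketch of the same offset-delta trick (illustrative only; the struct names below are made up):

    #include <stddef.h>
    #include <stdio.h>

    struct stats { long time; long sectors; };
    struct group { struct stats stats; struct stats dead_stats; };

    int main(void)
    {
            /* same idea as dead_stats_off_delta in the patch above */
            const size_t delta = offsetof(struct group, dead_stats) -
                                 offsetof(struct group, stats);
            size_t off = offsetof(struct group, stats) +
                         offsetof(struct stats, sectors);

            printf("live offset %zu, dead offset %zu\n", off, off + delta);
            return 0;
    }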
- for_each_cfqg_st(cfqg, i, j, st)
- *st = CFQ_RB_ROOT;
- RB_CLEAR_NODE(&cfqg->rb_node);
+static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
+{
+ struct cfq_group *cfqg = blkg_to_cfqg(blkg);
- cfqg->ttime.last_end_request = jiffies;
+ cfqg_stats_reset(&cfqg->stats);
+ cfqg_stats_reset(&cfqg->dead_stats);
+}
- /*
- * Take the initial reference that will be released on destroy
- * This can be thought of a joint reference by cgroup and
- * elevator which will be dropped by either elevator exit
- * or cgroup deletion path depending on who is exiting first.
- */
- cfqg->ref = 1;
+/*
+ * Search for the cfq group current task belongs to. request_queue lock must
+ * be held.
+ */
+static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
+ struct blkcg *blkcg)
+{
+ struct request_queue *q = cfqd->queue;
+ struct cfq_group *cfqg = NULL;
- ret = blkio_alloc_blkg_stats(&cfqg->blkg);
- if (ret) {
- kfree(cfqg);
- return NULL;
+ /* avoid lookup for the common case where there's no blkcg */
+ if (blkcg == &blkcg_root) {
+ cfqg = cfqd->root_group;
+ } else {
+ struct blkcg_gq *blkg;
+
+ blkg = blkg_lookup_create(blkcg, q);
+ if (!IS_ERR(blkg))
+ cfqg = blkg_to_cfqg(blkg);
}
return cfqg;
}
-static struct cfq_group *
-cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
+static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
- struct cfq_group *cfqg = NULL;
- void *key = cfqd;
- struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
- unsigned int major, minor;
+ /* Currently, all async queues are mapped to root group */
+ if (!cfq_cfqq_sync(cfqq))
+ cfqg = cfqq->cfqd->root_group;
- /*
- * This is the common case when there are no blkio cgroups.
- * Avoid lookup in this case
- */
- if (blkcg == &blkio_root_cgroup)
- cfqg = &cfqd->root_group;
- else
- cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
+ cfqq->cfqg = cfqg;
+ /* cfqq reference on cfqg */
+ cfqg_get(cfqg);
+}
- if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
- sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
- cfqg->blkg.dev = MKDEV(major, minor);
- }
+static u64 cfqg_prfill_weight_device(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
+{
+ struct cfq_group *cfqg = pd_to_cfqg(pd);
- return cfqg;
+ if (!cfqg->dev_weight)
+ return 0;
+ return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
}
-/*
- * Search for the cfq group current task belongs to. request_queue lock must
- * be held.
- */
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
+static int cfqg_print_weight_device(struct seq_file *sf, void *v)
{
- struct blkio_cgroup *blkcg;
- struct cfq_group *cfqg = NULL, *__cfqg = NULL;
- struct request_queue *q = cfqd->queue;
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ cfqg_prfill_weight_device, &blkcg_policy_cfq,
+ 0, false);
+ return 0;
+}
- rcu_read_lock();
- blkcg = task_blkio_cgroup(current);
- cfqg = cfq_find_cfqg(cfqd, blkcg);
- if (cfqg) {
- rcu_read_unlock();
- return cfqg;
- }
+static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
+{
+ struct cfq_group *cfqg = pd_to_cfqg(pd);
- /*
- * Need to allocate a group. Allocation of group also needs allocation
- * of per cpu stats which in-turn takes a mutex() and can block. Hence
- * we need to drop rcu lock and queue_lock before we call alloc.
- *
- * Not taking any queue reference here and assuming that queue is
- * around by the time we return. CFQ queue allocation code does
- * the same. It might be racy though.
- */
+ if (!cfqg->dev_leaf_weight)
+ return 0;
+ return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
+}
- rcu_read_unlock();
- spin_unlock_irq(q->queue_lock);
+static int cfqg_print_leaf_weight_device(struct seq_file *sf, void *v)
+{
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq,
+ 0, false);
+ return 0;
+}
- cfqg = cfq_alloc_cfqg(cfqd);
+static int cfq_print_weight(struct seq_file *sf, void *v)
+{
+ seq_printf(sf, "%u\n", css_to_blkcg(seq_css(sf))->cfq_weight);
+ return 0;
+}
- spin_lock_irq(q->queue_lock);
+static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
+{
+ seq_printf(sf, "%u\n", css_to_blkcg(seq_css(sf))->cfq_leaf_weight);
+ return 0;
+}
- rcu_read_lock();
- blkcg = task_blkio_cgroup(current);
+static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off,
+ bool is_leaf_weight)
+{
+ struct blkcg *blkcg = css_to_blkcg(of_css(of));
+ struct blkg_conf_ctx ctx;
+ struct cfq_group *cfqg;
+ int ret;
- /*
- * If some other thread already allocated the group while we were
- * not holding queue lock, free up the group
- */
- __cfqg = cfq_find_cfqg(cfqd, blkcg);
+ ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
+ if (ret)
+ return ret;
- if (__cfqg) {
- kfree(cfqg);
- rcu_read_unlock();
- return __cfqg;
+ ret = -EINVAL;
+ cfqg = blkg_to_cfqg(ctx.blkg);
+ if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
+ if (!is_leaf_weight) {
+ cfqg->dev_weight = ctx.v;
+ cfqg->new_weight = ctx.v ?: blkcg->cfq_weight;
+ } else {
+ cfqg->dev_leaf_weight = ctx.v;
+ cfqg->new_leaf_weight = ctx.v ?: blkcg->cfq_leaf_weight;
+ }
+ ret = 0;
}
- if (!cfqg)
- cfqg = &cfqd->root_group;
+ blkg_conf_finish(&ctx);
+ return ret ?: nbytes;
+}
- cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
- rcu_read_unlock();
- return cfqg;
+static ssize_t cfqg_set_weight_device(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ return __cfqg_set_weight_device(of, buf, nbytes, off, false);
}
-static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
+static ssize_t cfqg_set_leaf_weight_device(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
{
- cfqg->ref++;
- return cfqg;
+ return __cfqg_set_weight_device(of, buf, nbytes, off, true);
}
-static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
+static int __cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 val, bool is_leaf_weight)
{
- /* Currently, all async queues are mapped to root group */
- if (!cfq_cfqq_sync(cfqq))
- cfqg = &cfqq->cfqd->root_group;
+ struct blkcg *blkcg = css_to_blkcg(css);
+ struct blkcg_gq *blkg;
- cfqq->cfqg = cfqg;
- /* cfqq reference on cfqg */
- cfqq->cfqg->ref++;
+ if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
+ return -EINVAL;
+
+ spin_lock_irq(&blkcg->lock);
+
+ if (!is_leaf_weight)
+ blkcg->cfq_weight = val;
+ else
+ blkcg->cfq_leaf_weight = val;
+
+ hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
+ struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+
+ if (!cfqg)
+ continue;
+
+ if (!is_leaf_weight) {
+ if (!cfqg->dev_weight)
+ cfqg->new_weight = blkcg->cfq_weight;
+ } else {
+ if (!cfqg->dev_leaf_weight)
+ cfqg->new_leaf_weight = blkcg->cfq_leaf_weight;
+ }
+ }
+
+ spin_unlock_irq(&blkcg->lock);
+ return 0;
}
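Note that the loop above only refreshes groups with no per-device override: a non-zero dev_weight (or dev_leaf_weight) always wins over the cgroup-wide value, mirroring the "ctx.v ?: blkcg->cfq_weight" fallback earlier. A tiny illustrative helper capturing that resolution order (not part of the patch; the function name is invented):

    /* effective weight: per-device override if set, else the cgroup-wide one */
    static unsigned int cfq_effective_weight(unsigned int dev_weight,
                                             unsigned int cgrp_weight)
    {
            return dev_weight ?: cgrp_weight;
    }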
-static void cfq_put_cfqg(struct cfq_group *cfqg)
+static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 val)
{
- struct cfq_rb_root *st;
- int i, j;
+ return __cfq_set_weight(css, cft, val, false);
+}
- BUG_ON(cfqg->ref <= 0);
- cfqg->ref--;
- if (cfqg->ref)
- return;
- for_each_cfqg_st(cfqg, i, j, st)
- BUG_ON(!RB_EMPTY_ROOT(&st->rb));
- free_percpu(cfqg->blkg.stats_cpu);
- kfree(cfqg);
+static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 val)
+{
+ return __cfq_set_weight(css, cft, val, true);
}
-static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
+static int cfqg_print_stat(struct seq_file *sf, void *v)
{
- /* Something wrong if we are trying to remove same group twice */
- BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
+ &blkcg_policy_cfq, seq_cft(sf)->private, false);
+ return 0;
+}
- hlist_del_init(&cfqg->cfqd_node);
+static int cfqg_print_rwstat(struct seq_file *sf, void *v)
+{
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
+ &blkcg_policy_cfq, seq_cft(sf)->private, true);
+ return 0;
+}
- BUG_ON(cfqd->nr_blkcg_linked_grps <= 0);
- cfqd->nr_blkcg_linked_grps--;
+static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
+{
+ u64 sum = cfqg_stat_pd_recursive_sum(pd, off);
- /*
- * Put the reference taken at the time of creation so that when all
- * queues are gone, group can be destroyed.
- */
- cfq_put_cfqg(cfqg);
+ return __blkg_prfill_u64(sf, pd, sum);
}
-static void cfq_release_cfq_groups(struct cfq_data *cfqd)
+static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
{
- struct hlist_node *pos, *n;
- struct cfq_group *cfqg;
+ struct blkg_rwstat sum = cfqg_rwstat_pd_recursive_sum(pd, off);
- hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
- /*
- * If cgroup removal path got to blk_group first and removed
- * it from cgroup list, then it will take care of destroying
- * cfqg also.
- */
- if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
- cfq_destroy_cfqg(cfqd, cfqg);
- }
+ return __blkg_prfill_rwstat(sf, pd, &sum);
}
-/*
- * Blk cgroup controller notification saying that blkio_group object is being
- * delinked as associated cgroup object is going away. That also means that
- * no new IO will come in this group. So get rid of this group as soon as
- * any pending IO in the group is finished.
- *
- * This function is called under rcu_read_lock(). key is the rcu protected
- * pointer. That means "key" is a valid cfq_data pointer as long as we are rcu
- * read lock.
- *
- * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
- * it should not be NULL as even if elevator was exiting, cgroup deltion
- * path got to it first.
- */
-static void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
+static int cfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
- unsigned long flags;
- struct cfq_data *cfqd = key;
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ cfqg_prfill_stat_recursive, &blkcg_policy_cfq,
+ seq_cft(sf)->private, false);
+ return 0;
+}
- spin_lock_irqsave(cfqd->queue->queue_lock, flags);
- cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
- spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
+{
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ cfqg_prfill_rwstat_recursive, &blkcg_policy_cfq,
+ seq_cft(sf)->private, true);
+ return 0;
}
-#else /* GROUP_IOSCHED */
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
{
- return &cfqd->root_group;
+ struct cfq_group *cfqg = pd_to_cfqg(pd);
+ u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
+ u64 v = 0;
+
+ if (samples) {
+ v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
+ v = div64_u64(v, samples);
+ }
+ __blkg_prfill_u64(sf, pd, v);
+ return 0;
}
-static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
+/* print avg_queue_size */
+static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
- return cfqg;
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ cfqg_prfill_avg_queue_size, &blkcg_policy_cfq,
+ 0, false);
+ return 0;
+}
+#endif /* CONFIG_DEBUG_BLK_CGROUP */
+
+static struct cftype cfq_blkcg_files[] = {
+ /* on root, weight is mapped to leaf_weight */
+ {
+ .name = "weight_device",
+ .flags = CFTYPE_ONLY_ON_ROOT,
+ .seq_show = cfqg_print_leaf_weight_device,
+ .write = cfqg_set_leaf_weight_device,
+ },
+ {
+ .name = "weight",
+ .flags = CFTYPE_ONLY_ON_ROOT,
+ .seq_show = cfq_print_leaf_weight,
+ .write_u64 = cfq_set_leaf_weight,
+ },
+
+ /* no such mapping necessary for !roots */
+ {
+ .name = "weight_device",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = cfqg_print_weight_device,
+ .write = cfqg_set_weight_device,
+ },
+ {
+ .name = "weight",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = cfq_print_weight,
+ .write_u64 = cfq_set_weight,
+ },
+
+ {
+ .name = "leaf_weight_device",
+ .seq_show = cfqg_print_leaf_weight_device,
+ .write = cfqg_set_leaf_weight_device,
+ },
+ {
+ .name = "leaf_weight",
+ .seq_show = cfq_print_leaf_weight,
+ .write_u64 = cfq_set_leaf_weight,
+ },
+
+ /* statistics, covers only the tasks in the cfqg */
+ {
+ .name = "time",
+ .private = offsetof(struct cfq_group, stats.time),
+ .seq_show = cfqg_print_stat,
+ },
+ {
+ .name = "sectors",
+ .private = offsetof(struct cfq_group, stats.sectors),
+ .seq_show = cfqg_print_stat,
+ },
+ {
+ .name = "io_service_bytes",
+ .private = offsetof(struct cfq_group, stats.service_bytes),
+ .seq_show = cfqg_print_rwstat,
+ },
+ {
+ .name = "io_serviced",
+ .private = offsetof(struct cfq_group, stats.serviced),
+ .seq_show = cfqg_print_rwstat,
+ },
+ {
+ .name = "io_service_time",
+ .private = offsetof(struct cfq_group, stats.service_time),
+ .seq_show = cfqg_print_rwstat,
+ },
+ {
+ .name = "io_wait_time",
+ .private = offsetof(struct cfq_group, stats.wait_time),
+ .seq_show = cfqg_print_rwstat,
+ },
+ {
+ .name = "io_merged",
+ .private = offsetof(struct cfq_group, stats.merged),
+ .seq_show = cfqg_print_rwstat,
+ },
+ {
+ .name = "io_queued",
+ .private = offsetof(struct cfq_group, stats.queued),
+ .seq_show = cfqg_print_rwstat,
+ },
+
+	/* the same statistics which cover the cfqg and its descendants */
+ {
+ .name = "time_recursive",
+ .private = offsetof(struct cfq_group, stats.time),
+ .seq_show = cfqg_print_stat_recursive,
+ },
+ {
+ .name = "sectors_recursive",
+ .private = offsetof(struct cfq_group, stats.sectors),
+ .seq_show = cfqg_print_stat_recursive,
+ },
+ {
+ .name = "io_service_bytes_recursive",
+ .private = offsetof(struct cfq_group, stats.service_bytes),
+ .seq_show = cfqg_print_rwstat_recursive,
+ },
+ {
+ .name = "io_serviced_recursive",
+ .private = offsetof(struct cfq_group, stats.serviced),
+ .seq_show = cfqg_print_rwstat_recursive,
+ },
+ {
+ .name = "io_service_time_recursive",
+ .private = offsetof(struct cfq_group, stats.service_time),
+ .seq_show = cfqg_print_rwstat_recursive,
+ },
+ {
+ .name = "io_wait_time_recursive",
+ .private = offsetof(struct cfq_group, stats.wait_time),
+ .seq_show = cfqg_print_rwstat_recursive,
+ },
+ {
+ .name = "io_merged_recursive",
+ .private = offsetof(struct cfq_group, stats.merged),
+ .seq_show = cfqg_print_rwstat_recursive,
+ },
+ {
+ .name = "io_queued_recursive",
+ .private = offsetof(struct cfq_group, stats.queued),
+ .seq_show = cfqg_print_rwstat_recursive,
+ },
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ {
+ .name = "avg_queue_size",
+ .seq_show = cfqg_print_avg_queue_size,
+ },
+ {
+ .name = "group_wait_time",
+ .private = offsetof(struct cfq_group, stats.group_wait_time),
+ .seq_show = cfqg_print_stat,
+ },
+ {
+ .name = "idle_time",
+ .private = offsetof(struct cfq_group, stats.idle_time),
+ .seq_show = cfqg_print_stat,
+ },
+ {
+ .name = "empty_time",
+ .private = offsetof(struct cfq_group, stats.empty_time),
+ .seq_show = cfqg_print_stat,
+ },
+ {
+ .name = "dequeue",
+ .private = offsetof(struct cfq_group, stats.dequeue),
+ .seq_show = cfqg_print_stat,
+ },
+ {
+ .name = "unaccounted_time",
+ .private = offsetof(struct cfq_group, stats.unaccounted_time),
+ .seq_show = cfqg_print_stat,
+ },
+#endif /* CONFIG_DEBUG_BLK_CGROUP */
+ { } /* terminate */
+};
+#else /* GROUP_IOSCHED */
+static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
+ struct blkcg *blkcg)
+{
+ return cfqd->root_group;
}
static inline void
@@ -1281,9 +1999,6 @@ cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
cfqq->cfqg = cfqg;
}
-static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
-static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
-
#endif /* GROUP_IOSCHED */
/*
@@ -1297,15 +2012,14 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct rb_node **p, *parent;
struct cfq_queue *__cfqq;
unsigned long rb_key;
- struct cfq_rb_root *service_tree;
+ struct cfq_rb_root *st;
int left;
int new_cfqq = 1;
- service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
- cfqq_type(cfqq));
+ st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
if (cfq_class_idle(cfqq)) {
rb_key = CFQ_IDLE_DELAY;
- parent = rb_last(&service_tree->rb);
+ parent = rb_last(&st->rb);
if (parent && parent != &cfqq->rb_node) {
__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
rb_key += __cfqq->rb_key;
@@ -1323,7 +2037,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqq->slice_resid = 0;
} else {
rb_key = -HZ;
- __cfqq = cfq_rb_first(service_tree);
+ __cfqq = cfq_rb_first(st);
rb_key += __cfqq ? __cfqq->rb_key : jiffies;
}
@@ -1332,8 +2046,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
/*
* same position, nothing more to do
*/
- if (rb_key == cfqq->rb_key &&
- cfqq->service_tree == service_tree)
+ if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
return;
cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
@@ -1342,11 +2055,9 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
left = 1;
parent = NULL;
- cfqq->service_tree = service_tree;
- p = &service_tree->rb.rb_node;
+ cfqq->service_tree = st;
+ p = &st->rb.rb_node;
while (*p) {
- struct rb_node **n;
-
parent = *p;
__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
@@ -1354,22 +2065,20 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* sort by key, that represents service time.
*/
if (time_before(rb_key, __cfqq->rb_key))
- n = &(*p)->rb_left;
+ p = &parent->rb_left;
else {
- n = &(*p)->rb_right;
+ p = &parent->rb_right;
left = 0;
}
-
- p = n;
}
if (left)
- service_tree->left = &cfqq->rb_node;
+ st->left = &cfqq->rb_node;
cfqq->rb_key = rb_key;
rb_link_node(&cfqq->rb_node, parent, p);
- rb_insert_color(&cfqq->rb_node, &service_tree->rb);
- service_tree->count++;
+ rb_insert_color(&cfqq->rb_node, &st->rb);
+ st->count++;
if (add_front || !new_cfqq)
return;
cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
@@ -1550,12 +2259,10 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
elv_rb_del(&cfqq->sort_list, rq);
cfqq->queued[rq_is_sync(rq)]--;
- cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
- rq_data_dir(rq), rq_is_sync(rq));
+ cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
cfq_add_rq_rb(rq);
- cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
- &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
- rq_is_sync(rq));
+ cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
+ rq->cmd_flags);
}
static struct request *
@@ -1570,11 +2277,8 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
return NULL;
cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
- if (cfqq) {
- sector_t sector = bio->bi_sector + bio_sectors(bio);
-
- return elv_rb_find(&cfqq->sort_list, sector);
- }
+ if (cfqq)
+ return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
return NULL;
}
@@ -1611,8 +2315,7 @@ static void cfq_remove_request(struct request *rq)
cfq_del_rq_rb(rq);
cfqq->cfqd->rq_queued--;
- cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
- rq_data_dir(rq), rq_is_sync(rq));
+ cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
if (rq->cmd_flags & REQ_PRIO) {
WARN_ON(!cfqq->prio_pending);
cfqq->prio_pending--;
@@ -1647,8 +2350,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
static void cfq_bio_merged(struct request_queue *q, struct request *req,
struct bio *bio)
{
- cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
- bio_data_dir(bio), cfq_bio_sync(bio));
+ cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
}
static void
@@ -1662,16 +2364,16 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
* reposition in fifo if next is older than rq
*/
if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
- time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
+ time_before(next->fifo_time, rq->fifo_time) &&
+ cfqq == RQ_CFQQ(next)) {
list_move(&rq->queuelist, &next->queuelist);
- rq_set_fifo_time(rq, rq_fifo_time(next));
+ rq->fifo_time = next->fifo_time;
}
if (cfqq->next_rq == next)
cfqq->next_rq = rq;
cfq_remove_request(next);
- cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
- rq_data_dir(next), rq_is_sync(next));
+ cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
cfqq = RQ_CFQQ(next);
/*
@@ -1712,16 +2414,16 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
del_timer(&cfqd->idle_slice_timer);
- cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
+ cfqg_stats_update_idle_time(cfqq->cfqg);
}
static void __cfq_set_active_queue(struct cfq_data *cfqd,
struct cfq_queue *cfqq)
{
if (cfqq) {
- cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
- cfqd->serving_prio, cfqd->serving_type);
- cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
+ cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
+ cfqd->serving_wl_class, cfqd->serving_wl_type);
+ cfqg_stats_update_avg_queue_size(cfqq->cfqg);
cfqq->slice_start = 0;
cfqq->dispatch_start = jiffies;
cfqq->allocated_slice = 0;
@@ -1806,19 +2508,18 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
*/
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
- struct cfq_rb_root *service_tree =
- service_tree_for(cfqd->serving_group, cfqd->serving_prio,
- cfqd->serving_type);
+ struct cfq_rb_root *st = st_for(cfqd->serving_group,
+ cfqd->serving_wl_class, cfqd->serving_wl_type);
if (!cfqd->rq_queued)
return NULL;
/* There is nothing to dispatch */
- if (!service_tree)
+ if (!st)
return NULL;
- if (RB_EMPTY_ROOT(&service_tree->rb))
+ if (RB_EMPTY_ROOT(&st->rb))
return NULL;
- return cfq_rb_first(service_tree);
+ return cfq_rb_first(st);
}
static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
@@ -1974,17 +2675,17 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
- enum wl_prio_t prio = cfqq_prio(cfqq);
- struct cfq_rb_root *service_tree = cfqq->service_tree;
+ enum wl_class_t wl_class = cfqq_class(cfqq);
+ struct cfq_rb_root *st = cfqq->service_tree;
- BUG_ON(!service_tree);
- BUG_ON(!service_tree->count);
+ BUG_ON(!st);
+ BUG_ON(!st->count);
if (!cfqd->cfq_slice_idle)
return false;
/* We never do for idle class queues. */
- if (prio == IDLE_WORKLOAD)
+ if (wl_class == IDLE_WORKLOAD)
return false;
/* We do for queues that were marked with idle window flag. */
@@ -1996,11 +2697,10 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
* Otherwise, we do only if they are the last ones
* in their service tree.
*/
- if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
- !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
+ if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
+ !cfq_io_thinktime_big(cfqd, &st->ttime, false))
return true;
- cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
- service_tree->count);
+ cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
return false;
}
@@ -2042,7 +2742,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
* task has exited, don't wait
*/
cic = cfqd->active_cic;
- if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks))
+ if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
return;
/*
@@ -2069,7 +2769,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
sl = cfqd->cfq_slice_idle;
mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
- cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
+ cfqg_stats_set_start_idle_time(cfqq->cfqg);
cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
group_idle ? 1 : 0);
}
@@ -2092,8 +2792,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
cfqq->nr_sectors += blk_rq_sectors(rq);
- cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
- rq_data_dir(rq), rq_is_sync(rq));
+ cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
}
/*
@@ -2112,7 +2811,7 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
return NULL;
rq = rq_entry_fifo(cfqq->fifo.next);
- if (time_before(jiffies, rq_fifo_time(rq)))
+ if (time_before(jiffies, rq->fifo_time))
rq = NULL;
cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
@@ -2184,8 +2883,8 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
}
}
-static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
- struct cfq_group *cfqg, enum wl_prio_t prio)
+static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
+ struct cfq_group *cfqg, enum wl_class_t wl_class)
{
struct cfq_queue *queue;
int i;
@@ -2195,7 +2894,7 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
for (i = 0; i <= SYNC_WORKLOAD; ++i) {
/* select the one with lowest rb_key */
- queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
+ queue = cfq_rb_first(st_for(cfqg, wl_class, i));
if (queue &&
(!key_valid || time_before(queue->rb_key, lowest_key))) {
lowest_key = queue->rb_key;
@@ -2207,26 +2906,27 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
return cur_best;
}
-static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
+static void
+choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
unsigned slice;
unsigned count;
struct cfq_rb_root *st;
unsigned group_slice;
- enum wl_prio_t original_prio = cfqd->serving_prio;
+ enum wl_class_t original_class = cfqd->serving_wl_class;
/* Choose next priority. RT > BE > IDLE */
if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
- cfqd->serving_prio = RT_WORKLOAD;
+ cfqd->serving_wl_class = RT_WORKLOAD;
else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
- cfqd->serving_prio = BE_WORKLOAD;
+ cfqd->serving_wl_class = BE_WORKLOAD;
else {
- cfqd->serving_prio = IDLE_WORKLOAD;
+ cfqd->serving_wl_class = IDLE_WORKLOAD;
cfqd->workload_expires = jiffies + 1;
return;
}
- if (original_prio != cfqd->serving_prio)
+ if (original_class != cfqd->serving_wl_class)
goto new_workload;
/*
@@ -2234,7 +2934,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
* (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
* expiration time
*/
- st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
+ st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
count = st->count;
/*
@@ -2245,9 +2945,9 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
new_workload:
/* otherwise select new workload type */
- cfqd->serving_type =
- cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
- st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
+ cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
+ cfqd->serving_wl_class);
+ st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
count = st->count;
/*
@@ -2258,10 +2958,11 @@ new_workload:
group_slice = cfq_group_slice(cfqd, cfqg);
slice = group_slice * count /
- max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
- cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
+ max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
+ cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
+ cfqg));
- if (cfqd->serving_type == ASYNC_WORKLOAD) {
+ if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
unsigned int tmp;
/*
@@ -2271,7 +2972,8 @@ new_workload:
* to have higher weight. A more accurate thing would be to
	 * calculate system wide async/sync ratio.
*/
- tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
+ tmp = cfqd->cfq_target_latency *
+ cfqg_busy_async_queues(cfqd, cfqg);
tmp = tmp/cfqd->busy_queues;
slice = min_t(unsigned, slice, tmp);
@@ -2306,14 +3008,14 @@ static void cfq_choose_cfqg(struct cfq_data *cfqd)
cfqd->serving_group = cfqg;
/* Restore the workload type data */
- if (cfqg->saved_workload_slice) {
- cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
- cfqd->serving_type = cfqg->saved_workload;
- cfqd->serving_prio = cfqg->saved_serving_prio;
+ if (cfqg->saved_wl_slice) {
+ cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
+ cfqd->serving_wl_type = cfqg->saved_wl_type;
+ cfqd->serving_wl_class = cfqg->saved_wl_class;
} else
cfqd->workload_expires = jiffies - 1;
- choose_service_tree(cfqd, cfqg);
+ choose_wl_class_and_type(cfqd, cfqg);
}
/*
@@ -2675,7 +3377,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
BUG_ON(cfq_cfqq_on_rr(cfqq));
kmem_cache_free(cfq_pool, cfqq);
- cfq_put_cfqg(cfqg);
+ cfqg_put(cfqg);
}
static void cfq_put_cooperator(struct cfq_queue *cfqq)
@@ -2734,7 +3436,7 @@ static void cfq_exit_icq(struct io_cq *icq)
}
}
-static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
+static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
{
struct task_struct *tsk = current;
int ioprio_class;
@@ -2742,7 +3444,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
if (!cfq_cfqq_prio_changed(cfqq))
return;
- ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
+ ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
switch (ioprio_class) {
default:
printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
@@ -2754,11 +3456,11 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
cfqq->ioprio_class = task_nice_ioclass(tsk);
break;
case IOPRIO_CLASS_RT:
- cfqq->ioprio = task_ioprio(ioc);
+ cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
cfqq->ioprio_class = IOPRIO_CLASS_RT;
break;
case IOPRIO_CLASS_BE:
- cfqq->ioprio = task_ioprio(ioc);
+ cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
cfqq->ioprio_class = IOPRIO_CLASS_BE;
break;
case IOPRIO_CLASS_IDLE:
@@ -2776,19 +3478,24 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
cfq_clear_cfqq_prio_changed(cfqq);
}
-static void changed_ioprio(struct cfq_io_cq *cic)
+static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
{
+ int ioprio = cic->icq.ioc->ioprio;
struct cfq_data *cfqd = cic_to_cfqd(cic);
struct cfq_queue *cfqq;
- if (unlikely(!cfqd))
+ /*
+ * Check whether ioprio has changed. The condition may trigger
+ * spuriously on a newly created cic but there's no harm.
+ */
+ if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
return;
cfqq = cic->cfqq[BLK_RW_ASYNC];
if (cfqq) {
struct cfq_queue *new_cfqq;
- new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc,
- GFP_ATOMIC);
+ new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
+ GFP_ATOMIC);
if (new_cfqq) {
cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
cfq_put_queue(cfqq);
@@ -2798,6 +3505,8 @@ static void changed_ioprio(struct cfq_io_cq *cic)
cfqq = cic->cfqq[BLK_RW_SYNC];
if (cfqq)
cfq_mark_cfqq_prio_changed(cfqq);
+
+ cic->ioprio = ioprio;
}
static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
@@ -2821,17 +3530,24 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static void changed_cgroup(struct cfq_io_cq *cic)
+static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
{
- struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
struct cfq_data *cfqd = cic_to_cfqd(cic);
- struct request_queue *q;
+ struct cfq_queue *sync_cfqq;
+ uint64_t id;
- if (unlikely(!cfqd))
- return;
+ rcu_read_lock();
+ id = bio_blkcg(bio)->id;
+ rcu_read_unlock();
- q = cfqd->queue;
+ /*
+ * Check whether blkcg has changed. The condition may trigger
+ * spuriously on a newly created cic but there's no harm.
+ */
+ if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
+ return;
+ sync_cfqq = cic_to_cfqq(cic, 1);
if (sync_cfqq) {
/*
* Drop reference to sync queue. A new sync queue will be
@@ -2841,21 +3557,26 @@ static void changed_cgroup(struct cfq_io_cq *cic)
cic_set_cfqq(cic, NULL, 1);
cfq_put_queue(sync_cfqq);
}
+
+ cic->blkcg_id = id;
}
+#else
+static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
#endif /* CONFIG_CFQ_GROUP_IOSCHED */
static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
- struct io_context *ioc, gfp_t gfp_mask)
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
+ struct bio *bio, gfp_t gfp_mask)
{
+ struct blkcg *blkcg;
struct cfq_queue *cfqq, *new_cfqq = NULL;
- struct cfq_io_cq *cic;
struct cfq_group *cfqg;
retry:
- cfqg = cfq_get_cfqg(cfqd);
- cic = cfq_cic_lookup(cfqd, ioc);
- /* cic always exists here */
+ rcu_read_lock();
+
+ blkcg = bio_blkcg(bio);
+ cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
cfqq = cic_to_cfqq(cic, is_sync);
/*
@@ -2868,6 +3589,7 @@ retry:
cfqq = new_cfqq;
new_cfqq = NULL;
} else if (gfp_mask & __GFP_WAIT) {
+ rcu_read_unlock();
spin_unlock_irq(cfqd->queue->queue_lock);
new_cfqq = kmem_cache_alloc_node(cfq_pool,
gfp_mask | __GFP_ZERO,
@@ -2875,6 +3597,8 @@ retry:
spin_lock_irq(cfqd->queue->queue_lock);
if (new_cfqq)
goto retry;
+ else
+ return &cfqd->oom_cfqq;
} else {
cfqq = kmem_cache_alloc_node(cfq_pool,
gfp_mask | __GFP_ZERO,
@@ -2883,7 +3607,7 @@ retry:
if (cfqq) {
cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
- cfq_init_prio_data(cfqq, ioc);
+ cfq_init_prio_data(cfqq, cic);
cfq_link_cfqq_cfqg(cfqq, cfqg);
cfq_log_cfqq(cfqd, cfqq, "alloced");
} else
@@ -2893,6 +3617,7 @@ retry:
if (new_cfqq)
kmem_cache_free(cfq_pool, new_cfqq);
+ rcu_read_unlock();
return cfqq;
}
@@ -2902,6 +3627,9 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
switch (ioprio_class) {
case IOPRIO_CLASS_RT:
return &cfqd->async_cfqq[0][ioprio];
+ case IOPRIO_CLASS_NONE:
+ ioprio = IOPRIO_NORM;
+ /* fall through */
case IOPRIO_CLASS_BE:
return &cfqd->async_cfqq[1][ioprio];
case IOPRIO_CLASS_IDLE:
@@ -2912,11 +3640,11 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
}
static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
- gfp_t gfp_mask)
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
+ struct bio *bio, gfp_t gfp_mask)
{
- const int ioprio = task_ioprio(ioc);
- const int ioprio_class = task_ioprio_class(ioc);
+ const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
+ const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
struct cfq_queue **async_cfqq = NULL;
struct cfq_queue *cfqq = NULL;
@@ -2926,7 +3654,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
}
if (!cfqq)
- cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
+ cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
/*
* pin the queue now that it's allocated, scheduler exit will prune it
@@ -3008,7 +3736,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
enable_idle = 0;
- else if (!atomic_read(&cic->icq.ioc->nr_tasks) ||
+ else if (!atomic_read(&cic->icq.ioc->active_ref) ||
!cfqd->cfq_slice_idle ||
(!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
enable_idle = 0;
@@ -3068,7 +3796,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
return true;
/* Allow preemption only if we are idling on sync-noidle tree */
- if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
+ if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
new_cfqq->service_tree->count == 2 &&
RB_EMPTY_ROOT(&cfqq->sort_list))
@@ -3120,7 +3848,7 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
* doesn't happen
*/
if (old_type != cfqq_type(cfqq))
- cfqq->cfqg->saved_workload_slice = 0;
+ cfqq->cfqg->saved_wl_slice = 0;
/*
	 * Put the new queue at the front of the current list,
@@ -3172,8 +3900,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfq_clear_cfqq_wait_request(cfqq);
__blk_run_queue(cfqd->queue);
} else {
- cfq_blkiocg_update_idle_time_stats(
- &cfqq->cfqg->blkg);
+ cfqg_stats_update_idle_time(cfqq->cfqg);
cfq_mark_cfqq_must_dispatch(cfqq);
}
}
@@ -3195,14 +3922,13 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
struct cfq_queue *cfqq = RQ_CFQQ(rq);
cfq_log_cfqq(cfqd, cfqq, "insert_request");
- cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc);
+ cfq_init_prio_data(cfqq, RQ_CIC(rq));
- rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
+ rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_add_rq_rb(rq);
- cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
- &cfqd->serving_group->blkg, rq_data_dir(rq),
- rq_is_sync(rq));
+ cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
+ rq->cmd_flags);
cfq_rq_enqueued(cfqd, cfqq, rq);
}
@@ -3298,23 +4024,23 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
cfqd->rq_in_driver--;
cfqq->dispatched--;
(RQ_CFQG(rq))->dispatched--;
- cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
- rq_start_time_ns(rq), rq_io_start_time_ns(rq),
- rq_data_dir(rq), rq_is_sync(rq));
+ cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
+ rq_io_start_time_ns(rq), rq->cmd_flags);
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
if (sync) {
- struct cfq_rb_root *service_tree;
+ struct cfq_rb_root *st;
RQ_CIC(rq)->ttime.last_end_request = now;
if (cfq_cfqq_on_rr(cfqq))
- service_tree = cfqq->service_tree;
+ st = cfqq->service_tree;
else
- service_tree = service_tree_for(cfqq->cfqg,
- cfqq_prio(cfqq), cfqq_type(cfqq));
- service_tree->ttime.last_end_request = now;
+ st = st_for(cfqq->cfqg, cfqq_class(cfqq),
+ cfqq_type(cfqq));
+
+ st->ttime.last_end_request = now;
if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
cfqd->last_delayed_sync = now;
}
@@ -3397,7 +4123,7 @@ static int cfq_may_queue(struct request_queue *q, int rw)
cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
if (cfqq) {
- cfq_init_prio_data(cfqq, cic->icq.ioc);
+ cfq_init_prio_data(cfqq, cic);
return __cfq_may_queue(cfqq);
}
@@ -3419,7 +4145,7 @@ static void cfq_put_request(struct request *rq)
cfqq->allocated[rw]--;
/* Put down rq reference on cfqg */
- cfq_put_cfqg(RQ_CFQG(rq));
+ cfqg_put(RQ_CFQG(rq));
rq->elv.priv[0] = NULL;
rq->elv.priv[1] = NULL;
@@ -3463,32 +4189,25 @@ split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
* Allocate cfq data structures associated with this request.
*/
static int
-cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
+cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
+ gfp_t gfp_mask)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
const int rw = rq_data_dir(rq);
const bool is_sync = rq_is_sync(rq);
struct cfq_queue *cfqq;
- unsigned int changed;
might_sleep_if(gfp_mask & __GFP_WAIT);
spin_lock_irq(q->queue_lock);
- /* handle changed notifications */
- changed = icq_get_changed(&cic->icq);
- if (unlikely(changed & ICQ_IOPRIO_CHANGED))
- changed_ioprio(cic);
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- if (unlikely(changed & ICQ_CGROUP_CHANGED))
- changed_cgroup(cic);
-#endif
-
+ check_ioprio_changed(cic, bio);
+ check_blkcg_changed(cic, bio);
new_queue:
cfqq = cic_to_cfqq(cic, is_sync);
if (!cfqq || cfqq == &cfqd->oom_cfqq) {
- cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, gfp_mask);
+ cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
cic_set_cfqq(cic, cfqq, is_sync);
} else {
/*
@@ -3514,8 +4233,9 @@ new_queue:
cfqq->allocated[rw]++;
cfqq->ref++;
+ cfqg_get(cfqq->cfqg);
rq->elv.priv[0] = cfqq;
- rq->elv.priv[1] = cfq_ref_get_cfqg(cfqq->cfqg);
+ rq->elv.priv[1] = cfqq->cfqg;
spin_unlock_irq(q->queue_lock);
return 0;
}
@@ -3612,7 +4332,6 @@ static void cfq_exit_queue(struct elevator_queue *e)
{
struct cfq_data *cfqd = e->elevator_data;
struct request_queue *q = cfqd->queue;
- bool wait = false;
cfq_shutdown_timer_wq(cfqd);
@@ -3622,89 +4341,64 @@ static void cfq_exit_queue(struct elevator_queue *e)
__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
cfq_put_async_queues(cfqd);
- cfq_release_cfq_groups(cfqd);
-
- /*
- * If there are groups which we could not unlink from blkcg list,
- * wait for a rcu period for them to be freed.
- */
- if (cfqd->nr_blkcg_linked_grps)
- wait = true;
spin_unlock_irq(q->queue_lock);
cfq_shutdown_timer_wq(cfqd);
- /*
- * Wait for cfqg->blkg->key accessors to exit their grace periods.
- * Do this wait only if there are other unlinked groups out
- * there. This can happen if cgroup deletion path claimed the
- * responsibility of cleaning up a group before queue cleanup code
- * get to the group.
- *
- * Do not call synchronize_rcu() unconditionally as there are drivers
- * which create/delete request queue hundreds of times during scan/boot
- * and synchronize_rcu() can take significant time and slow down boot.
- */
- if (wait)
- synchronize_rcu();
-
#ifdef CONFIG_CFQ_GROUP_IOSCHED
- /* Free up per cpu stats for root group */
- free_percpu(cfqd->root_group.blkg.stats_cpu);
+ blkcg_deactivate_policy(q, &blkcg_policy_cfq);
+#else
+ kfree(cfqd->root_group);
#endif
kfree(cfqd);
}
-static void *cfq_init_queue(struct request_queue *q)
+static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
{
struct cfq_data *cfqd;
- int i, j;
- struct cfq_group *cfqg;
- struct cfq_rb_root *st;
+ struct blkcg_gq *blkg __maybe_unused;
+ int i, ret;
+ struct elevator_queue *eq;
- cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
- if (!cfqd)
- return NULL;
+ eq = elevator_alloc(q, e);
+ if (!eq)
+ return -ENOMEM;
- /* Init root service tree */
- cfqd->grp_service_tree = CFQ_RB_ROOT;
+ cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
+ if (!cfqd) {
+ kobject_put(&eq->kobj);
+ return -ENOMEM;
+ }
+ eq->elevator_data = cfqd;
- /* Init root group */
- cfqg = &cfqd->root_group;
- for_each_cfqg_st(cfqg, i, j, st)
- *st = CFQ_RB_ROOT;
- RB_CLEAR_NODE(&cfqg->rb_node);
+ cfqd->queue = q;
+ spin_lock_irq(q->queue_lock);
+ q->elevator = eq;
+ spin_unlock_irq(q->queue_lock);
- /* Give preference to root group over other groups */
- cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
+ /* Init root service tree */
+ cfqd->grp_service_tree = CFQ_RB_ROOT;
+ /* Init root group and prefer root group over other groups by default */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
- /*
- * Set root group reference to 2. One reference will be dropped when
- * all groups on cfqd->cfqg_list are being deleted during queue exit.
- * Other reference will remain there as we don't want to delete this
- * group as it is statically allocated and gets destroyed when
- * throtl_data goes away.
- */
- cfqg->ref = 2;
-
- if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
- kfree(cfqg);
- kfree(cfqd);
- return NULL;
- }
-
- rcu_read_lock();
+ ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
+ if (ret)
+ goto out_free;
- cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
- (void *)cfqd, 0);
- rcu_read_unlock();
- cfqd->nr_blkcg_linked_grps++;
+ cfqd->root_group = blkg_to_cfqg(q->root_blkg);
+#else
+ ret = -ENOMEM;
+ cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
+ GFP_KERNEL, cfqd->queue->node);
+ if (!cfqd->root_group)
+ goto out_free;
- /* Add group on cfqd->cfqg_list */
- hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
+ cfq_init_cfqg_base(cfqd->root_group);
#endif
+ cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
+ cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT;
+
/*
* Not strictly needed (since RB_ROOT just clears the node and we
* zeroed cfqd on alloc), but better be safe in case someone decides
@@ -3716,13 +4410,17 @@ static void *cfq_init_queue(struct request_queue *q)
/*
* Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
* Grab a permanent reference to it, so that the normal code flow
- * will not attempt to free it.
+ * will not attempt to free it. oom_cfqq is linked to root_group
+ * but shouldn't hold a reference as it'll never be unlinked. Lose
+ * the reference from linking right away.
*/
cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
cfqd->oom_cfqq.ref++;
- cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
- cfqd->queue = q;
+ spin_lock_irq(q->queue_lock);
+ cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
+ cfqg_put(cfqd->root_group);
+ spin_unlock_irq(q->queue_lock);
init_timer(&cfqd->idle_slice_timer);
cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
@@ -3737,6 +4435,7 @@ static void *cfq_init_queue(struct request_queue *q)
cfqd->cfq_back_penalty = cfq_back_penalty;
cfqd->cfq_slice[0] = cfq_slice_async;
cfqd->cfq_slice[1] = cfq_slice_sync;
+ cfqd->cfq_target_latency = cfq_target_latency;
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
cfqd->cfq_group_idle = cfq_group_idle;
@@ -3747,7 +4446,12 @@ static void *cfq_init_queue(struct request_queue *q)
* second, in order to have larger depth for async operations.
*/
cfqd->last_delayed_sync = jiffies - HZ;
- return cfqd;
+ return 0;
+
+out_free:
+ kfree(cfqd);
+ kobject_put(&eq->kobj);
+ return ret;
}
/*
@@ -3756,7 +4460,7 @@ static void *cfq_init_queue(struct request_queue *q)
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
- return sprintf(page, "%d\n", var);
+ return sprintf(page, "%u\n", var);
}
static ssize_t
@@ -3788,6 +4492,7 @@ SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
+SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
@@ -3821,6 +4526,7 @@ STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
+STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
#undef STORE_FUNCTION
#define CFQ_ATTR(name) \
@@ -3838,6 +4544,7 @@ static struct elv_fs_entry cfq_attrs[] = {
CFQ_ATTR(slice_idle),
CFQ_ATTR(group_idle),
CFQ_ATTR(low_latency),
+ CFQ_ATTR(target_latency),
__ATTR_NULL
};
@@ -3871,15 +4578,14 @@ static struct elevator_type iosched_cfq = {
};
#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static struct blkio_policy_type blkio_policy_cfq = {
- .ops = {
- .blkio_unlink_group_fn = cfq_unlink_blkio_group,
- .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
- },
- .plid = BLKIO_POLICY_PROP,
+static struct blkcg_policy blkcg_policy_cfq = {
+ .pd_size = sizeof(struct cfq_group),
+ .cftypes = cfq_blkcg_files,
+
+ .pd_init_fn = cfq_pd_init,
+ .pd_offline_fn = cfq_pd_offline,
+ .pd_reset_stats_fn = cfq_pd_reset_stats,
};
-#else
-static struct blkio_policy_type blkio_policy_cfq;
#endif
static int __init cfq_init(void)
@@ -3897,27 +4603,39 @@ static int __init cfq_init(void)
#ifdef CONFIG_CFQ_GROUP_IOSCHED
if (!cfq_group_idle)
cfq_group_idle = 1;
+
+ ret = blkcg_policy_register(&blkcg_policy_cfq);
+ if (ret)
+ return ret;
#else
- cfq_group_idle = 0;
+ cfq_group_idle = 0;
#endif
+
+ ret = -ENOMEM;
cfq_pool = KMEM_CACHE(cfq_queue, 0);
if (!cfq_pool)
- return -ENOMEM;
+ goto err_pol_unreg;
ret = elv_register(&iosched_cfq);
- if (ret) {
- kmem_cache_destroy(cfq_pool);
- return ret;
- }
-
- blkio_policy_register(&blkio_policy_cfq);
+ if (ret)
+ goto err_free_pool;
return 0;
+
+err_free_pool:
+ kmem_cache_destroy(cfq_pool);
+err_pol_unreg:
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ blkcg_policy_unregister(&blkcg_policy_cfq);
+#endif
+ return ret;
}
static void __exit cfq_exit(void)
{
- blkio_policy_unregister(&blkio_policy_cfq);
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ blkcg_policy_unregister(&blkcg_policy_cfq);
+#endif
elv_unregister(&iosched_cfq);
kmem_cache_destroy(cfq_pool);
}
diff --git a/block/cfq.h b/block/cfq.h
deleted file mode 100644
index 2a155927e37..00000000000
--- a/block/cfq.h
+++ /dev/null
@@ -1,115 +0,0 @@
-#ifndef _CFQ_H
-#define _CFQ_H
-#include "blk-cgroup.h"
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_group *curr_blkg, bool direction, bool sync)
-{
- blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue)
-{
- blkiocg_update_dequeue_stats(blkg, dequeue);
-}
-
-static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
- unsigned long time, unsigned long unaccounted_time)
-{
- blkiocg_update_timeslice_used(blkg, time, unaccounted_time);
-}
-
-static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
-{
- blkiocg_set_start_empty_time(blkg);
-}
-
-static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- bool direction, bool sync)
-{
- blkiocg_update_io_remove_stats(blkg, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
- bool direction, bool sync)
-{
- blkiocg_update_io_merged_stats(blkg, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
-{
- blkiocg_update_idle_time_stats(blkg);
-}
-
-static inline void
-cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
-{
- blkiocg_update_avg_queue_size_stats(blkg);
-}
-
-static inline void
-cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
-{
- blkiocg_update_set_idle_time_stats(blkg);
-}
-
-static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
- uint64_t bytes, bool direction, bool sync)
-{
- blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
-{
- blkiocg_update_completion_stats(blkg, start_time, io_start_time,
- direction, sync);
-}
-
-static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev) {
- blkiocg_add_blkio_group(blkcg, blkg, key, dev, BLKIO_POLICY_PROP);
-}
-
-static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
- return blkiocg_del_blkio_group(blkg);
-}
-
-#else /* CFQ_GROUP_IOSCHED */
-static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_group *curr_blkg, bool direction, bool sync) {}
-
-static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue) {}
-
-static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
- unsigned long time, unsigned long unaccounted_time) {}
-static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
-static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- bool direction, bool sync) {}
-static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
- bool direction, bool sync) {}
-static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
-{
-}
-static inline void
-cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {}
-
-static inline void
-cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {}
-
-static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
- uint64_t bytes, bool direction, bool sync) {}
-static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {}
-
-static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev) {}
-static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
- return 0;
-}
-
-#endif /* CFQ_GROUP_IOSCHED */
-#endif
diff --git a/block/cmdline-parser.c b/block/cmdline-parser.c
new file mode 100644
index 00000000000..9dbc67e42a9
--- /dev/null
+++ b/block/cmdline-parser.c
@@ -0,0 +1,254 @@
+/*
+ * Parse command line, get partition information
+ *
+ * Written by Cai Zhiyong <caizhiyong@huawei.com>
+ *
+ */
+#include <linux/export.h>
+#include <linux/cmdline-parser.h>
+
+static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
+{
+ int ret = 0;
+ struct cmdline_subpart *new_subpart;
+
+ *subpart = NULL;
+
+ new_subpart = kzalloc(sizeof(struct cmdline_subpart), GFP_KERNEL);
+ if (!new_subpart)
+ return -ENOMEM;
+
+ if (*partdef == '-') {
+ new_subpart->size = (sector_t)(~0ULL);
+ partdef++;
+ } else {
+ new_subpart->size = (sector_t)memparse(partdef, &partdef);
+ if (new_subpart->size < (sector_t)PAGE_SIZE) {
+ pr_warn("cmdline partition size is invalid.");
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ if (*partdef == '@') {
+ partdef++;
+ new_subpart->from = (sector_t)memparse(partdef, &partdef);
+ } else {
+ new_subpart->from = (sector_t)(~0ULL);
+ }
+
+ if (*partdef == '(') {
+ int length;
+ char *next = strchr(++partdef, ')');
+
+ if (!next) {
+ pr_warn("cmdline partition format is invalid.");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ length = min_t(int, next - partdef,
+ sizeof(new_subpart->name) - 1);
+ strncpy(new_subpart->name, partdef, length);
+ new_subpart->name[length] = '\0';
+
+ partdef = ++next;
+ } else
+ new_subpart->name[0] = '\0';
+
+ new_subpart->flags = 0;
+
+ if (!strncmp(partdef, "ro", 2)) {
+ new_subpart->flags |= PF_RDONLY;
+ partdef += 2;
+ }
+
+ if (!strncmp(partdef, "lk", 2)) {
+ new_subpart->flags |= PF_POWERUP_LOCK;
+ partdef += 2;
+ }
+
+ *subpart = new_subpart;
+ return 0;
+fail:
+ kfree(new_subpart);
+ return ret;
+}
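Following the branches above, a sub-partition definition has the form <size>[@<from>][(<name>)][ro][lk], where the size is parsed by memparse() (so K/M/G suffixes work) and "-" means "use the rest of the device". A few strings the parser would accept (sizes and names invented for illustration):

    /* illustrative inputs for parse_subpart(); values are made up */
    static const char * const example_subparts[] = {
            "64M(boot)ro",          /* 64 MiB, offset chosen automatically, PF_RDONLY */
            "512M@128M(rootfs)",    /* 512 MiB starting at 128 MiB                    */
            "1G(cache)lk",          /* 1 GiB, sets PF_POWERUP_LOCK                    */
            "-(userdata)",          /* remainder of the device                        */
    };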
+
+static void free_subpart(struct cmdline_parts *parts)
+{
+ struct cmdline_subpart *subpart;
+
+ while (parts->subpart) {
+ subpart = parts->subpart;
+ parts->subpart = subpart->next_subpart;
+ kfree(subpart);
+ }
+}
+
+static int parse_parts(struct cmdline_parts **parts, const char *bdevdef)
+{
+ int ret = -EINVAL;
+ char *next;
+ int length;
+ struct cmdline_subpart **next_subpart;
+ struct cmdline_parts *newparts;
+ char buf[BDEVNAME_SIZE + 32 + 4];
+
+ *parts = NULL;
+
+ newparts = kzalloc(sizeof(struct cmdline_parts), GFP_KERNEL);
+ if (!newparts)
+ return -ENOMEM;
+
+ next = strchr(bdevdef, ':');
+ if (!next) {
+ pr_warn("cmdline partition has no block device.");
+ goto fail;
+ }
+
+ length = min_t(int, next - bdevdef, sizeof(newparts->name) - 1);
+ strncpy(newparts->name, bdevdef, length);
+ newparts->name[length] = '\0';
+ newparts->nr_subparts = 0;
+
+ next_subpart = &newparts->subpart;
+
+ while (next && *(++next)) {
+ bdevdef = next;
+ next = strchr(bdevdef, ',');
+
+ length = (!next) ? (sizeof(buf) - 1) :
+ min_t(int, next - bdevdef, sizeof(buf) - 1);
+
+ strncpy(buf, bdevdef, length);
+ buf[length] = '\0';
+
+ ret = parse_subpart(next_subpart, buf);
+ if (ret)
+ goto fail;
+
+ newparts->nr_subparts++;
+ next_subpart = &(*next_subpart)->next_subpart;
+ }
+
+ if (!newparts->subpart) {
+ pr_warn("cmdline partition has no valid partition.");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ *parts = newparts;
+
+ return 0;
+fail:
+ free_subpart(newparts);
+ kfree(newparts);
+ return ret;
+}
+
+void cmdline_parts_free(struct cmdline_parts **parts)
+{
+ struct cmdline_parts *next_parts;
+
+ while (*parts) {
+ next_parts = (*parts)->next_parts;
+ free_subpart(*parts);
+ kfree(*parts);
+ *parts = next_parts;
+ }
+}
+EXPORT_SYMBOL(cmdline_parts_free);
+
+int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline)
+{
+ int ret;
+ char *buf;
+ char *pbuf;
+ char *next;
+ struct cmdline_parts **next_parts;
+
+ *parts = NULL;
+
+ next = pbuf = buf = kstrdup(cmdline, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ next_parts = parts;
+
+ while (next && *pbuf) {
+ next = strchr(pbuf, ';');
+ if (next)
+ *next = '\0';
+
+ ret = parse_parts(next_parts, pbuf);
+ if (ret)
+ goto fail;
+
+ if (next)
+ pbuf = ++next;
+
+ next_parts = &(*next_parts)->next_parts;
+ }
+
+ if (!*parts) {
+ pr_warn("cmdline partition has no valid partition.");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ ret = 0;
+done:
+ kfree(buf);
+ return ret;
+
+fail:
+ cmdline_parts_free(parts);
+ goto done;
+}
+EXPORT_SYMBOL(cmdline_parts_parse);
+
+struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
+ const char *bdev)
+{
+ while (parts && strncmp(bdev, parts->name, sizeof(parts->name)))
+ parts = parts->next_parts;
+ return parts;
+}
+EXPORT_SYMBOL(cmdline_parts_find);
+
+/*
+ * add_part()
+ * 0 success.
+ * 1 cannot add any more partitions.
+ */
+int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+ int slot,
+ int (*add_part)(int, struct cmdline_subpart *, void *),
+ void *param)
+{
+ sector_t from = 0;
+ struct cmdline_subpart *subpart;
+
+ for (subpart = parts->subpart; subpart;
+ subpart = subpart->next_subpart, slot++) {
+ if (subpart->from == (sector_t)(~0ULL))
+ subpart->from = from;
+ else
+ from = subpart->from;
+
+ if (from >= disk_size)
+ break;
+
+ if (subpart->size > (disk_size - from))
+ subpart->size = disk_size - from;
+
+ from += subpart->size;
+
+ if (add_part(slot, subpart, param))
+ break;
+ }
+
+ return slot;
+}
+EXPORT_SYMBOL(cmdline_parts_set);
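
A minimal caller sketch for the exported helpers above (not part of the patch): the device name, layout string, callback, and disk size are illustrative assumptions; only cmdline_parts_parse(), cmdline_parts_find(), cmdline_parts_set() and cmdline_parts_free() come from this file.

#include <linux/kernel.h>
#include <linux/cmdline-parser.h>

/* Hypothetical callback: print each sub-partition handed to us. */
static int show_part(int slot, struct cmdline_subpart *subpart, void *param)
{
	pr_info("slot %d: %llu bytes at %llu (%s)\n", slot,
		(unsigned long long)subpart->size,
		(unsigned long long)subpart->from, subpart->name);
	return 0;	/* 0 lets cmdline_parts_set() keep iterating */
}

static void cmdline_parts_example(void)
{
	struct cmdline_parts *parts = NULL;
	struct cmdline_parts *p;

	/* "mmcblk0" and the sizes are made up for illustration. */
	if (cmdline_parts_parse(&parts, "mmcblk0:1M(boot),512M(root),-(data)"))
		return;

	p = cmdline_parts_find(parts, "mmcblk0");
	if (p)	/* disk size is passed in bytes, as block/partitions/cmdline.c does */
		cmdline_parts_set(p, 4ULL << 30, 1, show_part, NULL);

	cmdline_parts_free(&parts);
}
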
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 7c668c8a6f9..a0926a6094b 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -59,6 +59,7 @@ static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev,
if (!disk->fops->getgeo)
return -ENOTTY;
+ memset(&geo, 0, sizeof(geo));
/*
* We need to set the startsect first, the driver may
* want to override it.
@@ -69,7 +70,7 @@ static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev,
return ret;
ret = copy_to_user(ugeo, &geo, 4);
- ret |= __put_user(geo.start, &ugeo->start);
+ ret |= put_user(geo.start, &ugeo->start);
if (ret)
ret = -EFAULT;
@@ -689,6 +690,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKROSET:
case BLKDISCARD:
case BLKSECDISCARD:
+ case BLKZEROOUT:
/*
* the ones below are implemented in blkdev_locked_ioctl,
* but we call blkdev_ioctl, which gets the lock for us
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 7bf12d793fc..a753df2b3fc 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -106,7 +106,7 @@ deadline_add_request(struct request_queue *q, struct request *rq)
/*
* set expire time and add to fifo list
*/
- rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
+ rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
}
@@ -132,7 +132,7 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
* check for front merge
*/
if (dd->front_merges) {
- sector_t sector = bio->bi_sector + bio_sectors(bio);
+ sector_t sector = bio_end_sector(bio);
__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
if (__rq) {
@@ -174,9 +174,9 @@ deadline_merged_requests(struct request_queue *q, struct request *req,
* and move into next position (next will be deleted) in fifo
*/
if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
- if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
+ if (time_before(next->fifo_time, req->fifo_time)) {
list_move(&req->queuelist, &next->queuelist);
- rq_set_fifo_time(req, rq_fifo_time(next));
+ req->fifo_time = next->fifo_time;
}
}
@@ -230,7 +230,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
/*
* rq is expired!
*/
- if (time_after(jiffies, rq_fifo_time(rq)))
+ if (time_after_eq(jiffies, rq->fifo_time))
return 1;
return 0;
@@ -337,13 +337,21 @@ static void deadline_exit_queue(struct elevator_queue *e)
/*
* initialize elevator private data (deadline_data).
*/
-static void *deadline_init_queue(struct request_queue *q)
+static int deadline_init_queue(struct request_queue *q, struct elevator_type *e)
{
struct deadline_data *dd;
+ struct elevator_queue *eq;
- dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
- if (!dd)
- return NULL;
+ eq = elevator_alloc(q, e);
+ if (!eq)
+ return -ENOMEM;
+
+ dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
+ if (!dd) {
+ kobject_put(&eq->kobj);
+ return -ENOMEM;
+ }
+ eq->elevator_data = dd;
INIT_LIST_HEAD(&dd->fifo_list[READ]);
INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
@@ -354,7 +362,11 @@ static void *deadline_init_queue(struct request_queue *q)
dd->writes_starved = writes_starved;
dd->front_merges = 1;
dd->fifo_batch = fifo_batch;
- return dd;
+
+ spin_lock_irq(q->queue_lock);
+ q->elevator = eq;
+ spin_unlock_irq(q->queue_lock);
+ return 0;
}
/*
diff --git a/block/elevator.c b/block/elevator.c
index f016855a46b..24c28b659bb 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -34,10 +34,12 @@
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
#include <trace/events/block.h>
#include "blk.h"
+#include "blk-cgroup.h"
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
@@ -45,11 +47,6 @@ static LIST_HEAD(elv_list);
/*
* Merge hash stuff.
*/
-static const int elv_hash_shift = 6;
-#define ELV_HASH_BLOCK(sec) ((sec) >> 3)
-#define ELV_HASH_FN(sec) \
- (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
-#define ELV_HASH_ENTRIES (1 << elv_hash_shift)
#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
/*
@@ -99,14 +96,14 @@ static void elevator_put(struct elevator_type *e)
module_put(e->elevator_owner);
}
-static struct elevator_type *elevator_get(const char *name)
+static struct elevator_type *elevator_get(const char *name, bool try_loading)
{
struct elevator_type *e;
spin_lock(&elv_list_lock);
e = elevator_find(name);
- if (!e) {
+ if (!e && try_loading) {
spin_unlock(&elv_list_lock);
request_module("%s-iosched", name);
spin_lock(&elv_list_lock);
@@ -121,15 +118,6 @@ static struct elevator_type *elevator_get(const char *name)
return e;
}
-static int elevator_init_queue(struct request_queue *q,
- struct elevator_queue *eq)
-{
- eq->elevator_data = eq->type->ops.elevator_init_fn(q);
- if (eq->elevator_data)
- return 0;
- return -ENOMEM;
-}
-
static char chosen_elevator[ELV_NAME_MAX];
static int __init elevator_setup(char *str)
@@ -144,29 +132,37 @@ static int __init elevator_setup(char *str)
__setup("elevator=", elevator_setup);
+/* called during boot to load the elevator chosen by the elevator param */
+void __init load_default_elevator_module(void)
+{
+ struct elevator_type *e;
+
+ if (!chosen_elevator[0])
+ return;
+
+ spin_lock(&elv_list_lock);
+ e = elevator_find(chosen_elevator);
+ spin_unlock(&elv_list_lock);
+
+ if (!e)
+ request_module("%s-iosched", chosen_elevator);
+}
+
static struct kobj_type elv_ktype;
-static struct elevator_queue *elevator_alloc(struct request_queue *q,
+struct elevator_queue *elevator_alloc(struct request_queue *q,
struct elevator_type *e)
{
struct elevator_queue *eq;
- int i;
- eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
+ eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
if (unlikely(!eq))
goto err;
eq->type = e;
kobject_init(&eq->kobj, &elv_ktype);
mutex_init(&eq->sysfs_lock);
-
- eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
- GFP_KERNEL, q->node);
- if (!eq->hash)
- goto err;
-
- for (i = 0; i < ELV_HASH_ENTRIES; i++)
- INIT_HLIST_HEAD(&eq->hash[i]);
+ hash_init(eq->hash);
return eq;
err:
@@ -174,6 +170,7 @@ err:
elevator_put(e);
return NULL;
}
+EXPORT_SYMBOL(elevator_alloc);
static void elevator_release(struct kobject *kobj)
{
@@ -181,16 +178,20 @@ static void elevator_release(struct kobject *kobj)
e = container_of(kobj, struct elevator_queue, kobj);
elevator_put(e->type);
- kfree(e->hash);
kfree(e);
}
int elevator_init(struct request_queue *q, char *name)
{
struct elevator_type *e = NULL;
- struct elevator_queue *eq;
int err;
+ /*
+ * q->sysfs_lock must be held to provide mutual exclusion between
+ * elevator_switch() and here.
+ */
+ lockdep_assert_held(&q->sysfs_lock);
+
if (unlikely(q->elevator))
return 0;
@@ -200,39 +201,34 @@ int elevator_init(struct request_queue *q, char *name)
q->boundary_rq = NULL;
if (name) {
- e = elevator_get(name);
+ e = elevator_get(name, true);
if (!e)
return -EINVAL;
}
+ /*
+ * Use the default elevator specified by config boot param or
+ * config option. Don't try to load modules as we could be running
+ * off async and request_module() isn't allowed from async.
+ */
if (!e && *chosen_elevator) {
- e = elevator_get(chosen_elevator);
+ e = elevator_get(chosen_elevator, false);
if (!e)
printk(KERN_ERR "I/O scheduler %s not found\n",
chosen_elevator);
}
if (!e) {
- e = elevator_get(CONFIG_DEFAULT_IOSCHED);
+ e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
if (!e) {
printk(KERN_ERR
"Default I/O scheduler not found. " \
"Using noop.\n");
- e = elevator_get("noop");
+ e = elevator_get("noop", false);
}
}
- eq = elevator_alloc(q, e);
- if (!eq)
- return -ENOMEM;
-
- err = elevator_init_queue(q, eq);
- if (err) {
- kobject_put(&eq->kobj);
- return err;
- }
-
- q->elevator = eq;
+ err = e->ops.elevator_init_fn(q, e);
return 0;
}
EXPORT_SYMBOL(elevator_init);
@@ -250,7 +246,8 @@ EXPORT_SYMBOL(elevator_exit);
static inline void __elv_rqhash_del(struct request *rq)
{
- hlist_del_init(&rq->hash);
+ hash_del(&rq->hash);
+ rq->cmd_flags &= ~REQ_HASHED;
}
static void elv_rqhash_del(struct request_queue *q, struct request *rq)
@@ -264,7 +261,8 @@ static void elv_rqhash_add(struct request_queue *q, struct request *rq)
struct elevator_queue *e = q->elevator;
BUG_ON(ELV_ON_HASH(rq));
- hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
+ hash_add(e->hash, &rq->hash, rq_hash_key(rq));
+ rq->cmd_flags |= REQ_HASHED;
}
static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
@@ -276,11 +274,10 @@ static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
struct elevator_queue *e = q->elevator;
- struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
- struct hlist_node *entry, *next;
+ struct hlist_node *next;
struct request *rq;
- hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
+ hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
BUG_ON(!ELV_ON_HASH(rq));
if (unlikely(!rq_mergeable(rq))) {
@@ -445,7 +442,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
/*
* See if our hash lookup can find a potential backmerge.
*/
- __rq = elv_rqhash_find(q, bio->bi_sector);
+ __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
if (__rq && elv_rq_merge_ok(__rq, bio)) {
*req = __rq;
return ELEVATOR_BACK_MERGE;
@@ -468,6 +465,7 @@ static bool elv_attempt_insert_merge(struct request_queue *q,
struct request *rq)
{
struct request *__rq;
+ bool ret;
if (blk_queue_nomerges(q))
return false;
@@ -481,14 +479,21 @@ static bool elv_attempt_insert_merge(struct request_queue *q,
if (blk_queue_noxmerges(q))
return false;
+ ret = false;
/*
* See if our hash lookup can find a potential backmerge.
*/
- __rq = elv_rqhash_find(q, blk_rq_pos(rq));
- if (__rq && blk_attempt_req_merge(q, __rq, rq))
- return true;
+ while (1) {
+ __rq = elv_rqhash_find(q, blk_rq_pos(rq));
+ if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
+ break;
+
+ /* The merged request could be merged with others, try again */
+ ret = true;
+ rq = __rq;
+ }
- return false;
+ return ret;
}
void elv_merged_request(struct request_queue *q, struct request *rq, int type)
@@ -532,6 +537,27 @@ void elv_bio_merged(struct request_queue *q, struct request *rq,
e->type->ops.elevator_bio_merged_fn(q, rq, bio);
}
+#ifdef CONFIG_PM_RUNTIME
+static void blk_pm_requeue_request(struct request *rq)
+{
+ if (rq->q->dev && !(rq->cmd_flags & REQ_PM))
+ rq->q->nr_pending--;
+}
+
+static void blk_pm_add_request(struct request_queue *q, struct request *rq)
+{
+ if (q->dev && !(rq->cmd_flags & REQ_PM) && q->nr_pending++ == 0 &&
+ (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
+ pm_request_resume(q->dev);
+}
+#else
+static inline void blk_pm_requeue_request(struct request *rq) {}
+static inline void blk_pm_add_request(struct request_queue *q,
+ struct request *rq)
+{
+}
+#endif
+
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
/*
@@ -546,6 +572,8 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
rq->cmd_flags &= ~REQ_STARTED;
+ blk_pm_requeue_request(rq);
+
__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}
@@ -564,35 +592,17 @@ void elv_drain_elevator(struct request_queue *q)
}
}
-void elv_quiesce_start(struct request_queue *q)
-{
- if (!q->elevator)
- return;
-
- spin_lock_irq(q->queue_lock);
- queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
- spin_unlock_irq(q->queue_lock);
-
- blk_drain_queue(q, false);
-}
-
-void elv_quiesce_end(struct request_queue *q)
-{
- spin_lock_irq(q->queue_lock);
- queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
- spin_unlock_irq(q->queue_lock);
-}
-
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
trace_block_rq_insert(q, rq);
+ blk_pm_add_request(q, rq);
+
rq->q = q;
if (rq->cmd_flags & REQ_SOFTBARRIER) {
/* barriers are scheduling boundary, update end_sector */
- if (rq->cmd_type == REQ_TYPE_FS ||
- (rq->cmd_flags & REQ_DISCARD)) {
+ if (rq->cmd_type == REQ_TYPE_FS) {
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
}
@@ -634,8 +644,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
if (elv_attempt_insert_merge(q, rq))
break;
case ELEVATOR_INSERT_SORT:
- BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
- !(rq->cmd_flags & REQ_DISCARD));
+ BUG_ON(rq->cmd_type != REQ_TYPE_FS);
rq->cmd_flags |= REQ_SORTED;
q->nr_sorted++;
if (rq_mergeable(rq)) {
@@ -692,12 +701,13 @@ struct request *elv_former_request(struct request_queue *q, struct request *rq)
return NULL;
}
-int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
+int elv_set_request(struct request_queue *q, struct request *rq,
+ struct bio *bio, gfp_t gfp_mask)
{
struct elevator_queue *e = q->elevator;
if (e->type->ops.elevator_set_req_fn)
- return e->type->ops.elevator_set_req_fn(q, rq, gfp_mask);
+ return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
return 0;
}
@@ -719,26 +729,6 @@ int elv_may_queue(struct request_queue *q, int rw)
return ELV_MQUEUE_MAY;
}
-void elv_abort_queue(struct request_queue *q)
-{
- struct request *rq;
-
- blk_abort_flushes(q);
-
- while (!list_empty(&q->queue_head)) {
- rq = list_entry_rq(q->queue_head.next);
- rq->cmd_flags |= REQ_QUIET;
- trace_block_rq_abort(q, rq);
- /*
- * Mark this request as started so we don't trigger
- * any debug logic in the end I/O path.
- */
- blk_start_request(rq);
- __blk_end_request_all(rq, -EIO);
- }
-}
-EXPORT_SYMBOL(elv_abort_queue);
-
void elv_completed_request(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
@@ -801,8 +791,9 @@ static struct kobj_type elv_ktype = {
.release = elevator_release,
};
-int __elv_register_queue(struct request_queue *q, struct elevator_queue *e)
+int elv_register_queue(struct request_queue *q)
{
+ struct elevator_queue *e = q->elevator;
int error;
error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
@@ -820,11 +811,6 @@ int __elv_register_queue(struct request_queue *q, struct elevator_queue *e)
}
return error;
}
-
-int elv_register_queue(struct request_queue *q)
-{
- return __elv_register_queue(q, q->elevator);
-}
EXPORT_SYMBOL(elv_register_queue);
void elv_unregister_queue(struct request_queue *q)
@@ -907,53 +893,53 @@ EXPORT_SYMBOL_GPL(elv_unregister);
*/
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
- struct elevator_queue *old_elevator, *e;
+ struct elevator_queue *old = q->elevator;
+ bool registered = old->registered;
int err;
- /* allocate new elevator */
- e = elevator_alloc(q, new_e);
- if (!e)
- return -ENOMEM;
-
- err = elevator_init_queue(q, e);
- if (err) {
- kobject_put(&e->kobj);
- return err;
- }
-
- /* turn on BYPASS and drain all requests w/ elevator private data */
- elv_quiesce_start(q);
+ /*
+ * Turn on BYPASS and drain all requests w/ elevator private data.
+ * Block layer doesn't call into a quiesced elevator - all requests
+ * are directly put on the dispatch list without elevator data
+ * using INSERT_BACK. All requests have SOFTBARRIER set and no
+ * merge happens either.
+ */
+ blk_queue_bypass_start(q);
- /* unregister old queue, register new one and kill old elevator */
- if (q->elevator->registered) {
+ /* unregister and clear all auxiliary data of the old elevator */
+ if (registered)
elv_unregister_queue(q);
- err = __elv_register_queue(q, e);
- if (err)
- goto fail_register;
- }
- /* done, clear io_cq's, switch elevators and turn off BYPASS */
spin_lock_irq(q->queue_lock);
ioc_clear_queue(q);
- old_elevator = q->elevator;
- q->elevator = e;
spin_unlock_irq(q->queue_lock);
- elevator_exit(old_elevator);
- elv_quiesce_end(q);
+ /* allocate, init and register new elevator */
+ err = new_e->ops.elevator_init_fn(q, new_e);
+ if (err)
+ goto fail_init;
+
+ if (registered) {
+ err = elv_register_queue(q);
+ if (err)
+ goto fail_register;
+ }
+
+ /* done, kill the old one and finish */
+ elevator_exit(old);
+ blk_queue_bypass_end(q);
- blk_add_trace_msg(q, "elv switch: %s", e->type->elevator_name);
+ blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
return 0;
fail_register:
- /*
- * switch failed, exit the new io scheduler and reattach the old
- * one again (along with re-adding the sysfs dir)
- */
- elevator_exit(e);
+ elevator_exit(q->elevator);
+fail_init:
+ /* switch failed, restore and re-register old elevator */
+ q->elevator = old;
elv_register_queue(q);
- elv_quiesce_end(q);
+ blk_queue_bypass_end(q);
return err;
}
@@ -961,7 +947,7 @@ fail_register:
/*
* Switch this queue to the given IO scheduler.
*/
-int elevator_change(struct request_queue *q, const char *name)
+static int __elevator_change(struct request_queue *q, const char *name)
{
char elevator_name[ELV_NAME_MAX];
struct elevator_type *e;
@@ -970,7 +956,7 @@ int elevator_change(struct request_queue *q, const char *name)
return -ENXIO;
strlcpy(elevator_name, name, sizeof(elevator_name));
- e = elevator_get(strstrip(elevator_name));
+ e = elevator_get(strstrip(elevator_name), true);
if (!e) {
printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
return -EINVAL;
@@ -983,6 +969,18 @@ int elevator_change(struct request_queue *q, const char *name)
return elevator_switch(q, e);
}
+
+int elevator_change(struct request_queue *q, const char *name)
+{
+ int ret;
+
+ /* Protect q->elevator from elevator_init() */
+ mutex_lock(&q->sysfs_lock);
+ ret = __elevator_change(q, name);
+ mutex_unlock(&q->sysfs_lock);
+
+ return ret;
+}
EXPORT_SYMBOL(elevator_change);
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
@@ -993,7 +991,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
if (!q->elevator)
return count;
- ret = elevator_change(q, name);
+ ret = __elevator_change(q, name);
if (!ret)
return count;
diff --git a/block/genhd.c b/block/genhd.c
index df9816ede75..791f4194313 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -18,6 +18,7 @@
#include <linux/mutex.h>
#include <linux/idr.h>
#include <linux/log2.h>
+#include <linux/pm_runtime.h>
#include "blk.h"
@@ -25,7 +26,7 @@ static DEFINE_MUTEX(block_class_lock);
struct kobject *block_depr;
/* for extended dynamic devt allocation, currently only one major is used */
-#define MAX_EXT_DEVT (1 << MINORBITS)
+#define NR_EXT_DEVT (1 << MINORBITS)
/* For extended devt allocation. ext_devt_mutex prevents look up
* results from going away underneath its user.
@@ -35,6 +36,8 @@ static DEFINE_IDR(ext_devt_idr);
static struct device_type disk_type;
+static void disk_check_events(struct disk_events *ev,
+ unsigned int *clearing_ptr);
static void disk_alloc_events(struct gendisk *disk);
static void disk_add_events(struct gendisk *disk);
static void disk_del_events(struct gendisk *disk);
@@ -154,7 +157,7 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
part = rcu_dereference(ptbl->part[piter->idx]);
if (!part)
continue;
- if (!part->nr_sects &&
+ if (!part_nr_sects_read(part) &&
!(piter->flags & DISK_PITER_INCL_EMPTY) &&
!(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
piter->idx == 0))
@@ -191,7 +194,7 @@ EXPORT_SYMBOL_GPL(disk_part_iter_exit);
static inline int sector_in_part(struct hd_struct *part, sector_t sector)
{
return part->start_sect <= sector &&
- sector < part->start_sect + part->nr_sects;
+ sector < part->start_sect + part_nr_sects_read(part);
}
/**
@@ -408,7 +411,7 @@ static int blk_mangle_minor(int minor)
int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
{
struct gendisk *disk = part_to_disk(part);
- int idx, rc;
+ int idx;
/* in consecutive minor range? */
if (part->partno < disk->minors) {
@@ -417,19 +420,11 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
}
/* allocate ext devt */
- do {
- if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
- return -ENOMEM;
- rc = idr_get_new(&ext_devt_idr, part, &idx);
- } while (rc == -EAGAIN);
-
- if (rc)
- return rc;
-
- if (idx > MAX_EXT_DEVT) {
- idr_remove(&ext_devt_idr, idx);
- return -EBUSY;
- }
+ mutex_lock(&ext_devt_mutex);
+ idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL);
+ mutex_unlock(&ext_devt_mutex);
+ if (idx < 0)
+ return idx == -ENOSPC ? -EBUSY : idx;
*devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
return 0;
@@ -517,7 +512,7 @@ static void register_disk(struct gendisk *disk)
ddev->parent = disk->driverfs_dev;
- dev_set_name(ddev, disk->disk_name);
+ dev_set_name(ddev, "%s", disk->disk_name);
/* delay uevents, until we scanned partition table */
dev_set_uevent_suppress(ddev, 1);
@@ -532,6 +527,14 @@ static void register_disk(struct gendisk *disk)
return;
}
}
+
+ /*
+ * avoid probable deadlock caused by allocating memory with
+ * GFP_KERNEL in the runtime_resume callback of any of its
+ * ancestor devices
+ */
+ pm_runtime_set_memalloc_noio(ddev, true);
+
disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
@@ -644,7 +647,6 @@ void del_gendisk(struct gendisk *disk)
disk_part_iter_exit(&piter);
invalidate_partition(disk, 0);
- blk_free_devt(disk_to_dev(disk)->devt);
set_capacity(disk, 0);
disk->flags &= ~GENHD_FL_UP;
@@ -661,7 +663,9 @@ void del_gendisk(struct gendisk *disk)
disk->driverfs_dev = NULL;
if (!sysfs_deprecated)
sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
+ pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
device_del(disk_to_dev(disk));
+ blk_free_devt(disk_to_dev(disk)->devt);
}
EXPORT_SYMBOL(del_gendisk);
@@ -743,7 +747,6 @@ void __init printk_all_partitions(void)
struct hd_struct *part;
char name_buf[BDEVNAME_SIZE];
char devt_buf[BDEVT_SIZE];
- u8 uuid[PARTITION_META_INFO_UUIDLTH * 2 + 1];
/*
* Don't show empty devices or things that have been
@@ -762,14 +765,11 @@ void __init printk_all_partitions(void)
while ((part = disk_part_iter_next(&piter))) {
bool is_part0 = part == &disk->part0;
- uuid[0] = 0;
- if (part->info)
- part_unpack_uuid(part->info->uuid, uuid);
-
printk("%s%s %10llu %s %s", is_part0 ? "" : " ",
bdevt_str(part_devt(part), devt_buf),
- (unsigned long long)part->nr_sects >> 1,
- disk_name(disk, part->partno, name_buf), uuid);
+ (unsigned long long)part_nr_sects_read(part) >> 1
+ , disk_name(disk, part->partno, name_buf),
+ part->info ? part->info->uuid : "");
if (is_part0) {
if (disk->driverfs_dev != NULL &&
disk->driverfs_dev->driver != NULL)
@@ -833,7 +833,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v)
static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
{
- static void *p;
+ void *p;
p = disk_seqf_start(seqf, pos);
if (!IS_ERR_OR_NULL(p) && !*pos)
@@ -860,7 +860,7 @@ static int show_partition(struct seq_file *seqf, void *v)
while ((part = disk_part_iter_next(&piter)))
seq_printf(seqf, "%4d %7d %10llu %s\n",
MAJOR(part_devt(part)), MINOR(part_devt(part)),
- (unsigned long long)part->nr_sects >> 1,
+ (unsigned long long)part_nr_sects_read(part) >> 1,
disk_name(sgp, part->partno, buf));
disk_part_iter_exit(&piter);
@@ -1111,7 +1111,8 @@ struct class block_class = {
.name = "block",
};
-static char *block_devnode(struct device *dev, umode_t *mode)
+static char *block_devnode(struct device *dev, umode_t *mode,
+ kuid_t *uid, kgid_t *gid)
{
struct gendisk *disk = dev_to_disk(dev);
@@ -1243,7 +1244,7 @@ EXPORT_SYMBOL(blk_lookup_devt);
struct gendisk *alloc_disk(int minors)
{
- return alloc_disk_node(minors, -1);
+ return alloc_disk_node(minors, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_disk);
@@ -1251,8 +1252,7 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
{
struct gendisk *disk;
- disk = kmalloc_node(sizeof(struct gendisk),
- GFP_KERNEL | __GFP_ZERO, node_id);
+ disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
if (disk) {
if (!init_part_stats(&disk->part0)) {
kfree(disk);
@@ -1266,6 +1266,16 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
}
disk->part_tbl->part[0] = &disk->part0;
+ /*
+ * set_capacity() and get_capacity() currently don't use
+ * seqcounter to read/update the part0->nr_sects. Still init
+ * the counter as we can read the sectors in IO submission
+ * path using sequence counters.
+ *
+ * TODO: Ideally set_capacity() and get_capacity() should be
+ * converted to make use of bd_mutex and sequence counters.
+ */
+ seqcount_init(&disk->part0.nr_sects_seq);
hd_ref_init(&disk->part0);
disk->minors = minors;
@@ -1478,9 +1488,11 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
intv = disk_events_poll_jiffies(disk);
set_timer_slack(&ev->dwork.timer, intv / 4);
if (check_now)
- queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+ queue_delayed_work(system_freezable_power_efficient_wq,
+ &ev->dwork, 0);
else if (intv)
- queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+ queue_delayed_work(system_freezable_power_efficient_wq,
+ &ev->dwork, intv);
out_unlock:
spin_unlock_irqrestore(&ev->lock, flags);
}
@@ -1522,10 +1534,9 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
spin_lock_irq(&ev->lock);
ev->clearing |= mask;
- if (!ev->block) {
- cancel_delayed_work(&ev->dwork);
- queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
- }
+ if (!ev->block)
+ mod_delayed_work(system_freezable_power_efficient_wq,
+ &ev->dwork, 0);
spin_unlock_irq(&ev->lock);
}
@@ -1545,6 +1556,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
const struct block_device_operations *bdops = disk->fops;
struct disk_events *ev = disk->ev;
unsigned int pending;
+ unsigned int clearing = mask;
if (!ev) {
/* for drivers still using the old ->media_changed method */
@@ -1554,34 +1566,53 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
return 0;
}
- /* tell the workfn about the events being cleared */
+ disk_block_events(disk);
+
+ /*
+ * store the union of mask and ev->clearing on the stack so that the
+ * race with disk_flush_events does not cause ambiguity (ev->clearing
+ * can still be modified even if events are blocked).
+ */
spin_lock_irq(&ev->lock);
- ev->clearing |= mask;
+ clearing |= ev->clearing;
+ ev->clearing = 0;
spin_unlock_irq(&ev->lock);
- /* uncondtionally schedule event check and wait for it to finish */
- disk_block_events(disk);
- queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
- flush_delayed_work(&ev->dwork);
- __disk_unblock_events(disk, false);
+ disk_check_events(ev, &clearing);
+ /*
+ * If ev->clearing is not 0, disk_flush_events() was called in the
+ * middle of this function, so we want to run the workfn without delay.
+ */
+ __disk_unblock_events(disk, ev->clearing ? true : false);
/* then, fetch and clear pending events */
spin_lock_irq(&ev->lock);
- WARN_ON_ONCE(ev->clearing & mask); /* cleared by workfn */
pending = ev->pending & mask;
ev->pending &= ~mask;
spin_unlock_irq(&ev->lock);
+ WARN_ON_ONCE(clearing & mask);
return pending;
}
+/*
+ * Separate this part out so that a different pointer for clearing_ptr can be
+ * passed in for disk_clear_events.
+ */
static void disk_events_workfn(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct disk_events *ev = container_of(dwork, struct disk_events, dwork);
+
+ disk_check_events(ev, &ev->clearing);
+}
+
+static void disk_check_events(struct disk_events *ev,
+ unsigned int *clearing_ptr)
+{
struct gendisk *disk = ev->disk;
char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
- unsigned int clearing = ev->clearing;
+ unsigned int clearing = *clearing_ptr;
unsigned int events;
unsigned long intv;
int nr_events = 0, i;
@@ -1594,11 +1625,12 @@ static void disk_events_workfn(struct work_struct *work)
events &= ~ev->pending;
ev->pending |= events;
- ev->clearing &= ~clearing;
+ *clearing_ptr &= ~clearing;
intv = disk_events_poll_jiffies(disk);
if (!ev->block && intv)
- queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+ queue_delayed_work(system_freezable_power_efficient_wq,
+ &ev->dwork, intv);
spin_unlock_irq(&ev->lock);
diff --git a/block/ioctl.c b/block/ioctl.c
index ba15b2dbfb9..7d5c3b20af4 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -13,7 +13,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
{
struct block_device *bdevp;
struct gendisk *disk;
- struct hd_struct *part;
+ struct hd_struct *part, *lpart;
struct blkpg_ioctl_arg a;
struct blkpg_partition p;
struct disk_part_iter piter;
@@ -36,12 +36,12 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
case BLKPG_ADD_PARTITION:
start = p.start >> 9;
length = p.length >> 9;
- /* check for fit in a hd_struct */
- if (sizeof(sector_t) == sizeof(long) &&
+ /* check for fit in a hd_struct */
+ if (sizeof(sector_t) == sizeof(long) &&
sizeof(long long) > sizeof(long)) {
long pstart = start, plength = length;
if (pstart != start || plength != length
- || pstart < 0 || plength < 0)
+ || pstart < 0 || plength < 0 || partno > 65535)
return -EINVAL;
}
@@ -64,7 +64,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
part = add_partition(disk, partno, start, length,
ADDPART_FLAG_NONE, NULL);
mutex_unlock(&bdev->bd_mutex);
- return IS_ERR(part) ? PTR_ERR(part) : 0;
+ return PTR_ERR_OR_ZERO(part);
case BLKPG_DEL_PARTITION:
part = disk_get_part(disk, partno);
if (!part)
@@ -92,6 +92,59 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
bdput(bdevp);
return 0;
+ case BLKPG_RESIZE_PARTITION:
+ start = p.start >> 9;
+ /* new length of partition in bytes */
+ length = p.length >> 9;
+ /* check for fit in a hd_struct */
+ if (sizeof(sector_t) == sizeof(long) &&
+ sizeof(long long) > sizeof(long)) {
+ long pstart = start, plength = length;
+ if (pstart != start || plength != length
+ || pstart < 0 || plength < 0)
+ return -EINVAL;
+ }
+ part = disk_get_part(disk, partno);
+ if (!part)
+ return -ENXIO;
+ bdevp = bdget(part_devt(part));
+ if (!bdevp) {
+ disk_put_part(part);
+ return -ENOMEM;
+ }
+ mutex_lock(&bdevp->bd_mutex);
+ mutex_lock_nested(&bdev->bd_mutex, 1);
+ if (start != part->start_sect) {
+ mutex_unlock(&bdevp->bd_mutex);
+ mutex_unlock(&bdev->bd_mutex);
+ bdput(bdevp);
+ disk_put_part(part);
+ return -EINVAL;
+ }
+ /* overlap? */
+ disk_part_iter_init(&piter, disk,
+ DISK_PITER_INCL_EMPTY);
+ while ((lpart = disk_part_iter_next(&piter))) {
+ if (lpart->partno != partno &&
+ !(start + length <= lpart->start_sect ||
+ start >= lpart->start_sect + lpart->nr_sects)
+ ) {
+ disk_part_iter_exit(&piter);
+ mutex_unlock(&bdevp->bd_mutex);
+ mutex_unlock(&bdev->bd_mutex);
+ bdput(bdevp);
+ disk_put_part(part);
+ return -EBUSY;
+ }
+ }
+ disk_part_iter_exit(&piter);
+ part_nr_sects_write(part, (sector_t)length);
+ i_size_write(bdevp->bd_inode, p.length);
+ mutex_unlock(&bdevp->bd_mutex);
+ mutex_unlock(&bdev->bd_mutex);
+ bdput(bdevp);
+ disk_put_part(part);
+ return 0;
default:
return -EINVAL;
}
@@ -132,6 +185,22 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags);
}
+static int blk_ioctl_zeroout(struct block_device *bdev, uint64_t start,
+ uint64_t len)
+{
+ if (start & 511)
+ return -EINVAL;
+ if (len & 511)
+ return -EINVAL;
+ start >>= 9;
+ len >>= 9;
+
+ if (start + len > (i_size_read(bdev->bd_inode) >> 9))
+ return -EINVAL;
+
+ return blkdev_issue_zeroout(bdev, start, len, GFP_KERNEL);
+}
+
static int put_ushort(unsigned long arg, unsigned short val)
{
return put_user(val, (unsigned short __user *)arg);
@@ -247,6 +316,17 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
return blk_ioctl_discard(bdev, range[0], range[1],
cmd == BLKSECDISCARD);
}
+ case BLKZEROOUT: {
+ uint64_t range[2];
+
+ if (!(mode & FMODE_WRITE))
+ return -EBADF;
+
+ if (copy_from_user(range, (void __user *)arg, sizeof(range)))
+ return -EFAULT;
+
+ return blk_ioctl_zeroout(bdev, range[0], range[1]);
+ }
case HDIO_GETGEO: {
struct hd_geometry geo;
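
The new BLKZEROOUT path above takes a {start, length} pair in bytes, both multiples of 512 and within the device size, and requires the descriptor to be opened for writing. A user-space sketch of issuing it, assuming the accompanying BLKZEROOUT definition in <linux/fs.h>; the device node and range are placeholders:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* BLKZEROOUT */

int main(void)
{
	/* zero 4 MiB starting 1 MiB into the device; values are in bytes */
	uint64_t range[2] = { 1ULL << 20, 4ULL << 20 };
	int fd = open("/dev/sdX", O_WRONLY);	/* placeholder device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, BLKZEROOUT, range) < 0)
		perror("BLKZEROOUT");
	close(fd);
	return 0;
}
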
diff --git a/block/ioprio.c b/block/ioprio.c
new file mode 100644
index 00000000000..e50170ca7c3
--- /dev/null
+++ b/block/ioprio.c
@@ -0,0 +1,241 @@
+/*
+ * fs/ioprio.c
+ *
+ * Copyright (C) 2004 Jens Axboe <axboe@kernel.dk>
+ *
+ * Helper functions for setting/querying io priorities of processes. The
+ * system calls closely mimic getpriority/setpriority, see the man page for
+ * those. The prio argument is a composite of prio class and prio data, where
+ * the data argument has meaning within that class. The standard scheduling
+ * classes have 8 distinct prio levels, with 0 being the highest prio and 7
+ * being the lowest.
+ *
+ * IOW, setting BE scheduling class with prio 2 is done ala:
+ *
+ * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
+ *
+ * ioprio_set(PRIO_PROCESS, pid, prio);
+ *
+ * See also Documentation/block/ioprio.txt
+ *
+ */
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/ioprio.h>
+#include <linux/blkdev.h>
+#include <linux/capability.h>
+#include <linux/syscalls.h>
+#include <linux/security.h>
+#include <linux/pid_namespace.h>
+
+int set_task_ioprio(struct task_struct *task, int ioprio)
+{
+ int err;
+ struct io_context *ioc;
+ const struct cred *cred = current_cred(), *tcred;
+
+ rcu_read_lock();
+ tcred = __task_cred(task);
+ if (!uid_eq(tcred->uid, cred->euid) &&
+ !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) {
+ rcu_read_unlock();
+ return -EPERM;
+ }
+ rcu_read_unlock();
+
+ err = security_task_setioprio(task, ioprio);
+ if (err)
+ return err;
+
+ ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
+ if (ioc) {
+ ioc->ioprio = ioprio;
+ put_io_context(ioc);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(set_task_ioprio);
+
+SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
+{
+ int class = IOPRIO_PRIO_CLASS(ioprio);
+ int data = IOPRIO_PRIO_DATA(ioprio);
+ struct task_struct *p, *g;
+ struct user_struct *user;
+ struct pid *pgrp;
+ kuid_t uid;
+ int ret;
+
+ switch (class) {
+ case IOPRIO_CLASS_RT:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ /* fall through, rt has prio field too */
+ case IOPRIO_CLASS_BE:
+ if (data >= IOPRIO_BE_NR || data < 0)
+ return -EINVAL;
+
+ break;
+ case IOPRIO_CLASS_IDLE:
+ break;
+ case IOPRIO_CLASS_NONE:
+ if (data)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = -ESRCH;
+ rcu_read_lock();
+ switch (which) {
+ case IOPRIO_WHO_PROCESS:
+ if (!who)
+ p = current;
+ else
+ p = find_task_by_vpid(who);
+ if (p)
+ ret = set_task_ioprio(p, ioprio);
+ break;
+ case IOPRIO_WHO_PGRP:
+ if (!who)
+ pgrp = task_pgrp(current);
+ else
+ pgrp = find_vpid(who);
+ do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
+ ret = set_task_ioprio(p, ioprio);
+ if (ret)
+ break;
+ } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
+ break;
+ case IOPRIO_WHO_USER:
+ uid = make_kuid(current_user_ns(), who);
+ if (!uid_valid(uid))
+ break;
+ if (!who)
+ user = current_user();
+ else
+ user = find_user(uid);
+
+ if (!user)
+ break;
+
+ do_each_thread(g, p) {
+ if (!uid_eq(task_uid(p), uid))
+ continue;
+ ret = set_task_ioprio(p, ioprio);
+ if (ret)
+ goto free_uid;
+ } while_each_thread(g, p);
+free_uid:
+ if (who)
+ free_uid(user);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ rcu_read_unlock();
+ return ret;
+}
+
+static int get_task_ioprio(struct task_struct *p)
+{
+ int ret;
+
+ ret = security_task_getioprio(p);
+ if (ret)
+ goto out;
+ ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
+ if (p->io_context)
+ ret = p->io_context->ioprio;
+out:
+ return ret;
+}
+
+int ioprio_best(unsigned short aprio, unsigned short bprio)
+{
+ unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
+ unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
+
+ if (aclass == IOPRIO_CLASS_NONE)
+ aclass = IOPRIO_CLASS_BE;
+ if (bclass == IOPRIO_CLASS_NONE)
+ bclass = IOPRIO_CLASS_BE;
+
+ if (aclass == bclass)
+ return min(aprio, bprio);
+ if (aclass > bclass)
+ return bprio;
+ else
+ return aprio;
+}
+
+SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
+{
+ struct task_struct *g, *p;
+ struct user_struct *user;
+ struct pid *pgrp;
+ kuid_t uid;
+ int ret = -ESRCH;
+ int tmpio;
+
+ rcu_read_lock();
+ switch (which) {
+ case IOPRIO_WHO_PROCESS:
+ if (!who)
+ p = current;
+ else
+ p = find_task_by_vpid(who);
+ if (p)
+ ret = get_task_ioprio(p);
+ break;
+ case IOPRIO_WHO_PGRP:
+ if (!who)
+ pgrp = task_pgrp(current);
+ else
+ pgrp = find_vpid(who);
+ do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
+ tmpio = get_task_ioprio(p);
+ if (tmpio < 0)
+ continue;
+ if (ret == -ESRCH)
+ ret = tmpio;
+ else
+ ret = ioprio_best(ret, tmpio);
+ } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
+ break;
+ case IOPRIO_WHO_USER:
+ uid = make_kuid(current_user_ns(), who);
+ if (!who)
+ user = current_user();
+ else
+ user = find_user(uid);
+
+ if (!user)
+ break;
+
+ do_each_thread(g, p) {
+ if (!uid_eq(task_uid(p), user->uid))
+ continue;
+ tmpio = get_task_ioprio(p);
+ if (tmpio < 0)
+ continue;
+ if (ret == -ESRCH)
+ ret = tmpio;
+ else
+ ret = ioprio_best(ret, tmpio);
+ } while_each_thread(g, p);
+
+ if (who)
+ free_uid(user);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ rcu_read_unlock();
+ return ret;
+}
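
A user-space sketch of the ioprio_set() call described in the file header above: glibc provides no wrapper, so the raw syscall is used, and the constants are spelled out locally on the assumption that they mirror the kernel's ioprio definitions.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* assumed to mirror the kernel's ioprio definitions (stable ABI) */
#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_CLASS_BE		2
#define IOPRIO_WHO_PROCESS	1

int main(void)
{
	/* best-effort class, priority level 2, for the calling process */
	int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;

	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio) < 0)
		perror("ioprio_set");
	return 0;
}
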
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 413a0b1d788..3de89d4690f 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -59,15 +59,28 @@ noop_latter_request(struct request_queue *q, struct request *rq)
return list_entry(rq->queuelist.next, struct request, queuelist);
}
-static void *noop_init_queue(struct request_queue *q)
+static int noop_init_queue(struct request_queue *q, struct elevator_type *e)
{
struct noop_data *nd;
+ struct elevator_queue *eq;
+
+ eq = elevator_alloc(q, e);
+ if (!eq)
+ return -ENOMEM;
nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
- if (!nd)
- return NULL;
+ if (!nd) {
+ kobject_put(&eq->kobj);
+ return -ENOMEM;
+ }
+ eq->elevator_data = nd;
+
INIT_LIST_HEAD(&nd->queue);
- return nd;
+
+ spin_lock_irq(q->queue_lock);
+ q->elevator = eq;
+ spin_unlock_irq(q->queue_lock);
+ return 0;
}
static void noop_exit_queue(struct elevator_queue *e)
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 6df5d6928a4..789cdea0589 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -84,7 +84,7 @@ ssize_t part_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hd_struct *p = dev_to_part(dev);
- return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects);
+ return sprintf(buf, "%llu\n",(unsigned long long)part_nr_sects_read(p));
}
static ssize_t part_ro_show(struct device *dev,
@@ -249,11 +249,11 @@ void delete_partition(struct gendisk *disk, int partno)
if (!part)
return;
- blk_free_devt(part_devt(part));
rcu_assign_pointer(ptbl->part[partno], NULL);
rcu_assign_pointer(ptbl->last_lookup, NULL);
kobject_put(part->holder_dir);
device_del(part_to_dev(part));
+ blk_free_devt(part_devt(part));
hd_struct_put(part);
}
@@ -294,6 +294,8 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
err = -ENOMEM;
goto out_free;
}
+
+ seqcount_init(&p->nr_sects_seq);
pdev = part_to_dev(p);
p->start_sect = start;
@@ -416,7 +418,7 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
int p, highest, res;
rescan:
if (state && !IS_ERR(state)) {
- kfree(state);
+ free_partitions(state);
state = NULL;
}
@@ -523,7 +525,7 @@ rescan:
md_autodetect_dev(part_to_dev(part)->devt);
#endif
}
- kfree(state);
+ free_partitions(state);
return 0;
}
diff --git a/block/partitions/Kconfig b/block/partitions/Kconfig
index cb5f0a3f1b0..9b29a996c31 100644
--- a/block/partitions/Kconfig
+++ b/block/partitions/Kconfig
@@ -68,6 +68,17 @@ config ACORN_PARTITION_RISCIX
of machines called RISCiX. If you say 'Y' here, Linux will be able
to read disks partitioned under RISCiX.
+config AIX_PARTITION
+ bool "AIX basic partition table support" if PARTITION_ADVANCED
+ help
+ Say Y here if you would like to be able to read the hard disk
+ partition table format used by IBM or Motorola PowerPC machines
+ running AIX. AIX actually uses a Logical Volume Manager, where
+ "logical volumes" can be spread across one or multiple disks,
+ but this driver works only for the simple case of partitions which
+ are contiguous.
+ Otherwise, say N.
+
config OSF_PARTITION
bool "Alpha OSF partition support" if PARTITION_ADVANCED
default y if ALPHA
@@ -234,8 +245,8 @@ config KARMA_PARTITION
uses a proprietary partition table.
config EFI_PARTITION
- bool "EFI GUID Partition support"
- depends on PARTITION_ADVANCED
+ bool "EFI GUID Partition support" if PARTITION_ADVANCED
+ default y
select CRC32
help
Say Y here if you would like to use hard disks under Linux which
@@ -249,3 +260,10 @@ config SYSV68_PARTITION
partition table format used by Motorola Delta machines (using
sysv68).
Otherwise, say N.
+
+config CMDLINE_PARTITION
+ bool "Command line partition support" if PARTITION_ADVANCED
+ select BLK_CMDLINE_PARSER
+ help
+ Say Y here if you want to read the partition table from bootargs.
+ The format for the command line is just like mtdparts.
diff --git a/block/partitions/Makefile b/block/partitions/Makefile
index 03af8eac51d..37a95270503 100644
--- a/block/partitions/Makefile
+++ b/block/partitions/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_BLOCK) := check.o
obj-$(CONFIG_ACORN_PARTITION) += acorn.o
obj-$(CONFIG_AMIGA_PARTITION) += amiga.o
obj-$(CONFIG_ATARI_PARTITION) += atari.o
+obj-$(CONFIG_AIX_PARTITION) += aix.o
+obj-$(CONFIG_CMDLINE_PARTITION) += cmdline.o
obj-$(CONFIG_MAC_PARTITION) += mac.o
obj-$(CONFIG_LDM_PARTITION) += ldm.o
obj-$(CONFIG_MSDOS_PARTITION) += msdos.o
diff --git a/block/partitions/aix.c b/block/partitions/aix.c
new file mode 100644
index 00000000000..43be471d9b1
--- /dev/null
+++ b/block/partitions/aix.c
@@ -0,0 +1,293 @@
+/*
+ * fs/partitions/aix.c
+ *
+ * Copyright (C) 2012-2013 Philippe De Muyter <phdm@macqel.be>
+ */
+
+#include "check.h"
+#include "aix.h"
+
+struct lvm_rec {
+ char lvm_id[4]; /* "_LVM" */
+ char reserved4[16];
+ __be32 lvmarea_len;
+ __be32 vgda_len;
+ __be32 vgda_psn[2];
+ char reserved36[10];
+ __be16 pp_size; /* log2(pp_size) */
+ char reserved46[12];
+ __be16 version;
+ };
+
+struct vgda {
+ __be32 secs;
+ __be32 usec;
+ char reserved8[16];
+ __be16 numlvs;
+ __be16 maxlvs;
+ __be16 pp_size;
+ __be16 numpvs;
+ __be16 total_vgdas;
+ __be16 vgda_size;
+ };
+
+struct lvd {
+ __be16 lv_ix;
+ __be16 res2;
+ __be16 res4;
+ __be16 maxsize;
+ __be16 lv_state;
+ __be16 mirror;
+ __be16 mirror_policy;
+ __be16 num_lps;
+ __be16 res10[8];
+ };
+
+struct lvname {
+ char name[64];
+ };
+
+struct ppe {
+ __be16 lv_ix;
+ unsigned short res2;
+ unsigned short res4;
+ __be16 lp_ix;
+ unsigned short res8[12];
+ };
+
+struct pvd {
+ char reserved0[16];
+ __be16 pp_count;
+ char reserved18[2];
+ __be32 psn_part1;
+ char reserved24[8];
+ struct ppe ppe[1016];
+ };
+
+#define LVM_MAXLVS 256
+
+/**
+ * last_lba(): return number of last logical block of device
+ * @bdev: block device
+ *
+ * Description: Returns last LBA value on success, 0 on error.
+ * This is stored (by sd and ide-geometry) in
+ * the part[0] entry for this disk, and is the number of
+ * physical sectors available on the disk.
+ */
+static u64 last_lba(struct block_device *bdev)
+{
+ if (!bdev || !bdev->bd_inode)
+ return 0;
+ return (bdev->bd_inode->i_size >> 9) - 1ULL;
+}
+
+/**
+ * read_lba(): Read bytes from disk, starting at given LBA
+ * @state
+ * @lba
+ * @buffer
+ * @count
+ *
+ * Description: Reads @count bytes from @state->bdev into @buffer.
+ * Returns number of bytes read on success, 0 on error.
+ */
+static size_t read_lba(struct parsed_partitions *state, u64 lba, u8 *buffer,
+ size_t count)
+{
+ size_t totalreadcount = 0;
+
+ if (!buffer || lba + count / 512 > last_lba(state->bdev))
+ return 0;
+
+ while (count) {
+ int copied = 512;
+ Sector sect;
+ unsigned char *data = read_part_sector(state, lba++, &sect);
+ if (!data)
+ break;
+ if (copied > count)
+ copied = count;
+ memcpy(buffer, data, copied);
+ put_dev_sector(sect);
+ buffer += copied;
+ totalreadcount += copied;
+ count -= copied;
+ }
+ return totalreadcount;
+}
+
+/**
+ * alloc_pvd(): reads physical volume descriptor
+ * @state
+ * @lba
+ *
+ * Description: Returns pvd on success, NULL on error.
+ * Allocates space for pvd and fill it with disk blocks at @lba
+ * Notes: remember to free pvd when you're done!
+ */
+static struct pvd *alloc_pvd(struct parsed_partitions *state, u32 lba)
+{
+ size_t count = sizeof(struct pvd);
+ struct pvd *p;
+
+ p = kmalloc(count, GFP_KERNEL);
+ if (!p)
+ return NULL;
+
+ if (read_lba(state, lba, (u8 *) p, count) < count) {
+ kfree(p);
+ return NULL;
+ }
+ return p;
+}
+
+/**
+ * alloc_lvn(): reads logical volume names
+ * @state
+ * @lba
+ *
+ * Description: Returns lvn on success, NULL on error.
+ * Allocates space for lvn and fill it with disk blocks at @lba
+ * Notes: remember to free lvn when you're done!
+ */
+static struct lvname *alloc_lvn(struct parsed_partitions *state, u32 lba)
+{
+ size_t count = sizeof(struct lvname) * LVM_MAXLVS;
+ struct lvname *p;
+
+ p = kmalloc(count, GFP_KERNEL);
+ if (!p)
+ return NULL;
+
+ if (read_lba(state, lba, (u8 *) p, count) < count) {
+ kfree(p);
+ return NULL;
+ }
+ return p;
+}
+
+int aix_partition(struct parsed_partitions *state)
+{
+ int ret = 0;
+ Sector sect;
+ unsigned char *d;
+ u32 pp_bytes_size;
+ u32 pp_blocks_size = 0;
+ u32 vgda_sector = 0;
+ u32 vgda_len = 0;
+ int numlvs = 0;
+ struct pvd *pvd;
+ struct lv_info {
+ unsigned short pps_per_lv;
+ unsigned short pps_found;
+ unsigned char lv_is_contiguous;
+ } *lvip;
+ struct lvname *n = NULL;
+
+ d = read_part_sector(state, 7, &sect);
+ if (d) {
+ struct lvm_rec *p = (struct lvm_rec *)d;
+ u16 lvm_version = be16_to_cpu(p->version);
+ char tmp[64];
+
+ if (lvm_version == 1) {
+ int pp_size_log2 = be16_to_cpu(p->pp_size);
+
+ pp_bytes_size = 1 << pp_size_log2;
+ pp_blocks_size = pp_bytes_size / 512;
+ snprintf(tmp, sizeof(tmp),
+ " AIX LVM header version %u found\n",
+ lvm_version);
+ vgda_len = be32_to_cpu(p->vgda_len);
+ vgda_sector = be32_to_cpu(p->vgda_psn[0]);
+ } else {
+ snprintf(tmp, sizeof(tmp),
+ " unsupported AIX LVM version %d found\n",
+ lvm_version);
+ }
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ put_dev_sector(sect);
+ }
+ if (vgda_sector && (d = read_part_sector(state, vgda_sector, &sect))) {
+ struct vgda *p = (struct vgda *)d;
+
+ numlvs = be16_to_cpu(p->numlvs);
+ put_dev_sector(sect);
+ }
+ lvip = kzalloc(sizeof(struct lv_info) * state->limit, GFP_KERNEL);
+ if (!lvip)
+ return 0;
+ if (numlvs && (d = read_part_sector(state, vgda_sector + 1, &sect))) {
+ struct lvd *p = (struct lvd *)d;
+ int i;
+
+ n = alloc_lvn(state, vgda_sector + vgda_len - 33);
+ if (n) {
+ int foundlvs = 0;
+
+ for (i = 0; foundlvs < numlvs && i < state->limit; i += 1) {
+ lvip[i].pps_per_lv = be16_to_cpu(p[i].num_lps);
+ if (lvip[i].pps_per_lv)
+ foundlvs += 1;
+ }
+ }
+ put_dev_sector(sect);
+ }
+ pvd = alloc_pvd(state, vgda_sector + 17);
+ if (pvd) {
+ int numpps = be16_to_cpu(pvd->pp_count);
+ int psn_part1 = be32_to_cpu(pvd->psn_part1);
+ int i;
+ int cur_lv_ix = -1;
+ int next_lp_ix = 1;
+ int lp_ix;
+
+ for (i = 0; i < numpps; i += 1) {
+ struct ppe *p = pvd->ppe + i;
+ unsigned int lv_ix;
+
+ lp_ix = be16_to_cpu(p->lp_ix);
+ if (!lp_ix) {
+ next_lp_ix = 1;
+ continue;
+ }
+ lv_ix = be16_to_cpu(p->lv_ix) - 1;
+ if (lv_ix > state->limit) {
+ cur_lv_ix = -1;
+ continue;
+ }
+ lvip[lv_ix].pps_found += 1;
+ if (lp_ix == 1) {
+ cur_lv_ix = lv_ix;
+ next_lp_ix = 1;
+ } else if (lv_ix != cur_lv_ix || lp_ix != next_lp_ix) {
+ next_lp_ix = 1;
+ continue;
+ }
+ if (lp_ix == lvip[lv_ix].pps_per_lv) {
+ char tmp[70];
+
+ put_partition(state, lv_ix + 1,
+ (i + 1 - lp_ix) * pp_blocks_size + psn_part1,
+ lvip[lv_ix].pps_per_lv * pp_blocks_size);
+ snprintf(tmp, sizeof(tmp), " <%s>\n",
+ n[lv_ix].name);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ lvip[lv_ix].lv_is_contiguous = 1;
+ ret = 1;
+ next_lp_ix = 1;
+ } else
+ next_lp_ix += 1;
+ }
+ for (i = 0; i < state->limit; i += 1)
+ if (lvip[i].pps_found && !lvip[i].lv_is_contiguous)
+ pr_warn("partition %s (%u pp's found) is "
+ "not contiguous\n",
+ n[i].name, lvip[i].pps_found);
+ kfree(pvd);
+ }
+ kfree(n);
+ kfree(lvip);
+ return ret;
+}
diff --git a/block/partitions/aix.h b/block/partitions/aix.h
new file mode 100644
index 00000000000..e0c66a98752
--- /dev/null
+++ b/block/partitions/aix.h
@@ -0,0 +1 @@
+extern int aix_partition(struct parsed_partitions *state);
diff --git a/block/partitions/atari.h b/block/partitions/atari.h
index fe2d32a89f3..f2ec43bfeec 100644
--- a/block/partitions/atari.h
+++ b/block/partitions/atari.h
@@ -11,6 +11,8 @@
* by Guenther Kelleter (guenther@pool.informatik.rwth-aachen.de)
*/
+#include <linux/compiler.h>
+
struct partition_info
{
u8 flg; /* bit 0: active; bit 7: bootable */
@@ -29,6 +31,6 @@ struct rootsector
u32 bsl_st; /* start of bad sector list */
u32 bsl_cnt; /* length of bad sector list */
u16 checksum; /* checksum for bootable disks */
-} __attribute__((__packed__));
+} __packed;
int atari_partition(struct parsed_partitions *state);
diff --git a/block/partitions/check.c b/block/partitions/check.c
index bc908672c97..9ac1df74f69 100644
--- a/block/partitions/check.c
+++ b/block/partitions/check.c
@@ -14,6 +14,7 @@
*/
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/ctype.h>
#include <linux/genhd.h>
@@ -33,6 +34,7 @@
#include "efi.h"
#include "karma.h"
#include "sysv68.h"
+#include "cmdline.h"
int warn_no_part = 1; /*This is ugly: should make genhd removable media aware*/
@@ -64,6 +66,9 @@ static int (*check_part[])(struct parsed_partitions *) = {
adfspart_check_ADFS,
#endif
+#ifdef CONFIG_CMDLINE_PARTITION
+ cmdline_partition,
+#endif
#ifdef CONFIG_EFI_PARTITION
efi_partition, /* this must come before msdos */
#endif
@@ -106,18 +111,45 @@ static int (*check_part[])(struct parsed_partitions *) = {
NULL
};
+static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
+{
+ struct parsed_partitions *state;
+ int nr;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ nr = disk_max_parts(hd);
+ state->parts = vzalloc(nr * sizeof(state->parts[0]));
+ if (!state->parts) {
+ kfree(state);
+ return NULL;
+ }
+
+ state->limit = nr;
+
+ return state;
+}
+
+void free_partitions(struct parsed_partitions *state)
+{
+ vfree(state->parts);
+ kfree(state);
+}
+
struct parsed_partitions *
check_partition(struct gendisk *hd, struct block_device *bdev)
{
struct parsed_partitions *state;
int i, res, err;
- state = kzalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
+ state = allocate_partitions(hd);
if (!state)
return NULL;
state->pp_buf = (char *)__get_free_page(GFP_KERNEL);
if (!state->pp_buf) {
- kfree(state);
+ free_partitions(state);
return NULL;
}
state->pp_buf[0] = '\0';
@@ -128,10 +160,9 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
if (isdigit(state->name[strlen(state->name)-1]))
sprintf(state->name, "p");
- state->limit = disk_max_parts(hd);
i = res = err = 0;
while (!res && check_part[i]) {
- memset(&state->parts, 0, sizeof(state->parts));
+ memset(state->parts, 0, state->limit * sizeof(state->parts[0]));
res = check_part[i++](state);
if (res < 0) {
/* We have hit an I/O error which we don't report now.
@@ -161,6 +192,6 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
printk(KERN_INFO "%s", state->pp_buf);
free_page((unsigned long)state->pp_buf);
- kfree(state);
+ free_partitions(state);
return ERR_PTR(res);
}
diff --git a/block/partitions/check.h b/block/partitions/check.h
index 52b100311ec..eade17ea910 100644
--- a/block/partitions/check.h
+++ b/block/partitions/check.h
@@ -15,13 +15,15 @@ struct parsed_partitions {
int flags;
bool has_info;
struct partition_meta_info info;
- } parts[DISK_MAX_PARTS];
+ } *parts;
int next;
int limit;
bool access_beyond_eod;
char *pp_buf;
};
+void free_partitions(struct parsed_partitions *state);
+
struct parsed_partitions *
check_partition(struct gendisk *, struct block_device *);
diff --git a/block/partitions/cmdline.c b/block/partitions/cmdline.c
new file mode 100644
index 00000000000..5141b563adf
--- /dev/null
+++ b/block/partitions/cmdline.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2013 HUAWEI
+ * Author: Cai Zhiyong <caizhiyong@huawei.com>
+ *
+ * Read block device partition table from the command line.
+ * Typically used for fixed block (eMMC) embedded devices.
+ * It has no MBR, so it saves storage space. The bootloader can easily access
+ * data on the block device by absolute address.
+ * Users can easily change the partition layout.
+ *
+ * The format for the command line is just like mtdparts.
+ *
+ * For further information, see "Documentation/block/cmdline-partition.txt"
+ *
+ */
+
+#include <linux/cmdline-parser.h>
+
+#include "check.h"
+#include "cmdline.h"
+
+static char *cmdline;
+static struct cmdline_parts *bdev_parts;
+
+static int add_part(int slot, struct cmdline_subpart *subpart, void *param)
+{
+ int label_min;
+ struct partition_meta_info *info;
+ char tmp[sizeof(info->volname) + 4];
+ struct parsed_partitions *state = (struct parsed_partitions *)param;
+
+ if (slot >= state->limit)
+ return 1;
+
+ put_partition(state, slot, subpart->from >> 9,
+ subpart->size >> 9);
+
+ info = &state->parts[slot].info;
+
+ label_min = min_t(int, sizeof(info->volname) - 1,
+ sizeof(subpart->name));
+ strncpy(info->volname, subpart->name, label_min);
+ info->volname[label_min] = '\0';
+
+ snprintf(tmp, sizeof(tmp), "(%s)", info->volname);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+
+ state->parts[slot].has_info = true;
+
+ return 0;
+}
+
+static int __init cmdline_parts_setup(char *s)
+{
+ cmdline = s;
+ return 1;
+}
+__setup("blkdevparts=", cmdline_parts_setup);
+
+/*
+ * Purpose: allocate cmdline partitions.
+ * Returns:
+ * -1 if unable to read the partition table
+ * 0 if this isn't our partition table
+ * 1 if successful
+ */
+int cmdline_partition(struct parsed_partitions *state)
+{
+ sector_t disk_size;
+ char bdev[BDEVNAME_SIZE];
+ struct cmdline_parts *parts;
+
+ if (cmdline) {
+ if (bdev_parts)
+ cmdline_parts_free(&bdev_parts);
+
+ if (cmdline_parts_parse(&bdev_parts, cmdline)) {
+ cmdline = NULL;
+ return -1;
+ }
+ cmdline = NULL;
+ }
+
+ if (!bdev_parts)
+ return 0;
+
+ bdevname(state->bdev, bdev);
+ parts = cmdline_parts_find(bdev_parts, bdev);
+ if (!parts)
+ return 0;
+
+ disk_size = get_capacity(state->bdev->bd_disk) << 9;
+
+ cmdline_parts_set(parts, disk_size, 1, add_part, (void *)state);
+
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+
+ return 1;
+}
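The new parser above consumes a blkdevparts= boot argument in the mtdparts-like syntax referenced in the header comment (see Documentation/block/cmdline-partition.txt), roughly <blkdev-id>:<size>[@<offset>](<name>),... per device. As a rough illustration of the "size[@offset](name)" shape only, here is a small stand-alone user-space sketch; the helper names and the example string are hypothetical, and the kernel's real parsing lives in block/cmdline-parser.c.

/*
 * Minimal user-space sketch of parsing one "size[@offset](name)" entry,
 * assuming the mtdparts-like syntax referenced above. Illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static uint64_t parse_size(const char *s, char **end)
{
	uint64_t v = strtoull(s, end, 0);

	switch (**end) {
	case 'G': v <<= 30; (*end)++; break;
	case 'M': v <<= 20; (*end)++; break;
	case 'K': v <<= 10; (*end)++; break;
	}
	return v;
}

int main(void)
{
	/* hypothetical entry: 64 MiB partition named "boot" at offset 1 MiB */
	const char *part = "64M@1M(boot)";
	char *p;
	uint64_t size, offset = 0;
	char name[32] = "";

	size = parse_size(part, &p);
	if (*p == '@')
		offset = parse_size(p + 1, &p);
	if (*p == '(')
		sscanf(p, "(%31[^)])", name);

	printf("name=%s size=%llu offset=%llu\n", name,
	       (unsigned long long)size, (unsigned long long)offset);
	return 0;
}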
diff --git a/block/partitions/cmdline.h b/block/partitions/cmdline.h
new file mode 100644
index 00000000000..26e0f8da141
--- /dev/null
+++ b/block/partitions/cmdline.h
@@ -0,0 +1,2 @@
+
+int cmdline_partition(struct parsed_partitions *state);
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index 6296b403c67..dc51f467a56 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -25,6 +25,9 @@
* TODO:
*
* Changelog:
+ * Mon August 5th, 2013 Davidlohr Bueso <davidlohr@hp.com>
+ * - detect hybrid MBRs, tighter pMBR checking & cleanups.
+ *
* Mon Nov 09 2004 Matt Domsch <Matt_Domsch@dell.com>
* - test for valid PMBR and valid PGPT before ever reading
* AGPT, allow override with 'gpt' kernel command line option.
@@ -93,6 +96,7 @@
* - Code works, detects all the partitions.
*
************************************************************/
+#include <linux/kernel.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/math64.h>
@@ -149,34 +153,89 @@ static u64 last_lba(struct block_device *bdev)
bdev_logical_block_size(bdev)) - 1ULL;
}
-static inline int
-pmbr_part_valid(struct partition *part)
+static inline int pmbr_part_valid(gpt_mbr_record *part)
{
- if (part->sys_ind == EFI_PMBR_OSTYPE_EFI_GPT &&
- le32_to_cpu(part->start_sect) == 1UL)
- return 1;
- return 0;
+ if (part->os_type != EFI_PMBR_OSTYPE_EFI_GPT)
+ goto invalid;
+
+ /* set to 0x00000001 (i.e., the LBA of the GPT Partition Header) */
+ if (le32_to_cpu(part->starting_lba) != GPT_PRIMARY_PARTITION_TABLE_LBA)
+ goto invalid;
+
+ return GPT_MBR_PROTECTIVE;
+invalid:
+ return 0;
}
/**
* is_pmbr_valid(): test Protective MBR for validity
* @mbr: pointer to a legacy mbr structure
+ * @total_sectors: amount of sectors in the device
*
- * Description: Returns 1 if PMBR is valid, 0 otherwise.
- * Validity depends on two things:
+ * Description: Checks for a valid protective or hybrid
+ * master boot record (MBR). The validity of a pMBR depends
+ * on all of the following properties:
* 1) MSDOS signature is in the last two bytes of the MBR
* 2) One partition of type 0xEE is found
+ *
+ * In addition, a hybrid MBR will have up to three additional
+ * primary partitions, which point to the same space that's
+ * marked out by up to three GPT partitions.
+ *
+ * Returns 0 upon invalid MBR, or GPT_MBR_PROTECTIVE or
+ * GPT_MBR_HYBRID depending on the device layout.
*/
-static int
-is_pmbr_valid(legacy_mbr *mbr)
+static int is_pmbr_valid(legacy_mbr *mbr, sector_t total_sectors)
{
- int i;
+ uint32_t sz = 0;
+ int i, part = 0, ret = 0; /* invalid by default */
+
if (!mbr || le16_to_cpu(mbr->signature) != MSDOS_MBR_SIGNATURE)
- return 0;
+ goto done;
+
+ for (i = 0; i < 4; i++) {
+ ret = pmbr_part_valid(&mbr->partition_record[i]);
+ if (ret == GPT_MBR_PROTECTIVE) {
+ part = i;
+ /*
+ * Ok, we at least know that there's a protective MBR,
+ * now check if there are other partition types for
+ * hybrid MBR.
+ */
+ goto check_hybrid;
+ }
+ }
+
+ if (ret != GPT_MBR_PROTECTIVE)
+ goto done;
+check_hybrid:
for (i = 0; i < 4; i++)
- if (pmbr_part_valid(&mbr->partition_record[i]))
- return 1;
- return 0;
+ if ((mbr->partition_record[i].os_type !=
+ EFI_PMBR_OSTYPE_EFI_GPT) &&
+ (mbr->partition_record[i].os_type != 0x00))
+ ret = GPT_MBR_HYBRID;
+
+ /*
+ * Protective MBRs take up the lesser of the whole disk
+ * or 2 TiB (32bit LBA), ignoring the rest of the disk.
+ * Some partitioning programs, nonetheless, choose to set
+ * the size to the maximum 32-bit limitation, disregarding
+ * the disk size.
+ *
+ * Hybrid MBRs do not necessarily comply with this.
+ *
+ * Treat a bad value here as a warning only, so that dd'ing an
+ * image from a smaller disk to a larger disk still works.
+ */
+ if (ret == GPT_MBR_PROTECTIVE) {
+ sz = le32_to_cpu(mbr->partition_record[part].size_in_lba);
+ if (sz != (uint32_t) total_sectors - 1 && sz != 0xFFFFFFFF)
+ pr_debug("GPT: mbr size in lba (%u) different than whole disk (%u).\n",
+ sz, min_t(uint32_t,
+ total_sectors - 1, 0xFFFFFFFF));
+ }
+done:
+ return ret;
}
/**
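The rewritten pMBR handling above classifies the record at LBA 0 as protective (type 0xEE starting at LBA 1) or hybrid, and only emits a debug message when the protective entry's size_in_lba does not cover the lesser of the whole disk or the 32-bit LBA limit (0xFFFFFFFF is also tolerated, e.g. after dd'ing an image onto a larger disk). A stand-alone sketch of that size tolerance, with hypothetical sector counts:

/*
 * Sketch of the protective-MBR size tolerance described above:
 * the entry should cover the whole disk (capped at 32-bit LBA),
 * and the 0xFFFFFFFF marker is also accepted. Illustrative only;
 * the kernel merely prints a debug message on mismatch.
 */
#include <stdint.h>
#include <stdio.h>

static int pmbr_size_ok(uint32_t size_in_lba, uint64_t total_sectors)
{
	uint32_t expected = (total_sectors - 1) > 0xFFFFFFFFULL ?
			    0xFFFFFFFFU : (uint32_t)(total_sectors - 1);

	return size_in_lba == expected || size_in_lba == 0xFFFFFFFFU;
}

int main(void)
{
	/* hypothetical 4 GiB disk: 8388608 512-byte sectors */
	printf("%d\n", pmbr_size_ok(8388607, 8388608));    /* 1: exact fit  */
	printf("%d\n", pmbr_size_ok(0xFFFFFFFF, 8388608)); /* 1: max marker */
	printf("%d\n", pmbr_size_ok(1000000, 8388608));    /* 0: undersized */
	return 0;
}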
@@ -238,13 +297,12 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
le32_to_cpu(gpt->sizeof_partition_entry);
if (!count)
return NULL;
- pte = kzalloc(count, GFP_KERNEL);
+ pte = kmalloc(count, GFP_KERNEL);
if (!pte)
return NULL;
if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
- (u8 *) pte,
- count) < count) {
+ (u8 *) pte, count) < count) {
kfree(pte);
pte=NULL;
return NULL;
@@ -267,7 +325,7 @@ static gpt_header *alloc_read_gpt_header(struct parsed_partitions *state,
gpt_header *gpt;
unsigned ssz = bdev_logical_block_size(state->bdev);
- gpt = kzalloc(ssz, GFP_KERNEL);
+ gpt = kmalloc(ssz, GFP_KERNEL);
if (!gpt)
return NULL;
@@ -310,15 +368,23 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
goto fail;
}
- /* Check the GUID Partition Table header size */
+ /* Check that the GUID Partition Table header size is not too big */
if (le32_to_cpu((*gpt)->header_size) >
bdev_logical_block_size(state->bdev)) {
- pr_debug("GUID Partition Table Header size is wrong: %u > %u\n",
+ pr_debug("GUID Partition Table Header size is too large: %u > %u\n",
le32_to_cpu((*gpt)->header_size),
bdev_logical_block_size(state->bdev));
goto fail;
}
+ /* Check that the GUID Partition Table header size is not too small */
+ if (le32_to_cpu((*gpt)->header_size) < sizeof(gpt_header)) {
+ pr_debug("GUID Partition Table Header size is too small: %u < %zu\n",
+ le32_to_cpu((*gpt)->header_size),
+ sizeof(gpt_header));
+ goto fail;
+ }
+
/* Check the GUID Partition Table CRC */
origcrc = le32_to_cpu((*gpt)->header_crc32);
(*gpt)->header_crc32 = 0;
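The two new header-size checks above bound the advertised header_size between sizeof(gpt_header) (92 bytes, per the reserved2 comment in efi.h) and one logical block. A tiny stand-alone sketch of those bounds, using hypothetical sizes:

/*
 * Sketch of the header-size bounds enforced above: at least 92 bytes,
 * at most one logical block. Illustrative only.
 */
#include <stdio.h>

static int gpt_header_size_ok(unsigned int header_size,
			      unsigned int block_size)
{
	return header_size >= 92 && header_size <= block_size;
}

int main(void)
{
	printf("%d\n", gpt_header_size_ok(92, 512));	/* 1: typical GPT   */
	printf("%d\n", gpt_header_size_ok(512, 512));	/* 1: padded header */
	printf("%d\n", gpt_header_size_ok(64, 512));	/* 0: too small     */
	printf("%d\n", gpt_header_size_ok(1024, 512));	/* 0: too large     */
	return 0;
}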
@@ -356,7 +422,12 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
(unsigned long long)lastlba);
goto fail;
}
-
+ if (le64_to_cpu((*gpt)->last_usable_lba) < le64_to_cpu((*gpt)->first_usable_lba)) {
+ pr_debug("GPT: last_usable_lba incorrect: %lld > %lld\n",
+ (unsigned long long)le64_to_cpu((*gpt)->last_usable_lba),
+ (unsigned long long)le64_to_cpu((*gpt)->first_usable_lba));
+ goto fail;
+ }
/* Check that sizeof_partition_entry has the correct value */
if (le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) {
pr_debug("GUID Partitition Entry Size check failed.\n");
@@ -421,44 +492,42 @@ compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba)
if (!pgpt || !agpt)
return;
if (le64_to_cpu(pgpt->my_lba) != le64_to_cpu(agpt->alternate_lba)) {
- printk(KERN_WARNING
- "GPT:Primary header LBA != Alt. header alternate_lba\n");
- printk(KERN_WARNING "GPT:%lld != %lld\n",
+ pr_warn("GPT:Primary header LBA != Alt. header alternate_lba\n");
+ pr_warn("GPT:%lld != %lld\n",
(unsigned long long)le64_to_cpu(pgpt->my_lba),
(unsigned long long)le64_to_cpu(agpt->alternate_lba));
error_found++;
}
if (le64_to_cpu(pgpt->alternate_lba) != le64_to_cpu(agpt->my_lba)) {
- printk(KERN_WARNING
- "GPT:Primary header alternate_lba != Alt. header my_lba\n");
- printk(KERN_WARNING "GPT:%lld != %lld\n",
+ pr_warn("GPT:Primary header alternate_lba != Alt. header my_lba\n");
+ pr_warn("GPT:%lld != %lld\n",
(unsigned long long)le64_to_cpu(pgpt->alternate_lba),
(unsigned long long)le64_to_cpu(agpt->my_lba));
error_found++;
}
if (le64_to_cpu(pgpt->first_usable_lba) !=
le64_to_cpu(agpt->first_usable_lba)) {
- printk(KERN_WARNING "GPT:first_usable_lbas don't match.\n");
- printk(KERN_WARNING "GPT:%lld != %lld\n",
+ pr_warn("GPT:first_usable_lbas don't match.\n");
+ pr_warn("GPT:%lld != %lld\n",
(unsigned long long)le64_to_cpu(pgpt->first_usable_lba),
(unsigned long long)le64_to_cpu(agpt->first_usable_lba));
error_found++;
}
if (le64_to_cpu(pgpt->last_usable_lba) !=
le64_to_cpu(agpt->last_usable_lba)) {
- printk(KERN_WARNING "GPT:last_usable_lbas don't match.\n");
- printk(KERN_WARNING "GPT:%lld != %lld\n",
+ pr_warn("GPT:last_usable_lbas don't match.\n");
+ pr_warn("GPT:%lld != %lld\n",
(unsigned long long)le64_to_cpu(pgpt->last_usable_lba),
(unsigned long long)le64_to_cpu(agpt->last_usable_lba));
error_found++;
}
if (efi_guidcmp(pgpt->disk_guid, agpt->disk_guid)) {
- printk(KERN_WARNING "GPT:disk_guids don't match.\n");
+ pr_warn("GPT:disk_guids don't match.\n");
error_found++;
}
if (le32_to_cpu(pgpt->num_partition_entries) !=
le32_to_cpu(agpt->num_partition_entries)) {
- printk(KERN_WARNING "GPT:num_partition_entries don't match: "
+ pr_warn("GPT:num_partition_entries don't match: "
"0x%x != 0x%x\n",
le32_to_cpu(pgpt->num_partition_entries),
le32_to_cpu(agpt->num_partition_entries));
@@ -466,8 +535,7 @@ compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba)
}
if (le32_to_cpu(pgpt->sizeof_partition_entry) !=
le32_to_cpu(agpt->sizeof_partition_entry)) {
- printk(KERN_WARNING
- "GPT:sizeof_partition_entry values don't match: "
+ pr_warn("GPT:sizeof_partition_entry values don't match: "
"0x%x != 0x%x\n",
le32_to_cpu(pgpt->sizeof_partition_entry),
le32_to_cpu(agpt->sizeof_partition_entry));
@@ -475,34 +543,30 @@ compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba)
}
if (le32_to_cpu(pgpt->partition_entry_array_crc32) !=
le32_to_cpu(agpt->partition_entry_array_crc32)) {
- printk(KERN_WARNING
- "GPT:partition_entry_array_crc32 values don't match: "
+ pr_warn("GPT:partition_entry_array_crc32 values don't match: "
"0x%x != 0x%x\n",
le32_to_cpu(pgpt->partition_entry_array_crc32),
le32_to_cpu(agpt->partition_entry_array_crc32));
error_found++;
}
if (le64_to_cpu(pgpt->alternate_lba) != lastlba) {
- printk(KERN_WARNING
- "GPT:Primary header thinks Alt. header is not at the end of the disk.\n");
- printk(KERN_WARNING "GPT:%lld != %lld\n",
+ pr_warn("GPT:Primary header thinks Alt. header is not at the end of the disk.\n");
+ pr_warn("GPT:%lld != %lld\n",
(unsigned long long)le64_to_cpu(pgpt->alternate_lba),
(unsigned long long)lastlba);
error_found++;
}
if (le64_to_cpu(agpt->my_lba) != lastlba) {
- printk(KERN_WARNING
- "GPT:Alternate GPT header not at the end of the disk.\n");
- printk(KERN_WARNING "GPT:%lld != %lld\n",
+ pr_warn("GPT:Alternate GPT header not at the end of the disk.\n");
+ pr_warn("GPT:%lld != %lld\n",
(unsigned long long)le64_to_cpu(agpt->my_lba),
(unsigned long long)lastlba);
error_found++;
}
if (error_found)
- printk(KERN_WARNING
- "GPT: Use GNU Parted to correct GPT errors.\n");
+ pr_warn("GPT: Use GNU Parted to correct GPT errors.\n");
return;
}
@@ -528,6 +592,7 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
gpt_header *pgpt = NULL, *agpt = NULL;
gpt_entry *pptes = NULL, *aptes = NULL;
legacy_mbr *legacymbr;
+ sector_t total_sectors = i_size_read(state->bdev->bd_inode) >> 9;
u64 lastlba;
if (!ptes)
@@ -535,17 +600,22 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
lastlba = last_lba(state->bdev);
if (!force_gpt) {
- /* This will be added to the EFI Spec. per Intel after v1.02. */
- legacymbr = kzalloc(sizeof (*legacymbr), GFP_KERNEL);
- if (legacymbr) {
- read_lba(state, 0, (u8 *) legacymbr,
- sizeof (*legacymbr));
- good_pmbr = is_pmbr_valid(legacymbr);
- kfree(legacymbr);
- }
- if (!good_pmbr)
- goto fail;
- }
+ /* This will be added to the EFI Spec. per Intel after v1.02. */
+ legacymbr = kzalloc(sizeof(*legacymbr), GFP_KERNEL);
+ if (!legacymbr)
+ goto fail;
+
+ read_lba(state, 0, (u8 *)legacymbr, sizeof(*legacymbr));
+ good_pmbr = is_pmbr_valid(legacymbr, total_sectors);
+ kfree(legacymbr);
+
+ if (!good_pmbr)
+ goto fail;
+
+ pr_debug("Device has a %s MBR\n",
+ good_pmbr == GPT_MBR_PROTECTIVE ?
+ "protective" : "hybrid");
+ }
good_pgpt = is_gpt_valid(state, GPT_PRIMARY_PARTITION_TABLE_LBA,
&pgpt, &pptes);
@@ -568,11 +638,8 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
*ptes = pptes;
kfree(agpt);
kfree(aptes);
- if (!good_agpt) {
- printk(KERN_WARNING
- "Alternate GPT is invalid, "
- "using primary GPT.\n");
- }
+ if (!good_agpt)
+ pr_warn("Alternate GPT is invalid, using primary GPT.\n");
return 1;
}
else if (good_agpt) {
@@ -580,8 +647,7 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
*ptes = aptes;
kfree(pgpt);
kfree(pptes);
- printk(KERN_WARNING
- "Primary GPT is invalid, using alternate GPT.\n");
+ pr_warn("Primary GPT is invalid, using alternate GPT.\n");
return 1;
}
@@ -620,7 +686,6 @@ int efi_partition(struct parsed_partitions *state)
gpt_entry *ptes = NULL;
u32 i;
unsigned ssz = bdev_logical_block_size(state->bdev) / 512;
- u8 unparsed_guid[37];
if (!find_valid_gpt(state, &gpt, &ptes) || !gpt || !ptes) {
kfree(gpt);
@@ -644,20 +709,15 @@ int efi_partition(struct parsed_partitions *state)
put_partition(state, i+1, start * ssz, size * ssz);
/* If this is a RAID volume, tell md */
- if (!efi_guidcmp(ptes[i].partition_type_guid,
- PARTITION_LINUX_RAID_GUID))
+ if (!efi_guidcmp(ptes[i].partition_type_guid, PARTITION_LINUX_RAID_GUID))
state->parts[i + 1].flags = ADDPART_FLAG_RAID;
info = &state->parts[i + 1].info;
- /* Instead of doing a manual swap to big endian, reuse the
- * common ASCII hex format as the interim.
- */
- efi_guid_unparse(&ptes[i].unique_partition_guid, unparsed_guid);
- part_pack_uuid(unparsed_guid, info->uuid);
+ efi_guid_unparse(&ptes[i].unique_partition_guid, info->uuid);
/* Naively convert UTF16-LE to 7 bits. */
- label_max = min(sizeof(info->volname) - 1,
- sizeof(ptes[i].partition_name));
+ label_max = min(ARRAY_SIZE(info->volname) - 1,
+ ARRAY_SIZE(ptes[i].partition_name));
info->volname[label_max] = 0;
while (label_count < label_max) {
u8 c = ptes[i].partition_name[label_count] & 0xff;
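The volume-name copy above now bounds label_max with ARRAY_SIZE() and walks the UTF-16LE partition_name keeping the low byte of each code unit (only the first line of that loop is visible in this hunk). A rough user-space sketch of that idea follows; substituting '!' for non-ASCII code units and the buffer sizes are assumptions of this sketch, not the kernel's exact behaviour.

/*
 * Rough sketch of a "naive UTF-16LE to 7-bit" volume-name conversion:
 * keep code units below 0x80 and replace everything else with '!'.
 */
#include <stdint.h>
#include <stdio.h>

static void utf16le_to_7bit(const uint16_t *in, size_t in_len,
			    char *out, size_t out_len)
{
	size_t i;

	for (i = 0; i < in_len && i + 1 < out_len; i++) {
		uint16_t u = in[i];

		if (!u)
			break;
		out[i] = (u < 0x80) ? (char)u : '!';
	}
	out[i] = '\0';
}

int main(void)
{
	const uint16_t name[] = { 'b', 'o', 'o', 't', 0x00e9, 0 }; /* "boot" + U+00E9 */
	char volname[37];

	utf16le_to_7bit(name, sizeof(name) / sizeof(name[0]),
			volname, sizeof(volname));
	printf("%s\n", volname);	/* prints "boot!" */
	return 0;
}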
diff --git a/block/partitions/efi.h b/block/partitions/efi.h
index b69ab729558..abd0b19288a 100644
--- a/block/partitions/efi.h
+++ b/block/partitions/efi.h
@@ -32,11 +32,15 @@
#include <linux/major.h>
#include <linux/string.h>
#include <linux/efi.h>
+#include <linux/compiler.h>
#define MSDOS_MBR_SIGNATURE 0xaa55
#define EFI_PMBR_OSTYPE_EFI 0xEF
#define EFI_PMBR_OSTYPE_EFI_GPT 0xEE
+#define GPT_MBR_PROTECTIVE 1
+#define GPT_MBR_HYBRID 2
+
#define GPT_HEADER_SIGNATURE 0x5452415020494645ULL
#define GPT_HEADER_REVISION_V1 0x00010000
#define GPT_PRIMARY_PARTITION_TABLE_LBA 1
@@ -84,13 +88,13 @@ typedef struct _gpt_header {
*
* uint8_t reserved2[ BlockSize - 92 ];
*/
-} __attribute__ ((packed)) gpt_header;
+} __packed gpt_header;
typedef struct _gpt_entry_attributes {
u64 required_to_function:1;
u64 reserved:47;
u64 type_guid_specific:16;
-} __attribute__ ((packed)) gpt_entry_attributes;
+} __packed gpt_entry_attributes;
typedef struct _gpt_entry {
efi_guid_t partition_type_guid;
@@ -99,36 +103,31 @@ typedef struct _gpt_entry {
__le64 ending_lba;
gpt_entry_attributes attributes;
efi_char16_t partition_name[72 / sizeof (efi_char16_t)];
-} __attribute__ ((packed)) gpt_entry;
+} __packed gpt_entry;
+
+typedef struct _gpt_mbr_record {
+ u8 boot_indicator; /* unused by EFI, set to 0x80 for bootable */
+ u8 start_head; /* unused by EFI, pt start in CHS */
+ u8 start_sector; /* unused by EFI, pt start in CHS */
+ u8 start_track;
+ u8 os_type; /* EFI and legacy non-EFI OS types */
+ u8 end_head; /* unused by EFI, pt end in CHS */
+ u8 end_sector; /* unused by EFI, pt end in CHS */
+ u8 end_track; /* unused by EFI, pt end in CHS */
+ __le32 starting_lba; /* used by EFI - start addr of the on disk pt */
+ __le32 size_in_lba; /* used by EFI - size of pt in LBA */
+} __packed gpt_mbr_record;
+
typedef struct _legacy_mbr {
u8 boot_code[440];
__le32 unique_mbr_signature;
__le16 unknown;
- struct partition partition_record[4];
+ gpt_mbr_record partition_record[4];
__le16 signature;
-} __attribute__ ((packed)) legacy_mbr;
+} __packed legacy_mbr;
/* Functions */
extern int efi_partition(struct parsed_partitions *state);
#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * --------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 4
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -4
- * c-argdecl-indent: 4
- * c-label-offset: -4
- * c-continued-statement-offset: 4
- * c-continued-brace-offset: 0
- * indent-tabs-mode: nil
- * tab-width: 8
- * End:
- */
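The new gpt_mbr_record replaces struct partition inside legacy_mbr, spelling out the classic 16-byte MBR partition entry with only os_type, starting_lba and size_in_lba meaningful to EFI. A stand-alone compile-time check of the sizes implied by the structures above (C11 _Static_assert and GCC-style packing, not kernel code):

/*
 * Compile-time sanity check: MBR partition records are 16 bytes and a
 * legacy MBR occupies one 512-byte sector. Illustrative sketch only.
 */
#include <stdint.h>

struct gpt_mbr_record {
	uint8_t  boot_indicator;
	uint8_t  start_head;
	uint8_t  start_sector;
	uint8_t  start_track;
	uint8_t  os_type;
	uint8_t  end_head;
	uint8_t  end_sector;
	uint8_t  end_track;
	uint32_t starting_lba;
	uint32_t size_in_lba;
} __attribute__((packed));

struct legacy_mbr {
	uint8_t  boot_code[440];
	uint32_t unique_mbr_signature;
	uint16_t unknown;
	struct gpt_mbr_record partition_record[4];
	uint16_t signature;
} __attribute__((packed));

_Static_assert(sizeof(struct gpt_mbr_record) == 16,
	       "MBR partition records are 16 bytes");
_Static_assert(sizeof(struct legacy_mbr) == 512,
	       "a legacy MBR occupies one 512-byte sector");

int main(void) { return 0; }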
diff --git a/block/partitions/ibm.c b/block/partitions/ibm.c
index d513a07f44b..47a61474e79 100644
--- a/block/partitions/ibm.c
+++ b/block/partitions/ibm.c
@@ -1,9 +1,8 @@
/*
- * File...........: linux/fs/partitions/ibm.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Volker Sameske <sameske@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ * Copyright IBM Corp. 1999, 2012
*/
#include <linux/buffer_head.h>
@@ -17,17 +16,23 @@
#include "check.h"
#include "ibm.h"
+
+union label_t {
+ struct vtoc_volume_label_cdl vol;
+ struct vtoc_volume_label_ldl lnx;
+ struct vtoc_cms_label cms;
+};
+
/*
* compute the block number from a
* cyl-cyl-head-head structure
*/
-static sector_t
-cchh2blk (struct vtoc_cchh *ptr, struct hd_geometry *geo) {
-
+static sector_t cchh2blk(struct vtoc_cchh *ptr, struct hd_geometry *geo)
+{
sector_t cyl;
__u16 head;
- /*decode cylinder and heads for large volumes */
+ /* decode cylinder and heads for large volumes */
cyl = ptr->hh & 0xFFF0;
cyl <<= 12;
cyl |= ptr->cc;
@@ -40,13 +45,12 @@ cchh2blk (struct vtoc_cchh *ptr, struct hd_geometry *geo) {
* compute the block number from a
* cyl-cyl-head-head-block structure
*/
-static sector_t
-cchhb2blk (struct vtoc_cchhb *ptr, struct hd_geometry *geo) {
-
+static sector_t cchhb2blk(struct vtoc_cchhb *ptr, struct hd_geometry *geo)
+{
sector_t cyl;
__u16 head;
- /*decode cylinder and heads for large volumes */
+ /* decode cylinder and heads for large volumes */
cyl = ptr->hh & 0xFFF0;
cyl <<= 12;
cyl |= ptr->cc;
@@ -56,26 +60,243 @@ cchhb2blk (struct vtoc_cchhb *ptr, struct hd_geometry *geo) {
ptr->b;
}
+static int find_label(struct parsed_partitions *state,
+ dasd_information2_t *info,
+ struct hd_geometry *geo,
+ int blocksize,
+ sector_t *labelsect,
+ char name[],
+ char type[],
+ union label_t *label)
+{
+ Sector sect;
+ unsigned char *data;
+ sector_t testsect[3];
+ unsigned char temp[5];
+ int found = 0;
+ int i, testcount;
+
+ /* There are three places where we may find a valid label:
+ * - on an ECKD disk it's block 2
+ * - on an FBA disk it's block 1
+ * - on a CMS-formatted FBA disk it is sector 1, even if the block size
+ * is larger than 512 bytes (possible if the DIAG discipline is used)
+ * If we have a valid info structure, then we know exactly which case we
+ * have, otherwise we just search through all possibilities.
+ */
+ if (info) {
+ if ((info->cu_type == 0x6310 && info->dev_type == 0x9336) ||
+ (info->cu_type == 0x3880 && info->dev_type == 0x3370))
+ testsect[0] = info->label_block;
+ else
+ testsect[0] = info->label_block * (blocksize >> 9);
+ testcount = 1;
+ } else {
+ testsect[0] = 1;
+ testsect[1] = (blocksize >> 9);
+ testsect[2] = 2 * (blocksize >> 9);
+ testcount = 3;
+ }
+ for (i = 0; i < testcount; ++i) {
+ data = read_part_sector(state, testsect[i], &sect);
+ if (data == NULL)
+ continue;
+ memcpy(label, data, sizeof(*label));
+ memcpy(temp, data, 4);
+ temp[4] = 0;
+ EBCASC(temp, 4);
+ put_dev_sector(sect);
+ if (!strcmp(temp, "VOL1") ||
+ !strcmp(temp, "LNX1") ||
+ !strcmp(temp, "CMS1")) {
+ if (!strcmp(temp, "VOL1")) {
+ strncpy(type, label->vol.vollbl, 4);
+ strncpy(name, label->vol.volid, 6);
+ } else {
+ strncpy(type, label->lnx.vollbl, 4);
+ strncpy(name, label->lnx.volid, 6);
+ }
+ EBCASC(type, 4);
+ EBCASC(name, 6);
+ *labelsect = testsect[i];
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ memset(label, 0, sizeof(*label));
+
+ return found;
+}
+
+static int find_vol1_partitions(struct parsed_partitions *state,
+ struct hd_geometry *geo,
+ int blocksize,
+ char name[],
+ union label_t *label)
+{
+ sector_t blk;
+ int counter;
+ char tmp[64];
+ Sector sect;
+ unsigned char *data;
+ loff_t offset, size;
+ struct vtoc_format1_label f1;
+ int secperblk;
+
+ snprintf(tmp, sizeof(tmp), "VOL1/%8s:", name);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ /*
+ * get start of VTOC from the disk label and then search for format1
+ * and format8 labels
+ */
+ secperblk = blocksize >> 9;
+ blk = cchhb2blk(&label->vol.vtoc, geo) + 1;
+ counter = 0;
+ data = read_part_sector(state, blk * secperblk, &sect);
+ while (data != NULL) {
+ memcpy(&f1, data, sizeof(struct vtoc_format1_label));
+ put_dev_sector(sect);
+ /* skip FMT4 / FMT5 / FMT7 labels */
+ if (f1.DS1FMTID == _ascebc['4']
+ || f1.DS1FMTID == _ascebc['5']
+ || f1.DS1FMTID == _ascebc['7']
+ || f1.DS1FMTID == _ascebc['9']) {
+ blk++;
+ data = read_part_sector(state, blk * secperblk, &sect);
+ continue;
+ }
+ /* only FMT1 and 8 labels valid at this point */
+ if (f1.DS1FMTID != _ascebc['1'] &&
+ f1.DS1FMTID != _ascebc['8'])
+ break;
+ /* OK, we got valid partition data */
+ offset = cchh2blk(&f1.DS1EXT1.llimit, geo);
+ size = cchh2blk(&f1.DS1EXT1.ulimit, geo) -
+ offset + geo->sectors;
+ offset *= secperblk;
+ size *= secperblk;
+ if (counter >= state->limit)
+ break;
+ put_partition(state, counter + 1, offset, size);
+ counter++;
+ blk++;
+ data = read_part_sector(state, blk * secperblk, &sect);
+ }
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+
+ if (!data)
+ return -1;
+
+ return 1;
+}
+
+static int find_lnx1_partitions(struct parsed_partitions *state,
+ struct hd_geometry *geo,
+ int blocksize,
+ char name[],
+ union label_t *label,
+ sector_t labelsect,
+ loff_t i_size,
+ dasd_information2_t *info)
+{
+ loff_t offset, geo_size, size;
+ char tmp[64];
+ int secperblk;
+
+ snprintf(tmp, sizeof(tmp), "LNX1/%8s:", name);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ secperblk = blocksize >> 9;
+ if (label->lnx.ldl_version == 0xf2) {
+ size = label->lnx.formatted_blocks * secperblk;
+ } else {
+ /*
+ * Formated w/o large volume support. If the sanity check
+ * 'size based on geo == size based on i_size' is true, then
+ * we can safely assume that we know the formatted size of
+ * the disk, otherwise we need additional information
+ * that we can only get from a real DASD device.
+ */
+ geo_size = geo->cylinders * geo->heads
+ * geo->sectors * secperblk;
+ size = i_size >> 9;
+ if (size != geo_size) {
+ if (!info) {
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+ return 1;
+ }
+ if (!strcmp(info->type, "ECKD"))
+ if (geo_size < size)
+ size = geo_size;
+ /* else keep size based on i_size */
+ }
+ }
+ /* first and only partition starts in the first block after the label */
+ offset = labelsect + secperblk;
+ put_partition(state, 1, offset, size - offset);
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+ return 1;
+}
+
+static int find_cms1_partitions(struct parsed_partitions *state,
+ struct hd_geometry *geo,
+ int blocksize,
+ char name[],
+ union label_t *label,
+ sector_t labelsect)
+{
+ loff_t offset, size;
+ char tmp[64];
+ int secperblk;
+
+ /*
+ * VM style CMS1 labeled disk
+ */
+ blocksize = label->cms.block_size;
+ secperblk = blocksize >> 9;
+ if (label->cms.disk_offset != 0) {
+ snprintf(tmp, sizeof(tmp), "CMS1/%8s(MDSK):", name);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ /* disk is reserved minidisk */
+ offset = label->cms.disk_offset * secperblk;
+ size = (label->cms.block_count - 1) * secperblk;
+ } else {
+ snprintf(tmp, sizeof(tmp), "CMS1/%8s:", name);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ /*
+ * Special case for FBA devices:
+ * If an FBA device is CMS-formatted with a block size > 512 bytes
+ * and the DIAG discipline is used, then the CMS label is found
+ * in sector 1 instead of block 1. However, the partition is
+ * still supposed to start in block 2.
+ */
+ if (labelsect == 1)
+ offset = 2 * secperblk;
+ else
+ offset = labelsect + secperblk;
+ size = label->cms.block_count * secperblk;
+ }
+
+ put_partition(state, 1, offset, size-offset);
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+ return 1;
+}
+
+
/*
+ * This is the main function, called by check.c
*/
int ibm_partition(struct parsed_partitions *state)
{
struct block_device *bdev = state->bdev;
int blocksize, res;
- loff_t i_size, offset, size, fmt_size;
+ loff_t i_size, offset, size;
dasd_information2_t *info;
struct hd_geometry *geo;
char type[5] = {0,};
char name[7] = {0,};
- union label_t {
- struct vtoc_volume_label_cdl vol;
- struct vtoc_volume_label_ldl lnx;
- struct vtoc_cms_label cms;
- } *label;
- unsigned char *data;
- Sector sect;
sector_t labelsect;
- char tmp[64];
+ union label_t *label;
res = 0;
blocksize = bdev_logical_block_size(bdev);
@@ -84,7 +305,6 @@ int ibm_partition(struct parsed_partitions *state)
i_size = i_size_read(bdev->bd_inode);
if (i_size == 0)
goto out_exit;
-
info = kmalloc(sizeof(dasd_information2_t), GFP_KERNEL);
if (info == NULL)
goto out_exit;
@@ -94,176 +314,45 @@ int ibm_partition(struct parsed_partitions *state)
label = kmalloc(sizeof(union label_t), GFP_KERNEL);
if (label == NULL)
goto out_nolab;
-
- if (ioctl_by_bdev(bdev, BIODASDINFO2, (unsigned long)info) != 0 ||
- ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0)
+ if (ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0)
goto out_freeall;
-
- /*
- * Special case for FBA disks: label sector does not depend on
- * blocksize.
- */
- if ((info->cu_type == 0x6310 && info->dev_type == 0x9336) ||
- (info->cu_type == 0x3880 && info->dev_type == 0x3370))
- labelsect = info->label_block;
- else
- labelsect = info->label_block * (blocksize >> 9);
-
- /*
- * Get volume label, extract name and type.
- */
- data = read_part_sector(state, labelsect, &sect);
- if (data == NULL)
- goto out_readerr;
-
- memcpy(label, data, sizeof(union label_t));
- put_dev_sector(sect);
-
- if ((!info->FBA_layout) && (!strcmp(info->type, "ECKD"))) {
- strncpy(type, label->vol.vollbl, 4);
- strncpy(name, label->vol.volid, 6);
- } else {
- strncpy(type, label->lnx.vollbl, 4);
- strncpy(name, label->lnx.volid, 6);
+ if (ioctl_by_bdev(bdev, BIODASDINFO2, (unsigned long)info) != 0) {
+ kfree(info);
+ info = NULL;
}
- EBCASC(type, 4);
- EBCASC(name, 6);
-
- res = 1;
- /*
- * Three different formats: LDL, CDL and unformated disk
- *
- * identified by info->format
- *
- * unformated disks we do not have to care about
- */
- if (info->format == DASD_FORMAT_LDL) {
- if (strncmp(type, "CMS1", 4) == 0) {
- /*
- * VM style CMS1 labeled disk
- */
- blocksize = label->cms.block_size;
- if (label->cms.disk_offset != 0) {
- snprintf(tmp, sizeof(tmp), "CMS1/%8s(MDSK):", name);
- strlcat(state->pp_buf, tmp, PAGE_SIZE);
- /* disk is reserved minidisk */
- offset = label->cms.disk_offset;
- size = (label->cms.block_count - 1)
- * (blocksize >> 9);
- } else {
- snprintf(tmp, sizeof(tmp), "CMS1/%8s:", name);
- strlcat(state->pp_buf, tmp, PAGE_SIZE);
- offset = (info->label_block + 1);
- size = label->cms.block_count
- * (blocksize >> 9);
- }
- put_partition(state, 1, offset*(blocksize >> 9),
- size-offset*(blocksize >> 9));
- } else {
- if (strncmp(type, "LNX1", 4) == 0) {
- snprintf(tmp, sizeof(tmp), "LNX1/%8s:", name);
- strlcat(state->pp_buf, tmp, PAGE_SIZE);
- if (label->lnx.ldl_version == 0xf2) {
- fmt_size = label->lnx.formatted_blocks
- * (blocksize >> 9);
- } else if (!strcmp(info->type, "ECKD")) {
- /* formated w/o large volume support */
- fmt_size = geo->cylinders * geo->heads
- * geo->sectors * (blocksize >> 9);
- } else {
- /* old label and no usable disk geometry
- * (e.g. DIAG) */
- fmt_size = i_size >> 9;
- }
- size = i_size >> 9;
- if (fmt_size < size)
- size = fmt_size;
- offset = (info->label_block + 1);
- } else {
- /* unlabeled disk */
- strlcat(state->pp_buf, "(nonl)", PAGE_SIZE);
- size = i_size >> 9;
- offset = (info->label_block + 1);
- }
- put_partition(state, 1, offset*(blocksize >> 9),
- size-offset*(blocksize >> 9));
+ if (find_label(state, info, geo, blocksize, &labelsect, name, type,
+ label)) {
+ if (!strncmp(type, "VOL1", 4)) {
+ res = find_vol1_partitions(state, geo, blocksize, name,
+ label);
+ } else if (!strncmp(type, "LNX1", 4)) {
+ res = find_lnx1_partitions(state, geo, blocksize, name,
+ label, labelsect, i_size,
+ info);
+ } else if (!strncmp(type, "CMS1", 4)) {
+ res = find_cms1_partitions(state, geo, blocksize, name,
+ label, labelsect);
}
- } else if (info->format == DASD_FORMAT_CDL) {
- /*
- * New style CDL formatted disk
- */
- sector_t blk;
- int counter;
-
+ } else if (info) {
/*
- * check if VOL1 label is available
- * if not, something is wrong, skipping partition detection
+ * ugly but needed for backward compatibility:
+ * If the block device is a DASD (i.e. BIODASDINFO2 works),
+ * then we claim it in any case, even though it has no valid
+ * label. If it has the LDL format, then we simply define a
+ * partition as if it had an LNX1 label.
*/
- if (strncmp(type, "VOL1", 4) == 0) {
- snprintf(tmp, sizeof(tmp), "VOL1/%8s:", name);
- strlcat(state->pp_buf, tmp, PAGE_SIZE);
- /*
- * get block number and read then go through format1
- * labels
- */
- blk = cchhb2blk(&label->vol.vtoc, geo) + 1;
- counter = 0;
- data = read_part_sector(state, blk * (blocksize/512),
- &sect);
- while (data != NULL) {
- struct vtoc_format1_label f1;
-
- memcpy(&f1, data,
- sizeof(struct vtoc_format1_label));
- put_dev_sector(sect);
-
- /* skip FMT4 / FMT5 / FMT7 labels */
- if (f1.DS1FMTID == _ascebc['4']
- || f1.DS1FMTID == _ascebc['5']
- || f1.DS1FMTID == _ascebc['7']
- || f1.DS1FMTID == _ascebc['9']) {
- blk++;
- data = read_part_sector(state,
- blk * (blocksize/512), &sect);
- continue;
- }
-
- /* only FMT1 and 8 labels valid at this point */
- if (f1.DS1FMTID != _ascebc['1'] &&
- f1.DS1FMTID != _ascebc['8'])
- break;
-
- /* OK, we got valid partition data */
- offset = cchh2blk(&f1.DS1EXT1.llimit, geo);
- size = cchh2blk(&f1.DS1EXT1.ulimit, geo) -
- offset + geo->sectors;
- if (counter >= state->limit)
- break;
- put_partition(state, counter + 1,
- offset * (blocksize >> 9),
- size * (blocksize >> 9));
- counter++;
- blk++;
- data = read_part_sector(state,
- blk * (blocksize/512), &sect);
- }
-
- if (!data)
- /* Are we not supposed to report this ? */
- goto out_readerr;
- } else
- printk(KERN_WARNING "Warning, expected Label VOL1 not "
- "found, treating as CDL formated Disk");
-
- }
-
- strlcat(state->pp_buf, "\n", PAGE_SIZE);
- goto out_freeall;
-
+ res = 1;
+ if (info->format == DASD_FORMAT_LDL) {
+ strlcat(state->pp_buf, "(nonl)", PAGE_SIZE);
+ size = i_size >> 9;
+ offset = (info->label_block + 1) * (blocksize >> 9);
+ put_partition(state, 1, offset, size-offset);
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+ }
+ } else
+ res = 0;
-out_readerr:
- res = -1;
out_freeall:
kfree(label);
out_nolab:
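find_label() above probes up to three candidate sectors because the label block number is given in device blocks while read_part_sector() addresses 512-byte sectors; only the two FBA cu_type/dev_type pairs use the block number directly. A small worked sketch of that arithmetic, with hypothetical geometries:

/*
 * Worked sketch of the label-sector arithmetic above: FBA devices use
 * the label block number as-is, everything else scales it by the
 * number of 512-byte sectors per block. Illustrative only.
 */
#include <stdio.h>

static unsigned long label_sector(unsigned long label_block,
				  int blocksize, int fba)
{
	return fba ? label_block : label_block * (blocksize >> 9);
}

int main(void)
{
	/* hypothetical ECKD DASD: 4096-byte blocks, label in block 2 */
	printf("ECKD: sector %lu\n", label_sector(2, 4096, 0));	/* 16 */
	/* hypothetical FBA device: label block used directly */
	printf("FBA:  sector %lu\n", label_sector(1, 512, 1));	/* 1 */
	return 0;
}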
diff --git a/block/partitions/karma.c b/block/partitions/karma.c
index 0ea19312706..9721fa589bb 100644
--- a/block/partitions/karma.c
+++ b/block/partitions/karma.c
@@ -8,6 +8,7 @@
#include "check.h"
#include "karma.h"
+#include <linux/compiler.h>
int karma_partition(struct parsed_partitions *state)
{
@@ -26,7 +27,7 @@ int karma_partition(struct parsed_partitions *state)
} d_partitions[2];
u8 d_blank[208];
__le16 d_magic;
- } __attribute__((packed)) *label;
+ } __packed *label;
struct d_partition *p;
data = read_part_sector(state, 0, &sect);
diff --git a/block/partitions/mac.c b/block/partitions/mac.c
index 11f688bd76c..76d8ba6379a 100644
--- a/block/partitions/mac.c
+++ b/block/partitions/mac.c
@@ -63,6 +63,10 @@ int mac_partition(struct parsed_partitions *state)
put_dev_sector(sect);
return 0;
}
+
+ if (blocks_in_map >= state->limit)
+ blocks_in_map = state->limit - 1;
+
strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
for (slot = 1; slot <= blocks_in_map; ++slot) {
int pos = slot * secsize;
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
index 5f79a6677c6..9123f250b42 100644
--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -23,6 +23,7 @@
#include "check.h"
#include "msdos.h"
#include "efi.h"
+#include "aix.h"
/*
* Many architectures don't like unaligned accesses, while
@@ -90,10 +91,21 @@ static int aix_magic_present(struct parsed_partitions *state, unsigned char *p)
if (d[0] == '_' && d[1] == 'L' && d[2] == 'V' && d[3] == 'M')
ret = 1;
put_dev_sector(sect);
- };
+ }
return ret;
}
+static void set_info(struct parsed_partitions *state, int slot,
+ u32 disksig)
+{
+ struct partition_meta_info *info = &state->parts[slot].info;
+
+ snprintf(info->uuid, sizeof(info->uuid), "%08x-%02x", disksig,
+ slot);
+ info->volname[0] = 0;
+ state->parts[slot].has_info = true;
+}
+
/*
* Create devices for each logical partition in an extended partition.
* The logical partitions form a linked list, with each entry being
@@ -106,7 +118,8 @@ static int aix_magic_present(struct parsed_partitions *state, unsigned char *p)
*/
static void parse_extended(struct parsed_partitions *state,
- sector_t first_sector, sector_t first_size)
+ sector_t first_sector, sector_t first_size,
+ u32 disksig)
{
struct partition *p;
Sector sect;
@@ -130,7 +143,7 @@ static void parse_extended(struct parsed_partitions *state,
return;
if (!msdos_magic_present(data + 510))
- goto done;
+ goto done;
p = (struct partition *) (data + 0x1be);
@@ -143,7 +156,7 @@ static void parse_extended(struct parsed_partitions *state,
* and OS/2 seems to use all four entries.
*/
- /*
+ /*
* First process the data partition(s)
*/
for (i=0; i<4; i++, p++) {
@@ -166,6 +179,7 @@ static void parse_extended(struct parsed_partitions *state,
}
put_partition(state, state->next, next, size);
+ set_info(state, state->next, disksig);
if (SYS_IND(p) == LINUX_RAID_PARTITION)
state->parts[state->next].flags = ADDPART_FLAG_RAID;
loopct = 0;
@@ -250,7 +264,7 @@ static void parse_solaris_x86(struct parsed_partitions *state,
}
#if defined(CONFIG_BSD_DISKLABEL)
-/*
+/*
* Create devices for BSD partitions listed in a disklabel, under a
* dos-like partition. See parse_extended() for more information.
*/
@@ -281,7 +295,7 @@ static void parse_bsd(struct parsed_partitions *state,
if (state->next == state->limit)
break;
- if (p->p_fstype == BSD_FS_UNUSED)
+ if (p->p_fstype == BSD_FS_UNUSED)
continue;
bsd_start = le32_to_cpu(p->p_offset);
bsd_size = le32_to_cpu(p->p_size);
@@ -428,7 +442,7 @@ static struct {
{NEW_SOLARIS_X86_PARTITION, parse_solaris_x86},
{0, NULL},
};
-
+
int msdos_partition(struct parsed_partitions *state)
{
sector_t sector_size = bdev_logical_block_size(state->bdev) / 512;
@@ -437,18 +451,28 @@ int msdos_partition(struct parsed_partitions *state)
struct partition *p;
struct fat_boot_sector *fb;
int slot;
+ u32 disksig;
data = read_part_sector(state, 0, &sect);
if (!data)
return -1;
- if (!msdos_magic_present(data + 510)) {
+
+ /*
+ * Note order! (some AIX disks, e.g. the unbootable kind,
+ * have no MSDOS 55aa)
+ */
+ if (aix_magic_present(state, data)) {
put_dev_sector(sect);
+#ifdef CONFIG_AIX_PARTITION
+ return aix_partition(state);
+#else
+ strlcat(state->pp_buf, " [AIX]", PAGE_SIZE);
return 0;
+#endif
}
- if (aix_magic_present(state, data)) {
+ if (!msdos_magic_present(data + 510)) {
put_dev_sector(sect);
- strlcat(state->pp_buf, " [AIX]", PAGE_SIZE);
return 0;
}
@@ -491,6 +515,8 @@ int msdos_partition(struct parsed_partitions *state)
#endif
p = (struct partition *) (data + 0x1be);
+ disksig = le32_to_cpup((__le32 *)(data + 0x1b8));
+
/*
* Look for partitions in two passes:
* First find the primary and DOS-type extended partitions.
@@ -515,11 +541,12 @@ int msdos_partition(struct parsed_partitions *state)
put_partition(state, slot, start, n);
strlcat(state->pp_buf, " <", PAGE_SIZE);
- parse_extended(state, start, size);
+ parse_extended(state, start, size, disksig);
strlcat(state->pp_buf, " >", PAGE_SIZE);
continue;
}
put_partition(state, slot, start, size);
+ set_info(state, slot, disksig);
if (SYS_IND(p) == LINUX_RAID_PARTITION)
state->parts[slot].flags = ADDPART_FLAG_RAID;
if (SYS_IND(p) == DM6_PARTITION)
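set_info() above derives a partition UUID for MBR disks from the 32-bit NT disk signature read at offset 0x1b8 and the slot number, formatted as "%08x-%02x"; this is the string that, for example, root=PARTUUID= can match. A stand-alone sketch with a hypothetical signature:

/*
 * Sketch of the MBR "PARTUUID" naming produced by set_info() above.
 * The signature value here is hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t disksig = 0x2bd2c32a;	/* bytes 0x1b8..0x1bb of sector 0 */
	int slot = 1;			/* first primary partition */
	char uuid[37];

	snprintf(uuid, sizeof(uuid), "%08x-%02x", disksig, slot);
	printf("root=PARTUUID=%s\n", uuid);	/* 2bd2c32a-01 */
	return 0;
}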
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 260fa80ef57..14695c6221c 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -27,6 +27,7 @@
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/times.h>
+#include <linux/uio.h>
#include <asm/uaccess.h>
#include <scsi/scsi.h>
@@ -204,10 +205,6 @@ int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm)
if (capable(CAP_SYS_RAWIO))
return 0;
- /* if there's no filter set, assume we're filtering everything out */
- if (!filter)
- return -EPERM;
-
/* Anybody who can open the device can do a read-safe command */
if (test_bit(cmd[0], filter->read_ok))
return 0;
@@ -232,7 +229,6 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
* fill in request structure
*/
rq->cmd_len = hdr->cmd_len;
- rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->timeout = msecs_to_jiffies(hdr->timeout);
if (!rq->timeout)
@@ -285,7 +281,8 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
struct sg_io_hdr *hdr, fmode_t mode)
{
unsigned long start_time;
- int writing = 0, ret = 0;
+ ssize_t ret = 0;
+ int writing = 0;
struct request *rq;
char sense[SCSI_SENSE_BUFFERSIZE];
struct bio *bio;
@@ -313,6 +310,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
if (!rq)
return -ENOMEM;
+ blk_rq_set_block_pc(rq);
if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
blk_put_request(rq);
@@ -320,37 +318,18 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
}
if (hdr->iovec_count) {
- const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
size_t iov_data_len;
- struct sg_iovec *sg_iov;
- struct iovec *iov;
- int i;
+ struct iovec *iov = NULL;
- sg_iov = kmalloc(size, GFP_KERNEL);
- if (!sg_iov) {
- ret = -ENOMEM;
+ ret = rw_copy_check_uvector(-1, hdr->dxferp, hdr->iovec_count,
+ 0, NULL, &iov);
+ if (ret < 0) {
+ kfree(iov);
goto out;
}
- if (copy_from_user(sg_iov, hdr->dxferp, size)) {
- kfree(sg_iov);
- ret = -EFAULT;
- goto out;
- }
-
- /*
- * Sum up the vecs, making sure they don't overflow
- */
- iov = (struct iovec *) sg_iov;
- iov_data_len = 0;
- for (i = 0; i < hdr->iovec_count; i++) {
- if (iov_data_len + iov[i].iov_len < iov_data_len) {
- kfree(sg_iov);
- ret = -EINVAL;
- goto out;
- }
- iov_data_len += iov[i].iov_len;
- }
+ iov_data_len = ret;
+ ret = 0;
/* SG_IO howto says that the shorter of the two wins */
if (hdr->dxfer_len < iov_data_len) {
@@ -360,9 +339,10 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
iov_data_len = hdr->dxfer_len;
}
- ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
+ ret = blk_rq_map_user_iov(q, rq, NULL, (struct sg_iovec *) iov,
+ hdr->iovec_count,
iov_data_len, GFP_KERNEL);
- kfree(sg_iov);
+ kfree(iov);
} else if (hdr->dxfer_len)
ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
GFP_KERNEL);
@@ -511,7 +491,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
memset(sense, 0, sizeof(sense));
rq->sense = sense;
rq->sense_len = 0;
- rq->cmd_type = REQ_TYPE_BLOCK_PC;
+ blk_rq_set_block_pc(rq);
blk_execute_rq(q, disk, rq, 0);
@@ -544,7 +524,7 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
int err;
rq = blk_get_request(q, WRITE, __GFP_WAIT);
- rq->cmd_type = REQ_TYPE_BLOCK_PC;
+ blk_rq_set_block_pc(rq);
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
rq->cmd[0] = cmd;
rq->cmd[4] = data;
@@ -721,11 +701,14 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
break;
}
+ if (capable(CAP_SYS_RAWIO))
+ return 0;
+
/* In particular, rule out all resets and host-specific ioctls. */
printk_ratelimited(KERN_WARNING
"%s: sending ioctl %x to a partition!\n", current->comm, cmd);
- return capable(CAP_SYS_RAWIO) ? 0 : -ENOIOCTLCMD;
+ return -ENOIOCTLCMD;
}
EXPORT_SYMBOL(scsi_verify_blk_ioctl);
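The sg_io() change above replaces an open-coded copy of the user iovec array, which summed segment lengths while checking for wrap-around, with rw_copy_check_uvector(). A stand-alone sketch of the overflow-checking sum that was removed (POSIX struct iovec as a stand-in):

/*
 * Sketch of an overflow-safe sum of iovec segment lengths, the pattern
 * the removed code implemented by hand. Illustrative only.
 */
#include <stdio.h>
#include <stddef.h>
#include <sys/uio.h>

static int total_iov_len(const struct iovec *iov, int count, size_t *total)
{
	size_t sum = 0;
	int i;

	for (i = 0; i < count; i++) {
		if (sum + iov[i].iov_len < sum)	/* wrap-around check */
			return -1;
		sum += iov[i].iov_len;
	}
	*total = sum;
	return 0;
}

int main(void)
{
	struct iovec iov[2] = {
		{ .iov_base = NULL, .iov_len = 4096 },
		{ .iov_base = NULL, .iov_len = 512 },
	};
	size_t total;

	if (!total_iov_len(iov, 2, &total))
		printf("%zu bytes\n", total);	/* 4608 bytes */
	return 0;
}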