path: root/drivers/block
author    Jens Axboe <axboe@suse.de>  2005-11-04 08:43:35 +0100
committer Jens Axboe <axboe@suse.de>  2005-11-04 08:43:35 +0100
commit    3a65dfe8c088143c7155cfd36a72f4b0ad2fc4b2 (patch)
tree      db930c9f71f94d3ee674f65e38c38e95ca97227e /drivers/block
parent    0f3278d14f0255e4cd9e07ccefc33ff12d8bb59c (diff)
[BLOCK] Move all core block layer code to new block/ directory
drivers/block/ is right now a mix of core and driver parts. Let's move the
core parts to a new top-level directory. Al will move the fs/ related block
parts to block/ next.

Signed-off-by: Jens Axboe <axboe@suse.de>
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/Kconfig             |   12
-rw-r--r--  drivers/block/Kconfig.iosched     |   69
-rw-r--r--  drivers/block/Makefile            |   14
-rw-r--r--  drivers/block/as-iosched.c        | 1985
-rw-r--r--  drivers/block/cfq-iosched.c       | 2428
-rw-r--r--  drivers/block/deadline-iosched.c  |  878
-rw-r--r--  drivers/block/elevator.c          |  802
-rw-r--r--  drivers/block/genhd.c             |  726
-rw-r--r--  drivers/block/ioctl.c             |  275
-rw-r--r--  drivers/block/ll_rw_blk.c         | 3613
-rw-r--r--  drivers/block/noop-iosched.c      |   46
-rw-r--r--  drivers/block/scsi_ioctl.c        |  589
12 files changed, 0 insertions, 11437 deletions
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 51b0af1cebe..7b1cd93892b 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -409,16 +409,6 @@ config BLK_DEV_INITRD
for details.
-#XXX - it makes sense to enable this only for 32-bit subarchs, not for
-#x86_64 for instance.
-config LBD
- bool "Support for Large Block Devices"
- depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML
- help
- Say Y here if you want to attach large (bigger than 2TB) discs to
- your machine, or if you want to have a raid or loopback device
- bigger than 2TB. Otherwise say N.
-
config CDROM_PKTCDVD
tristate "Packet writing on CD/DVD media"
depends on !UML
@@ -455,8 +445,6 @@ config CDROM_PKTCDVD_WCACHE
source "drivers/s390/block/Kconfig"
-source "drivers/block/Kconfig.iosched"
-
config ATA_OVER_ETH
tristate "ATA over Ethernet support"
depends on NET
diff --git a/drivers/block/Kconfig.iosched b/drivers/block/Kconfig.iosched
deleted file mode 100644
index 5b90d2fa63b..00000000000
--- a/drivers/block/Kconfig.iosched
+++ /dev/null
@@ -1,69 +0,0 @@
-
-menu "IO Schedulers"
-
-config IOSCHED_NOOP
- bool
- default y
- ---help---
- The no-op I/O scheduler is a minimal scheduler that does basic merging
- and sorting. Its main uses include non-disk based block devices like
- memory devices, and specialised software or hardware environments
- that do their own scheduling and require only minimal assistance from
- the kernel.
-
-config IOSCHED_AS
- tristate "Anticipatory I/O scheduler"
- default y
- ---help---
- The anticipatory I/O scheduler is the default disk scheduler. It is
- generally a good choice for most environments, but is quite large and
- complex when compared to the deadline I/O scheduler. It can also be
- slower in some cases, especially under some database loads.
-
-config IOSCHED_DEADLINE
- tristate "Deadline I/O scheduler"
- default y
- ---help---
- The deadline I/O scheduler is simple and compact, and is often as
- good as the anticipatory I/O scheduler, and in some database
- workloads, better. In the case of a single process performing I/O to
- a disk at any one time, its behaviour is almost identical to the
- anticipatory I/O scheduler and so is a good choice.
-
-config IOSCHED_CFQ
- tristate "CFQ I/O scheduler"
- default y
- ---help---
- The CFQ I/O scheduler tries to distribute bandwidth equally
- among all processes in the system. It should provide a fair
- working environment, suitable for desktop systems.
-
-choice
- prompt "Default I/O scheduler"
- default DEFAULT_AS
- help
- Select the I/O scheduler which will be used by default for all
- block devices.
-
- config DEFAULT_AS
- bool "Anticipatory" if IOSCHED_AS
-
- config DEFAULT_DEADLINE
- bool "Deadline" if IOSCHED_DEADLINE
-
- config DEFAULT_CFQ
- bool "CFQ" if IOSCHED_CFQ
-
- config DEFAULT_NOOP
- bool "No-op"
-
-endchoice
-
-config DEFAULT_IOSCHED
- string
- default "anticipatory" if DEFAULT_AS
- default "deadline" if DEFAULT_DEADLINE
- default "cfq" if DEFAULT_CFQ
- default "noop" if DEFAULT_NOOP
-
-endmenu
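For context: the CONFIG_DEFAULT_IOSCHED string chosen above is matched by name against the registered schedulers when a request queue is initialised, and a boot-time elevator= parameter takes precedence over it. A minimal userspace sketch of that name-based selection, assuming a fixed table of built-in schedulers (pick_elevator() and the table are illustrative, not the kernel's actual code):

    #include <stdio.h>
    #include <string.h>

    /* Schedulers built in via the IOSCHED_* options above. */
    static const char *registered[] = { "noop", "anticipatory", "deadline", "cfq" };

    /* Hypothetical helper: a boot-time override wins, else the Kconfig default. */
    static const char *pick_elevator(const char *boot_param, const char *config_default)
    {
        const char *want = (boot_param && *boot_param) ? boot_param : config_default;
        for (size_t i = 0; i < sizeof(registered) / sizeof(registered[0]); i++)
            if (strcmp(registered[i], want) == 0)
                return registered[i];
        return "noop"; /* fall back if the requested scheduler is not built in */
    }

    int main(void)
    {
        printf("%s\n", pick_elevator("", "anticipatory")); /* -> anticipatory */
        printf("%s\n", pick_elevator("deadline", "cfq"));  /* -> deadline */
        return 0;
    }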
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 1cf09a1c065..3ec1f8df87b 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -4,21 +4,7 @@
# 12 June 2000, Christoph Hellwig <hch@infradead.org>
# Rewritten to use lists instead of if-statements.
#
-# Note : at this point, these files are compiled on all systems.
-# In the future, some of these should be built conditionally.
-#
-
-#
-# NOTE that ll_rw_blk.c must come early in linkage order - it starts the
-# kblockd threads
-#
-
-obj-y := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o
-obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
-obj-$(CONFIG_IOSCHED_AS) += as-iosched.o
-obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
-obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
obj-$(CONFIG_MAC_FLOPPY) += swim3.o
obj-$(CONFIG_BLK_DEV_FD) += floppy.o
obj-$(CONFIG_BLK_DEV_FD98) += floppy98.o
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
deleted file mode 100644
index c6744ff3829..00000000000
--- a/drivers/block/as-iosched.c
+++ /dev/null
@@ -1,1985 +0,0 @@
-/*
- * linux/drivers/block/as-iosched.c
- *
- * Anticipatory & deadline i/o scheduler.
- *
- * Copyright (C) 2002 Jens Axboe <axboe@suse.de>
- * Nick Piggin <piggin@cyberone.com.au>
- *
- */
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/blkdev.h>
-#include <linux/elevator.h>
-#include <linux/bio.h>
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/compiler.h>
-#include <linux/hash.h>
-#include <linux/rbtree.h>
-#include <linux/interrupt.h>
-
-#define REQ_SYNC 1
-#define REQ_ASYNC 0
-
-/*
- * See Documentation/block/as-iosched.txt
- */
-
-/*
- * max time before a read is submitted.
- */
-#define default_read_expire (HZ / 8)
-
-/*
- * ditto for writes, these limits are not hard, even
- * if the disk is capable of satisfying them.
- */
-#define default_write_expire (HZ / 4)
-
-/*
- * read_batch_expire describes how long we will allow a stream of reads to
- * persist before looking to see whether it is time to switch over to writes.
- */
-#define default_read_batch_expire (HZ / 2)
-
-/*
- * write_batch_expire describes how long we want a stream of writes to run for.
- * This is not a hard limit, but a target we set for the auto-tuning logic.
- * See, the problem is: we can send a lot of writes to disk cache / TCQ in
- * a short amount of time...
- */
-#define default_write_batch_expire (HZ / 8)
-
-/*
- * max time we may wait to anticipate a read (default around 6ms)
- */
-#define default_antic_expire ((HZ / 150) ? HZ / 150 : 1)
-
-/*
- * Keep track of up to 20ms thinktimes. We can go as big as we like here,
- * however huge values tend to interfere and not decay fast enough. A program
- * might be in a non-io phase of operation. Waiting on user input for example,
- * or doing a lengthy computation. A small penalty can be justified there, and
- * will still catch out those processes that constantly have large thinktimes.
- */
-#define MAX_THINKTIME (HZ/50UL)
-
-/* Bits in as_io_context.state */
-enum as_io_states {
- AS_TASK_RUNNING=0, /* Process has not exited */
- AS_TASK_IOSTARTED, /* Process has started some IO */
- AS_TASK_IORUNNING, /* Process has completed some IO */
-};
-
-enum anticipation_status {
- ANTIC_OFF=0, /* Not anticipating (normal operation) */
- ANTIC_WAIT_REQ, /* The last read has not yet completed */
- ANTIC_WAIT_NEXT, /* Currently anticipating a request vs
- last read (which has completed) */
- ANTIC_FINISHED, /* Anticipating but have found a candidate
- * or timed out */
-};
-
-struct as_data {
- /*
- * run time data
- */
-
- struct request_queue *q; /* the "owner" queue */
-
- /*
- * requests (as_rq s) are present on both sort_list and fifo_list
- */
- struct rb_root sort_list[2];
- struct list_head fifo_list[2];
-
- struct as_rq *next_arq[2]; /* next in sort order */
- sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */
- struct list_head *hash; /* request hash */
-
- unsigned long exit_prob; /* probability a task will exit while
- being waited on */
- unsigned long new_ttime_total; /* mean thinktime on new proc */
- unsigned long new_ttime_mean;
- u64 new_seek_total; /* mean seek on new proc */
- sector_t new_seek_mean;
-
- unsigned long current_batch_expires;
- unsigned long last_check_fifo[2];
- int changed_batch; /* 1: waiting for old batch to end */
- int new_batch; /* 1: waiting on first read complete */
- int batch_data_dir; /* current batch REQ_SYNC / REQ_ASYNC */
- int write_batch_count; /* max # of reqs in a write batch */
- int current_write_count; /* how many requests left this batch */
- int write_batch_idled; /* has the write batch gone idle? */
- mempool_t *arq_pool;
-
- enum anticipation_status antic_status;
- unsigned long antic_start; /* jiffies: when it started */
- struct timer_list antic_timer; /* anticipatory scheduling timer */
- struct work_struct antic_work; /* Deferred unplugging */
- struct io_context *io_context; /* Identify the expected process */
- int ioc_finished; /* IO associated with io_context is finished */
- int nr_dispatched;
-
- /*
- * settings that change how the i/o scheduler behaves
- */
- unsigned long fifo_expire[2];
- unsigned long batch_expire[2];
- unsigned long antic_expire;
-};
-
-#define list_entry_fifo(ptr) list_entry((ptr), struct as_rq, fifo)
-
-/*
- * per-request data.
- */
-enum arq_state {
- AS_RQ_NEW=0, /* New - not referenced and not on any lists */
- AS_RQ_QUEUED, /* In the request queue. It belongs to the
- scheduler */
- AS_RQ_DISPATCHED, /* On the dispatch list. It belongs to the
- driver now */
- AS_RQ_PRESCHED, /* Debug poisoning for requests being used */
- AS_RQ_REMOVED,
- AS_RQ_MERGED,
- AS_RQ_POSTSCHED, /* when they shouldn't be */
-};
-
-struct as_rq {
- /*
- * rbtree index, key is the starting offset
- */
- struct rb_node rb_node;
- sector_t rb_key;
-
- struct request *request;
-
- struct io_context *io_context; /* The submitting task */
-
- /*
- * request hash, key is the ending offset (for back merge lookup)
- */
- struct list_head hash;
- unsigned int on_hash;
-
- /*
- * expire fifo
- */
- struct list_head fifo;
- unsigned long expires;
-
- unsigned int is_sync;
- enum arq_state state;
-};
-
-#define RQ_DATA(rq) ((struct as_rq *) (rq)->elevator_private)
-
-static kmem_cache_t *arq_pool;
-
-/*
- * IO Context helper functions
- */
-
-/* Called to deallocate the as_io_context */
-static void free_as_io_context(struct as_io_context *aic)
-{
- kfree(aic);
-}
-
-/* Called when the task exits */
-static void exit_as_io_context(struct as_io_context *aic)
-{
- WARN_ON(!test_bit(AS_TASK_RUNNING, &aic->state));
- clear_bit(AS_TASK_RUNNING, &aic->state);
-}
-
-static struct as_io_context *alloc_as_io_context(void)
-{
- struct as_io_context *ret;
-
- ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
- if (ret) {
- ret->dtor = free_as_io_context;
- ret->exit = exit_as_io_context;
- ret->state = 1 << AS_TASK_RUNNING;
- atomic_set(&ret->nr_queued, 0);
- atomic_set(&ret->nr_dispatched, 0);
- spin_lock_init(&ret->lock);
- ret->ttime_total = 0;
- ret->ttime_samples = 0;
- ret->ttime_mean = 0;
- ret->seek_total = 0;
- ret->seek_samples = 0;
- ret->seek_mean = 0;
- }
-
- return ret;
-}
-
-/*
- * If the current task has no AS IO context then create one and initialise it.
- * Then take a ref on the task's io context and return it.
- */
-static struct io_context *as_get_io_context(void)
-{
- struct io_context *ioc = get_io_context(GFP_ATOMIC);
- if (ioc && !ioc->aic) {
- ioc->aic = alloc_as_io_context();
- if (!ioc->aic) {
- put_io_context(ioc);
- ioc = NULL;
- }
- }
- return ioc;
-}
-
-static void as_put_io_context(struct as_rq *arq)
-{
- struct as_io_context *aic;
-
- if (unlikely(!arq->io_context))
- return;
-
- aic = arq->io_context->aic;
-
- if (arq->is_sync == REQ_SYNC && aic) {
- spin_lock(&aic->lock);
- set_bit(AS_TASK_IORUNNING, &aic->state);
- aic->last_end_request = jiffies;
- spin_unlock(&aic->lock);
- }
-
- put_io_context(arq->io_context);
-}
-
-/*
- * the back merge hash support functions
- */
-static const int as_hash_shift = 6;
-#define AS_HASH_BLOCK(sec) ((sec) >> 3)
-#define AS_HASH_FN(sec) (hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
-#define AS_HASH_ENTRIES (1 << as_hash_shift)
-#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr) list_entry((ptr), struct as_rq, hash)
-
-static inline void __as_del_arq_hash(struct as_rq *arq)
-{
- arq->on_hash = 0;
- list_del_init(&arq->hash);
-}
-
-static inline void as_del_arq_hash(struct as_rq *arq)
-{
- if (arq->on_hash)
- __as_del_arq_hash(arq);
-}
-
-static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
-{
- struct request *rq = arq->request;
-
- BUG_ON(arq->on_hash);
-
- arq->on_hash = 1;
- list_add(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
-}
-
-/*
- * move hot entry to front of chain
- */
-static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq)
-{
- struct request *rq = arq->request;
- struct list_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
-
- if (!arq->on_hash) {
- WARN_ON(1);
- return;
- }
-
- if (arq->hash.prev != head) {
- list_del(&arq->hash);
- list_add(&arq->hash, head);
- }
-}
-
-static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
-{
- struct list_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
- struct list_head *entry, *next = hash_list->next;
-
- while ((entry = next) != hash_list) {
- struct as_rq *arq = list_entry_hash(entry);
- struct request *__rq = arq->request;
-
- next = entry->next;
-
- BUG_ON(!arq->on_hash);
-
- if (!rq_mergeable(__rq)) {
- as_del_arq_hash(arq);
- continue;
- }
-
- if (rq_hash_key(__rq) == offset)
- return __rq;
- }
-
- return NULL;
-}
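The hash above exists solely for back-merge lookups: a request covering [sector, sector + nr_sectors) is hashed on its ending offset, so an incoming request that begins exactly where a queued one ends finds its merge partner in O(1). A standalone sketch of the key idea, with a hypothetical struct req standing in for struct request:

    #include <stdio.h>

    /* Hypothetical stand-in for struct request's start/length fields. */
    struct req { unsigned long long sector, nr_sectors; };

    /* Same idea as rq_hash_key() above: hash a request on its END offset. */
    static unsigned long long hash_key(const struct req *rq)
    {
        return rq->sector + rq->nr_sectors;
    }

    int main(void)
    {
        struct req queued   = { .sector = 1000, .nr_sectors = 8 };
        struct req incoming = { .sector = 1008, .nr_sectors = 8 };

        /* The incoming request starts where the queued one ends: back merge. */
        if (hash_key(&queued) == incoming.sector)
            printf("back merge: [1000,1008) grows to [1000,1016)\n");
        return 0;
    }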
-
-/*
- * rb tree support functions
- */
-#define RB_NONE (2)
-#define RB_EMPTY(root) ((root)->rb_node == NULL)
-#define ON_RB(node) ((node)->rb_color != RB_NONE)
-#define RB_CLEAR(node) ((node)->rb_color = RB_NONE)
-#define rb_entry_arq(node) rb_entry((node), struct as_rq, rb_node)
-#define ARQ_RB_ROOT(ad, arq) (&(ad)->sort_list[(arq)->is_sync])
-#define rq_rb_key(rq) (rq)->sector
-
-/*
- * as_find_first_arq finds the first (lowest sector numbered) request
- * for the specified data_dir. Used to sweep back to the start of the disk
- * (1-way elevator) after we process the last (highest sector) request.
- */
-static struct as_rq *as_find_first_arq(struct as_data *ad, int data_dir)
-{
- struct rb_node *n = ad->sort_list[data_dir].rb_node;
-
- if (n == NULL)
- return NULL;
-
- for (;;) {
- if (n->rb_left == NULL)
- return rb_entry_arq(n);
-
- n = n->rb_left;
- }
-}
-
-/*
- * Add the request to the rb tree if it is unique. If there is an alias (an
- * existing request against the same sector), which can happen when using
- * direct IO, then return the alias.
- */
-static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
-{
- struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node;
- struct rb_node *parent = NULL;
- struct as_rq *__arq;
- struct request *rq = arq->request;
-
- arq->rb_key = rq_rb_key(rq);
-
- while (*p) {
- parent = *p;
- __arq = rb_entry_arq(parent);
-
- if (arq->rb_key < __arq->rb_key)
- p = &(*p)->rb_left;
- else if (arq->rb_key > __arq->rb_key)
- p = &(*p)->rb_right;
- else
- return __arq;
- }
-
- rb_link_node(&arq->rb_node, parent, p);
- rb_insert_color(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
-
- return NULL;
-}
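Insertion doubles as alias detection here: the tree is keyed on the starting sector, and when the key is already present (possible with direct IO) the existing request is handed back instead of inserting a duplicate. The same insert-or-return-alias pattern on a minimal binary search tree, purely for illustration (all names hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { unsigned long long key; struct node *left, *right; };

    /* Walk down to the insertion point; an equal key is reported as an alias. */
    static struct node *insert_or_alias(struct node **root, unsigned long long key)
    {
        struct node **p = root;
        while (*p) {
            if (key < (*p)->key)
                p = &(*p)->left;
            else if (key > (*p)->key)
                p = &(*p)->right;
            else
                return *p;  /* alias: same start sector already queued */
        }
        *p = calloc(1, sizeof(**p));
        if (!*p)
            exit(1);        /* sketch: no graceful OOM handling */
        (*p)->key = key;
        return NULL;        /* inserted, no alias */
    }

    int main(void)
    {
        struct node *root = NULL;
        insert_or_alias(&root, 1000);
        printf("alias %s\n", insert_or_alias(&root, 1000) ? "found" : "not found");
        return 0;
    }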
-
-static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
-{
- if (!ON_RB(&arq->rb_node)) {
- WARN_ON(1);
- return;
- }
-
- rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
- RB_CLEAR(&arq->rb_node);
-}
-
-static struct request *
-as_find_arq_rb(struct as_data *ad, sector_t sector, int data_dir)
-{
- struct rb_node *n = ad->sort_list[data_dir].rb_node;
- struct as_rq *arq;
-
- while (n) {
- arq = rb_entry_arq(n);
-
- if (sector < arq->rb_key)
- n = n->rb_left;
- else if (sector > arq->rb_key)
- n = n->rb_right;
- else
- return arq->request;
- }
-
- return NULL;
-}
-
-/*
- * IO Scheduler proper
- */
-
-#define MAXBACK (1024 * 1024) /*
- * Maximum distance the disk will go backward
- * for a request.
- */
-
-#define BACK_PENALTY 2
-
-/*
- * as_choose_req selects the preferred one of two requests of the same data_dir
- * ignoring time (e.g. timeouts), which is the job of as_dispatch_request
- */
-static struct as_rq *
-as_choose_req(struct as_data *ad, struct as_rq *arq1, struct as_rq *arq2)
-{
- int data_dir;
- sector_t last, s1, s2, d1, d2;
- int r1_wrap=0, r2_wrap=0; /* requests are behind the disk head */
- const sector_t maxback = MAXBACK;
-
- if (arq1 == NULL || arq1 == arq2)
- return arq2;
- if (arq2 == NULL)
- return arq1;
-
- data_dir = arq1->is_sync;
-
- last = ad->last_sector[data_dir];
- s1 = arq1->request->sector;
- s2 = arq2->request->sector;
-
- BUG_ON(data_dir != arq2->is_sync);
-
- /*
- * Strict one way elevator _except_ in the case where we allow
- * short backward seeks which are biased as twice the cost of a
- * similar forward seek.
- */
- if (s1 >= last)
- d1 = s1 - last;
- else if (s1+maxback >= last)
- d1 = (last - s1)*BACK_PENALTY;
- else {
- r1_wrap = 1;
- d1 = 0; /* shut up, gcc */
- }
-
- if (s2 >= last)
- d2 = s2 - last;
- else if (s2+maxback >= last)
- d2 = (last - s2)*BACK_PENALTY;
- else {
- r2_wrap = 1;
- d2 = 0;
- }
-
- /* Found required data */
- if (!r1_wrap && r2_wrap)
- return arq1;
- else if (!r2_wrap && r1_wrap)
- return arq2;
- else if (r1_wrap && r2_wrap) {
- /* both behind the head */
- if (s1 <= s2)
- return arq1;
- else
- return arq2;
- }
-
- /* Both requests in front of the head */
- if (d1 < d2)
- return arq1;
- else if (d2 < d1)
- return arq2;
- else {
- if (s1 >= s2)
- return arq1;
- else
- return arq2;
- }
-}
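The effective-distance arithmetic above can be checked in isolation: a forward seek costs its real distance, a backward seek within MAXBACK costs BACK_PENALTY times the distance, and anything further behind the head "wraps" and loses to any non-wrapped request. A runnable sketch of the same math (effective_distance() is an illustrative name):

    #include <stdio.h>

    #define MAXBACK (1024 * 1024)
    #define BACK_PENALTY 2

    static long long effective_distance(unsigned long long last,
                                        unsigned long long s, int *wrap)
    {
        *wrap = 0;
        if (s >= last)
            return s - last;                  /* forward seek */
        if (s + MAXBACK >= last)
            return (last - s) * BACK_PENALTY; /* short backward seek */
        *wrap = 1;                            /* far behind the head */
        return 0;
    }

    int main(void)
    {
        int w1, w2;
        long long d1 = effective_distance(5000, 5100, &w1); /* 100 forward   */
        long long d2 = effective_distance(5000, 4960, &w2); /* 40 back -> 80 */
        printf("forward=%lld backward=%lld -> pick the %s request\n",
               d1, d2, d2 < d1 ? "backward" : "forward");
        return 0;
    }

Even with the 2x penalty, the short backward seek (cost 80) beats the longer forward one (cost 100), which is exactly the bias the comment above describes.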
-
-/*
- * as_find_next_arq finds the next request after @last in elevator order.
- * This, together with as_choose_req, forms the basis for how the scheduler
- * chooses which request to process next. Anticipation works on top of this.
- */
-static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *last)
-{
- const int data_dir = last->is_sync;
- struct as_rq *ret;
- struct rb_node *rbnext = rb_next(&last->rb_node);
- struct rb_node *rbprev = rb_prev(&last->rb_node);
- struct as_rq *arq_next, *arq_prev;
-
- BUG_ON(!ON_RB(&last->rb_node));
-
- if (rbprev)
- arq_prev = rb_entry_arq(rbprev);
- else
- arq_prev = NULL;
-
- if (rbnext)
- arq_next = rb_entry_arq(rbnext);
- else {
- arq_next = as_find_first_arq(ad, data_dir);
- if (arq_next == last)
- arq_next = NULL;
- }
-
- ret = as_choose_req(ad, arq_next, arq_prev);
-
- return ret;
-}
-
-/*
- * anticipatory scheduling functions follow
- */
-
-/*
- * as_antic_expired tells us when we have anticipated too long.
- * The funny "absolute difference" math on the elapsed time is to handle
- * jiffy wraps, and disks which have been idle for 0x80000000 jiffies.
- */
-static int as_antic_expired(struct as_data *ad)
-{
- long delta_jif;
-
- delta_jif = jiffies - ad->antic_start;
- if (unlikely(delta_jif < 0))
- delta_jif = -delta_jif;
- if (delta_jif < ad->antic_expire)
- return 0;
-
- return 1;
-}
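The wrap handling is easiest to see with 32-bit jiffies: unsigned subtraction followed by a signed absolute value yields the small, correct elapsed count even when the counter rolls over mid-anticipation. A minimal userspace demonstration of the same trick (not kernel code):

    #include <stdio.h>

    int main(void)
    {
        /* 32-bit counter for the demo: antic_start just before the wrap. */
        unsigned int antic_start = 0xfffffff0u;
        unsigned int now = 0x00000010u;

        int delta = (int)(now - antic_start); /* unsigned wrap gives 0x20 */
        if (delta < 0)
            delta = -delta;
        printf("elapsed = %d jiffies\n", delta); /* 32, not ~4 billion */
        return 0;
    }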
-
-/*
- * as_antic_waitnext starts anticipating that a nice request will soon be
- * submitted. See also as_antic_waitreq
- */
-static void as_antic_waitnext(struct as_data *ad)
-{
- unsigned long timeout;
-
- BUG_ON(ad->antic_status != ANTIC_OFF
- && ad->antic_status != ANTIC_WAIT_REQ);
-
- timeout = ad->antic_start + ad->antic_expire;
-
- mod_timer(&ad->antic_timer, timeout);
-
- ad->antic_status = ANTIC_WAIT_NEXT;
-}
-
-/*
- * as_antic_waitreq starts anticipating. We don't start timing the anticipation
- * until the request that we're anticipating on has finished. This means we
- * are, hopefully, timing from the moment the candidate process wakes up.
- */
-static void as_antic_waitreq(struct as_data *ad)
-{
- BUG_ON(ad->antic_status == ANTIC_FINISHED);
- if (ad->antic_status == ANTIC_OFF) {
- if (!ad->io_context || ad->ioc_finished)
- as_antic_waitnext(ad);
- else
- ad->antic_status = ANTIC_WAIT_REQ;
- }
-}
-
-/*
- * This is called directly by the functions in this file to stop anticipation.
- * We kill the timer and schedule a call to the request_fn asap.
- */
-static void as_antic_stop(struct as_data *ad)
-{
- int status = ad->antic_status;
-
- if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
- if (status == ANTIC_WAIT_NEXT)
- del_timer(&ad->antic_timer);
- ad->antic_status = ANTIC_FINISHED;
- /* see as_work_handler */
- kblockd_schedule_work(&ad->antic_work);
- }
-}
-
-/*
- * as_antic_timeout is the timer function set by as_antic_waitnext.
- */
-static void as_antic_timeout(unsigned long data)
-{
- struct request_queue *q = (struct request_queue *)data;
- struct as_data *ad = q->elevator->elevator_data;
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (ad->antic_status == ANTIC_WAIT_REQ
- || ad->antic_status == ANTIC_WAIT_NEXT) {
- struct as_io_context *aic = ad->io_context->aic;
-
- ad->antic_status = ANTIC_FINISHED;
- kblockd_schedule_work(&ad->antic_work);
-
- if (aic->ttime_samples == 0) {
- /* process anticipated on has exited or timed out */
- ad->exit_prob = (7*ad->exit_prob + 256)/8;
- }
- }
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-/*
- * as_close_req decides if one request is considered "close" to the
- * previous one issued.
- */
-static int as_close_req(struct as_data *ad, struct as_rq *arq)
-{
- unsigned long delay; /* milliseconds */
- sector_t last = ad->last_sector[ad->batch_data_dir];
- sector_t next = arq->request->sector;
- sector_t delta; /* acceptable close offset (in sectors) */
-
- if (ad->antic_status == ANTIC_OFF || !ad->ioc_finished)
- delay = 0;
- else
- delay = ((jiffies - ad->antic_start) * 1000) / HZ;
-
- if (delay <= 1)
- delta = 64;
- else if (delay <= 20 && delay <= ad->antic_expire)
- delta = 64 << (delay-1);
- else
- return 1;
-
- return (last - (delta>>1) <= next) && (next <= last + delta);
-}
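In other words, the "close" window starts at 64 sectors and doubles for each elapsed millisecond of anticipation (while the delay stays within the limits tested above); a request landing in [last - delta/2, last + delta] counts as close. A quick sketch printing how the window grows:

    #include <stdio.h>

    int main(void)
    {
        for (unsigned long delay = 0; delay <= 5; delay++) {
            unsigned long long delta = (delay <= 1) ? 64 : 64ULL << (delay - 1);
            printf("delay=%lums window=[last-%llu, last+%llu] sectors\n",
                   delay, delta >> 1, delta);
        }
        return 0;
    }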
-
-/*
- * as_can_break_anticipation returns true if we have been anticipating this
- * request.
- *
- * It also returns true if the process against which we are anticipating
- * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to
- * dispatch it ASAP, because we know that application will not be submitting
- * any new reads.
- *
- * If the task which has submitted the request has exited, break anticipation.
- *
- * If this task has queued some other IO, do not enter anticipation.
- */
-static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
-{
- struct io_context *ioc;
- struct as_io_context *aic;
- sector_t s;
-
- ioc = ad->io_context;
- BUG_ON(!ioc);
-
- if (arq && ioc == arq->io_context) {
- /* request from same process */
- return 1;
- }
-
- if (ad->ioc_finished && as_antic_expired(ad)) {
- /*
- * In this situation status should really be FINISHED,
- * however the timer hasn't had the chance to run yet.
- */
- return 1;
- }
-
- aic = ioc->aic;
- if (!aic)
- return 0;
-
- if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
- /* process anticipated on has exited */
- if (aic->ttime_samples == 0)
- ad->exit_prob = (7*ad->exit_prob + 256)/8;
- return 1;
- }
-
- if (atomic_read(&aic->nr_queued) > 0) {
- /* process has more requests queued */
- return 1;
- }
-
- if (atomic_read(&aic->nr_dispatched) > 0) {
- /* process has more requests dispatched */
- return 1;
- }
-
- if (arq && arq->is_sync == REQ_SYNC && as_close_req(ad, arq)) {
- /*
- * Found a close request that is not one of ours.
- *
- * This makes close requests from another process reset
- * our thinktime delay. This is generally useful when there are
- * two or more cooperating processes working in the same
- * area.
- */
- spin_lock(&aic->lock);
- aic->last_end_request = jiffies;
- spin_unlock(&aic->lock);
- return 1;
- }
-
- if (aic->ttime_samples == 0) {
- if (ad->new_ttime_mean > ad->antic_expire)
- return 1;
- if (ad->exit_prob > 128)
- return 1;
- } else if (aic->ttime_mean > ad->antic_expire) {
- /* the process thinks too much between requests */
- return 1;
- }
-
- if (!arq)
- return 0;
-
- if (ad->last_sector[REQ_SYNC] < arq->request->sector)
- s = arq->request->sector - ad->last_sector[REQ_SYNC];
- else
- s = ad->last_sector[REQ_SYNC] - arq->request->sector;
-
- if (aic->seek_samples == 0) {
- /*
- * Process has just started IO. Use past statistics to
- * gauge the likelihood of success.
- */
- if (ad->new_seek_mean > s) {
- /* this request is better than what we're expecting */
- return 1;
- }
-
- } else {
- if (aic->seek_mean > s) {
- /* this request is better than what we're expecting */
- return 1;
- }
- }
-
- return 0;
-}
-
-/*
- * as_can_anticipate indicates whether we should either run arq
- * or keep anticipating a better request.
- */
-static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
-{
- if (!ad->io_context)
- /*
- * Last request submitted was a write
- */
- return 0;
-
- if (ad->antic_status == ANTIC_FINISHED)
- /*
- * Don't restart if we have just finished. Run the next request
- */
- return 0;
-
- if (as_can_break_anticipation(ad, arq))
- /*
- * This request is a good candidate. Don't keep anticipating,
- * run it.
- */
- return 0;
-
- /*
- * OK, from here we haven't finished, and don't have a decent request!
- * The status is either ANTIC_OFF, so start waiting; ANTIC_WAIT_REQ, so
- * continue waiting for the request to finish; or ANTIC_WAIT_NEXT, so
- * continue waiting for an acceptable request.
- *
- */
-
- return 1;
-}
-
-static void as_update_thinktime(struct as_data *ad, struct as_io_context *aic, unsigned long ttime)
-{
- /* fixed point: 1.0 == 1<<8 */
- if (aic->ttime_samples == 0) {
- ad->new_ttime_total = (7*ad->new_ttime_total + 256*ttime) / 8;
- ad->new_ttime_mean = ad->new_ttime_total / 256;
-
- ad->exit_prob = (7*ad->exit_prob)/8;
- }
- aic->ttime_samples = (7*aic->ttime_samples + 256) / 8;
- aic->ttime_total = (7*aic->ttime_total + 256*ttime) / 8;
- aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples;
-}
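The updates above are exponentially decaying averages in fixed point, with 1.0 represented as 1 << 8 = 256: each new sample enters with weight 1/8 against 7/8 of the history, and the +128 rounds the mean to nearest. The same arithmetic extracted into a runnable userspace sketch (update_mean() is an illustrative name):

    #include <stdio.h>

    static unsigned long samples, total;

    /* Decaying average as in as_update_thinktime(): fixed point, 1.0 == 256. */
    static unsigned long update_mean(unsigned long ttime)
    {
        samples = (7 * samples + 256) / 8;
        total   = (7 * total + 256 * ttime) / 8;
        return (total + 128) / samples; /* +128 rounds to nearest */
    }

    int main(void)
    {
        unsigned long ttimes[] = { 10, 10, 10, 80, 10 };
        for (int i = 0; i < 5; i++)
            printf("sample=%lu mean=%lu\n", ttimes[i], update_mean(ttimes[i]));
        return 0;
    }

A single large sample (the 80 above) pulls the mean only part of the way toward it, and the history decays back toward the typical value over subsequent samples.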
-
-static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic, sector_t sdist)
-{
- u64 total;
-
- if (aic->seek_samples == 0) {
- ad->new_seek_total = (7*ad->new_seek_total + 256*(u64)sdist)/8;
- ad->new_seek_mean = ad->new_seek_total / 256;
- }
-
- /*
- * Don't allow the seek distance to get too large from the
- * odd fragment, pagein, etc
- */
- if (aic->seek_samples <= 60) /* second&third seek */
- sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*1024);
- else
- sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*64);
-
- aic->seek_samples = (7*aic->seek_samples + 256) / 8;
- aic->seek_total = (7*aic->seek_total + (u64)256*sdist) / 8;
- total = aic->seek_total + (aic->seek_samples/2);
- do_div(total, aic->seek_samples);
- aic->seek_mean = (sector_t)total;
-}
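Before the decaying average is updated, the new distance is clamped so a single huge seek cannot drag the mean arbitrarily upward; the cap is four times the current mean plus a slack term that shrinks once enough samples have been collected. The clamp in isolation, with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long seek_mean = 1024;            /* current mean, sectors */
        unsigned long long sdist = 10ULL * 1024 * 1024; /* one huge outlier seek */
        unsigned long long cap = seek_mean * 4 + 2 * 1024 * 64; /* warmed-up cap */

        if (sdist > cap)
            sdist = cap;
        printf("clamped outlier to %llu sectors\n", sdist); /* 135168 */
        return 0;
    }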
-
-/*
- * as_update_iohist keeps a decaying histogram of IO thinktimes, and
- * updates @aic->ttime_mean based on that. It is called when a new
- * request is queued.
- */
-static void as_update_iohist(struct as_data *ad, struct as_io_context *aic, struct request *rq)
-{
- struct as_rq *arq = RQ_DATA(rq);
- int data_dir = arq->is_sync;
- unsigned long thinktime;
- sector_t seek_dist;
-
- if (aic == NULL)
- return;
-
- if (data_dir == REQ_SYNC) {
- unsigned long in_flight = atomic_read(&aic->nr_queued)
- + atomic_read(&aic->nr_dispatched);
- spin_lock(&aic->lock);
- if (test_bit(AS_TASK_IORUNNING, &aic->state) ||
- test_bit(AS_TASK_IOSTARTED, &aic->state)) {
- /* Calculate read -> read thinktime */
- if (test_bit(AS_TASK_IORUNNING, &aic->state)
- && in_flight == 0) {
- thinktime = jiffies - aic->last_end_request;
- thinktime = min(thinktime, MAX_THINKTIME-1);
- } else
- thinktime = 0;
- as_update_thinktime(ad, aic, thinktime);
-
- /* Calculate read -> read seek distance */
- if (aic->last_request_pos < rq->sector)
- seek_dist = rq->sector - aic->last_request_pos;
- else
- seek_dist = aic->last_request_pos - rq->sector;
- as_update_seekdist(ad, aic, seek_dist);
- }
- aic->last_request_pos = rq->sector + rq->nr_sectors;
- set_bit(AS_TASK_IOSTARTED, &aic->state);
- spin_unlock(&aic->lock);
- }
-}
-
-/*
- * as_update_arq must be called whenever a request (arq) is added to
- * the sort_list. This function keeps caches up to date, and checks if the
- * request might be one we are "anticipating"
- */
-static void as_update_arq(struct as_data *ad, struct as_rq *arq)
-{
- const int data_dir = arq->is_sync;
-
- /* keep the next_arq cache up to date */
- ad->next_arq[data_dir] = as_choose_req(ad, arq, ad->next_arq[data_dir]);
-
- /*
- * have we been anticipating this request?
- * or does it come from the same process as the one we are anticipating
- * for?
- */
- if (ad->antic_status == ANTIC_WAIT_REQ
- || ad->antic_status == ANTIC_WAIT_NEXT) {
- if (as_can_break_anticipation(ad, arq))
- as_antic_stop(ad);
- }
-}
-
-/*
- * Gathers timings and resizes the write batch automatically
- */
-static void update_write_batch(struct as_data *ad)
-{
- unsigned long batch = ad->batch_expire[REQ_ASYNC];
- long write_time;
-
- write_time = (jiffies - ad->current_batch_expires) + batch;
- if (write_time < 0)
- write_time = 0;
-
- if (write_time > batch && !ad->write_batch_idled) {
- if (write_time > batch * 3)
- ad->write_batch_count /= 2;
- else
- ad->write_batch_count--;
- } else if (write_time < batch && ad->current_write_count == 0) {
- if (batch > write_time * 3)
- ad->write_batch_count *= 2;