Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/pata_octeon_cf.c              2
-rw-r--r--  drivers/base/devtmpfs.c                  18
-rw-r--r--  drivers/ide/hpt366.c                     14
-rw-r--r--  drivers/ide/ide-dma.c                    11
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_fs.c   14
-rw-r--r--  drivers/infiniband/hw/qib/qib_fs.c       14
-rw-r--r--  drivers/isdn/capi/capifs.c                8
-rw-r--r--  drivers/md/bitmap.c                      30
-rw-r--r--  drivers/md/bitmap.h                       4
-rw-r--r--  drivers/md/faulty.c                       2
-rw-r--r--  drivers/md/md.c                         162
-rw-r--r--  drivers/md/md.h                           8
-rw-r--r--  drivers/md/raid1.c                      224
-rw-r--r--  drivers/md/raid1.h                        2
-rw-r--r--  drivers/md/raid10.c                      42
-rw-r--r--  drivers/md/raid5.c                        6
-rw-r--r--  drivers/misc/ibmasm/ibmasmfs.c            9
-rw-r--r--  drivers/mtd/mtdchar.c                    10
-rw-r--r--  drivers/mtd/mtdsuper.c                   54
-rw-r--r--  drivers/oprofile/oprofilefs.c             8
-rw-r--r--  drivers/s390/block/dasd_eckd.c           69
-rw-r--r--  drivers/s390/block/dasd_eckd.h            1
-rw-r--r--  drivers/s390/char/tape_core.c             9
-rw-r--r--  drivers/s390/char/tape_std.c              4
-rw-r--r--  drivers/staging/autofs/init.c             8
-rw-r--r--  drivers/staging/pohmelfs/inode.c          9
-rw-r--r--  drivers/staging/smbfs/inode.c             8
-rw-r--r--  drivers/usb/core/inode.c                  8
-rw-r--r--  drivers/usb/gadget/f_fs.c                14
-rw-r--r--  drivers/usb/gadget/inode.c               10
-rw-r--r--  drivers/usb/host/Kconfig                 30
-rw-r--r--  drivers/usb/host/Makefile                 1
-rw-r--r--  drivers/usb/host/ehci-hcd.c               5
-rw-r--r--  drivers/usb/host/ehci-octeon.c          207
-rw-r--r--  drivers/usb/host/octeon2-common.c       185
-rw-r--r--  drivers/usb/host/ohci-hcd.c               5
-rw-r--r--  drivers/usb/host/ohci-octeon.c          214
-rw-r--r--  drivers/watchdog/octeon-wdt-main.c        4
-rw-r--r--  drivers/xen/xenfs/super.c                 8
39 files changed, 1103 insertions, 338 deletions
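
Note: many of the filesystem hunks below are mechanical conversions from the old .get_sb()/get_sb_single() VFS interface to the .mount()/mount_single() interface, which returns the root dentry (or an ERR_PTR) instead of an int. The sketch below only illustrates that converted pattern and is not part of the patch; "examplefs", examplefs_mount and example_fill_super are hypothetical names.

static struct dentry *examplefs_mount(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	/* mount_single() returns the root dentry on success or an ERR_PTR() on failure */
	return mount_single(fs_type, flags, data, example_fill_super);
}

static struct file_system_type examplefs_type = {
	.owner   = THIS_MODULE,
	.name    = "examplefs",
	.mount   = examplefs_mount,
	.kill_sb = kill_litter_super,
};
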
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 06ddd91ffed..74b82981789 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -60,7 +60,7 @@ static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
* Compute # of eclock periods to get desired duration in
* nanoseconds.
*/
- val = DIV_ROUND_UP(nsecs * (octeon_get_clock_rate() / 1000000),
+ val = DIV_ROUND_UP(nsecs * (octeon_get_io_clock_rate() / 1000000),
1000 * tim_mult);
return val;
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index af0600143d1..82bbb5967aa 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -29,33 +29,33 @@
static struct vfsmount *dev_mnt;
#if defined CONFIG_DEVTMPFS_MOUNT
-static int dev_mount = 1;
+static int mount_dev = 1;
#else
-static int dev_mount;
+static int mount_dev;
#endif
static DEFINE_MUTEX(dirlock);
static int __init mount_param(char *str)
{
- dev_mount = simple_strtoul(str, NULL, 0);
+ mount_dev = simple_strtoul(str, NULL, 0);
return 1;
}
__setup("devtmpfs.mount=", mount_param);
-static int dev_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data, struct vfsmount *mnt)
+static struct dentry *dev_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
{
#ifdef CONFIG_TMPFS
- return get_sb_single(fs_type, flags, data, shmem_fill_super, mnt);
+ return mount_single(fs_type, flags, data, shmem_fill_super);
#else
- return get_sb_single(fs_type, flags, data, ramfs_fill_super, mnt);
+ return mount_single(fs_type, flags, data, ramfs_fill_super);
#endif
}
static struct file_system_type dev_fs_type = {
.name = "devtmpfs",
- .get_sb = dev_get_sb,
+ .mount = dev_mount,
.kill_sb = kill_litter_super,
};
@@ -351,7 +351,7 @@ int devtmpfs_mount(const char *mntdir)
{
int err;
- if (!dev_mount)
+ if (!mount_dev)
return 0;
if (!dev_mnt)
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index 97d98fbf584..58c51cddc10 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -838,7 +838,7 @@ static void hpt3xxn_set_clock(ide_hwif_t *hwif, u8 mode)
static void hpt3xxn_rw_disk(ide_drive_t *drive, struct request *rq)
{
- hpt3xxn_set_clock(drive->hwif, rq_data_dir(rq) ? 0x23 : 0x21);
+ hpt3xxn_set_clock(drive->hwif, rq_data_dir(rq) ? 0x21 : 0x23);
}
/**
@@ -1173,8 +1173,9 @@ static u8 hpt3xx_cable_detect(ide_hwif_t *hwif)
u16 mcr;
pci_read_config_word(dev, mcr_addr, &mcr);
- pci_write_config_word(dev, mcr_addr, (mcr | 0x8000));
- /* now read cable id register */
+ pci_write_config_word(dev, mcr_addr, mcr | 0x8000);
+ /* Debounce, then read cable ID register */
+ udelay(10);
pci_read_config_byte(dev, 0x5a, &scr1);
pci_write_config_word(dev, mcr_addr, mcr);
} else if (chip_type >= HPT370) {
@@ -1185,10 +1186,11 @@ static u8 hpt3xx_cable_detect(ide_hwif_t *hwif)
u8 scr2 = 0;
pci_read_config_byte(dev, 0x5b, &scr2);
- pci_write_config_byte(dev, 0x5b, (scr2 & ~1));
- /* now read cable id register */
+ pci_write_config_byte(dev, 0x5b, scr2 & ~1);
+ /* Debounce, then read cable ID register */
+ udelay(10);
pci_read_config_byte(dev, 0x5a, &scr1);
- pci_write_config_byte(dev, 0x5b, scr2);
+ pci_write_config_byte(dev, 0x5b, scr2);
} else
pci_read_config_byte(dev, 0x5a, &scr1);
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 06b14bc9a1d..d4136908f91 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -449,7 +449,6 @@ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
ide_hwif_t *hwif = drive->hwif;
const struct ide_dma_ops *dma_ops = hwif->dma_ops;
struct ide_cmd *cmd = &hwif->cmd;
- struct request *rq;
ide_startstop_t ret = ide_stopped;
/*
@@ -487,14 +486,10 @@ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
ide_dma_off_quietly(drive);
/*
- * un-busy drive etc and make sure request is sane
+ * make sure request is sane
*/
- rq = hwif->rq;
- if (rq) {
- hwif->rq = NULL;
- rq->errors = 0;
- ide_requeue_and_plug(drive, rq);
- }
+ if (hwif->rq)
+ hwif->rq->errors = 0;
return ret;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 12d5bf76302..8c8afc716b9 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -362,13 +362,13 @@ bail:
return ret;
}
-static int ipathfs_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data, struct vfsmount *mnt)
+static struct dentry *ipathfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
{
- int ret = get_sb_single(fs_type, flags, data,
- ipathfs_fill_super, mnt);
- if (ret >= 0)
- ipath_super = mnt->mnt_sb;
+ struct dentry *ret;
+ ret = mount_single(fs_type, flags, data, ipathfs_fill_super);
+ if (!IS_ERR(ret))
+ ipath_super = ret->d_sb;
return ret;
}
@@ -411,7 +411,7 @@ bail:
static struct file_system_type ipathfs_fs_type = {
.owner = THIS_MODULE,
.name = "ipathfs",
- .get_sb = ipathfs_get_sb,
+ .mount = ipathfs_mount,
.kill_sb = ipathfs_kill_super,
};
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 7e433d75c77..f99bddc0171 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -555,13 +555,13 @@ bail:
return ret;
}
-static int qibfs_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data, struct vfsmount *mnt)
+static struct dentry *qibfs_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
{
- int ret = get_sb_single(fs_type, flags, data,
- qibfs_fill_super, mnt);
- if (ret >= 0)
- qib_super = mnt->mnt_sb;
+ struct dentry *ret;
+ ret = mount_single(fs_type, flags, data, qibfs_fill_super);
+ if (!IS_ERR(ret))
+ qib_super = ret->d_sb;
return ret;
}
@@ -603,7 +603,7 @@ int qibfs_remove(struct qib_devdata *dd)
static struct file_system_type qibfs_fs_type = {
.owner = THIS_MODULE,
.name = "ipathfs",
- .get_sb = qibfs_get_sb,
+ .mount = qibfs_mount,
.kill_sb = qibfs_kill_super,
};
diff --git a/drivers/isdn/capi/capifs.c b/drivers/isdn/capi/capifs.c
index 2b83850997c..b4faed7fe0d 100644
--- a/drivers/isdn/capi/capifs.c
+++ b/drivers/isdn/capi/capifs.c
@@ -125,16 +125,16 @@ fail:
return -ENOMEM;
}
-static int capifs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data, struct vfsmount *mnt)
+static struct dentry *capifs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
{
- return get_sb_single(fs_type, flags, data, capifs_fill_super, mnt);
+ return mount_single(fs_type, flags, data, capifs_fill_super);
}
static struct file_system_type capifs_fs_type = {
.owner = THIS_MODULE,
.name = "capifs",
- .get_sb = capifs_get_sb,
+ .mount = capifs_mount,
.kill_sb = kill_anon_super,
};
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index e4fb58db545..5a1ffe3527a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -212,7 +212,7 @@ static struct page *read_sb_page(mddev_t *mddev, loff_t offset,
target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
- if (sync_page_io(rdev->bdev, target,
+ if (sync_page_io(rdev, target,
roundup(size, bdev_logical_block_size(rdev->bdev)),
page, READ)) {
page->index = index;
@@ -343,7 +343,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
atomic_inc(&bitmap->pending_writes);
set_buffer_locked(bh);
set_buffer_mapped(bh);
- submit_bh(WRITE, bh);
+ submit_bh(WRITE | REQ_UNPLUG | REQ_SYNC, bh);
bh = bh->b_this_page;
}
@@ -1101,7 +1101,7 @@ static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
bitmap_checkfree(bitmap, page);
}
static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
- sector_t offset, int *blocks,
+ sector_t offset, sector_t *blocks,
int create);
/*
@@ -1115,7 +1115,7 @@ void bitmap_daemon_work(mddev_t *mddev)
unsigned long j;
unsigned long flags;
struct page *page = NULL, *lastpage = NULL;
- int blocks;
+ sector_t blocks;
void *paddr;
struct dm_dirty_log *log = mddev->bitmap_info.log;
@@ -1258,7 +1258,7 @@ void bitmap_daemon_work(mddev_t *mddev)
}
static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
- sector_t offset, int *blocks,
+ sector_t offset, sector_t *blocks,
int create)
__releases(bitmap->lock)
__acquires(bitmap->lock)
@@ -1316,7 +1316,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
}
while (sectors) {
- int blocks;
+ sector_t blocks;
bitmap_counter_t *bmc;
spin_lock_irq(&bitmap->lock);
@@ -1381,7 +1381,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
success = 0;
while (sectors) {
- int blocks;
+ sector_t blocks;
unsigned long flags;
bitmap_counter_t *bmc;
@@ -1423,7 +1423,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
}
EXPORT_SYMBOL(bitmap_endwrite);
-static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
+static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
int degraded)
{
bitmap_counter_t *bmc;
@@ -1452,7 +1452,7 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *bloc
return rv;
}
-int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
+int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
int degraded)
{
/* bitmap_start_sync must always report on multiples of whole
@@ -1463,7 +1463,7 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
* Return the 'or' of the result.
*/
int rv = 0;
- int blocks1;
+ sector_t blocks1;
*blocks = 0;
while (*blocks < (PAGE_SIZE>>9)) {
@@ -1476,7 +1476,7 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
}
EXPORT_SYMBOL(bitmap_start_sync);
-void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted)
+void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
{
bitmap_counter_t *bmc;
unsigned long flags;
@@ -1515,7 +1515,7 @@ void bitmap_close_sync(struct bitmap *bitmap)
* RESYNC bit wherever it is still on
*/
sector_t sector = 0;
- int blocks;
+ sector_t blocks;
if (!bitmap)
return;
while (sector < bitmap->mddev->resync_max_sectors) {
@@ -1528,7 +1528,7 @@ EXPORT_SYMBOL(bitmap_close_sync);
void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
{
sector_t s = 0;
- int blocks;
+ sector_t blocks;
if (!bitmap)
return;
@@ -1562,7 +1562,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
* be 0 at this point
*/
- int secs;
+ sector_t secs;
bitmap_counter_t *bmc;
spin_lock_irq(&bitmap->lock);
bmc = bitmap_get_counter(bitmap, offset, &secs, 1);
@@ -1790,7 +1790,7 @@ int bitmap_load(mddev_t *mddev)
* All chunks should be clean, but some might need_sync.
*/
while (sector < mddev->resync_max_sectors) {
- int blocks;
+ sector_t blocks;
bitmap_start_sync(bitmap, sector, &blocks, 0);
sector += blocks;
}
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index e872a7bad6b..931a7a7c379 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -271,8 +271,8 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset,
unsigned long sectors, int behind);
void bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
unsigned long sectors, int success, int behind);
-int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int degraded);
-void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted);
+int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded);
+void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted);
void bitmap_close_sync(struct bitmap *bitmap);
void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 1a898788461..339fdc67075 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -210,7 +210,7 @@ static int make_request(mddev_t *mddev, struct bio *bio)
}
}
if (failit) {
- struct bio *b = bio_clone(bio, GFP_NOIO);
+ struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev);
b->bi_bdev = conf->rdev->bdev;
b->bi_private = bio;
b->bi_end_io = faulty_fail;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 225815197a3..4e957f3140a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -57,8 +57,6 @@
#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))
-static DEFINE_MUTEX(md_mutex);
-
#ifndef MODULE
static void autostart_arrays(int part);
#endif
@@ -69,6 +67,8 @@ static DEFINE_SPINLOCK(pers_lock);
static void md_print_devices(void);
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
+static struct workqueue_struct *md_wq;
+static struct workqueue_struct *md_misc_wq;
#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
@@ -149,6 +149,72 @@ static const struct block_device_operations md_fops;
static int start_readonly;
+/* bio_clone_mddev
+ * like bio_clone, but with a local bio set
+ */
+
+static void mddev_bio_destructor(struct bio *bio)
+{
+ mddev_t *mddev, **mddevp;
+
+ mddevp = (void*)bio;
+ mddev = mddevp[-1];
+
+ bio_free(bio, mddev->bio_set);
+}
+
+struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
+ mddev_t *mddev)
+{
+ struct bio *b;
+ mddev_t **mddevp;
+
+ if (!mddev || !mddev->bio_set)
+ return bio_alloc(gfp_mask, nr_iovecs);
+
+ b = bio_alloc_bioset(gfp_mask, nr_iovecs,
+ mddev->bio_set);
+ if (!b)
+ return NULL;
+ mddevp = (void*)b;
+ mddevp[-1] = mddev;
+ b->bi_destructor = mddev_bio_destructor;
+ return b;
+}
+EXPORT_SYMBOL_GPL(bio_alloc_mddev);
+
+struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
+ mddev_t *mddev)
+{
+ struct bio *b;
+ mddev_t **mddevp;
+
+ if (!mddev || !mddev->bio_set)
+ return bio_clone(bio, gfp_mask);
+
+ b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs,
+ mddev->bio_set);
+ if (!b)
+ return NULL;
+ mddevp = (void*)b;
+ mddevp[-1] = mddev;
+ b->bi_destructor = mddev_bio_destructor;
+ __bio_clone(b, bio);
+ if (bio_integrity(bio)) {
+ int ret;
+
+ ret = bio_integrity_clone(b, bio, gfp_mask, mddev->bio_set);
+
+ if (ret < 0) {
+ bio_put(b);
+ return NULL;
+ }
+ }
+
+ return b;
+}
+EXPORT_SYMBOL_GPL(bio_clone_mddev);
+
/*
* We have a system wide 'event count' that is incremented
* on any 'interesting' event, and readers of /proc/mdstat
@@ -300,7 +366,7 @@ static void md_end_flush(struct bio *bio, int err)
if (atomic_dec_and_test(&mddev->flush_pending)) {
/* The pre-request flush has finished */
- schedule_work(&mddev->flush_work);
+ queue_work(md_wq, &mddev->flush_work);
}
bio_put(bio);
}
@@ -321,7 +387,7 @@ static void submit_flushes(mddev_t *mddev)
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- bi = bio_alloc(GFP_KERNEL, 0);
+ bi = bio_alloc_mddev(GFP_KERNEL, 0, mddev);
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
bi->bi_bdev = rdev->bdev;
@@ -369,7 +435,7 @@ void md_flush_request(mddev_t *mddev, struct bio *bio)
submit_flushes(mddev);
if (atomic_dec_and_test(&mddev->flush_pending))
- schedule_work(&mddev->flush_work);
+ queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);
@@ -428,6 +494,8 @@ static void mddev_delayed_delete(struct work_struct *ws);
static void mddev_put(mddev_t *mddev)
{
+ struct bio_set *bs = NULL;
+
if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
return;
if (!mddev->raid_disks && list_empty(&mddev->disks) &&
@@ -435,19 +503,22 @@ static void mddev_put(mddev_t *mddev)
/* Array is not configured at all, and not held active,
* so destroy it */
list_del(&mddev->all_mddevs);
+ bs = mddev->bio_set;
+ mddev->bio_set = NULL;
if (mddev->gendisk) {
- /* we did a probe so need to clean up.
- * Call schedule_work inside the spinlock
- * so that flush_scheduled_work() after
- * mddev_find will succeed in waiting for the
- * work to be done.
+ /* We did a probe so need to clean up. Call
+ * queue_work inside the spinlock so that
+ * flush_workqueue() after mddev_find will
+ * succeed in waiting for the work to be done.
*/
INIT_WORK(&mddev->del_work, mddev_delayed_delete);
- schedule_work(&mddev->del_work);
+ queue_work(md_misc_wq, &mddev->del_work);
} else
kfree(mddev);
}
spin_unlock(&all_mddevs_lock);
+ if (bs)
+ bioset_free(bs);
}
void mddev_init(mddev_t *mddev)
@@ -691,7 +762,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
* if zero is reached.
* If an error occurred, call md_error
*/
- struct bio *bio = bio_alloc(GFP_NOIO, 1);
+ struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
bio->bi_bdev = rdev->bdev;
bio->bi_sector = sector;
@@ -722,16 +793,16 @@ static void bi_complete(struct bio *bio, int error)
complete((struct completion*)bio->bi_private);
}
-int sync_page_io(struct block_device *bdev, sector_t sector, int size,
- struct page *page, int rw)
+int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
+ struct page *page, int rw)
{
- struct bio *bio = bio_alloc(GFP_NOIO, 1);
+ struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
struct completion event;
int ret;
rw |= REQ_SYNC | REQ_UNPLUG;
- bio->bi_bdev = bdev;
+ bio->bi_bdev = rdev->bdev;
bio->bi_sector = sector;
bio_add_page(bio, page, size, 0);
init_completion(&event);
@@ -757,7 +828,7 @@ static int read_disk_sb(mdk_rdev_t * rdev, int size)
return 0;
- if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
+ if (!sync_page_io(rdev, rdev->sb_start, size, rdev->sb_page, READ))
goto fail;
rdev->sb_loaded = 1;
return 0;
@@ -1850,7 +1921,7 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
synchronize_rcu();
INIT_WORK(&rdev->del_work, md_delayed_delete);
kobject_get(&rdev->kobj);
- schedule_work(&rdev->del_work);
+ queue_work(md_misc_wq, &rdev->del_work);
}
/*
@@ -2108,6 +2179,8 @@ repeat:
if (!mddev->persistent) {
clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
clear_bit(MD_CHANGE_DEVS, &mddev->flags);
+ if (!mddev->external)
+ clear_bit(MD_CHANGE_PENDING, &mddev->flags);
wake_up(&mddev->sb_wait);
return;
}
@@ -4192,10 +4265,10 @@ static int md_alloc(dev_t dev, char *name)
shift = partitioned ? MdpMinorShift : 0;
unit = MINOR(mddev->unit) >> shift;
- /* wait for any previous instance if this device
- * to be completed removed (mddev_delayed_delete).
+ /* wait for any previous instance of this device to be
+ * completely removed (mddev_delayed_delete).
*/
- flush_scheduled_work();
+ flush_workqueue(md_misc_wq);
mutex_lock(&disks_mutex);
error = -EEXIST;
@@ -4378,6 +4451,9 @@ int md_run(mddev_t *mddev)
sysfs_notify_dirent_safe(rdev->sysfs_state);
}
+ if (mddev->bio_set == NULL)
+ mddev->bio_set = bioset_create(BIO_POOL_SIZE, sizeof(mddev));
+
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
if (!pers || !try_module_get(pers->owner)) {
@@ -5885,16 +5961,14 @@ static int md_open(struct block_device *bdev, fmode_t mode)
mddev_t *mddev = mddev_find(bdev->bd_dev);
int err;
- mutex_lock(&md_mutex);
if (mddev->gendisk != bdev->bd_disk) {
/* we are racing with mddev_put which is discarding this
* bd_disk.
*/
mddev_put(mddev);
/* Wait until bdev->bd_disk is definitely gone */
- flush_scheduled_work();
+ flush_workqueue(md_misc_wq);
/* Then retry the open from the top */
- mutex_unlock(&md_mutex);
return -ERESTARTSYS;
}
BUG_ON(mddev != bdev->bd_disk->private_data);
@@ -5908,7 +5982,6 @@ static int md_open(struct block_device *bdev, fmode_t mode)
check_disk_size_change(mddev->gendisk, bdev);
out:
- mutex_unlock(&md_mutex);
return err;
}
@@ -5917,10 +5990,8 @@ static int md_release(struct gendisk *disk, fmode_t mode)
mddev_t *mddev = disk->private_data;
BUG_ON(!mddev);
- mutex_lock(&md_mutex);
atomic_dec(&mddev->openers);
mddev_put(mddev);
- mutex_unlock(&md_mutex);
return 0;
}
@@ -6052,7 +6123,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
if (mddev->event_work.func)
- schedule_work(&mddev->event_work);
+ queue_work(md_misc_wq, &mddev->event_work);
md_new_event_inintr(mddev);
}
@@ -7212,12 +7283,23 @@ static void md_geninit(void)
static int __init md_init(void)
{
- if (register_blkdev(MD_MAJOR, "md"))
- return -1;
- if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
- unregister_blkdev(MD_MAJOR, "md");
- return -1;
- }
+ int ret = -ENOMEM;
+
+ md_wq = alloc_workqueue("md", WQ_RESCUER, 0);
+ if (!md_wq)
+ goto err_wq;
+
+ md_misc_wq = alloc_workqueue("md_misc", 0, 0);
+ if (!md_misc_wq)
+ goto err_misc_wq;
+
+ if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
+ goto err_md;
+
+ if ((ret = register_blkdev(0, "mdp")) < 0)
+ goto err_mdp;
+ mdp_major = ret;
+
blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
md_probe, NULL, NULL);
blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
@@ -7228,8 +7310,16 @@ static int __init md_init(void)
md_geninit();
return 0;
-}
+err_mdp:
+ unregister_blkdev(MD_MAJOR, "md");
+err_md:
+ destroy_workqueue(md_misc_wq);
+err_misc_wq:
+ destroy_workqueue(md_wq);
+err_wq:
+ return ret;
+}
#ifndef MODULE
@@ -7316,6 +7406,8 @@ static __exit void md_exit(void)
export_array(mddev);
mddev->hold_active = 0;
}
+ destroy_workqueue(md_misc_wq);
+ destroy_workqueue(md_wq);
}
subsys_initcall(md_init);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 112a2c32db0..d05bab55df4 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -331,6 +331,8 @@ struct mddev_s
struct attribute_group *to_remove;
struct plug_handle *plug; /* if used by personality */
+ struct bio_set *bio_set;
+
/* Generic flush handling.
* The last to finish preflush schedules a worker to submit
* the rest of the request (without the REQ_FLUSH flag).
@@ -495,7 +497,7 @@ extern void md_flush_request(mddev_t *mddev, struct bio *bio);
extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
sector_t sector, int size, struct page *page);
extern void md_super_wait(mddev_t *mddev);
-extern int sync_page_io(struct block_device *bdev, sector_t sector, int size,
+extern int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
struct page *page, int rw);
extern void md_do_sync(mddev_t *mddev);
extern void md_new_event(mddev_t *mddev);
@@ -517,4 +519,8 @@ extern void md_rdev_init(mdk_rdev_t *rdev);
extern void mddev_suspend(mddev_t *mddev);
extern void mddev_resume(mddev_t *mddev);
+extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
+ mddev_t *mddev);
+extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
+ mddev_t *mddev);
#endif /* _MD_MD_H */
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 378a25894c5..45f8324196e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -100,7 +100,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
* Allocate bios : 1 for reading, n-1 for writing
*/
for (j = pi->raid_disks ; j-- ; ) {
- bio = bio_alloc(gfp_flags, RESYNC_PAGES);
+ bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
if (!bio)
goto out_free_bio;
r1_bio->bios[j] = bio;
@@ -306,6 +306,28 @@ static void raid1_end_read_request(struct bio *bio, int error)
rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
}
+static void r1_bio_write_done(r1bio_t *r1_bio, int vcnt, struct bio_vec *bv,
+ int behind)
+{
+ if (atomic_dec_and_test(&r1_bio->remaining))
+ {
+ /* it really is the end of this request */
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
+ /* free extra copy of the data pages */
+ int i = vcnt;
+ while (i--)
+ safe_put_page(bv[i].bv_page);
+ }
+ /* clear the bitmap if all writes complete successfully */
+ bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
+ r1_bio->sectors,
+ !test_bit(R1BIO_Degraded, &r1_bio->state),
+ behind);
+ md_write_end(r1_bio->mddev);
+ raid_end_bio_io(r1_bio);
+ }
+}
+
static void raid1_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -373,21 +395,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
* Let's see if all mirrored write operations have finished
* already.
*/
- if (atomic_dec_and_test(&r1_bio->remaining)) {
- if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
- /* free extra copy of the data pages */
- int i = bio->bi_vcnt;
- while (i--)
- safe_put_page(bio->bi_io_vec[i].bv_page);
- }
- /* clear the bitmap if all writes complete successfully */
- bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
- r1_bio->sectors,
- !test_bit(R1BIO_Degraded, &r1_bio->state),
- behind);
- md_write_end(r1_bio->mddev);
- raid_end_bio_io(r1_bio);
- }
+ r1_bio_write_done(r1_bio, bio->bi_vcnt, bio->bi_io_vec, behind);
if (to_put)
bio_put(to_put);
@@ -411,11 +419,13 @@ static void raid1_end_write_request(struct bio *bio, int error)
static int read_balance(conf_t *conf, r1bio_t *r1_bio)
{
const sector_t this_sector = r1_bio->sector;
- int new_disk = conf->last_used, disk = new_disk;
- int wonly_disk = -1;
const int sectors = r1_bio->sectors;
+ int new_disk = -1;
+ int start_disk;
+ int i;
sector_t new_distance, current_distance;
mdk_rdev_t *rdev;
+ int choose_first;
rcu_read_lock();
/*
@@ -426,54 +436,33 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
retry:
if (conf->mddev->recovery_cp < MaxSector &&
(this_sector + sectors >= conf->next_resync)) {
- /* Choose the first operational device, for consistancy */
- new_disk = 0;
-
- for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
- r1_bio->bios[new_disk] == IO_BLOCKED ||
- !rdev || !test_bit(In_sync, &rdev->flags)
- || test_bit(WriteMostly, &rdev->flags);
- rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) {
-
- if (rdev && test_bit(In_sync, &rdev->flags) &&
- r1_bio->bios[new_disk] != IO_BLOCKED)
- wonly_disk = new_disk;
-
- if (new_disk == conf->raid_disks - 1) {
- new_disk = wonly_disk;
- break;
- }
- }
- goto rb_out;
+ choose_first = 1;
+ start_disk = 0;
+ } else {
+ choose_first = 0;
+ start_disk = conf->last_used;
}
-
/* make sure the disk is operational */
- for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
- r1_bio->bios[new_disk] == IO_BLOCKED ||
- !rdev || !test_bit(In_sync, &rdev->flags) ||
- test_bit(WriteMostly, &rdev->flags);
- rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) {
-
- if (rdev && test_bit(In_sync, &rdev->flags) &&
- r1_bio->bios[new_disk] != IO_BLOCKED)
- wonly_disk = new_disk;
-
- if (new_disk <= 0)
- new_disk = conf->raid_disks;
- new_disk--;
- if (new_disk == disk) {
- new_disk = wonly_disk;
+ for (i = 0 ; i < conf->raid_disks ; i++) {
+ int disk = start_disk + i;
+ if (disk >= conf->raid_disks)
+ disk -= conf->raid_disks;
+
+ rdev = rcu_dereference(conf->mirrors[disk].rdev);
+ if (r1_bio->bios[disk] == IO_BLOCKED
+ || rdev == NULL
+ || !test_bit(In_sync, &rdev->flags))
+ continue;
+
+ new_disk = disk;
+ if (!test_bit(WriteMostly, &rdev->flags))
break;
- }
}
- if (new_disk < 0)
+ if (new_disk < 0 || choose_first)
goto rb_out;
- disk = new_disk;
- /* now disk == new_disk == starting point for search */
-
/*
* Don't change to another disk for sequential reads:
*/
@@ -482,20 +471,21 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
if (this_sector == conf->mirrors[new_disk].head_position)
goto rb_out;
- current_distance = abs(this_sector - conf->mirrors[disk].head_position);
-
- /* Find the disk whose head is closest */
+ current_distance = abs(this_sector
+ - conf->mirrors[new_disk].head_position);
- do {
- if (disk <= 0)
- disk = conf->raid_disks;
- disk--;
+ /* look for a better disk - i.e. head is closer */
+ start_disk = new_disk;
+ for (i = 1; i < conf->raid_disks; i++) {
+ int disk = start_disk + 1;
+ if (disk >= conf->raid_disks)
+ disk -= conf->raid_disks;
rdev = rcu_dereference(conf->mirrors[disk].rdev);
-
- if (!rdev || r1_bio->bios[disk] == IO_BLOCKED ||
- !test_bit(In_sync, &rdev->flags) ||
- test_bit(WriteMostly, &rdev->flags))
+ if (r1_bio->bios[disk] == IO_BLOCKED
+ || rdev == NULL
+ || !test_bit(In_sync, &rdev->flags)
+ || test_bit(WriteMostly, &rdev->flags))
continue;
if (!atomic_read(&rdev->nr_pending)) {
@@ -507,11 +497,9 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
current_distance = new_distance;
new_disk = disk;
}
- } while (disk != conf->last_used);
+ }
rb_out:
-
-
if (new_disk >= 0) {
rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
if (!rdev)
@@ -658,7 +646,7 @@ static void raise_barrier(conf_t *conf)
/* block any new IO from starting */
conf->barrier++;
- /* No wait for all pending IO to complete */
+ /* Now wait for all pending IO to complete */
wait_event_lock_irq(conf->wait_barrier,
!conf->nr_pending && conf->barrier < RESYNC_DEPTH,
conf->resync_lock,
@@ -735,23 +723,26 @@ static void unfreeze_array(conf_t *conf)
}
-/* duplicate the data pages for behind I/O */
-static struct page **alloc_behind_pages(struct bio *bio)
+/* duplicate the data pages for behind I/O
+ * We return a list of bio_vec rather than just page pointers
+ * as it makes freeing easier
+ */
+static struct bio_vec *alloc_behind_pages(struct bio *bio)
{
int i;
struct bio_vec *bvec;
- struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page *),
+ struct bio_vec *pages = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
GFP_NOIO);
if (unlikely(!pages))
goto do_sync_io;
bio_for_each_segment(bvec, bio, i) {
- pages[i] = alloc_page(GFP_NOIO);
- if (unlikely(!pages[i]))
+ pages[i].bv_page = alloc_page(GFP_NOIO);
+ if (unlikely(!pages[i].bv_page))
goto do_sync_io;
- memcpy(kmap(pages[i]) + bvec->bv_offset,
+ memcpy(kmap(pages[i].bv_page) + bvec->bv_offset,
kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
- kunmap(pages[i]);
+ kunmap(pages[i].bv_page);
kunmap(bvec->bv_page);
}
@@ -759,8 +750,8 @@ static struct page **alloc_behind_pages(struct bio *bio)
do_sync_io:
if (pages)
- for (i = 0; i < bio->bi_vcnt && pages[i]; i++)
- put_page(pages[i]);
+ for (i = 0; i < bio->bi_vcnt && pages[i].bv_page; i++)
+ put_page(pages[i].bv_page);
kfree(pages);
PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
return NULL;
@@ -775,8 +766,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
int i, targets = 0, disks;
struct bitmap *bitmap;
unsigned long flags;
- struct bio_list bl;
- struct page **behind_pages = NULL;
+ struct bio_vec *behind_pages = NULL;
const int rw = bio_data_dir(bio);
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
@@ -851,7 +841,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
}
r1_bio->read_disk = rdisk;
- read_bio = bio_clone(bio, GFP_NOIO);
+ read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
r1_bio->bios[rdisk] = read_bio;
@@ -873,13 +863,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
* bios[x] to bio
*/
disks = conf->raid_disks;
-#if 0
- { static int first=1;
- if (first) printk("First Write sector %llu disks %d\n",
- (unsigned long long)r1_bio->sector, disks);
- first = 0;
- }
-#endif
retry_write:
blocked_rdev = NULL;
rcu_read_lock();
@@ -937,16 +920,17 @@ static int make_request(mddev_t *mddev, struct bio * bio)
(behind_pages = alloc_behind_pages(bio)) != NULL)
set_bit(R1BIO_BehindIO, &r1_bio->state);
- atomic_set(&r1_bio->remaining, 0);
+ atomic_set(&r1_bio->remaining, 1);
atomic_set(&r1_bio->behind_remaining, 0);
- bio_list_init(&bl);
+ bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors,
+ test_bit(R1BIO_BehindIO, &r1_bio->state));
for (i = 0; i < disks; i++) {
struct bio *mbio;
if (!r1_bio->bios[i])
continue;
- mbio = bio_clone(bio, GFP_NOIO);
+ mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
r1_bio->bios[i] = mbio;
mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
@@ -963,39 +947,29 @@ static int make_request(mddev_t *mddev, struct bio * bio)
* we clear any unused pointer in the io_vec, rather
* than leave them unchanged. This is important
* because when we come to free the pages, we won't
- * know the originial bi_idx, so we just free
+ * know the original bi_idx, so we just free
* them all
*/
__bio_for_each_segment(bvec, mbio, j, 0)
- bvec->bv_page = behind_pages[j];
+ bvec->bv_page = behind_pages[j].bv_page;
if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
atomic_inc(&r1_bio->behind_remaining);
}
atomic_inc(&r1_bio->remaining);
-
- bio_list_add(&bl, mbio);
+ spin_lock_irqsave(&conf->device_lock, flags);
+ bio_list_add(&conf->pending_bio_list, mbio);
+ blk_plug_device(mddev->queue);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
}
+ r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
kfree(behind_pages); /* the behind pages are attached to the bios now */
- bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors,
- test_bit(R1BIO_BehindIO, &r1_bio->state));
- spin_lock_irqsave(&conf->device_lock, flags);
- bio_list_merge(&conf->pending_bio_list, &bl);
- bio_list_init(&bl);
-
- blk_plug_device(mddev->queue);
- spin_unlock_irqrestore(&conf->device_lock, flags);
-
- /* In case raid1d snuck into freeze_array */
+ /* In case raid1d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
if (do_sync)
md_wakeup_thread(mddev->thread);
-#if 0
- while ((bio = bio_list_pop(&bl)) != NULL)
- generic_make_request(bio);
-#endif
return 0;
}
@@ -1183,7 +1157,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
err = -EBUSY;
goto abort;
}
- /* Only remove non-faulty devices is recovery
+ /* Only remove non-faulty devices if recovery
* is not possible.
*/
if (!test_bit(Faulty, &rdev->flags) &&
@@ -1245,7 +1219,7 @@ static void end_sync_write(struct bio *bio, int error)
break;
}
if (!uptodate) {
- int sync_blocks = 0;
+ sector_t sync_blocks = 0;
sector_t s = r1_bio->sector;
long sectors_to_go = r1_bio->sectors;
/* make sure these bits doesn't get cleared. */
@@ -1388,7 +1362,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
* active, and resync is currently active
*/
rdev = conf->mirrors[d].rdev;
- if (sync_page_io(rdev->bdev,
+ if (sync_page_io(rdev,
sect + rdev->data_offset,
s<<9,
bio->bi_io_vec[idx].bv_page,
@@ -1414,7 +1388,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
continue;
rdev = conf->mirrors[d].rdev;
atomic_add(s, &rdev->corrected_errors);
- if (sync_page_io(rdev->bdev,
+ if (sync_page_io(rdev,
sect + rdev->data_offset,
s<<9,
bio->bi_io_vec[idx].bv_page,
@@ -1429,7 +1403,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
if (r1_bio->bios[d]->bi_end_io != end_sync_read)
continue;
rdev = conf->mirrors[d].rdev;
- if (sync_page_io(rdev->bdev,
+ if (sync_page_io(rdev,
sect + rdev->data_offset,
s<<9,
bio->bi_io_vec[idx].bv_page,
@@ -1513,7 +1487,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
rdev = conf->mirrors[d].rdev;
if (rdev &&
test_bit(In_sync, &rdev->flags) &&
- sync_page_io(rdev->bdev,
+ sync_page_io(rdev,
sect + rdev->data_offset,
s<<9,
conf->tmppage, READ))
@@ -1539,7 +1513,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
rdev = conf->mirrors[d].rdev;
if (rdev &&
test_bit(In_sync, &rdev->flags)) {
- if (sync_page_io(rdev->bdev,
+ if (sync_page_io(rdev,
sect + rdev->data_offset,
s<<9, conf->tmppage, WRITE)
== 0)
@@ -1556,7 +1530,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
rdev = conf->mirrors[d].rdev;
if (rdev &&
test_bit(In_sync, &rdev->flags)) {
- if (sync_page_io(rdev->bdev,
+ if (sync_page_io(rdev,
sect + rdev->data_offset,
s<<9, conf->tmppage, READ)
== 0)
@@ -1646,7 +1620,8 @@ static void raid1d(mddev_t *mddev)
mddev->ro ? IO_BLOCKED : NULL;
r1_bio->read_disk = disk;
bio_put(bio);
- bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
+ bio = bio_clone_mddev(r1_bio->master_bio,
+ GFP_NOIO, mddev);
r1_bio->bios[r1_bio->read_disk] = bio;
rdev = conf->mirrors[disk].rdev;
if (printk_ratelimit())
@@ -1705,7 +1680,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
int i;
int wonly = -1;
int write_targets = 0, read_targets = 0;
- int sync_blocks;
+ sector_t sync_blocks;
int still_degraded = 0;
if (!conf->r1buf_pool)
@@ -1755,11 +1730,11 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
msleep_interruptible(1000);
bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+ r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
raise_barrier(conf);
conf->next_resync = sector_nr;
- r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
rcu_read_lock();
/*
* If we get a correctably read error during resync or recovery,
@@ -1971,7 +1946,6 @@ static conf_t *setup_conf(mddev_t *mddev)
init_waitqueue_head(&conf->wait_barrier);
bio_list_init(&conf->pending_bio_list);
- bio_list_init(&conf->flushing_bio_list);
conf->last_used = -1;
for (i = 0; i < conf->raid_disks; i++) {
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index adf8cfd7331..cbfdf1a6acd 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -35,8 +35,6 @@ struct r1_private_data_s {
struct list_head retry_list;
/* queue pending writes and submit them on unplug */
struct bio_list pending_bio_list;
- /* queue of writes that have been unplugged */
- struct bio_list flushing_bio_list;
/* for use when syncing mirrors: */
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index f0d082f749b..c67aa54694a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -120,7 +120,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
* Allocate bios.
*/
for (j = nalloc ; j-- ; ) {
- bio = bio_alloc(gfp_flags, RESYNC_PAGES);
+ bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
if (!bio)
goto out_free_bio;
r10_bio->devs[j].bio = bio;
@@ -801,7 +801,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
const int rw = bio_data_dir(bio);
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
- struct bio_list bl;
unsigned long flags;
mdk_rdev_t *blocked_rdev;
@@ -890,7 +889,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
}
mirror = conf->mirrors + disk;
- read_bio = bio_clone(bio, GFP_NOIO);
+ read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
r10_bio->devs[slot].bio = read_bio;
@@ -950,16 +949,16 @@ static int make_request(mddev_t *mddev, struct bio * bio)
goto retry_write;
}
- atomic_set(&r10_bio->remaining, 0);
+ atomic_set(&r10_bio->remaining, 1);
+ bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
- bio_list_init(&bl);
for (i = 0; i < conf->copies; i++) {
struct bio *mbio;
int d = r10_bio->devs[i].devnum;
if (!r10_bio->devs[i].bio)
continue;
- mbio = bio_clone(bio, GFP_NOIO);
+ mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
r10_bio->devs[i].bio = mbio;
mbio->bi_sector = r10_bio->devs[i].addr+
@@ -970,22 +969,22 @@ static int make_request(mddev_t *mddev, struct bio * bio)
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
- bio_list_add(&bl, mbio);
+ spin_lock_irqsave(&conf->device_lock, flags);
+ bio_list_add(&conf->pending_bio_list, mbio);
+ blk_plug_device(mddev->queue);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
}
- if (unlikely(!atomic_read(&r10_bio->remaining))) {
- /* the array is dead */
+ if (atomic_dec_and_test(&r10_bio->remaining)) {
+ /* This matches the end of raid10_end_write_request() */
+ bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
+ r10_bio->sectors,
+ !test_bit(R10BIO_Degraded, &r10_bio->state),
+ 0);
md_write_end(mddev);
raid_end_bio_io(r10_bio);
- return 0;
}
- bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
- spin_lock_irqsave(&conf->device_lock, flags);
- bio_list_merge(&conf->pending_bio_list, &bl);
- blk_plug_device(mddev->queue);
- spin_unlock_irqrestore(&conf->device_lock, flags);
-
/* In case raid10d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
@@ -1558,7 +1557,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
test_bit(In_sync, &rdev->flags)) {
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- success = sync_page_io(rdev->bdev,
+ success = sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect + rdev->data_offset,
s<<9,
@@ -1597,7 +1596,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
atomic_add(s, &rdev->corrected_errors);
- if (sync_page_io(rdev->bdev,
+ if (sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect + rdev->data_offset,
s<<9, conf->tmppage, WRITE)
@@ -1634,7 +1633,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
char b[BDEVNAME_SIZE];
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- if (sync_page_io(rdev->bdev,
+ if (sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect + rdev->data_offset,
s<<9, conf->tmppage,
@@ -1747,7 +1746,8 @@ static void raid10d(mddev_t *mddev)
mdname(mddev),
bdevname(rdev->bdev,b),
(unsigned long long)r10_bio->sector);
- bio = bio_clone(r10_bio->master_bio, GFP_NOIO);
+ bio = bio_clone_mddev(r10_bio->master_bio,
+ GFP_NOIO, mddev);
r10_bio->devs[r10_bio->read_slot].bio = bio;
bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
+ rdev->data_offset;
@@ -1820,7 +1820,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
int disk;
int i;
int max_sync;
- int sync_blocks;
+ sector_t sync_blocks;
sector_t sectors_skipped = 0;
int chunks_skipped = 0;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 31140d1259d..dc574f303f8 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3876,9 +3876,9 @@ static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
return 0;
}
/*
- * use bio_clone to make a copy of the bio
+ * use bio_clone_mddev to make a copy of the bio
*/
- align_bi = bio_clone(raid_bio, GFP_NOIO);
+ align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
if (!align_bi)
return 0;
/*
@@ -4360,7 +4360,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
raid5_conf_t *conf = mddev->private;
struct stripe_head *sh;
sector_t max_sector = mddev->dev_sectors;
- int sync_blocks;
+ sector_t sync_blocks;
int still_degraded = 0;
int i;
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index 0a53500636c..d2d5d23416d 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -91,11 +91,10 @@ static void ibmasmfs_create_files (struct super_block *sb, struct dentry *root);
static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent);
-static int ibmasmfs_get_super(struct file_system_type *fst,
- int flags, const char *name, void *data,
- struct vfsmount *mnt)
+static struct dentry *ibmasmfs_mount(struct file_system_type *fst,
+ int flags, const char *name, void *data)
{
- return get_sb_single(fst, flags, data, ibmasmfs_fill_super, mnt);
+ return mount_single(fst, flags, data, ibmasmfs_fill_super);
}
static const struct super_operations ibmasmfs_s_ops = {
@@ -108,7 +107,7 @@ static const struct file_operations *ibmasmfs_dir_ops = &simple_dir_operations;
static struct file_system_type ibmasmfs_type = {
.owner = THIS_MODULE,
.name = "ibmasmfs",
- .get_sb = ibmasmfs_get_super,
+ .mount = ibmasmfs_mount,
.kill_sb = kill_litter_super,
};
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 5ef45487b65..a34a0fe1488 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1030,17 +1030,15 @@ static const struct file_operations mtd_fops = {
#endif
};
-static int mtd_inodefs_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data,
- struct vfsmount *mnt)
+static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
{
- return get_sb_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC,
- mnt);
+ return mount_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC);
}
static struct file_system_type mtd_inodefs_type = {
.name = "mtd_inodefs",
- .get_sb = mtd_inodefs_get_sb,
+ .mount = mtd_inodefs_mount,
.kill_sb = kill_anon_super,
};
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 38e2ab07e7a..16b02a1fc10 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -54,11 +54,10 @@ static int get_sb_mtd_set(struct super_block *sb, void *_mtd)
/*
* get a superblock on an MTD-backed filesystem
*/
-static int get_sb_mtd_aux(struct file_system_type *fs_type, int flags,
+static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data,
struct mtd_info *mtd,
- int (*fill_super)(struct super_block *, void *, int),
- struct vfsmount *mnt)
+ int (*fill_super)(struct super_block *, void *, int))
{
struct super_block *sb;
int ret;
@@ -79,57 +78,49 @@ static int get_sb_mtd_aux(struct file_system_type *fs_type, int flags,
ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
if (ret < 0) {
deactivate_locked_super(sb);
- return ret;
+ return ERR_PTR(ret);
}
/* go */
sb->s_flags |= MS_ACTIVE;
- simple_set_mnt(mnt, sb);
-
- return 0;
+ return dget(sb->s_root);
/* new mountpoint for an already mounted superblock */
already_mounted:
DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n",
mtd->index, mtd->name);
- simple_set_mnt(mnt, sb);
- ret = 0;
- goto out_put;
+ put_mtd_device(mtd);
+ return dget(sb->s_root);
out_error:
- ret = PTR_ERR(sb);
-out_put:
put_mtd_device(mtd);
- return ret;
+ return ERR_CAST(sb);
}
/*
* get a superblock on an MTD-backed filesystem by MTD device number
*/
-static int get_sb_mtd_nr(struct file_system_type *fs_type, int flags,
+static struct dentry *mount_mtd_nr(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data, int mtdnr,
- int (*fill_super)(struct super_block *, void *, int),
- struct vfsmount *mnt)
+ int (*fill_super)(struct super_block *, void *, int))
{
struct mtd_info *mtd;
mtd = get_mtd_device(NULL, mtdnr);
if (IS_ERR(mtd)) {
DEBUG(0, "MTDSB: Device #%u doesn't appear to exist\n", mtdnr);
- return PTR_ERR(mtd);
+ return ERR_CAST(mtd);
}
- return get_sb_mtd_aux(fs_type, flags, dev_name, data, mtd, fill_super,
- mnt);
+ return mount_mtd_aux(fs_type, flags, dev_name, data, mtd, fill_super);
}
/*
* set up an MTD-based superblock
*/
-int get_sb_mtd(struct file_system_type *fs_type, int flags,
+struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data,
- int (*fill_super)(struct super_block *, void *, int),
- struct vfsmount *mnt)
+ int (*fill_super)(struct super_block *, void *, int))
{
#ifdef CONFIG_BLOCK
struct block_device *bdev;
@@ -138,7 +129,7 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags,
int mtdnr;
if (!dev_name)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
DEBUG(2, "MTDSB: dev_name \"%s\"\n", dev_name);
@@ -156,10 +147,10 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags,
mtd = get_mtd_device_nm(dev_name + 4);
if (!IS_ERR(mtd))
- return get_sb_mtd_aux(
+ return mount_mtd_aux(
fs_type, flags,
dev_name, data, mtd,
- fill_super, mnt);
+ fill_super);
printk(KERN_NOTICE "MTD:"
" MTD device with name \"%s\" not found.\n",
@@ -174,9 +165,9 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags,
/* It was a valid number */
DEBUG(1, "MTDSB: mtd%%d, mtdnr %d\n",
mtdnr);
- return get_sb_mtd_nr(fs_type, flags,
+ return mount_mtd_nr(fs_type, flags,
dev_name, data,
- mtdnr, fill_super, mnt);
+ mtdnr, fill_super);
}
}
}
@@ -189,7 +180,7 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags,
if (IS_ERR(bdev)) {
ret = PTR_ERR(bdev);
DEBUG(1, "MTDSB: lookup_bdev() returned %d\n", ret);
- return ret;
+ return ERR_PTR(ret);
}
DEBUG(1, "MTDSB: lookup_bdev() returned 0\n");
@@ -202,8 +193,7 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags,
if (major != MTD_BLOCK_MAJOR)
goto not_an_MTD_device;
- return get_sb_mtd_nr(fs_type, flags, dev_name, data, mtdnr, fill_super,
- mnt);
+ return mount_mtd_nr(fs_type, flags, dev_name, data, mtdnr, fill_super);
not_an_MTD_device:
#endif /* CONFIG_BLOCK */
@@ -212,10 +202,10 @@ not_an_MTD_device:
printk(KERN_NOTICE
"MTD: Attempt to mount non-MTD device \"%s\"\n",
dev_name);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
-EXPORT_SYMBOL_GPL(get_sb_mtd);
+EXPORT_SYMBOL_GPL(mount_mtd);
/*
* destroy an MTD-based superblock
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index 449de59bf35..e9ff6f7770b 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -259,17 +259,17 @@ static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
}
-static int oprofilefs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data, struct vfsmount *mnt)
+static struct dentry *oprofilefs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
{
- return get_sb_single(fs_type, flags, data, oprofilefs_fill_super, mnt);
+ return mount_single(fs_type, flags, data, oprofilefs_fill_super);
}
static struct file_system_type oprofilefs_type = {
.owner = THIS_MODULE,
.name = "oprofilefs",
- .get_sb = oprofilefs_get_sb,
+ .mount = oprofilefs_mount,
.kill_sb = kill_litter_super,
};
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 50cf96389d2..bf61274af3b 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2802,6 +2802,73 @@ dasd_eckd_steal_lock(struct dasd_device *device)
}
/*
+ * SNID - Sense Path Group ID
+ * This ioctl may be used in situations where I/O is stalled due to
+ * a reserve, so if the normal dasd_smalloc_request fails, we use the
+ * preallocated dasd_reserve_req.
+ */
+static int dasd_eckd_snid(struct dasd_device *device,
+ void __user *argp)
+{
+ struct dasd_ccw_req *cqr;
+ int rc;
+ struct ccw1 *ccw;
+ int useglobal;
+ struct dasd_snid_ioctl_data usrparm;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
+ return -EFAULT;
+
+ useglobal = 0;
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
+ sizeof(struct dasd_snid_data), device);
+ if (IS_ERR(cqr)) {
+ mutex_lock(&dasd_reserve_mutex);
+ useglobal = 1;
+ cqr = &dasd_reserve_req->cqr;
+ memset(cqr, 0, sizeof(*cqr));
+ memset(&dasd_reserve_req->ccw, 0,
+ sizeof(dasd_reserve_req->ccw));
+ cqr->cpaddr = &dasd_reserve_req->ccw;
+ cqr->data = &dasd_reserve_req->data;
+ cqr->magic = DASD_ECKD_MAGIC;
+ }
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_SNID;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->count = 12;
+ ccw->cda = (__u32)(addr_t) cqr->data;
+ cqr->startdev = device;
+ cqr->memdev = device;
+ clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+ cqr->retries = 5;
+ cqr->expires = 10 * HZ;
+ cqr->buildclk = get_clock();
+ cqr->status = DASD_CQR_FILLED;
+ cqr->lpm = usrparm.path_mask;
+
+ rc = dasd_sleep_on_immediatly(cqr);
+ /* verify that I/O processing didn't modify the path mask */
+ if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
+ rc = -EIO;
+ if (!rc) {
+ usrparm.data = *((struct dasd_snid_data *)cqr->data);
+ if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
+ rc = -EFAULT;
+ }
+
+ if (useglobal)
+ mutex_unlock(&dasd_reserve_mutex);
+ else
+ dasd_sfree_request(cqr, cqr->memdev);
+ return rc;
+}
+
+/*
* Read performance statistics
*/
static int
@@ -3036,6 +3103,8 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
return dasd_eckd_reserve(device);
case BIODASDSLCK:
return dasd_eckd_steal_lock(device);
+ case BIODASDSNID:
+ return dasd_eckd_snid(device, argp);
case BIODASDSYMMIO:
return dasd_symm_io(device, argp);
default:
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 0eb49655a6c..12097c24f2f 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -27,6 +27,7 @@
#define DASD_ECKD_CCW_WRITE_CKD 0x1d
#define DASD_ECKD_CCW_READ_CKD 0x1e
#define DASD_ECKD_CCW_PSF 0x27
+#define DASD_ECKD_CCW_SNID 0x34
#define DASD_ECKD_CCW_RSSD 0x3e
#define DASD_ECKD_CCW_LOCATE_RECORD 0x47
#define DASD_ECKD_CCW_SNSS 0x54
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 29c2d73d719..6c408670e08 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -1077,15 +1077,14 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
/* FIXME: What to do with the request? */
switch (PTR_ERR(irb)) {
case -ETIMEDOUT:
- DBF_LH(1, "(%s): Request timed out\n",
- dev_name(&cdev->dev));
+ DBF_LH(1, "(%08x): Request timed out\n",
+ device->cdev_id);
case -EIO:
__tape_end_request(device, request, -EIO);
break;
default:
- DBF_LH(1, "(%s): Unexpected i/o error %li\n",
- dev_name(&cdev->dev),
- PTR_ERR(irb));
+ DBF_LH(1, "(%08x): Unexpected i/o error %li\n",
+ device->cdev_id, PTR_ERR(irb));
}
return;
}
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 03f07e5dd6e..3c3f342149e 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -47,8 +47,8 @@ tape_std_assign_timeout(unsigned long data)
device->cdev_id);
rc = tape_cancel_io(device, request);
if(rc)
- DBF_EVENT(3, "(%s): Assign timeout: Cancel failed with rc = %i\n",
- dev_name(&device->cdev->dev), rc);
+ DBF_EVENT(3, "(%08x): Assign timeout: Cancel failed with rc = "
+ "%i\n", device->cdev_id, rc);
}
int
diff --git a/drivers/staging/autofs/init.c b/drivers/staging/autofs/init.c
index 765c72f4297..5e4b372ea66 100644
--- a/drivers/staging/autofs/init.c
+++ b/drivers/staging/autofs/init.c
@@ -14,16 +14,16 @@
#include <linux/init.h>
#include "autofs_i.h"
-static int autofs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data, struct vfsmount *mnt)
+static struct dentry *autofs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
{
- return get_sb_nodev(fs_type, flags, data, autofs_fill_super, mnt);
+ return mount_nodev(fs_type, flags, data, autofs_fill_super);
}
static struct file_system_type autofs_fs_type = {
.owner = THIS_MODULE,
.name = "autofs",
- .get_sb = autofs_get_sb,
+ .mount = autofs_mount,
.kill_sb = autofs_kill_sb,
};
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
index c62d30017c0..61685ccceda 100644
--- a/drivers/staging/pohmelfs/inode.c
+++ b/drivers/staging/pohmelfs/inode.c
@@ -1937,11 +1937,10 @@ err_out_exit:
/*
* Some VFS magic here...
*/
-static int pohmelfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data, struct vfsmount *mnt)
+static struct dentry *pohmelfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
{
- return get_sb_nodev(fs_type, flags, data, pohmelfs_fill_super,
- mnt);
+ return mount_nodev(fs_type, flags, data, pohmelfs_fill_super);
}
/*
@@ -1958,7 +1957,7 @@ static void pohmelfs_kill_super(struct super_block *sb)
static struct file_system_type pohmel_fs_type = {
.owner = THIS_MODULE,
.name = "pohmel",
- .get_sb = pohmelfs_get_sb,
+ .mount = pohmelfs_mount,
.kill_sb = pohmelfs_kill_super,
};
diff --git a/drivers/staging/smbfs/inode.c b/drivers/staging/smbfs/inode.c
index 552951aa749..f9c493591ce 100644
--- a/drivers/staging/smbfs/inode.c
+++ b/drivers/staging/smbfs/inode.c
@@ -793,16 +793,16 @@ out:
return error;
}
-static int smb_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data, struct vfsmount *mnt)
+static struct dentry *smb_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
{
- return get_sb_nodev(fs_type, flags, data, smb_fill_super, mnt);
+ return mount_nodev(fs_type, flags, data, smb_fill_super);
}
static struct file_system_type smb_fs_type = {
.owner = THIS_MODULE,
.name = "smbfs",
- .get_sb = smb_get_sb,
+ .mount = smb_mount,
.kill_sb = kill_anon_super,
.fs_flags = FS_BINARY_MOUNTDATA,
};
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index e2f63c0ea09..9819a4cc3b2 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -574,16 +574,16 @@ static void fs_remove_file (struct dentry *dentry)
/* --------------------------------------------------------------------- */
-static int usb_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data, struct vfsmount *mnt)
+static struct dentry *usb_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
{
- return get_sb_single(fs_type, flags, data, usbfs_fill_super, mnt);
+ return mount_single(fs_type, flags, data, usbfs_fill_super);
}
static struct file_system_type usb_fs_type = {
.owner = THIS_MODULE,
.name = "usbfs",
- .get_sb = usb_get_sb,
+ .mount = usb_mount,
.kill_sb = kill_litter_super,
};
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index f276e9594f0..4a830df4fc3 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1176,9 +1176,9 @@ invalid:
/* "mount -t functionfs dev_name /dev/function" ends up here */
-static int
-ffs_fs_get_sb(struct file_system_type *t, int flags,
- const char *dev_name, void *opts, struct vfsmount *mnt)
+static struct dentry *
+ffs_fs_mount(struct file_system_type *t, int flags,
+ const char *dev_name, void *opts)
{
struct ffs_sb_fill_data data = {
.perms = {
@@ -1194,14 +1194,14 @@ ffs_fs_get_sb(struct file_system_type *t, int flags,
ret = functionfs_check_dev_callback(dev_name);
if (unlikely(ret < 0))
- return ret;
+ return ERR_PTR(ret);
ret = ffs_fs_parse_opts(&data, opts);
if (unlikely(ret < 0))
- return ret;
+ return ERR_PTR(ret);
data.dev_name = dev_name;
- return get_sb_single(t, flags, &data, ffs_sb_fill, mnt);
+ return mount_single(t, flags, &data, ffs_sb_fill);
}
static void
@@ -1220,7 +1220,7 @@ ffs_fs_kill_sb(struct super_block *sb)
static struct file_system_type ffs_fs_type = {
.owner = THIS_MODULE,
.name = "functionfs",
- .get_sb = ffs_fs_get_sb,
+ .mount = ffs_fs_mount,
.kill_sb = ffs_fs_kill_sb,
};
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index ba145e7fbe0..3ed73f49cf1 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -2097,11 +2097,11 @@ enomem0:
}
/* "mount -t gadgetfs path /dev/gadget" ends up here */
-static int
-gadgetfs_get_sb (struct file_system_type *t, int flags,
- const char *path, void *opts, struct vfsmount *mnt)
+static struct dentry *
+gadgetfs_mount (struct file_system_type *t, int flags,
+ const char *path, void *opts)
{
- return get_sb_single (t, flags, opts, gadgetfs_fill_super, mnt);
+ return mount_single (t, flags, opts, gadgetfs_fill_super);
}
static void
@@ -2119,7 +2119,7 @@ gadgetfs_kill_sb (struct super_block *sb)
static struct file_system_type gadgetfs_type = {
.owner = THIS_MODULE,
.name = shortname,
- .get_sb = gadgetfs_get_sb,
+ .mount = gadgetfs_mount,
.kill_sb = gadgetfs_kill_sb,
};
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index bf2e7d23453..2391c396ca3 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -93,8 +93,9 @@ config USB_EHCI_TT_NEWSCHED
config USB_EHCI_BIG_ENDIAN_MMIO
bool
- depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || ARCH_IXP4XX || \
- XPS_USB_HCD_XILINX || PPC_MPC512x)
+ depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || \
+ ARCH_IXP4XX || XPS_USB_HCD_XILINX || \
+ PPC_MPC512x || CPU_CAVIUM_OCTEON)
default y
config USB_EHCI_BIG_ENDIAN_DESC
@@ -434,3 +435,28 @@ config USB_IMX21_HCD
To compile this driver as a module, choose M here: the
module will be called "imx21-hcd".
+config USB_OCTEON_EHCI
+ bool "Octeon on-chip EHCI support"
+ depends on USB && USB_EHCI_HCD && CPU_CAVIUM_OCTEON
+ default n
+ select USB_EHCI_BIG_ENDIAN_MMIO
+ help
+ Enable support for the Octeon II SOC's on-chip EHCI
+ controller. It is needed for high-speed (480Mbit/sec)
+ USB 2.0 device support. All CN6XXX based chips with USB are
+ supported.
+
+config USB_OCTEON_OHCI
+ bool "Octeon on-chip OHCI support"
+ depends on USB && USB_OHCI_HCD && CPU_CAVIUM_OCTEON
+ default USB_OCTEON_EHCI
+ select USB_OHCI_BIG_ENDIAN_MMIO
+ select USB_OHCI_LITTLE_ENDIAN
+ help
+	  Enable support for the Octeon II SOC's on-chip OHCI
+	  controller. It is needed for full-speed and low-speed USB 1.1
+	  device support. All CN6XXX based chips with USB are supported.
+
+config USB_OCTEON2_COMMON
+ bool
+ default y if USB_OCTEON_EHCI || USB_OCTEON_OHCI
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 91c5a1bd102..624a362f2fe 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -34,3 +34,4 @@ obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o
obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o
obj-$(CONFIG_USB_FSL_MPH_DR_OF) += fsl-mph-dr-of.o
+obj-$(CONFIG_USB_OCTEON2_COMMON) += octeon2-common.o
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 2adae8e39bb..502a7e6fef4 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1211,6 +1211,11 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_atmel_driver
#endif
+#ifdef CONFIG_USB_OCTEON_EHCI
+#include "ehci-octeon.c"
+#define PLATFORM_DRIVER ehci_octeon_driver
+#endif
+
#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
!defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) && \
!defined(XILINX_OF_PLATFORM_DRIVER)
diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c
new file mode 100644
index 00000000000..a31a031178a
--- /dev/null
+++ b/drivers/usb/host/ehci-octeon.c
@@ -0,0 +1,207 @@
+/*
+ * EHCI HCD glue for Cavium Octeon II SOCs.
+ *
+ * Loosely based on ehci-au1xxx.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2010 Cavium Networks
+ *
+ */
+
+#include <linux/platform_device.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-uctlx-defs.h>
+
+#define OCTEON_EHCI_HCD_NAME "octeon-ehci"
+
+/* Common clock init code. */
+void octeon2_usb_clocks_start(void);
+void octeon2_usb_clocks_stop(void);
+
+static void ehci_octeon_start(void)
+{
+ union cvmx_uctlx_ehci_ctl ehci_ctl;
+
+ octeon2_usb_clocks_start();
+
+ ehci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_EHCI_CTL(0));
+ /* Use 64-bit addressing. */
+ ehci_ctl.s.ehci_64b_addr_en = 1;
+ ehci_ctl.s.l2c_addr_msb = 0;
+ ehci_ctl.s.l2c_buff_emod = 1; /* Byte swapped. */
+ ehci_ctl.s.l2c_desc_emod = 1; /* Byte swapped. */
+ cvmx_write_csr(CVMX_UCTLX_EHCI_CTL(0), ehci_ctl.u64);
+}
+
+static void ehci_octeon_stop(void)
+{
+ octeon2_usb_clocks_stop();
+}
+
+static const struct hc_driver ehci_octeon_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Octeon EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ehci_init,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+static u64 ehci_octeon_dma_mask = DMA_BIT_MASK(64);
+
+static int ehci_octeon_drv_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct resource *res_mem;
+ int irq;
+ int ret;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No irq assigned\n");
+ return -ENODEV;
+ }
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_mem == NULL) {
+ dev_err(&pdev->dev, "No register space assigned\n");
+ return -ENODEV;
+ }
+
+ /*
+ * We can DMA from anywhere. But the descriptors must be in
+ * the lower 4GB.
+ */
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ pdev->dev.dma_mask = &ehci_octeon_dma_mask;
+
+ hcd = usb_create_hcd(&ehci_octeon_hc_driver, &pdev->dev, "octeon");
+ if (!hcd)
+ return -ENOMEM;
+
+ hcd->rsrc_start = res_mem->start;
+ hcd->rsrc_len = res_mem->end - res_mem->start + 1;
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
+ OCTEON_EHCI_HCD_NAME)) {
+ dev_err(&pdev->dev, "request_mem_region failed\n");
+ ret = -EBUSY;
+ goto err1;
+ }
+
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err2;
+ }
+
+ ehci_octeon_start();
+
+ ehci = hcd_to_ehci(hcd);
+
+ /* Octeon EHCI matches CPU endianness. */
+#ifdef __BIG_ENDIAN
+ ehci->big_endian_mmio = 1;
+#endif
+
+ ehci->caps = hcd->regs;
+ ehci->regs = hcd->regs +
+ HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+ /* cache this readonly data; minimize chip reads */
+ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+ ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ if (ret) {
+ dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
+ goto err3;
+ }
+
+ platform_set_drvdata(pdev, hcd);
+
+ /* root ports should always stay powered */
+ ehci_port_power(ehci, 1);
+
+ return 0;
+err3:
+ ehci_octeon_stop();
+
+ iounmap(hcd->regs);
+err2:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err1:
+ usb_put_hcd(hcd);
+ return ret;
+}
+
+static int ehci_octeon_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ usb_remove_hcd(hcd);
+
+ ehci_octeon_stop();
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver ehci_octeon_driver = {
+ .probe = ehci_octeon_drv_probe,
+ .remove = ehci_octeon_drv_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = OCTEON_EHCI_HCD_NAME,
+ .owner = THIS_MODULE,
+ }
+};
+
+MODULE_ALIAS("platform:" OCTEON_EHCI_HCD_NAME);
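The probe routine above consumes exactly two resources from its platform device: one IORESOURCE_MEM entry for the EHCI register block and one IRQ. The registration itself lives in the Octeon arch/platform code, which is not part of this diff; the following is only a minimal sketch of what such a registration could look like, with a placeholder base address and IRQ number rather than real CN6XXX values:

	#include <linux/kernel.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	static struct resource octeon_ehci_resources[] = {
		[0] = {
			.start	= 0x1000000000000ull,	/* placeholder CSR base */
			.end	= 0x1000000000000ull + 0xfff,
			.flags	= IORESOURCE_MEM,
		},
		[1] = {
			.start	= 56,			/* placeholder IRQ */
			.end	= 56,
			.flags	= IORESOURCE_IRQ,
		},
	};

	/* .name must match OCTEON_EHCI_HCD_NAME ("octeon-ehci") for the
	 * platform bus to bind this device to ehci_octeon_driver. */
	static struct platform_device octeon_ehci_device = {
		.name		= "octeon-ehci",
		.id		= -1,
		.num_resources	= ARRAY_SIZE(octeon_ehci_resources),
		.resource	= octeon_ehci_resources,
	};

	/* Registered from board/arch setup code, e.g.:
	 *	platform_device_register(&octeon_ehci_device);
	 */

With that in place, ehci_octeon_drv_probe() picks up the register window via platform_get_resource() and the interrupt via platform_get_irq(), exactly as shown in the diff.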
diff --git a/drivers/usb/host/octeon2-common.c b/drivers/usb/host/octeon2-common.c
new file mode 100644
index 00000000000..72d672cfcf3
--- /dev/null
+++ b/drivers/usb/host/octeon2-common.c
@@ -0,0 +1,185 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2010 Cavium Networks
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+
+#include <asm/atomic.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-uctlx-defs.h>
+
+static atomic_t octeon2_usb_clock_start_cnt = ATOMIC_INIT(0);
+
+void octeon2_usb_clocks_start(void)
+{
+ u64 div;
+ union cvmx_uctlx_if_ena if_ena;
+ union cvmx_uctlx_clk_rst_ctl clk_rst_ctl;
+ union cvmx_uctlx_uphy_ctl_status uphy_ctl_status;
+ union cvmx_uctlx_uphy_portx_ctl_status port_ctl_status;
+ int i;
+ unsigned long io_clk_64_to_ns;
+
+ if (atomic_inc_return(&octeon2_usb_clock_start_cnt) != 1)
+ return;
+
+ io_clk_64_to_ns = 64000000000ull / octeon_get_io_clock_rate();
+
+ /*
+ * Step 1: Wait for the voltages to stabilize. That surely happened
+ * before the kernel started.
+ *
+ * Step 2: Enable SCLK of UCTL by writing UCTL0_IF_ENA[EN] = 1
+ */
+ if_ena.u64 = 0;
+ if_ena.s.en = 1;
+ cvmx_write_csr(CVMX_UCTLX_IF_ENA(0), if_ena.u64);
+
+ /* Step 3: Configure the reference clock, PHY, and HCLK */
+ clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
+ /* 3a */
+ clk_rst_ctl.s.p_por = 1;
+ clk_rst_ctl.s.hrst = 0;
+ clk_rst_ctl.s.p_prst = 0;
+ clk_rst_ctl.s.h_clkdiv_rst = 0;
+ clk_rst_ctl.s.o_clkdiv_rst = 0;
+ clk_rst_ctl.s.h_clkdiv_en = 0;
+ clk_rst_ctl.s.o_clkdiv_en = 0;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* 3b */
+ /* 12MHz crystal. */
+ clk_rst_ctl.s.p_refclk_sel = 0;
+ clk_rst_ctl.s.p_refclk_div = 0;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* 3c */
+ div = octeon_get_io_clock_rate() / 130000000ull;
+
+ switch (div) {
+ case 0:
+ div = 1;
+ break;
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ break;
+ case 5:
+ div = 4;
+ break;
+ case 6:
+ case 7:
+ div = 6;
+ break;
+ case 8:
+ case 9:
+ case 10:
+ case 11:
+ div = 8;
+ break;
+ default:
+ div = 12;
+ break;
+ }
+ clk_rst_ctl.s.h_div = div;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+ /* Read it back. */
+ clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
+ clk_rst_ctl.s.h_clkdiv_en = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+ /* 3d */
+ clk_rst_ctl.s.h_clkdiv_rst = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* 3e: delay 64 io clocks */
+ ndelay(io_clk_64_to_ns);
+
+ /*
+ * Step 4: Program the power-on reset field in the UCTL
+ * clock-reset-control register.
+ */
+ clk_rst_ctl.s.p_por = 0;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* Step 5: Wait 1 ms for the PHY clock to start. */
+ mdelay(1);
+
+ /*
+ * Step 6: Program the reset input from automatic test
+ * equipment field in the UPHY CSR
+ */
+ uphy_ctl_status.u64 = cvmx_read_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0));
+ uphy_ctl_status.s.ate_reset = 1;
+ cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64);
+
+ /* Step 7: Wait for at least 10ns. */
+ ndelay(10);
+
+ /* Step 8: Clear the ATE_RESET field in the UPHY CSR. */
+ uphy_ctl_status.s.ate_reset = 0;
+ cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64);
+
+ /*
+ * Step 9: Wait for at least 20ns for UPHY to output PHY clock
+ * signals and OHCI_CLK48
+ */
+ ndelay(20);
+
+ /* Step 10: Configure the OHCI_CLK48 and OHCI_CLK12 clocks. */
+ /* 10a */
+ clk_rst_ctl.s.o_clkdiv_rst = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* 10b */
+ clk_rst_ctl.s.o_clkdiv_en = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* 10c */
+ ndelay(io_clk_64_to_ns);
+
+ /*
+ * Step 11: Program the PHY reset field:
+ * UCTL0_CLK_RST_CTL[P_PRST] = 1
+ */
+ clk_rst_ctl.s.p_prst = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* Step 12: Wait 1 us. */
+ udelay(1);
+
+ /* Step 13: Program the HRESET_N field: UCTL0_CLK_RST_CTL[HRST] = 1 */
+ clk_rst_ctl.s.hrst = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* Now we can set some other registers. */
+
+ for (i = 0; i <= 1; i++) {
+ port_ctl_status.u64 =
+ cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0));
+ /* Set txvreftune to 15 to obtain a compliant 'eye' diagram. */
+ port_ctl_status.s.txvreftune = 15;
+ cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0),
+ port_ctl_status.u64);
+ }
+}
+EXPORT_SYMBOL(octeon2_usb_clocks_start);
+
+void octeon2_usb_clocks_stop(void)
+{
+ union cvmx_uctlx_if_ena if_ena;
+
+ if (atomic_dec_return(&octeon2_usb_clock_start_cnt) != 0)
+ return;
+
+ if_ena.u64 = 0;
+ if_ena.s.en = 0;
+ cvmx_write_csr(CVMX_UCTLX_IF_ENA(0), if_ena.u64);
+}
+EXPORT_SYMBOL(octeon2_usb_clocks_stop);
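octeon2_usb_clocks_start()/stop() are refcounted through octeon2_usb_clock_start_cnt, so whichever of the EHCI and OHCI glue drivers probes first performs the bring-up, and the last one to go away gates the interface clock off again. The only non-obvious piece is the H_DIV selection in step 3c: the switch clamps the integer ratio io_clk / 130 MHz to a divider the UCTL supports. A standalone restatement of that mapping, assuming only that the supported dividers are 1-4, 6, 8 and 12 (as implied by the switch above):

	#include <linux/types.h>

	/* Sketch of the H_DIV choice in octeon2_usb_clocks_start(); not a
	 * replacement for the code above, just the same mapping in isolation. */
	static u64 octeon2_usb_hclk_div(u64 io_clock_hz)
	{
		u64 div = io_clock_hz / 130000000ull;

		if (div == 0)
			return 1;	/* never program a zero divider */
		if (div <= 4)
			return div;	/* 1, 2, 3 and 4 are used as-is */
		if (div == 5)
			return 4;
		if (div <= 7)
			return 6;
		if (div <= 11)
			return 8;
		return 12;
	}

For example, a 600 MHz IO clock yields 4, an 800 MHz clock yields 6, and a 1.2 GHz clock yields 8.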
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index f3713f43f3f..5179acb7aa2 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1106,6 +1106,11 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ohci_hcd_jz4740_driver
#endif
+#ifdef CONFIG_USB_OCTEON_OHCI
+#include "ohci-octeon.c"
+#define PLATFORM_DRIVER ohci_octeon_driver
+#endif
+
#if !defined(PCI_DRIVER) && \
!defined(PLATFORM_DRIVER) && \
!defined(OMAP1_PLATFORM_DRIVER) && \
diff --git a/drivers/usb/host/ohci-octeon.c b/drivers/usb/host/ohci-octeon.c
new file mode 100644
index 00000000000..e4ddfaf8870
--- /dev/null
+++ b/drivers/usb/host/ohci-octeon.c
@@ -0,0 +1,214 @@
+/*
+ * OHCI HCD glue for Cavium Octeon II SOCs.
+ *
+ * Loosely based on ehci-au1xxx.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2010 Cavium Networks
+ *
+ */
+
+#include <linux/platform_device.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-uctlx-defs.h>
+
+#define OCTEON_OHCI_HCD_NAME "octeon-ohci"
+
+/* Common clock init code. */
+void octeon2_usb_clocks_start(void);
+void octeon2_usb_clocks_stop(void);
+
+static void ohci_octeon_hw_start(void)
+{
+ union cvmx_uctlx_ohci_ctl ohci_ctl;
+
+ octeon2_usb_clocks_start();
+
+ ohci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_OHCI_CTL(0));
+ ohci_ctl.s.l2c_addr_msb = 0;
+ ohci_ctl.s.l2c_buff_emod = 1; /* Byte swapped. */
+ ohci_ctl.s.l2c_desc_emod = 1; /* Byte swapped. */
+ cvmx_write_csr(CVMX_UCTLX_OHCI_CTL(0), ohci_ctl.u64);
+
+}
+
+static void ohci_octeon_hw_stop(void)
+{
+ /* Undo ohci_octeon_hw_start(). */
+ octeon2_usb_clocks_stop();
+}
+
+static int __devinit ohci_octeon_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int ret;
+
+ ret = ohci_init(ohci);
+
+ if (ret < 0)
+ return ret;
+
+ ret = ohci_run(ohci);
+
+ if (ret < 0) {
+ ohci_err(ohci, "can't start %s", hcd->self.bus_name);
+ ohci_stop(hcd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct hc_driver ohci_octeon_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Octeon OHCI",
+ .hcd_priv_size = sizeof(struct ohci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ohci_irq,
+ .flags = HCD_USB11 | HCD_MEMORY,
+
+ /*
+ * basic lifecycle operations
+ */
+ .start = ohci_octeon_start,
+ .stop = ohci_stop,
+ .shutdown = ohci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ohci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ohci_hub_status_data,
+ .hub_control = ohci_hub_control,
+
+ .start_port_reset = ohci_start_port_reset,
+};
+
+static int ohci_octeon_drv_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct ohci_hcd *ohci;
+ void *reg_base;
+ struct resource *res_mem;
+ int irq;
+ int ret;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No irq assigned\n");
+ return -ENODEV;
+ }
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_mem == NULL) {
+ dev_err(&pdev->dev, "No register space assigned\n");
+ return -ENODEV;
+ }
+
+ /* OHCI is a 32-bit device. */
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+ hcd = usb_create_hcd(&ohci_octeon_hc_driver, &pdev->dev, "octeon");
+ if (!hcd)
+ return -ENOMEM;
+
+ hcd->rsrc_start = res_mem->start;
+ hcd->rsrc_len = res_mem->end - res_mem->start + 1;
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
+ OCTEON_OHCI_HCD_NAME)) {
+ dev_err(&pdev->dev, "request_mem_region failed\n");
+ ret = -EBUSY;
+ goto err1;
+ }
+
+ reg_base = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+ if (!reg_base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err2;
+ }
+
+ ohci_octeon_hw_start();
+
+ hcd->regs = reg_base;
+
+ ohci = hcd_to_ohci(hcd);
+
+ /* Octeon OHCI matches CPU endianness. */
+#ifdef __BIG_ENDIAN
+ ohci->flags |= OHCI_QUIRK_BE_MMIO;
+#endif
+
+ ohci_hcd_init(ohci);
+
+ ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ if (ret) {
+ dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
+ goto err3;
+ }
+
+ platform_set_drvdata(pdev, hcd);
+
+ return 0;
+
+err3:
+ ohci_octeon_hw_stop();
+
+ iounmap(hcd->regs);
+err2:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err1:
+ usb_put_hcd(hcd);
+ return ret;
+}
+
+static int ohci_octeon_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ usb_remove_hcd(hcd);
+
+ ohci_octeon_hw_stop();
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver ohci_octeon_driver = {
+ .probe = ohci_octeon_drv_probe,
+ .remove = ohci_octeon_drv_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = OCTEON_OHCI_HCD_NAME,
+ .owner = THIS_MODULE,
+ }
+};
+
+MODULE_ALIAS("platform:" OCTEON_OHCI_HCD_NAME);
diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c
index 909923800a0..945ee830030 100644
--- a/drivers/watchdog/octeon-wdt-main.c
+++ b/drivers/watchdog/octeon-wdt-main.c
@@ -478,7 +478,7 @@ static void octeon_wdt_calc_parameters(int t)
countdown_reset = periods > 2 ? periods - 2 : 0;
heartbeat = t;
- timeout_cnt = ((octeon_get_clock_rate() >> 8) * timeout_sec) >> 8;
+ timeout_cnt = ((octeon_get_io_clock_rate() >> 8) * timeout_sec) >> 8;
}
static int octeon_wdt_set_heartbeat(int t)
@@ -677,7 +677,7 @@ static int __init octeon_wdt_init(void)
max_timeout_sec = 6;
do {
max_timeout_sec--;
- timeout_cnt = ((octeon_get_clock_rate() >> 8) * max_timeout_sec) >> 8;
+ timeout_cnt = ((octeon_get_io_clock_rate() >> 8) * max_timeout_sec) >> 8;
} while (timeout_cnt > 65535);
BUG_ON(timeout_cnt == 0);
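The two hunks above only swap octeon_get_clock_rate() for octeon_get_io_clock_rate(); the arithmetic is untouched. Since ((rate >> 8) * sec) >> 8 is roughly rate * sec / 65536, the init loop simply picks the largest period (at most 5 seconds) whose count still fits in 16 bits, hence the 65535 check. A small userspace sketch of that loop, assuming a hypothetical 1 GHz IO clock (the real rate comes from octeon_get_io_clock_rate()):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t io_clk = 1000000000ull;	/* hypothetical 1 GHz IO clock */
		uint64_t timeout_cnt;
		unsigned int max_timeout_sec = 6;

		/* Mirrors the do/while in octeon_wdt_init(). */
		do {
			max_timeout_sec--;
			timeout_cnt = ((io_clk >> 8) * max_timeout_sec) >> 8;
		} while (timeout_cnt > 65535);

		/* Prints "4 61035": a 1 GHz IO clock allows at most a 4 s period. */
		printf("%u %llu\n", max_timeout_sec, (unsigned long long)timeout_cnt);
		return 0;
	}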
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index d6662b789b6..f6339d11d59 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -121,17 +121,17 @@ static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
return rc;
}
-static int xenfs_get_sb(struct file_system_type *fs_type,
+static struct dentry *xenfs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name,
- void *data, struct vfsmount *mnt)
+ void *data)
{
- return get_sb_single(fs_type, flags, data, xenfs_fill_super, mnt);
+ return mount_single(fs_type, flags, data, xenfs_fill_super);
}
static struct file_system_type xenfs_type = {
.owner = THIS_MODULE,
.name = "xenfs",
- .get_sb = xenfs_get_sb,
+ .mount = xenfs_mount,
.kill_sb = kill_litter_super,
};