Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/DAC960.c            |  18
-rw-r--r--  drivers/block/Kconfig             |   2
-rw-r--r--  drivers/block/brd.c               |  20
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c  |  50
-rw-r--r--  drivers/block/drbd/drbd_nl.c      |   6
-rw-r--r--  drivers/block/floppy.c            |  56
-rw-r--r--  drivers/block/hd.c                |   1
-rw-r--r--  drivers/block/loop.c              |  40
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c |  11
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.h |   5
-rw-r--r--  drivers/block/nbd.c               | 296
-rw-r--r--  drivers/block/nvme.c              |   2
-rw-r--r--  drivers/block/pktcdvd.c           |   8
-rw-r--r--  drivers/block/rbd.c               | 730
-rw-r--r--  drivers/block/rbd_types.h         |   4
-rw-r--r--  drivers/block/sunvdc.c            |   5
-rw-r--r--  drivers/block/sx8.c               |   2
-rw-r--r--  drivers/block/ub.c                |  39
-rw-r--r--  drivers/block/viodasd.c           | 809
-rw-r--r--  drivers/block/xd.c                |   1
-rw-r--r--  drivers/block/xen-blkfront.c      |   3
21 files changed, 701 insertions, 1407 deletions
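A large share of the brd, drbd, loop and pktcdvd hunks below are one mechanical conversion: kmap_atomic() and kunmap_atomic() dropped their enum km_type argument once kmap slot management moved into the core highmem code. A minimal sketch of the before/after pattern, using a hypothetical copy helper rather than any one driver's code:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Hypothetical helper; only the mapping calls are the point here. */
    static void copy_into_page(struct page *page, unsigned int offset,
                               const void *src, size_t len)
    {
            void *dst;

            /* old API: dst = kmap_atomic(page, KM_USER0); */
            dst = kmap_atomic(page);        /* new API: no km_type slot */
            memcpy(dst + offset, src, len);
            /* old API: kunmap_atomic(dst, KM_USER0); */
            kunmap_atomic(dst);
    }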
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index e086fbbbe85..8db9089127c 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -1177,7 +1177,8 @@ static bool DAC960_V1_EnableMemoryMailboxInterface(DAC960_Controller_T int TimeoutCounter; int i; - + memset(&CommandMailbox, 0, sizeof(DAC960_V1_CommandMailbox_T)); + if (pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(32))) return DAC960_Failure(Controller, "DMA mask out of range"); Controller->BounceBufferLimit = DMA_BIT_MASK(32); @@ -4627,7 +4628,8 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command) DAC960_Controller_T *Controller = Command->Controller; DAC960_CommandType_T CommandType = Command->CommandType; DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox; - DAC960_V2_IOCTL_Opcode_T CommandOpcode = CommandMailbox->Common.IOCTL_Opcode; + DAC960_V2_IOCTL_Opcode_T IOCTLOpcode = CommandMailbox->Common.IOCTL_Opcode; + DAC960_V2_CommandOpcode_T CommandOpcode = CommandMailbox->SCSI_10.CommandOpcode; DAC960_V2_CommandStatus_T CommandStatus = Command->V2.CommandStatus; if (CommandType == DAC960_ReadCommand || @@ -4699,7 +4701,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command) { if (Controller->ShutdownMonitoringTimer) return; - if (CommandOpcode == DAC960_V2_GetControllerInfo) + if (IOCTLOpcode == DAC960_V2_GetControllerInfo) { DAC960_V2_ControllerInfo_T *NewControllerInfo = Controller->V2.NewControllerInformation; @@ -4719,14 +4721,14 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command) memcpy(ControllerInfo, NewControllerInfo, sizeof(DAC960_V2_ControllerInfo_T)); } - else if (CommandOpcode == DAC960_V2_GetEvent) + else if (IOCTLOpcode == DAC960_V2_GetEvent) { if (CommandStatus == DAC960_V2_NormalCompletion) { DAC960_V2_ReportEvent(Controller, Controller->V2.Event); } Controller->V2.NextEventSequenceNumber++; } - else if (CommandOpcode == DAC960_V2_GetPhysicalDeviceInfoValid && + else if (IOCTLOpcode == DAC960_V2_GetPhysicalDeviceInfoValid && CommandStatus == DAC960_V2_NormalCompletion) { DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInfo = @@ -4915,7 +4917,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command) NewPhysicalDeviceInfo->LogicalUnit++; Controller->V2.PhysicalDeviceIndex++; } - else if (CommandOpcode == DAC960_V2_GetPhysicalDeviceInfoValid) + else if (IOCTLOpcode == DAC960_V2_GetPhysicalDeviceInfoValid) { unsigned int DeviceIndex; for (DeviceIndex = Controller->V2.PhysicalDeviceIndex; @@ -4938,7 +4940,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command) } Controller->V2.NeedPhysicalDeviceInformation = false; } - else if (CommandOpcode == DAC960_V2_GetLogicalDeviceInfoValid && + else if (IOCTLOpcode == DAC960_V2_GetLogicalDeviceInfoValid && CommandStatus == DAC960_V2_NormalCompletion) { DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInfo = @@ -5065,7 +5067,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command) [LogicalDeviceNumber] = true; NewLogicalDeviceInfo->LogicalDeviceNumber++; } - else if (CommandOpcode == DAC960_V2_GetLogicalDeviceInfoValid) + else if (IOCTLOpcode == DAC960_V2_GetLogicalDeviceInfoValid) { int LogicalDriveNumber; for (LogicalDriveNumber = 0; diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 4e4c8a4a5fd..a796407123c 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -354,7 +354,7 @@ config BLK_DEV_SX8 Use devices /dev/sx8/$N and /dev/sx8/$Np$M. 
config BLK_DEV_UB - tristate "Low Performance USB Block driver" + tristate "Low Performance USB Block driver (deprecated)" depends on USB help This driver supports certain USB attached storage devices diff --git a/drivers/block/brd.c b/drivers/block/brd.c index ec246437f5a..531ceb31d0f 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -242,9 +242,9 @@ static void copy_to_brd(struct brd_device *brd, const void *src, page = brd_lookup_page(brd, sector); BUG_ON(!page); - dst = kmap_atomic(page, KM_USER1); + dst = kmap_atomic(page); memcpy(dst + offset, src, copy); - kunmap_atomic(dst, KM_USER1); + kunmap_atomic(dst); if (copy < n) { src += copy; @@ -253,9 +253,9 @@ static void copy_to_brd(struct brd_device *brd, const void *src, page = brd_lookup_page(brd, sector); BUG_ON(!page); - dst = kmap_atomic(page, KM_USER1); + dst = kmap_atomic(page); memcpy(dst, src, copy); - kunmap_atomic(dst, KM_USER1); + kunmap_atomic(dst); } } @@ -273,9 +273,9 @@ static void copy_from_brd(void *dst, struct brd_device *brd, copy = min_t(size_t, n, PAGE_SIZE - offset); page = brd_lookup_page(brd, sector); if (page) { - src = kmap_atomic(page, KM_USER1); + src = kmap_atomic(page); memcpy(dst, src + offset, copy); - kunmap_atomic(src, KM_USER1); + kunmap_atomic(src); } else memset(dst, 0, copy); @@ -285,9 +285,9 @@ static void copy_from_brd(void *dst, struct brd_device *brd, copy = n - copy; page = brd_lookup_page(brd, sector); if (page) { - src = kmap_atomic(page, KM_USER1); + src = kmap_atomic(page); memcpy(dst, src, copy); - kunmap_atomic(src, KM_USER1); + kunmap_atomic(src); } else memset(dst, 0, copy); } @@ -309,7 +309,7 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page, goto out; } - mem = kmap_atomic(page, KM_USER0); + mem = kmap_atomic(page); if (rw == READ) { copy_from_brd(mem + off, brd, sector, len); flush_dcache_page(page); @@ -317,7 +317,7 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page, flush_dcache_page(page); copy_to_brd(brd, mem + off, sector, len); } - kunmap_atomic(mem, KM_USER0); + kunmap_atomic(mem); out: return err; diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 912f585a760..3030201c69d 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -289,25 +289,25 @@ static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr) return page_nr; } -static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km) +static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) { struct page *page = b->bm_pages[idx]; - return (unsigned long *) kmap_atomic(page, km); + return (unsigned long *) kmap_atomic(page); } static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) { - return __bm_map_pidx(b, idx, KM_IRQ1); + return __bm_map_pidx(b, idx); } -static void __bm_unmap(unsigned long *p_addr, const enum km_type km) +static void __bm_unmap(unsigned long *p_addr) { - kunmap_atomic(p_addr, km); + kunmap_atomic(p_addr); }; static void bm_unmap(unsigned long *p_addr) { - return __bm_unmap(p_addr, KM_IRQ1); + return __bm_unmap(p_addr); } /* long word offset of _bitmap_ sector */ @@ -543,15 +543,15 @@ static unsigned long bm_count_bits(struct drbd_bitmap *b) /* all but last page */ for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) { - p_addr = __bm_map_pidx(b, idx, KM_USER0); + p_addr = __bm_map_pidx(b, idx); for (i = 0; i < LWPP; i++) bits += hweight_long(p_addr[i]); - __bm_unmap(p_addr, KM_USER0); + 
__bm_unmap(p_addr); cond_resched(); } /* last (or only) page */ last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL; - p_addr = __bm_map_pidx(b, idx, KM_USER0); + p_addr = __bm_map_pidx(b, idx); for (i = 0; i < last_word; i++) bits += hweight_long(p_addr[i]); p_addr[last_word] &= cpu_to_lel(mask); @@ -559,7 +559,7 @@ static unsigned long bm_count_bits(struct drbd_bitmap *b) /* 32bit arch, may have an unused padding long */ if (BITS_PER_LONG == 32 && (last_word & 1) == 0) p_addr[last_word+1] = 0; - __bm_unmap(p_addr, KM_USER0); + __bm_unmap(p_addr); return bits; } @@ -970,11 +970,11 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must * to use pre-allocated page pool */ void *src, *dest; page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT); - dest = kmap_atomic(page, KM_USER0); - src = kmap_atomic(b->bm_pages[page_nr], KM_USER1); + dest = kmap_atomic(page); + src = kmap_atomic(b->bm_pages[page_nr]); memcpy(dest, src, PAGE_SIZE); - kunmap_atomic(src, KM_USER1); - kunmap_atomic(dest, KM_USER0); + kunmap_atomic(src); + kunmap_atomic(dest); bm_store_page_idx(page, page_nr); } else page = b->bm_pages[page_nr]; @@ -1163,7 +1163,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc * this returns a bit number, NOT a sector! */ static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo, - const int find_zero_bit, const enum km_type km) + const int find_zero_bit) { struct drbd_bitmap *b = mdev->bitmap; unsigned long *p_addr; @@ -1178,7 +1178,7 @@ static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo, while (bm_fo < b->bm_bits) { /* bit offset of the first bit in the page */ bit_offset = bm_fo & ~BITS_PER_PAGE_MASK; - p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km); + p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo)); if (find_zero_bit) i = find_next_zero_bit_le(p_addr, @@ -1187,7 +1187,7 @@ static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo, i = find_next_bit_le(p_addr, PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK); - __bm_unmap(p_addr, km); + __bm_unmap(p_addr); if (i < PAGE_SIZE*8) { bm_fo = bit_offset + i; if (bm_fo >= b->bm_bits) @@ -1215,7 +1215,7 @@ static unsigned long bm_find_next(struct drbd_conf *mdev, if (BM_DONT_TEST & b->bm_flags) bm_print_lock_info(mdev); - i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1); + i = __bm_find_next(mdev, bm_fo, find_zero_bit); spin_unlock_irq(&b->bm_lock); return i; @@ -1239,13 +1239,13 @@ unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo) { /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */ - return __bm_find_next(mdev, bm_fo, 0, KM_USER1); + return __bm_find_next(mdev, bm_fo, 0); } unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo) { /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */ - return __bm_find_next(mdev, bm_fo, 1, KM_USER1); + return __bm_find_next(mdev, bm_fo, 1); } /* returns number of bits actually changed. 
@@ -1273,14 +1273,14 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, unsigned int page_nr = bm_bit_to_page_idx(b, bitnr); if (page_nr != last_page_nr) { if (p_addr) - __bm_unmap(p_addr, KM_IRQ1); + __bm_unmap(p_addr); if (c < 0) bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]); else if (c > 0) bm_set_page_need_writeout(b->bm_pages[last_page_nr]); changed_total += c; c = 0; - p_addr = __bm_map_pidx(b, page_nr, KM_IRQ1); + p_addr = __bm_map_pidx(b, page_nr); last_page_nr = page_nr; } if (val) @@ -1289,7 +1289,7 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr)); } if (p_addr) - __bm_unmap(p_addr, KM_IRQ1); + __bm_unmap(p_addr); if (c < 0) bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]); else if (c > 0) @@ -1342,13 +1342,13 @@ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b, { int i; int bits; - unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_IRQ1); + unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]); for (i = first_word; i < last_word; i++) { bits = hweight_long(paddr[i]); paddr[i] = ~0UL; b->bm_set += BITS_PER_LONG - bits; } - kunmap_atomic(paddr, KM_IRQ1); + kunmap_atomic(paddr); } /* Same thing as drbd_bm_set_bits, diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index af2a25049bc..abfaacaaf34 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -179,7 +179,7 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd) dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb); drbd_bcast_ev_helper(mdev, cmd); - ret = call_usermodehelper(usermode_helper, argv, envp, 1); + ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC); if (ret) dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n", usermode_helper, cmd, mb, @@ -2526,10 +2526,10 @@ void drbd_bcast_ee(struct drbd_conf *mdev, page = e->pages; page_chain_for_each(page) { - void *d = kmap_atomic(page, KM_USER0); + void *d = kmap_atomic(page); unsigned l = min_t(unsigned, len, PAGE_SIZE); memcpy(tl, d, l); - kunmap_atomic(d, KM_USER0); + kunmap_atomic(d); tl = (unsigned short*)((char*)tl + l); len -= l; if (len == 0) diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 510fb10ec45..b0b00d70c16 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -202,7 +202,6 @@ static int slow_floppy; #include <asm/dma.h> #include <asm/irq.h> -#include <asm/system.h> static int FLOPPY_IRQ = 6; static int FLOPPY_DMA = 2; @@ -1031,37 +1030,6 @@ static int fd_wait_for_completion(unsigned long delay, timeout_fn function) return 0; } -static DEFINE_SPINLOCK(floppy_hlt_lock); -static int hlt_disabled; -static void floppy_disable_hlt(void) -{ - unsigned long flags; - - WARN_ONCE(1, "floppy_disable_hlt() scheduled for removal in 2012"); - spin_lock_irqsave(&floppy_hlt_lock, flags); - if (!hlt_disabled) { - hlt_disabled = 1; -#ifdef HAVE_DISABLE_HLT - disable_hlt(); -#endif - } - spin_unlock_irqrestore(&floppy_hlt_lock, flags); -} - -static void floppy_enable_hlt(void) -{ - unsigned long flags; - - spin_lock_irqsave(&floppy_hlt_lock, flags); - if (hlt_disabled) { - hlt_disabled = 0; -#ifdef HAVE_DISABLE_HLT - enable_hlt(); -#endif - } - spin_unlock_irqrestore(&floppy_hlt_lock, flags); -} - static void setup_DMA(void) { unsigned long f; @@ -1106,7 +1074,6 @@ static void setup_DMA(void) fd_enable_dma(); release_dma_lock(f); #endif - floppy_disable_hlt(); } static void 
show_floppy(void); @@ -1708,7 +1675,6 @@ irqreturn_t floppy_interrupt(int irq, void *dev_id) fd_disable_dma(); release_dma_lock(f); - floppy_enable_hlt(); do_floppy = NULL; if (fdc >= N_FDC || FDCS->address == -1) { /* we don't even know which FDC is the culprit */ @@ -1857,8 +1823,6 @@ static void floppy_shutdown(unsigned long data) show_floppy(); cancel_activity(); - floppy_enable_hlt(); - flags = claim_dma_lock(); fd_disable_dma(); release_dma_lock(flags); @@ -3832,7 +3796,7 @@ static int __floppy_read_block_0(struct block_device *bdev) bio.bi_size = size; bio.bi_bdev = bdev; bio.bi_sector = 0; - bio.bi_flags = BIO_QUIET; + bio.bi_flags = (1 << BIO_QUIET); init_completion(&complete); bio.bi_private = &complete; bio.bi_end_io = floppy_rb0_complete; @@ -4368,8 +4332,14 @@ out_unreg_blkdev: out_put_disk: while (dr--) { del_timer_sync(&motor_off_timer[dr]); - if (disks[dr]->queue) + if (disks[dr]->queue) { blk_cleanup_queue(disks[dr]->queue); + /* + * put_disk() is not paired with add_disk() and + * will put queue reference one extra time. fix it. + */ + disks[dr]->queue = NULL; + } put_disk(disks[dr]); } return err; @@ -4503,7 +4473,6 @@ static void floppy_release_irq_and_dma(void) #if N_FDC > 1 set_dor(1, ~8, 0); #endif - floppy_enable_hlt(); if (floppy_track_buffer && max_buffer_sectors) { tmpsize = max_buffer_sectors * 1024; @@ -4579,6 +4548,15 @@ static void __exit floppy_module_exit(void) platform_device_unregister(&floppy_device[drive]); } blk_cleanup_queue(disks[drive]->queue); + + /* + * These disks have not called add_disk(). Don't put down + * queue reference in put_disk(). + */ + if (!(allowed_drive_mask & (1 << drive)) || + fdc_state[FDC(drive)].version == FDC_NONE) + disks[drive]->queue = NULL; + put_disk(disks[drive]); } diff --git a/drivers/block/hd.c b/drivers/block/hd.c index b52c9ca146f..bf397bf108b 100644 --- a/drivers/block/hd.c +++ b/drivers/block/hd.c @@ -44,7 +44,6 @@ #define HD_IRQ 14 #define REALLY_SLOW_IO -#include <asm/system.h> #include <asm/io.h> #include <asm/uaccess.h> diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f00257782fc..bbca966f8f6 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -93,16 +93,16 @@ static int transfer_none(struct loop_device *lo, int cmd, struct page *loop_page, unsigned loop_off, int size, sector_t real_block) { - char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off; - char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off; + char *raw_buf = kmap_atomic(raw_page) + raw_off; + char *loop_buf = kmap_atomic(loop_page) + loop_off; if (cmd == READ) memcpy(loop_buf, raw_buf, size); else memcpy(raw_buf, loop_buf, size); - kunmap_atomic(loop_buf, KM_USER1); - kunmap_atomic(raw_buf, KM_USER0); + kunmap_atomic(loop_buf); + kunmap_atomic(raw_buf); cond_resched(); return 0; } @@ -112,8 +112,8 @@ static int transfer_xor(struct loop_device *lo, int cmd, struct page *loop_page, unsigned loop_off, int size, sector_t real_block) { - char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off; - char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off; + char *raw_buf = kmap_atomic(raw_page) + raw_off; + char *loop_buf = kmap_atomic(loop_page) + loop_off; char *in, *out, *key; int i, keysize; @@ -130,8 +130,8 @@ static int transfer_xor(struct loop_device *lo, int cmd, for (i = 0; i < size; i++) *out++ = *in++ ^ key[(i & 511) % keysize]; - kunmap_atomic(loop_buf, KM_USER1); - kunmap_atomic(raw_buf, KM_USER0); + kunmap_atomic(loop_buf); + kunmap_atomic(raw_buf); cond_resched(); return 0; } @@ -356,14 +356,14 @@ 
lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd) return __splice_from_pipe(pipe, sd, lo_splice_actor); } -static int +static ssize_t do_lo_receive(struct loop_device *lo, struct bio_vec *bvec, int bsize, loff_t pos) { struct lo_read_data cookie; struct splice_desc sd; struct file *file; - long retval; + ssize_t retval; cookie.lo = lo; cookie.page = bvec->bv_page; @@ -379,26 +379,28 @@ do_lo_receive(struct loop_device *lo, file = lo->lo_backing_file; retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor); - if (retval < 0) - return retval; - if (retval != bvec->bv_len) - return -EIO; - return 0; + return retval; } static int lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos) { struct bio_vec *bvec; - int i, ret = 0; + ssize_t s; + int i; bio_for_each_segment(bvec, bio, i) { - ret = do_lo_receive(lo, bvec, bsize, pos); - if (ret < 0) + s = do_lo_receive(lo, bvec, bsize, pos); + if (s < 0) + return s; + + if (s != bvec->bv_len) { + zero_fill_bio(bio); break; + } pos += bvec->bv_len; } - return ret; + return 0; } static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index b74eab70c3d..8eb81c96608 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -2068,8 +2068,6 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd, * when the read completes. * @data Callback data passed to the callback function * when the read completes. - * @barrier If non-zero, this command must be completed before - * issuing any other commands. * @dir Direction (read or write) * * return value @@ -2077,7 +2075,7 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd, */ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start, int nsect, int nents, int tag, void *callback, - void *data, int barrier, int dir) + void *data, int dir) { struct host_to_dev_fis *fis; struct mtip_port *port = dd->port; @@ -2108,8 +2106,6 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start, *((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF); *((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF); fis->device = 1 << 6; - if (barrier) - fis->device |= FUA_BIT; fis->features = nsect & 0xFF; fis->features_ex = (nsect >> 8) & 0xFF; fis->sect_count = ((tag << 3) | (tag >> 5)); @@ -3087,7 +3083,6 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio) tag, bio_endio, bio, - bio->bi_rw & REQ_FUA, bio_data_dir(bio)); } else bio_io_error(bio); @@ -3187,6 +3182,10 @@ skip_create_disk: blk_queue_max_segments(dd->queue, MTIP_MAX_SG); blk_queue_physical_block_size(dd->queue, 4096); blk_queue_io_min(dd->queue, 4096); + /* + * write back cache is not supported in the device. FUA depends on + * write back cache support, hence setting flush support to zero. + */ blk_queue_flush(dd->queue, 0); /* Set the capacity of the device in 512 byte sectors. */ diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index 723d7c4946d..e0554a8f223 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h @@ -104,9 +104,6 @@ /* BAR number used to access the HBA registers. */ #define MTIP_ABAR 5 -/* Forced Unit Access Bit */ -#define FUA_BIT 0x80 - #ifdef DEBUG #define dbg_printk(format, arg...) 
\ printk(pr_fmt(format), ##arg); @@ -415,8 +412,6 @@ struct driver_data { atomic_t resumeflag; /* Atomic variable to track suspend/resume */ - atomic_t eh_active; /* Flag for error handling tracking */ - struct task_struct *mtip_svc_handler; /* task_struct of svc thd */ }; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index c3f0ee16594..061427a75d3 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -34,12 +34,11 @@ #include <linux/kthread.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/types.h> #include <linux/nbd.h> -#define LO_MAGIC 0x68797548 +#define NBD_MAGIC 0x68797548 #ifdef NDEBUG #define dprintk(flags, fmt...) @@ -116,7 +115,7 @@ static void nbd_end_request(struct request *req) spin_unlock_irqrestore(q->queue_lock, flags); } -static void sock_shutdown(struct nbd_device *lo, int lock) +static void sock_shutdown(struct nbd_device *nbd, int lock) { /* Forcibly shutdown the socket causing all listeners * to error @@ -125,14 +124,14 @@ static void sock_shutdown(struct nbd_device *lo, int lock) * there should be a more generic interface rather than * calling socket ops directly here */ if (lock) - mutex_lock(&lo->tx_lock); - if (lo->sock) { - dev_warn(disk_to_dev(lo->disk), "shutting down socket\n"); - kernel_sock_shutdown(lo->sock, SHUT_RDWR); - lo->sock = NULL; + mutex_lock(&nbd->tx_lock); + if (nbd->sock) { + dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n"); + kernel_sock_shutdown(nbd->sock, SHUT_RDWR); + nbd->sock = NULL; } if (lock) - mutex_unlock(&lo->tx_lock); + mutex_unlock(&nbd->tx_lock); } static void nbd_xmit_timeout(unsigned long arg) @@ -147,17 +146,17 @@ static void nbd_xmit_timeout(unsigned long arg) /* * Send or receive packet. */ -static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size, +static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size, int msg_flags) { - struct socket *sock = lo->sock; + struct socket *sock = nbd->sock; int result; struct msghdr msg; struct kvec iov; sigset_t blocked, oldset; if (unlikely(!sock)) { - dev_err(disk_to_dev(lo->disk), + dev_err(disk_to_dev(nbd->disk), "Attempted %s on closed socket in sock_xmit\n", (send ? 
"send" : "recv")); return -EINVAL; @@ -181,15 +180,15 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size, if (send) { struct timer_list ti; - if (lo->xmit_timeout) { + if (nbd->xmit_timeout) { init_timer(&ti); ti.function = nbd_xmit_timeout; ti.data = (unsigned long)current; - ti.expires = jiffies + lo->xmit_timeout; + ti.expires = jiffies + nbd->xmit_timeout; add_timer(&ti); } result = kernel_sendmsg(sock, &msg, &iov, 1, size); - if (lo->xmit_timeout) + if (nbd->xmit_timeout) del_timer_sync(&ti); } else result = kernel_recvmsg(sock, &msg, &iov, 1, size, @@ -201,7 +200,7 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size, task_pid_nr(current), current->comm, dequeue_signal_lock(current, ¤t->blocked, &info)); result = -EINTR; - sock_shutdown(lo, !send); + sock_shutdown(nbd, !send); break; } @@ -219,18 +218,19 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size, return result; } -static inline int sock_send_bvec(struct nbd_device *lo, struct bio_vec *bvec, +static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec, int flags) { int result; void *kaddr = kmap(bvec->bv_page); - result = sock_xmit(lo, 1, kaddr + bvec->bv_offset, bvec->bv_len, flags); + result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset, + bvec->bv_len, flags); kunmap(bvec->bv_page); return result; } /* always call with the tx_lock held */ -static int nbd_send_req(struct nbd_device *lo, struct request *req) +static int nbd_send_req(struct nbd_device *nbd, struct request *req) { int result, flags; struct nbd_request request; @@ -243,14 +243,14 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req) memcpy(request.handle, &req, sizeof(req)); dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n", - lo->disk->disk_name, req, + nbd->disk->disk_name, req, nbdcmd_to_ascii(nbd_cmd(req)), (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); - result = sock_xmit(lo, 1, &request, sizeof(request), + result = sock_xmit(nbd, 1, &request, sizeof(request), (nbd_cmd(req) == NBD_CMD_WRITE) ? 
MSG_MORE : 0); if (result <= 0) { - dev_err(disk_to_dev(lo->disk), + dev_err(disk_to_dev(nbd->disk), "Send control failed (result %d)\n", result); goto error_out; } @@ -267,10 +267,10 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req) if (!rq_iter_last(req, iter)) flags = MSG_MORE; dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n", - lo->disk->disk_name, req, bvec->bv_len); - result = sock_send_bvec(lo, bvec, flags); + nbd->disk->disk_name, req, bvec->bv_len); + result = sock_send_bvec(nbd, bvec, flags); if (result <= 0) { - dev_err(disk_to_dev(lo->disk), + dev_err(disk_to_dev(nbd->disk), "Send data failed (result %d)\n", result); goto error_out; @@ -283,25 +283,25 @@ error_out: return -EIO; } -static struct request *nbd_find_request(struct nbd_device *lo, +static struct request *nbd_find_request(struct nbd_device *nbd, struct request *xreq) { struct request *req, *tmp; int err; - err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq); + err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq); if (unlikely(err)) goto out; - spin_lock(&lo->queue_lock); - list_for_each_entry_safe(req, tmp, &lo->queue_head, queuelist) { + spin_lock(&nbd->queue_lock); + list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) { if (req != xreq) continue; list_del_init(&req->queuelist); - spin_unlock(&lo->queue_lock); + spin_unlock(&nbd->queue_lock); return req; } - spin_unlock(&lo->queue_lock); + spin_unlock(&nbd->queue_lock); err = -ENOENT; @@ -309,78 +309,78 @@ out: return ERR_PTR(err); } -static inline int sock_recv_bvec(struct nbd_device *lo, struct bio_vec *bvec) +static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec) { int result; void *kaddr = kmap(bvec->bv_page); - result = sock_xmit(lo, 0, kaddr + bvec->bv_offset, bvec->bv_len, + result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len, MSG_WAITALL); kunmap(bvec->bv_page); return result; } /* NULL returned = something went wrong, inform userspace */ -static struct request *nbd_read_stat(struct nbd_device *lo) +static struct request *nbd_read_stat(struct nbd_device *nbd) { int result; struct nbd_reply reply; struct request *req; reply.magic = 0; - result = sock_xmit(lo, 0, &reply, sizeof(reply), MSG_WAITALL); + result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL); if (result <= 0) { - dev_err(disk_to_dev(lo->disk), + dev_err(disk_to_dev(nbd->disk), "Receive control failed (result %d)\n", result); goto harderror; } if (ntohl(reply.magic) != NBD_REPLY_MAGIC) { - dev_err(disk_to_dev(lo->disk), "Wrong magic (0x%lx)\n", + dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n", (unsigned long)ntohl(reply.magic)); result = -EPROTO; goto harderror; } - req = nbd_find_request(lo, *(struct request **)reply.handle); + req = nbd_find_request(nbd, *(struct request **)reply.handle); if (IS_ERR(req)) { result = PTR_ERR(req); if (result != -ENOENT) goto harderror; - dev_err(disk_to_dev(lo->disk), "Unexpected reply (%p)\n", + dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n", reply.handle); result = -EBADR; goto harderror; } if (ntohl(reply.error)) { - dev_err(disk_to_dev(lo->disk), "Other side returned error (%d)\n", + dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", ntohl(reply.error)); req->errors++; return req; } dprintk(DBG_RX, "%s: request %p: got reply\n", - lo->disk->disk_name, req); + nbd->disk->disk_name, req); if (nbd_cmd(req) == NBD_CMD_READ) { struct req_iterator iter; struct bio_vec *bvec; rq_for_each_segment(bvec, req, 
iter) { - result = sock_recv_bvec(lo, bvec); + result = sock_recv_bvec(nbd, bvec); if (result <= 0) { - dev_err(disk_to_dev(lo->disk), "Receive data failed (result %d)\n", + dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", result); req->errors++; return req; } dprintk(DBG_RX, "%s: request %p: got %d bytes data\n", - lo->disk->disk_name, req, bvec->bv_len); + nbd->disk->disk_name, req, bvec->bv_len); } } return req; harderror: - lo->harderror = result; + nbd->harderror = result; return NULL; } @@ -398,48 +398,48 @@ static struct device_attribute pid_attr = { .show = pid_show, }; -static int nbd_do_it(struct nbd_device *lo) +static int nbd_do_it(struct nbd_device *nbd) { struct request *req; int ret; - BUG_ON(lo->magic != LO_MAGIC); + BUG_ON(nbd->magic != NBD_MAGIC); - lo->pid = task_pid_nr(current); - ret = device_create_file(disk_to_dev(lo->disk), &pid_attr); + nbd->pid = task_pid_nr(current); + ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr); if (ret) { - dev_err(disk_to_dev(lo->disk), "device_create_file failed!\n"); - lo->pid = 0; + dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n"); + nbd->pid = 0; return ret; } - while ((req = nbd_read_stat(lo)) != NULL) + while ((req = nbd_read_stat(nbd)) != NULL) nbd_end_request(req); - device_remove_file(disk_to_dev(lo->disk), &pid_attr); - lo->pid = 0; + device_remove_file(disk_to_dev(nbd->disk), &pid_attr); + nbd->pid = 0; return 0; } -static void nbd_clear_que(struct nbd_device *lo) +static void nbd_clear_que(struct nbd_device *nbd) { struct request *req; - BUG_ON(lo->magic != LO_MAGIC); + BUG_ON(nbd->magic != NBD_MAGIC); /* - * Because we have set lo->sock to NULL under the tx_lock, all + * Because we have set nbd->sock to NULL under the tx_lock, all * modifications to the list must have completed by now. For * the same reason, the active_req must be NULL. * * As a consequence, we don't need to take the spin lock while * purging the list here. 
*/ - BUG_ON(lo->sock); - BUG_ON(lo->active_req); + BUG_ON(nbd->sock); + BUG_ON(nbd->active_req); - while (!list_empty(&lo->queue_head)) { - req = list_entry(lo->queue_head.next, struct request, + while (!list_empty(&nbd->queue_head)) { + req = list_entry(nbd->queue_head.next, struct request, queuelist); list_del_init(&req->queuelist); req->errors++; @@ -448,7 +448,7 @@ static void nbd_clear_que(struct nbd_device *lo) } -static void nbd_handle_req(struct nbd_device *lo, struct request *req) +static void nbd_handle_req(struct nbd_device *nbd, struct request *req) { if (req->cmd_type != REQ_TYPE_FS) goto error_out; @@ -456,8 +456,8 @@ static void nbd_handle_req(struct nbd_device *lo, struct request *req) nbd_cmd(req) = NBD_CMD_READ; if (rq_data_dir(req) == WRITE) { nbd_cmd(req) = NBD_CMD_WRITE; - if (lo->flags & NBD_READ_ONLY) { - dev_err(disk_to_dev(lo->disk), + if (nbd->flags & NBD_READ_ONLY) { + dev_err(disk_to_dev(nbd->disk), "Write on read-only\n"); goto error_out; } @@ -465,29 +465,29 @@ static void nbd_handle_req(struct nbd_device *lo, struct request *req) req->errors = 0; - mutex_lock(&lo->tx_lock); - if (unlikely(!lo->sock)) { - mutex_unlock(&lo->tx_lock); - dev_err(disk_to_dev(lo->disk), + mutex_lock(&nbd->tx_lock); + if (unlikely(!nbd->sock)) { + mutex_unlock(&nbd->tx_lock); + dev_err(disk_to_dev(nbd->disk), "Attempted send on closed socket\n"); goto error_out; } - lo->active_req = req; + nbd->active_req = req; - if (nbd_send_req(lo, req) != 0) { - dev_err(disk_to_dev(lo->disk), "Request send failed\n"); + if (nbd_send_req(nbd, req) != 0) { + dev_err(disk_to_dev(nbd->disk), "Request send failed\n"); req->errors++; nbd_end_request(req); } else { - spin_lock(&lo->queue_lock); - list_add(&req->queuelist, &lo->queue_head); - spin_unlock(&lo->queue_lock); + spin_lock(&nbd->queue_lock); + list_add(&req->queuelist, &nbd->queue_head); + spin_unlock(&nbd->queue_lock); } - lo->active_req = NULL; - mutex_unlock(&lo->tx_lock); - wake_up_all(&lo->active_wq); + nbd->active_req = NULL; + mutex_unlock(&nbd->tx_lock); + wake_up_all(&nbd->active_wq); return; @@ -498,28 +498,28 @@ error_out: static int nbd_thread(void *data) { - struct nbd_device *lo = data; + struct nbd_device *nbd = data; struct request *req; set_user_nice(current, -20); - while (!kthread_should_stop() || !list_empty(&lo->waiting_queue)) { + while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) { /* wait for something to do */ - wait_event_interruptible(lo->waiting_wq, + wait_event_interruptible(nbd->waiting_wq, kthread_should_stop() || - !list_empty(&lo->waiting_queue)); + !list_empty(&nbd->waiting_queue)); /* extract request */ - if (list_empty(&lo->waiting_queue)) + if (list_empty(&nbd->waiting_queue)) continue; - spin_lock_irq(&lo->queue_lock); - req = list_entry(lo->waiting_queue.next, struct request, + spin_lock_irq(&nbd->queue_lock); + req = list_entry(nbd->waiting_queue.next, struct request, queuelist); list_del_init(&req->queuelist); - spin_unlock_irq(&lo->queue_lock); + spin_unlock_irq(&nbd->queue_lock); /* handle request */ - nbd_handle_req(lo, req); + nbd_handle_req(nbd, req); } return 0; } @@ -527,7 +527,7 @@ static int nbd_thread(void *data) /* * We always wait for result of write, for now. 
It would be nice to make it optional * in future - * if ((rq_data_dir(req) == WRITE) && (lo->flags & NBD_WRITE_NOCHK)) + * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK)) * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); } */ @@ -536,19 +536,19 @@ static void do_nbd_request(struct request_queue *q) struct request *req; while ((req = blk_fetch_request(q)) != NULL) { - struct nbd_device *lo; + struct nbd_device *nbd; spin_unlock_irq(q->queue_lock); dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n", req->rq_disk->disk_name, req, req->cmd_type); - lo = req->rq_disk->private_data; + nbd = req->rq_disk->private_data; - BUG_ON(lo->magic != LO_MAGIC); + BUG_ON(nbd->magic != NBD_MAGIC); - if (unlikely(!lo->sock)) { - dev_err(disk_to_dev(lo->disk), + if (unlikely(!nbd->sock)) { + dev_err(disk_to_dev(nbd->disk), "Attempted send on closed socket\n"); req->errors++; nbd_end_request(req); @@ -556,11 +556,11 @@ static void do_nbd_request(struct request_queue *q) continue; } - spin_lock_irq(&lo->queue_lock); - list_add_tail(&req->queuelist, &lo->waiting_queue); - spin_unlock_irq(&lo->queue_lock); + spin_lock_irq(&nbd->queue_lock); + list_add_tail(&req->queuelist, &nbd->waiting_queue); + spin_unlock_irq(&nbd->queue_lock); - wake_up(&lo->waiting_wq); + wake_up(&nbd->waiting_wq); spin_lock_irq(q->queue_lock); } @@ -568,32 +568,32 @@ static void do_nbd_request(struct request_queue *q) /* Must be called with tx_lock held */ -static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo, +static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, unsigned int cmd, unsigned long arg) { switch (cmd) { case NBD_DISCONNECT: { struct request sreq; - dev_info(disk_to_dev(lo->disk), "NBD_DISCONNECT\n"); + dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); blk_rq_init(NULL, &sreq); sreq.cmd_type = REQ_TYPE_SPECIAL; nbd_cmd(&sreq) = NBD_CMD_DISC; - if (!lo->sock) + if (!nbd->sock) return -EINVAL; - nbd_send_req(lo, &sreq); + nbd_send_req(nbd, &sreq); return 0; } case NBD_CLEAR_SOCK: { struct file *file; - lo->sock = NULL; - file = lo->file; - lo->file = NULL; - nbd_clear_que(lo); - BUG_ON(!list_empty(&lo->queue_head)); + nbd->sock = NULL; + file = nbd->file; + nbd->file = NULL; + nbd_clear_que(nbd); + BUG_ON(!list_empty(&nbd->queue_head)); if (file) fput(file); return 0; @@ -601,14 +601,14 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo, case NBD_SET_SOCK: { struct file *file; - if (lo->file) + if (nbd->file) return -EBUSY; file = fget(arg); if (file) { struct inode *inode = file->f_path.dentry->d_inode; if (S_ISSOCK(inode->i_mode)) { - lo->file = file; - lo->sock = SOCKET_I(inode); + nbd->file = file; + nbd->sock = SOCKET_I(inode); if (max_part > 0) bdev->bd_invalidated = 1; return 0; @@ -620,29 +620,29 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo, } case NBD_SET_BLKSIZE: - lo->blksize = arg; - lo->bytesize &= ~(lo->blksize-1); - bdev->bd_inode->i_size = lo->bytesize; - set_blocksize(bdev, lo->blksize); - set_capacity(lo->disk, lo->bytesize >> 9); + nbd->blksize = arg; + nbd->bytesize &= ~(nbd->blksize-1); + bdev->bd_inode->i_size = nbd->bytesize; + set_blocksize(bdev, nbd->blksize); + set_capacity(nbd->disk, nbd->bytesize >> 9); return 0; case NBD_SET_SIZE: - lo->bytesize = arg & ~(lo->blksize-1); - bdev->bd_inode->i_size = lo->bytesize; - set_blocksize(bdev, lo->blksize); - set_capacity(lo->disk, lo->bytesize >> 9); + nbd->bytesize = arg & ~(nbd->blksize-1); + bdev->bd_inode->i_size = 
nbd->bytesize; + set_blocksize(bdev, nbd->blksize); + set_capacity(nbd->disk, nbd->bytesize >> 9); return 0; case NBD_SET_TIMEOUT: - lo->xmit_timeout = arg * HZ; + nbd->xmit_timeout = arg * HZ; return 0; case NBD_SET_SIZE_BLOCKS: - lo->bytesize = ((u64) arg) * lo->blksize; - bdev->bd_inode->i_size = lo->bytesize; - set_blocksize(bdev, lo->blksize); - set_capacity(lo->disk, lo->bytesize >> 9); + nbd->bytesize = ((u64) arg) * nbd->blksize; + bdev->bd_inode->i_size = nbd->bytesize; + set_blocksize(bdev, nbd->blksize); + set_capacity(nbd->disk, nbd->bytesize >> 9); return 0; case NBD_DO_IT: { @@ -650,38 +650,38 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo, struct file *file; int error; - if (lo->pid) + if (nbd->pid) return -EBUSY; - if (!lo->file) + if (!nbd->file) return -EINVAL; - mutex_unlock(&lo->tx_lock); + mutex_unlock(&nbd->tx_lock); - thread = kthread_create(nbd_thread, lo, lo->disk->disk_name); + thread = kthread_create(nbd_thread, nbd, nbd->disk->disk_name); if (IS_ERR(thread)) { - mutex_lock(&lo->tx_lock); + mutex_lock(&nbd->tx_lock); return PTR_ERR(thread); } wake_up_process(thread); - error = nbd_do_it(lo); + error = nbd_do_it(nbd); kthread_stop(thread); - mutex_lock(&lo->tx_lock); + mutex_lock(&nbd->tx_lock); if (error) return error; - sock_shutdown(lo, 0); - file = lo->file; - lo->file = NULL; - nbd_clear_que(lo); - dev_warn(disk_to_dev(lo->disk), "queue cleared\n"); + sock_shutdown(nbd, 0); + file = nbd->file; + nbd->file = NULL; + nbd_clear_que(nbd); + dev_warn(disk_to_dev(nbd->disk), "queue cleared\n"); if (file) fput(file); - lo->bytesize = 0; + nbd->bytesize = 0; bdev->bd_inode->i_size = 0; - set_capacity(lo->disk, 0); + set_capacity(nbd->disk, 0); if (max_part > 0) ioctl_by_bdev(bdev, BLKRRPART, 0); - return lo->harderror; + return nbd->harderror; } case NBD_CLEAR_QUE: @@ -689,14 +689,14 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo, * This is for compatibility only. The queue is always cleared * by NBD_DO_IT or NBD_CLEAR_SOCK. 
*/ - BUG_ON(!lo->sock && !list_empty(&lo->queue_head)); + BUG_ON(!nbd->sock && !list_empty(&nbd->queue_head)); return 0; case NBD_PRINT_DEBUG: - dev_info(disk_to_dev(lo->disk), + dev_info(disk_to_dev(nbd->disk), "next = %p, prev = %p, head = %p\n", - lo->queue_head.next, lo->queue_head.prev, - &lo->queue_head); + nbd->queue_head.next, nbd->queue_head.prev, + &nbd->queue_head); return 0; } return -ENOTTY; @@ -705,21 +705,21 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo, static int nbd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { - struct nbd_device *lo = bdev->bd_disk->private_data; + struct nbd_device *nbd = bdev->bd_disk->private_data; int error; if (!capable(CAP_SYS_ADMIN)) return -EPERM; - BUG_ON(lo->magic != LO_MAGIC); + BUG_ON(nbd->magic != NBD_MAGIC); /* Anyone capable of this syscall can do *real bad* things */ dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n", - lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg); + nbd->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg); - mutex_lock(&lo->tx_lock); - error = __nbd_ioctl(bdev, lo, cmd, arg); - mutex_unlock(&lo->tx_lock); + mutex_lock(&nbd->tx_lock); + error = __nbd_ioctl(bdev, nbd, cmd, arg); + mutex_unlock(&nbd->tx_lock); return error; } @@ -805,7 +805,7 @@ static int __init nbd_init(void) for (i = 0; i < nbds_max; i++) { struct gendisk *disk = nbd_dev[i].disk; nbd_dev[i].file = NULL; - nbd_dev[i].magic = LO_MAGIC; + nbd_dev[i].magic = NBD_MAGIC; nbd_dev[i].flags = 0; INIT_LIST_HEAD(&nbd_dev[i].waiting_queue); spin_lock_init(&nbd_dev[i].queue_lock); diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c index 09776daf98e..38a2d063188 100644 --- a/drivers/block/nvme.c +++ b/drivers/block/nvme.c @@ -40,6 +40,8 @@ #include <linux/slab.h> #include <linux/types.h> +#include <asm-generic/io-64-nonatomic-lo-hi.h> + #define NVME_Q_DEPTH 1024 #define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) #define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index d59edeabd93..ba66e4445f4 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -987,14 +987,14 @@ static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct pag while (copy_size > 0) { struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg); - void *vfrom = kmap_atomic(src_bvl->bv_page, KM_USER0) + + void *vfrom = kmap_atomic(src_bvl->bv_page) + src_bvl->bv_offset + offs; void *vto = page_address(dst_page) + dst_offs; int len = min_t(int, copy_size, src_bvl->bv_len - offs); BUG_ON(len < 0); memcpy(vto, vfrom, len); - kunmap_atomic(vfrom, KM_USER0); + kunmap_atomic(vfrom); seg++; offs = 0; @@ -1019,10 +1019,10 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec) offs = 0; for (f = 0; f < pkt->frames; f++) { if (bvec[f].bv_page != pkt->pages[p]) { - void *vfrom = kmap_atomic(bvec[f].bv_page, KM_USER0) + bvec[f].bv_offset; + void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset; void *vto = page_address(pkt->pages[p]) + offs; memcpy(vto, vfrom, CD_FRAMESIZE); - kunmap_atomic(vfrom, KM_USER0); + kunmap_atomic(vfrom); bvec[f].bv_page = pkt->pages[p]; bvec[f].bv_offset = offs; } else { diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index a6278e7e61a..013c7a549fb 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -41,19 +41,35 @@ #include "rbd_types.h" -#define DRV_NAME "rbd" -#define DRV_NAME_LONG "rbd (rados block device)" +/* + * The basic unit 
of block I/O is a sector. It is interpreted in a + * number of contexts in Linux (blk, bio, genhd), but the default is + * universally 512 bytes. These symbols are just slightly more + * meaningful than the bare numbers they represent. + */ +#define SECTOR_SHIFT 9 +#define SECTOR_SIZE (1ULL << SECTOR_SHIFT) + +#define RBD_DRV_NAME "rbd" +#define RBD_DRV_NAME_LONG "rbd (rados block device)" #define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */ -#define RBD_MAX_MD_NAME_LEN (96 + sizeof(RBD_SUFFIX)) +#define RBD_MAX_MD_NAME_LEN (RBD_MAX_OBJ_NAME_LEN + sizeof(RBD_SUFFIX)) #define RBD_MAX_POOL_NAME_LEN 64 #define RBD_MAX_SNAP_NAME_LEN 32 #define RBD_MAX_OPT_LEN 1024 #define RBD_SNAP_HEAD_NAME "-" +/* + * An RBD device name will be "rbd#", where the "rbd" comes from + * RBD_DRV_NAME above, and # is a unique integer identifier. + * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big + * enough to hold all possible device names. + */ #define DEV_NAME_LEN 32 +#define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1) #define RBD_NOTIFY_TIMEOUT_DEFAULT 10 @@ -66,7 +82,6 @@ struct rbd_image_header { __u8 obj_order; __u8 crypt_type; __u8 comp_type; - struct rw_semaphore snap_rwsem; struct ceph_snap_context *snapc; size_t snap_names_len; u64 snap_seq; @@ -83,7 +98,7 @@ struct rbd_options { }; /* - * an instance of the client. multiple devices may share a client. + * an instance of the client. multiple devices may share an rbd client. */ struct rbd_client { struct ceph_client *client; @@ -92,20 +107,9 @@ struct rbd_client { struct list_head node; }; -struct rbd_req_coll; - /* - * a single io request + * a request completion status */ -struct rbd_request { - struct request *rq; /* blk layer request */ - struct bio *bio; /* cloned bio */ - struct page **pages; /* list of used pages */ - u64 len; - int coll_index; - struct rbd_req_coll *coll; -}; - struct rbd_req_status { int done; int rc; @@ -122,6 +126,18 @@ struct rbd_req_coll { struct rbd_req_status status[0]; }; +/* + * a single io request + */ +struct rbd_request { + struct request *rq; /* blk layer request */ + struct bio *bio; /* cloned bio */ + struct page **pages; /* list of used pages */ + u64 len; + int coll_index; + struct rbd_req_coll *coll; +}; + struct rbd_snap { struct device dev; const char *name; @@ -140,7 +156,6 @@ struct rbd_device { struct gendisk *disk; /* blkdev's gendisk and rq */ struct request_queue *q; - struct ceph_client *client; struct rbd_client *rbd_client; char name[DEV_NAME_LEN]; /* blkdev name, e.g. 
rbd3 */ @@ -157,6 +172,8 @@ struct rbd_device { struct ceph_osd_event *watch_event; struct ceph_osd_request *watch_request; + /* protects updating the header */ + struct rw_semaphore header_rwsem; char snap_name[RBD_MAX_SNAP_NAME_LEN]; u32 cur_snap; /* index+1 of current snapshot within snap context 0 - for the head */ @@ -171,15 +188,13 @@ struct rbd_device { struct device dev; }; -static struct bus_type rbd_bus_type = { - .name = "rbd", -}; - -static spinlock_t node_lock; /* protects client get/put */ - static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */ + static LIST_HEAD(rbd_dev_list); /* devices */ -static LIST_HEAD(rbd_client_list); /* clients */ +static DEFINE_SPINLOCK(rbd_dev_list_lock); + +static LIST_HEAD(rbd_client_list); /* clients */ +static DEFINE_SPINLOCK(rbd_client_list_lock); static int __rbd_init_snaps_header(struct rbd_device *rbd_dev); static void rbd_dev_release(struct device *dev); @@ -190,12 +205,32 @@ static ssize_t rbd_snap_add(struct device *dev, static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev, struct rbd_snap *snap); +static ssize_t rbd_add(struct bus_type *bus, const char *buf, + size_t count); +static ssize_t rbd_remove(struct bus_type *bus, const char *buf, + size_t count); -static struct rbd_device *dev_to_rbd(struct device *dev) +static struct bus_attribute rbd_bus_attrs[] = { + __ATTR(add, S_IWUSR, NULL, rbd_add), + __ATTR(remove, S_IWUSR, NULL, rbd_remove), + __ATTR_NULL +}; + +static struct bus_type rbd_bus_type = { + .name = "rbd", + .bus_attrs = rbd_bus_attrs, +}; + +static void rbd_root_dev_release(struct device *dev) { - return container_of(dev, struct rbd_device, dev); } +static struct device rbd_root_dev = { + .init_name = "rbd", + .release = rbd_root_dev_release, +}; + + static struct device *rbd_get_dev(struct rbd_device *rbd_dev) { return get_device(&rbd_dev->dev); @@ -210,8 +245,7 @@ static int __rbd_update_snaps(struct rbd_device *rbd_dev); static int rbd_open(struct block_device *bdev, fmode_t mode) { - struct gendisk *disk = bdev->bd_disk; - struct rbd_device *rbd_dev = disk->private_data; + struct rbd_device *rbd_dev = bdev->bd_disk->private_data; rbd_get_dev(rbd_dev); @@ -256,9 +290,11 @@ static struct rbd_client *rbd_client_create(struct ceph_options *opt, kref_init(&rbdc->kref); INIT_LIST_HEAD(&rbdc->node); + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + rbdc->client = ceph_create_client(opt, rbdc, 0, 0); if (IS_ERR(rbdc->client)) - goto out_rbdc; + goto out_mutex; opt = NULL; /* Now rbdc->client is responsible for opt */ ret = ceph_open_session(rbdc->client); @@ -267,16 +303,19 @@ static struct rbd_client *rbd_client_create(struct ceph_options *opt, rbdc->rbd_opts = rbd_opts; - spin_lock(&node_lock); + spin_lock(&rbd_client_list_lock); list_add_tail(&rbdc->node, &rbd_client_list); - spin_unlock(&node_lock); + spin_unlock(&rbd_client_list_lock); + + mutex_unlock(&ctl_mutex); dout("rbd_client_create created %p\n", rbdc); return rbdc; out_err: ceph_destroy_client(rbdc->client); -out_rbdc: +out_mutex: + mutex_unlock(&ctl_mutex); kfree(rbdc); out_opt: if (opt) @@ -324,7 +363,7 @@ static int parse_rbd_opts_token(char *c, void *private) substring_t argstr[MAX_OPT_ARGS]; int token, intval, ret; - token = match_token((char *)c, rbdopt_tokens, argstr); + token = match_token(c, rbdopt_tokens, argstr); if (token < 0) return -EINVAL; @@ -357,58 +396,54 @@ static int parse_rbd_opts_token(char *c, void *private) * Get a ceph client with specific addr and configuration, if one does * not exist create it. 
*/ -static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr, - char *options) +static struct rbd_client *rbd_get_client(const char *mon_addr, + size_t mon_addr_len, + char *options) { struct rbd_client *rbdc; struct ceph_options *opt; - int ret; struct rbd_options *rbd_opts; rbd_opts = kzalloc(sizeof(*rbd_opts), GFP_KERNEL); if (!rbd_opts) - return -ENOMEM; + return ERR_PTR(-ENOMEM); rbd_opts->notify_timeout = RBD_NOTIFY_TIMEOUT_DEFAULT; - ret = ceph_parse_options(&opt, options, mon_addr, - mon_addr + strlen(mon_addr), parse_rbd_opts_token, rbd_opts); - if (ret < 0) - goto done_err; + opt = ceph_parse_options(options, mon_addr, + mon_addr + mon_addr_len, + parse_rbd_opts_token, rbd_opts); + if (IS_ERR(opt)) { + kfree(rbd_opts); + return ERR_CAST(opt); + } - spin_lock(&node_lock); + spin_lock(&rbd_client_list_lock); rbdc = __rbd_client_find(opt); if (rbdc) { + /* using an existing client */ + kref_get(&rbdc->kref); + spin_unlock(&rbd_client_list_lock); + ceph_destroy_options(opt); kfree(rbd_opts); - /* using an existing client */ - kref_get(&rbdc->kref); - rbd_dev->rbd_client = rbdc; - rbd_dev->client = rbdc->client; - spin_unlock(&node_lock); - return 0; + return rbdc; } - spin_unlock(&node_lock); + spin_unlock(&rbd_client_list_lock); rbdc = rbd_client_create(opt, rbd_opts); - if (IS_ERR(rbdc)) { - ret = PTR_ERR(rbdc); - goto done_err; - } - rbd_dev->rbd_client = rbdc; - rbd_dev->client = rbdc->client; - return 0; -done_err: - kfree(rbd_opts); - return ret; + if (IS_ERR(rbdc)) + kfree(rbd_opts); + + return rbdc; } /* * Destroy ceph client * - * Caller must hold node_lock. + * Caller must hold rbd_client_list_lock. */ static void rbd_client_release(struct kref *kref) { @@ -428,11 +463,10 @@ static void rbd_client_release(struct kref *kref) */ static void rbd_put_client(struct rbd_device *rbd_dev) { - spin_lock(&node_lock); + spin_lock(&rbd_client_list_lock); kref_put(&rbd_dev->rbd_client->kref, rbd_client_release); - spin_unlock(&node_lock); + spin_unlock(&rbd_client_list_lock); rbd_dev->rbd_client = NULL; - rbd_dev->client = NULL; } /* @@ -457,21 +491,19 @@ static int rbd_header_from_disk(struct rbd_image_header *header, gfp_t gfp_flags) { int i; - u32 snap_count = le32_to_cpu(ondisk->snap_count); - int ret = -ENOMEM; + u32 snap_count; - if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) { + if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) return -ENXIO; - } - init_rwsem(&header->snap_rwsem); - header->snap_names_len = le64_to_cpu(ondisk->snap_names_len); + snap_count = le32_to_cpu(ondisk->snap_count); header->snapc = kmalloc(sizeof(struct ceph_snap_context) + - snap_count * - sizeof(struct rbd_image_snap_ondisk), + snap_count * sizeof (*ondisk), gfp_flags); if (!header->snapc) return -ENOMEM; + + header->snap_names_len = le64_to_cpu(ondisk->snap_names_len); if (snap_count) { header->snap_names = kmalloc(header->snap_names_len, GFP_KERNEL); @@ -498,8 +530,7 @@ static int rbd_header_from_disk(struct rbd_image_header *header, header->snapc->num_snaps = snap_count; header->total_snaps = snap_count; - if (snap_count && - allocated_snaps == snap_count) { + if (snap_count && allocated_snaps == snap_count) { for (i = 0; i < snap_count; i++) { header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id); @@ -518,7 +549,7 @@ err_names: kfree(header->snap_names); err_snapc: kfree(header->snapc); - return ret; + return -ENOMEM; } static int snap_index(struct rbd_image_header *header, int snap_num) @@ -542,35 +573,34 @@ static int snap_by_name(struct 
rbd_image_header *header, const char *snap_name, int i; char *p = header->snap_names; - for (i = 0; i < header->total_snaps; i++, p += strlen(p) + 1) { - if (strcmp(snap_name, p) == 0) - break; - } - if (i == header->total_snaps) - return -ENOENT; - if (seq) - *seq = header->snapc->snaps[i]; + for (i = 0; i < header->total_snaps; i++) { + if (!strcmp(snap_name, p)) { - if (size) - *size = header->snap_sizes[i]; + /* Found it. Pass back its id and/or size */ - return i; + if (seq) + *seq = header->snapc->snaps[i]; + if (size) + *size = header->snap_sizes[i]; + return i; + } + p += strlen(p) + 1; /* Skip ahead to the next name */ + } + return -ENOENT; } -static int rbd_header_set_snap(struct rbd_device *dev, - const char *snap_name, - u64 *size) +static int rbd_header_set_snap(struct rbd_device *dev, u64 *size) { struct rbd_image_header *header = &dev->header; struct ceph_snap_context *snapc = header->snapc; int ret = -ENOENT; - down_write(&header->snap_rwsem); + BUILD_BUG_ON(sizeof (dev->snap_name) < sizeof (RBD_SNAP_HEAD_NAME)); - if (!snap_name || - !*snap_name || - strcmp(snap_name, "-") == 0 || - strcmp(snap_name, RBD_SNAP_HEAD_NAME) == 0) { + down_write(&dev->header_rwsem); + + if (!memcmp(dev->snap_name, RBD_SNAP_HEAD_NAME, + sizeof (RBD_SNAP_HEAD_NAME))) { if (header->total_snaps) snapc->seq = header->snap_seq; else @@ -580,7 +610,7 @@ static int rbd_header_set_snap(struct rbd_device *dev, if (size) *size = header->image_size; } else { - ret = snap_by_name(header, snap_name, &snapc->seq, size); + ret = snap_by_name(header, dev->snap_name, &snapc->seq, size); if (ret < 0) goto done; @@ -590,7 +620,7 @@ static int rbd_header_set_snap(struct rbd_device *dev, ret = 0; done: - up_write(&header->snap_rwsem); + up_write(&dev->header_rwsem); return ret; } @@ -717,7 +747,7 @@ static struct bio *bio_chain_clone(struct bio **old, struct bio **next, /* split the bio. 
We'll release it either in the next call, or it will have to be released outside */ - bp = bio_split(old_chain, (len - total) / 512ULL); + bp = bio_split(old_chain, (len - total) / SECTOR_SIZE); if (!bp) goto err_out; @@ -857,7 +887,7 @@ static int rbd_do_request(struct request *rq, struct timespec mtime = CURRENT_TIME; struct rbd_request *req_data; struct ceph_osd_request_head *reqhead; - struct rbd_image_header *header = &dev->header; + struct ceph_osd_client *osdc; req_data = kzalloc(sizeof(*req_data), GFP_NOIO); if (!req_data) { @@ -874,15 +904,13 @@ static int rbd_do_request(struct request *rq, dout("rbd_do_request obj=%s ofs=%lld len=%lld\n", obj, len, ofs); - down_read(&header->snap_rwsem); + down_read(&dev->header_rwsem); - req = ceph_osdc_alloc_request(&dev->client->osdc, flags, - snapc, - ops, - false, - GFP_NOIO, pages, bio); + osdc = &dev->rbd_client->client->osdc; + req = ceph_osdc_alloc_request(osdc, flags, snapc, ops, + false, GFP_NOIO, pages, bio); if (!req) { - up_read(&header->snap_rwsem); + up_read(&dev->header_rwsem); ret = -ENOMEM; goto done_pages; } @@ -909,27 +937,27 @@ static int rbd_do_request(struct request *rq, layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER); layout->fl_pg_preferred = cpu_to_le32(-1); layout->fl_pg_pool = cpu_to_le32(dev->poolid); - ceph_calc_raw_layout(&dev->client->osdc, layout, snapid, - ofs, &len, &bno, req, ops); + ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno, + req, ops); ceph_osdc_build_request(req, ofs, &len, ops, snapc, &mtime, req->r_oid, req->r_oid_len); - up_read(&header->snap_rwsem); + up_read(&dev->header_rwsem); if (linger_req) { - ceph_osdc_set_request_linger(&dev->client->osdc, req); + ceph_osdc_set_request_linger(osdc, req); *linger_req = req; } - ret = ceph_osdc_start_request(&dev->client->osdc, req, false); + ret = ceph_osdc_start_request(osdc, req, false); if (ret < 0) goto done_err; if (!rbd_cb) { - ret = ceph_osdc_wait_request(&dev->client->osdc, req); + ret = ceph_osdc_wait_request(osdc, req); if (ver) *ver = le64_to_cpu(req->r_reassert_version.version); dout("reassert_ver=%lld\n", @@ -1213,8 +1241,8 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) rc = __rbd_update_snaps(dev); mutex_unlock(&ctl_mutex); if (rc) - pr_warning(DRV_NAME "%d got notification but failed to update" - " snaps: %d\n", dev->major, rc); + pr_warning(RBD_DRV_NAME "%d got notification but failed to " + " update snaps: %d\n", dev->major, rc); rbd_req_sync_notify_ack(dev, ver, notify_id, dev->obj_md_name); } @@ -1227,7 +1255,7 @@ static int rbd_req_sync_watch(struct rbd_device *dev, u64 ver) { struct ceph_osd_req_op *ops; - struct ceph_osd_client *osdc = &dev->client->osdc; + struct ceph_osd_client *osdc = &dev->rbd_client->client->osdc; int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_WATCH, 0); if (ret < 0) @@ -1314,7 +1342,7 @@ static int rbd_req_sync_notify(struct rbd_device *dev, const char *obj) { struct ceph_osd_req_op *ops; - struct ceph_osd_client *osdc = &dev->client->osdc; + struct ceph_osd_client *osdc = &dev->rbd_client->client->osdc; struct ceph_osd_event *event; struct rbd_notify_info info; int payload_len = sizeof(u32) + sizeof(u32); @@ -1421,9 +1449,7 @@ static void rbd_rq_fn(struct request_queue *q) struct request *rq; struct bio_pair *bp = NULL; - rq = blk_fetch_request(q); - - while (1) { + while ((rq = blk_fetch_request(q))) { struct bio *bio; struct bio *rq_bio, *next_bio = NULL; bool do_write; @@ -1441,32 +1467,32 @@ static void rbd_rq_fn(struct request_queue *q) /* filter out 
block requests we don't understand */ if ((rq->cmd_type != REQ_TYPE_FS)) { __blk_end_request_all(rq, 0); - goto next; + continue; } /* deduce our operation (read, write) */ do_write = (rq_data_dir(rq) == WRITE); size = blk_rq_bytes(rq); - ofs = blk_rq_pos(rq) * 512ULL; + ofs = blk_rq_pos(rq) * SECTOR_SIZE; rq_bio = rq->bio; if (do_write && rbd_dev->read_only) { __blk_end_request_all(rq, -EROFS); - goto next; + continue; } spin_unlock_irq(q->queue_lock); dout("%s 0x%x bytes at 0x%llx\n", do_write ? "write" : "read", - size, blk_rq_pos(rq) * 512ULL); + size, blk_rq_pos(rq) * SECTOR_SIZE); num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size); coll = rbd_alloc_coll(num_segs); if (!coll) { spin_lock_irq(q->queue_lock); __blk_end_request_all(rq, -ENOMEM); - goto next; + continue; } do { @@ -1512,8 +1538,6 @@ next_seg: if (bp) bio_pair_release(bp); spin_lock_irq(q->queue_lock); -next: - rq = blk_fetch_request(q); } } @@ -1526,13 +1550,17 @@ static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd, struct bio_vec *bvec) { struct rbd_device *rbd_dev = q->queuedata; - unsigned int chunk_sectors = 1 << (rbd_dev->header.obj_order - 9); - sector_t sector = bmd->bi_sector + get_start_sect(bmd->bi_bdev); - unsigned int bio_sectors = bmd->bi_size >> 9; + unsigned int chunk_sectors; + sector_t sector; + unsigned int bio_sectors; int max; + chunk_sectors = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT); + sector = bmd->bi_sector + get_start_sect(bmd->bi_bdev); + bio_sectors = bmd->bi_size >> SECTOR_SHIFT; + max = (chunk_sectors - ((sector & (chunk_sectors - 1)) - + bio_sectors)) << 9; + + bio_sectors)) << SECTOR_SHIFT; if (max < 0) max = 0; /* bio_add cannot handle a negative return */ if (max <= bvec->bv_len && bio_sectors == 0) @@ -1565,15 +1593,16 @@ static int rbd_read_header(struct rbd_device *rbd_dev, ssize_t rc; struct rbd_image_header_ondisk *dh; int snap_count = 0; - u64 snap_names_len = 0; u64 ver; + size_t len; + /* + * First reads the fixed-size header to determine the number + * of snapshots, then re-reads it, along with all snapshot + * records as well as their stored names. 
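The comment introducing rbd_read_header() (completed just below) describes a read-then-resize loop: read the fixed-size on-disk header, learn total_snaps, and if the buffer was not sized for that many snapshot records and names, recompute the length and read again. A standalone model of that grow-and-retry pattern, with a stubbed reader standing in for the driver's synchronous object read and an assumed per-snapshot record size:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct ondisk { unsigned int snap_count; /* fixed-size fields ... */ };

    #define PER_SNAP 16     /* assumed bytes per snapshot record + name */

    /* Stub for the synchronous object read; pretends 3 snapshots exist. */
    static int read_object(void *buf, size_t len)
    {
            memset(buf, 0, len);
            ((struct ondisk *)buf)->snap_count = 3;
            return 0;
    }

    static struct ondisk *read_full_header(void)
    {
            unsigned int snap_count = 0;
            size_t len = sizeof(struct ondisk);     /* first pass: fixed part */

            for (;;) {
                    struct ondisk *dh = malloc(len);

                    if (!dh || read_object(dh, len) < 0) {
                            free(dh);
                            return NULL;
                    }
                    if (dh->snap_count == snap_count)
                            return dh;      /* buffer held everything: done */

                    /* Count changed (or first pass): grow and retry. */
                    snap_count = dh->snap_count;
                    len = sizeof(struct ondisk) + snap_count * PER_SNAP;
                    free(dh);
            }
    }

    int main(void)
    {
            struct ondisk *dh = read_full_header();

            printf("snaps: %u\n", dh ? dh->snap_count : 0); /* prints 3 */
            free(dh);
            return 0;
    }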
+ */ + len = sizeof (*dh); while (1) { - int len = sizeof(*dh) + - snap_count * sizeof(struct rbd_image_snap_ondisk) + - snap_names_len; - - rc = -ENOMEM; dh = kmalloc(len, GFP_KERNEL); if (!dh) return -ENOMEM; @@ -1588,21 +1617,22 @@ static int rbd_read_header(struct rbd_device *rbd_dev, rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL); if (rc < 0) { - if (rc == -ENXIO) { + if (rc == -ENXIO) pr_warning("unrecognized header format" " for image %s", rbd_dev->obj); - } goto out_dh; } - if (snap_count != header->total_snaps) { - snap_count = header->total_snaps; - snap_names_len = header->snap_names_len; - rbd_header_free(header); - kfree(dh); - continue; - } - break; + if (snap_count == header->total_snaps) + break; + + snap_count = header->total_snaps; + len = sizeof (*dh) + + snap_count * sizeof(struct rbd_image_snap_ondisk) + + header->snap_names_len; + + rbd_header_free(header); + kfree(dh); } header->obj_version = ver; @@ -1623,13 +1653,14 @@ static int rbd_header_add_snap(struct rbd_device *dev, int ret; void *data, *p, *e; u64 ver; + struct ceph_mon_client *monc; /* we should create a snapshot only if we're pointing at the head */ if (dev->cur_snap) return -EINVAL; - ret = ceph_monc_create_snapid(&dev->client->monc, dev->poolid, - &new_snapid); + monc = &dev->rbd_client->client->monc; + ret = ceph_monc_create_snapid(monc, dev->poolid, &new_snapid); dout("created snapid=%lld\n", new_snapid); if (ret < 0) return ret; @@ -1684,9 +1715,9 @@ static int __rbd_update_snaps(struct rbd_device *rbd_dev) return ret; /* resized? */ - set_capacity(rbd_dev->disk, h.image_size / 512ULL); + set_capacity(rbd_dev->disk, h.image_size / SECTOR_SIZE); - down_write(&rbd_dev->header.snap_rwsem); + down_write(&rbd_dev->header_rwsem); snap_seq = rbd_dev->header.snapc->seq; if (rbd_dev->header.total_snaps && @@ -1711,7 +1742,7 @@ static int __rbd_update_snaps(struct rbd_device *rbd_dev) ret = __rbd_init_snaps_header(rbd_dev); - up_write(&rbd_dev->header.snap_rwsem); + up_write(&rbd_dev->header_rwsem); return ret; } @@ -1721,6 +1752,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) struct gendisk *disk; struct request_queue *q; int rc; + u64 segment_size; u64 total_size = 0; /* contact OSD, request size info about the object being mapped */ @@ -1733,7 +1765,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) if (rc) return rc; - rc = rbd_header_set_snap(rbd_dev, rbd_dev->snap_name, &total_size); + rc = rbd_header_set_snap(rbd_dev, &total_size); if (rc) return rc; @@ -1743,7 +1775,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) if (!disk) goto out; - snprintf(disk->disk_name, sizeof(disk->disk_name), DRV_NAME "%d", + snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d", rbd_dev->id); disk->major = rbd_dev->major; disk->first_minor = 0; @@ -1756,11 +1788,15 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) if (!q) goto out_disk; + /* We use the default size, but let's be explicit about it. 
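rbd_init_disk() (the hunk continues below) now computes segment_size = rbd_obj_bytes(&rbd_dev->header) once and feeds it to all four queue limits, so a single request never exceeds one RADOS object. Worked numbers, assuming the common object order of 22 (4 MiB objects; the order is per-image, so this is only an example):

    #include <stdio.h>

    #define SECTOR_SHIFT    9
    #define SECTOR_SIZE     (1 << SECTOR_SHIFT)

    int main(void)
    {
            unsigned int obj_order = 22;    /* assumed default: 4 MiB objects */
            unsigned long long segment_size = 1ULL << obj_order;

            /* Mirrors blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE) */
            printf("object bytes:   %llu\n", segment_size);                /* 4194304 */
            printf("max hw sectors: %llu\n", segment_size / SECTOR_SIZE);  /* 8192 */
            return 0;
    }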
*/ + blk_queue_physical_block_size(q, SECTOR_SIZE); + /* set io sizes to object size */ - blk_queue_max_hw_sectors(q, rbd_obj_bytes(&rbd_dev->header) / 512ULL); - blk_queue_max_segment_size(q, rbd_obj_bytes(&rbd_dev->header)); - blk_queue_io_min(q, rbd_obj_bytes(&rbd_dev->header)); - blk_queue_io_opt(q, rbd_obj_bytes(&rbd_dev->header)); + segment_size = rbd_obj_bytes(&rbd_dev->header); + blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); + blk_queue_max_segment_size(q, segment_size); + blk_queue_io_min(q, segment_size); + blk_queue_io_opt(q, segment_size); blk_queue_merge_bvec(q, rbd_merge_bvec); disk->queue = q; @@ -1771,7 +1807,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) rbd_dev->q = q; /* finally, announce the disk to the world */ - set_capacity(disk, total_size / 512ULL); + set_capacity(disk, total_size / SECTOR_SIZE); add_disk(disk); pr_info("%s: added with size 0x%llx\n", @@ -1788,10 +1824,15 @@ out: sysfs */ +static struct rbd_device *dev_to_rbd_dev(struct device *dev) +{ + return container_of(dev, struct rbd_device, dev); +} + static ssize_t rbd_size_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct rbd_device *rbd_dev = dev_to_rbd(dev); + struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); return sprintf(buf, "%llu\n", (unsigned long long)rbd_dev->header.image_size); } @@ -1799,7 +1840,7 @@ static ssize_t rbd_size_show(struct device *dev, static ssize_t rbd_major_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct rbd_device *rbd_dev = dev_to_rbd(dev); + struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); return sprintf(buf, "%d\n", rbd_dev->major); } @@ -1807,15 +1848,16 @@ static ssize_t rbd_major_show(struct device *dev, static ssize_t rbd_client_id_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct rbd_device *rbd_dev = dev_to_rbd(dev); + struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); - return sprintf(buf, "client%lld\n", ceph_client_id(rbd_dev->client)); + return sprintf(buf, "client%lld\n", + ceph_client_id(rbd_dev->rbd_client->client)); } static ssize_t rbd_pool_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct rbd_device *rbd_dev = dev_to_rbd(dev); + struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); return sprintf(buf, "%s\n", rbd_dev->pool_name); } @@ -1823,7 +1865,7 @@ static ssize_t rbd_pool_show(struct device *dev, static ssize_t rbd_name_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct rbd_device *rbd_dev = dev_to_rbd(dev); + struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); return sprintf(buf, "%s\n", rbd_dev->obj); } @@ -1832,7 +1874,7 @@ static ssize_t rbd_snap_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct rbd_device *rbd_dev = dev_to_rbd(dev); + struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); return sprintf(buf, "%s\n", rbd_dev->snap_name); } @@ -1842,7 +1884,7 @@ static ssize_t rbd_image_refresh(struct device *dev, const char *buf, size_t size) { - struct rbd_device *rbd_dev = dev_to_rbd(dev); + struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); int rc; int ret = size; @@ -1907,7 +1949,7 @@ static ssize_t rbd_snap_size_show(struct device *dev, { struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev); - return sprintf(buf, "%lld\n", (long long)snap->size); + return sprintf(buf, "%zd\n", snap->size); } static ssize_t rbd_snap_id_show(struct device *dev, @@ -1916,7 +1958,7 @@ static ssize_t rbd_snap_id_show(struct device *dev, { struct rbd_snap *snap = container_of(dev, 
struct rbd_snap, dev); - return sprintf(buf, "%lld\n", (long long)snap->id); + return sprintf(buf, "%llu\n", (unsigned long long) snap->id); } static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL); @@ -2088,19 +2130,9 @@ static int __rbd_init_snaps_header(struct rbd_device *rbd_dev) return 0; } - -static void rbd_root_dev_release(struct device *dev) -{ -} - -static struct device rbd_root_dev = { - .init_name = "rbd", - .release = rbd_root_dev_release, -}; - static int rbd_bus_add_dev(struct rbd_device *rbd_dev) { - int ret = -ENOMEM; + int ret; struct device *dev; struct rbd_snap *snap; @@ -2114,7 +2146,7 @@ static int rbd_bus_add_dev(struct rbd_device *rbd_dev) dev_set_name(dev, "%d", rbd_dev->id); ret = device_register(dev); if (ret < 0) - goto done_free; + goto out; list_for_each_entry(snap, &rbd_dev->snaps, node) { ret = rbd_register_snap_dev(rbd_dev, snap, @@ -2122,10 +2154,7 @@ static int rbd_bus_add_dev(struct rbd_device *rbd_dev) if (ret < 0) break; } - - mutex_unlock(&ctl_mutex); - return 0; -done_free: +out: mutex_unlock(&ctl_mutex); return ret; } @@ -2154,104 +2183,250 @@ static int rbd_init_watch_dev(struct rbd_device *rbd_dev) return ret; } +static atomic64_t rbd_id_max = ATOMIC64_INIT(0); + +/* + * Get a unique rbd identifier for the given new rbd_dev, and add + * the rbd_dev to the global list. The minimum rbd id is 1. + */ +static void rbd_id_get(struct rbd_device *rbd_dev) +{ + rbd_dev->id = atomic64_inc_return(&rbd_id_max); + + spin_lock(&rbd_dev_list_lock); + list_add_tail(&rbd_dev->node, &rbd_dev_list); + spin_unlock(&rbd_dev_list_lock); +} + +/* + * Remove an rbd_dev from the global list, and record that its + * identifier is no longer in use. + */ +static void rbd_id_put(struct rbd_device *rbd_dev) +{ + struct list_head *tmp; + int rbd_id = rbd_dev->id; + int max_id; + + BUG_ON(rbd_id < 1); + + spin_lock(&rbd_dev_list_lock); + list_del_init(&rbd_dev->node); + + /* + * If the id being "put" is not the current maximum, there + * is nothing special we need to do. + */ + if (rbd_id != atomic64_read(&rbd_id_max)) { + spin_unlock(&rbd_dev_list_lock); + return; + } + + /* + * We need to update the current maximum id. Search the + * list to find out what it is. We're more likely to find + * the maximum at the end, so search the list backward. + */ + max_id = 0; + list_for_each_prev(tmp, &rbd_dev_list) { + struct rbd_device *rbd_dev; + + rbd_dev = list_entry(tmp, struct rbd_device, node); + if (rbd_id > max_id) + max_id = rbd_id; + } + spin_unlock(&rbd_dev_list_lock); + + /* + * The max id could have been updated by rbd_id_get(), in + * which case it now accurately reflects the new maximum. + * Be careful not to overwrite the maximum value in that + * case. + */ + atomic64_cmpxchg(&rbd_id_max, rbd_id, max_id); +} + +/* + * Skips over white space at *buf, and updates *buf to point to the + * first found non-space character (if any). Returns the length of + * the token (string of non-white space characters) found. Note + * that *buf must be terminated with '\0'. + */ +static inline size_t next_token(const char **buf) +{ + /* + * These are the characters that produce nonzero for + * isspace() in the "C" and "POSIX" locales. + */ + const char *spaces = " \f\n\r\t\v"; + + *buf += strspn(*buf, spaces); /* Find start of token */ + + return strcspn(*buf, spaces); /* Return token length */ +} + +/* + * Finds the next token in *buf, and if the provided token buffer is + * big enough, copies the found token into it. 
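The rbd_id_get()/rbd_id_put() pair above replaces the old scan-for-highest-id-under-ctl_mutex scheme: ids come from a monotonically increasing atomic64, and the device list is guarded by its own spinlock. Note that the recomputation loop in the hunk compares rbd_id rather than each entry's id, which looks like a slip that defeats the search; the standalone model below (C11 atomics and an array standing in for the kernel's atomic64_t and locked list) uses the entry's id, which appears to be the intent:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic long long id_max;        /* models rbd_id_max */
    static long long ids[16];               /* models rbd_dev_list (lock elided) */
    static int nids;

    static long long id_get(void)
    {
            long long id = atomic_fetch_add(&id_max, 1) + 1; /* atomic64_inc_return() */

            ids[nids++] = id;
            return id;
    }

    static void id_put(long long id)
    {
            long long max = 0, expected = id;
            int i, j = -1;

            for (i = 0; i < nids; i++)
                    if (ids[i] == id)
                            j = i;
            if (j >= 0)
                    ids[j] = ids[--nids];   /* models list_del_init() */

            if (atomic_load(&id_max) != id)
                    return;                 /* not the current maximum */

            /* Recompute the maximum from the surviving entries. */
            for (i = 0; i < nids; i++)
                    if (ids[i] > max)
                            max = ids[i];

            /* Don't clobber a larger value a racing id_get() installed. */
            atomic_compare_exchange_strong(&id_max, &expected, max);
    }

    int main(void)
    {
            id_get(); id_get(); id_get();   /* ids 1, 2, 3 */
            id_put(3);
            printf("%lld\n", (long long)atomic_load(&id_max)); /* prints 2 */
            return 0;
    }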
The result, if + * copied, is guaranteed to be terminated with '\0'. Note that *buf + * must be terminated with '\0' on entry. + * + * Returns the length of the token found (not including the '\0'). + * Return value will be 0 if no token is found, and it will be >= + * token_size if the token would not fit. + * + * The *buf pointer will be updated to point beyond the end of the + * found token. Note that this occurs even if the token buffer is + * too small to hold it. + */ +static inline size_t copy_token(const char **buf, + char *token, + size_t token_size) +{ + size_t len; + + len = next_token(buf); + if (len < token_size) { + memcpy(token, *buf, len); + *(token + len) = '\0'; + } + *buf += len; + + return len; +} + +/* + * This fills in the pool_name, obj, obj_len, snap_name, obj_len, + * rbd_dev, rbd_md_name, and name fields of the given rbd_dev, based + * on the list of monitor addresses and other options provided via + * /sys/bus/rbd/add. + */ +static int rbd_add_parse_args(struct rbd_device *rbd_dev, + const char *buf, + const char **mon_addrs, + size_t *mon_addrs_size, + char *options, + size_t options_size) +{ + size_t len; + + /* The first four tokens are required */ + + len = next_token(&buf); + if (!len) + return -EINVAL; + *mon_addrs_size = len + 1; + *mon_addrs = buf; + + buf += len; + + len = copy_token(&buf, options, options_size); + if (!len || len >= options_size) + return -EINVAL; + + len = copy_token(&buf, rbd_dev->pool_name, sizeof (rbd_dev->pool_name)); + if (!len || len >= sizeof (rbd_dev->pool_name)) + return -EINVAL; + + len = copy_token(&buf, rbd_dev->obj, sizeof (rbd_dev->obj)); + if (!len || len >= sizeof (rbd_dev->obj)) + return -EINVAL; + + /* We have the object length in hand, save it. */ + + rbd_dev->obj_len = len; + + BUILD_BUG_ON(RBD_MAX_MD_NAME_LEN + < RBD_MAX_OBJ_NAME_LEN + sizeof (RBD_SUFFIX)); + sprintf(rbd_dev->obj_md_name, "%s%s", rbd_dev->obj, RBD_SUFFIX); + + /* + * The snapshot name is optional, but it's an error if it's + * too long. If no snapshot is supplied, fill in the default. 
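next_token()/copy_token() above form a tiny whitespace tokenizer over the '\0'-terminated buffer handed to /sys/bus/rbd/add, replacing the width-limited sscanf() parsing visible in the rbd_add() hunk further below. A standalone sketch of the same pair; the demo input mimics the add command's "<mon addrs> <options> <pool> <image> [<snap>]" layout but is purely illustrative:

    #include <stdio.h>
    #include <string.h>

    /* Skip leading whitespace; return the length of the next token. */
    static size_t next_token(const char **buf)
    {
            const char *spaces = " \f\n\r\t\v";  /* isspace() in the C locale */

            *buf += strspn(*buf, spaces);
            return strcspn(*buf, spaces);
    }

    /* Copy the next token if it fits; always advance past it. */
    static size_t copy_token(const char **buf, char *token, size_t token_size)
    {
            size_t len = next_token(buf);

            if (len < token_size) {
                    memcpy(token, *buf, len);
                    token[len] = '\0';
            }
            *buf += len;
            return len;     /* >= token_size means "did not fit" */
    }

    int main(void)
    {
            const char *cmd = "1.2.3.4:6789 name=admin mypool myimage mysnap";
            char tok[32];

            while (copy_token(&cmd, tok, sizeof(tok)))
                    printf("token: %s\n", tok);
            return 0;
    }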
+ */ + len = copy_token(&buf, rbd_dev->snap_name, sizeof (rbd_dev->snap_name)); + if (!len) + memcpy(rbd_dev->snap_name, RBD_SNAP_HEAD_NAME, + sizeof (RBD_SNAP_HEAD_NAME)); + else if (len >= sizeof (rbd_dev->snap_name)) + return -EINVAL; + + return 0; +} + static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count) { - struct ceph_osd_client *osdc; struct rbd_device *rbd_dev; - ssize_t rc = -ENOMEM; - int irc, new_id = 0; - struct list_head *tmp; - char *mon_dev_name; - char *options; + const char *mon_addrs = NULL; + size_t mon_addrs_size = 0; + char *options = NULL; + struct ceph_osd_client *osdc; + int rc = -ENOMEM; if (!try_module_get(THIS_MODULE)) return -ENODEV; - mon_dev_name = kmalloc(RBD_MAX_OPT_LEN, GFP_KERNEL); - if (!mon_dev_name) - goto err_out_mod; - - options = kmalloc(RBD_MAX_OPT_LEN, GFP_KERNEL); - if (!options) - goto err_mon_dev; - - /* new rbd_device object */ rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL); if (!rbd_dev) - goto err_out_opt; + goto err_nomem; + options = kmalloc(count, GFP_KERNEL); + if (!options) + goto err_nomem; /* static rbd_device initialization */ spin_lock_init(&rbd_dev->lock); INIT_LIST_HEAD(&rbd_dev->node); INIT_LIST_HEAD(&rbd_dev->snaps); + init_rwsem(&rbd_dev->header_rwsem); - init_rwsem(&rbd_dev->header.snap_rwsem); + init_rwsem(&rbd_dev->header_rwsem); /* generate unique id: find highest unique id, add one */ - mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); - - list_for_each(tmp, &rbd_dev_list) { - struct rbd_device *rbd_dev; + rbd_id_get(rbd_dev); - rbd_dev = list_entry(tmp, struct rbd_device, node); - if (rbd_dev->id >= new_id) - new_id = rbd_dev->id + 1; - } - - rbd_dev->id = new_id; - - /* add to global list */ - list_add_tail(&rbd_dev->node, &rbd_dev_list); + /* Fill in the device name, now that we have its id. 
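The sprintf() into rbd_dev->name just below is preceded by a BUILD_BUG_ON() proving at compile time that DEV_NAME_LEN can hold the driver prefix plus the widest possible decimal integer, so the unchecked sprintf() is safe. The same compile-time check can be modeled in plain C with the classic negative-array-size trick (DEV_NAME_LEN and the width formula here are assumptions for the demo, not the driver's definitions):

    #include <stdio.h>

    #define RBD_DRV_NAME            "rbd"
    #define MAX_INT_FORMAT_WIDTH    ((sizeof(int) * 8 + 2) / 3 + 2) /* digits + sign + slack */
    #define DEV_NAME_LEN            32      /* assumed; the driver defines its own */

    /* Model of the kernel's BUILD_BUG_ON(): compiles only if cond is false. */
    #define BUILD_BUG_ON_MODEL(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    int main(void)
    {
            char name[DEV_NAME_LEN];

            BUILD_BUG_ON_MODEL(DEV_NAME_LEN <
                               sizeof(RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
            sprintf(name, "%s%d", RBD_DRV_NAME, 0);     /* cannot overflow */
            printf("%s\n", name);                       /* rbd0 */
            return 0;
    }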
*/ + BUILD_BUG_ON(DEV_NAME_LEN + < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH); + sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->id); /* parse add command */ - if (sscanf(buf, "%" __stringify(RBD_MAX_OPT_LEN) "s " - "%" __stringify(RBD_MAX_OPT_LEN) "s " - "%" __stringify(RBD_MAX_POOL_NAME_LEN) "s " - "%" __stringify(RBD_MAX_OBJ_NAME_LEN) "s" - "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s", - mon_dev_name, options, rbd_dev->pool_name, - rbd_dev->obj, rbd_dev->snap_name) < 4) { - rc = -EINVAL; - goto err_out_slot; - } - - if (rbd_dev->snap_name[0] == 0) - rbd_dev->snap_name[0] = '-'; - - rbd_dev->obj_len = strlen(rbd_dev->obj); - snprintf(rbd_dev->obj_md_name, sizeof(rbd_dev->obj_md_name), "%s%s", - rbd_dev->obj, RBD_SUFFIX); - - /* initialize rest of new object */ - snprintf(rbd_dev->name, DEV_NAME_LEN, DRV_NAME "%d", rbd_dev->id); - rc = rbd_get_client(rbd_dev, mon_dev_name, options); - if (rc < 0) - goto err_out_slot; + rc = rbd_add_parse_args(rbd_dev, buf, &mon_addrs, &mon_addrs_size, + options, count); + if (rc) + goto err_put_id; - mutex_unlock(&ctl_mutex); + rbd_dev->rbd_client = rbd_get_client(mon_addrs, mon_addrs_size - 1, + options); + if (IS_ERR(rbd_dev->rbd_client)) { + rc = PTR_ERR(rbd_dev->rbd_client); + goto err_put_id; + } /* pick the pool */ - osdc = &rbd_dev->client->osdc; + osdc = &rbd_dev->rbd_client->client->osdc; rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name); if (rc < 0) goto err_out_client; rbd_dev->poolid = rc; /* register our block device */ - irc = register_blkdev(0, rbd_dev->name); - if (irc < 0) { - rc = irc; + rc = register_blkdev(0, rbd_dev->name); + if (rc < 0) goto err_out_client; - } - rbd_dev->major = irc; + rbd_dev->major = rc; rc = rbd_bus_add_dev(rbd_dev); if (rc) goto err_out_blkdev; - /* set up and announce blkdev mapping */ + /* + * At this point cleanup in the event of an error is the job + * of the sysfs code (initiated by rbd_bus_del_dev()). + * + * Set up and announce blkdev mapping. 
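rbd_add() now tears down in strict reverse order of construction: each failure label (err_out_bus, err_out_blkdev, err_out_client, err_put_id, err_nomem) undoes exactly the steps that succeeded before it. A generic sketch of that goto-unwind idiom, with placeholder resources rather than the driver's:

    #include <stdio.h>

    static int acquire_a(void)  { return 0;  }          /* placeholder steps; */
    static int acquire_b(void)  { return -1; }          /* b is made to fail  */
    static void release_a(void) { puts("release a"); }

    static int setup(void)
    {
            int rc;

            rc = acquire_a();
            if (rc)
                    goto err;
            rc = acquire_b();
            if (rc)
                    goto err_release_a;     /* undo only what succeeded */
            return 0;

    err_release_a:
            release_a();
    err:
            return rc;
    }

    int main(void)
    {
            printf("setup: %d\n", setup()); /* prints "release a", then -1 */
            return 0;
    }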
+ */ rc = rbd_init_disk(rbd_dev); if (rc) goto err_out_bus; @@ -2263,35 +2438,26 @@ static ssize_t rbd_add(struct bus_type *bus, return count; err_out_bus: - mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); - list_del_init(&rbd_dev->node); - mutex_unlock(&ctl_mutex); - /* this will also clean up rest of rbd_dev stuff */ rbd_bus_del_dev(rbd_dev); kfree(options); - kfree(mon_dev_name); return rc; err_out_blkdev: unregister_blkdev(rbd_dev->major, rbd_dev->name); err_out_client: rbd_put_client(rbd_dev); - mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); -err_out_slot: - list_del_init(&rbd_dev->node); - mutex_unlock(&ctl_mutex); - - kfree(rbd_dev); -err_out_opt: +err_put_id: + rbd_id_put(rbd_dev); +err_nomem: kfree(options); -err_mon_dev: - kfree(mon_dev_name); -err_out_mod: + kfree(rbd_dev); + dout("Error adding device %s\n", buf); module_put(THIS_MODULE); - return rc; + + return (ssize_t) rc; } static struct rbd_device *__rbd_get_dev(unsigned long id) @@ -2299,22 +2465,28 @@ static struct rbd_device *__rbd_get_dev(unsigned long id) struct list_head *tmp; struct rbd_device *rbd_dev; + spin_lock(&rbd_dev_list_lock); list_for_each(tmp, &rbd_dev_list) { rbd_dev = list_entry(tmp, struct rbd_device, node); - if (rbd_dev->id == id) + if (rbd_dev->id == id) { + spin_unlock(&rbd_dev_list_lock); return rbd_dev; + } } + spin_unlock(&rbd_dev_list_lock); return NULL; } static void rbd_dev_release(struct device *dev) { - struct rbd_device *rbd_dev = - container_of(dev, struct rbd_device, dev); + struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); - if (rbd_dev->watch_request) - ceph_osdc_unregister_linger_request(&rbd_dev->client->osdc, + if (rbd_dev->watch_request) { + struct ceph_client *client = rbd_dev->rbd_client->client; + + ceph_osdc_unregister_linger_request(&client->osdc, rbd_dev->watch_request); + } if (rbd_dev->watch_event) rbd_req_sync_unwatch(rbd_dev, rbd_dev->obj_md_name); @@ -2323,6 +2495,9 @@ static void rbd_dev_release(struct device *dev) /* clean up and free blkdev */ rbd_free_disk(rbd_dev); unregister_blkdev(rbd_dev->major, rbd_dev->name); + + /* done with the id, and with the rbd_dev */ + rbd_id_put(rbd_dev); kfree(rbd_dev); /* release module ref */ @@ -2355,8 +2530,6 @@ static ssize_t rbd_remove(struct bus_type *bus, goto done; } - list_del_init(&rbd_dev->node); - __rbd_remove_all_snaps(rbd_dev); rbd_bus_del_dev(rbd_dev); @@ -2370,7 +2543,7 @@ static ssize_t rbd_snap_add(struct device *dev, const char *buf, size_t count) { - struct rbd_device *rbd_dev = dev_to_rbd(dev); + struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); int ret; char *name = kmalloc(count + 1, GFP_KERNEL); if (!name) @@ -2406,12 +2579,6 @@ err_unlock: return ret; } -static struct bus_attribute rbd_bus_attrs[] = { - __ATTR(add, S_IWUSR, NULL, rbd_add), - __ATTR(remove, S_IWUSR, NULL, rbd_remove), - __ATTR_NULL -}; - /* * create control files in sysfs * /sys/bus/rbd/... 
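__rbd_get_dev() in this hunk now walks the device list only with rbd_dev_list_lock held and drops the lock on both the found and not-found paths; a caller-side lock (ctl_mutex in this driver) is what is expected to keep the returned device alive once the spinlock is released. A userspace model with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stddef.h>

    struct dev_model {
            unsigned long id;
            struct dev_model *next;
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct dev_model *dev_list;

    /* Walk the list under the lock; unlock on every exit path. */
    static struct dev_model *get_dev(unsigned long id)
    {
            struct dev_model *d;

            pthread_mutex_lock(&list_lock);
            for (d = dev_list; d != NULL; d = d->next)
                    if (d->id == id)
                            break;
            pthread_mutex_unlock(&list_lock);
            return d;
    }

    int main(void)
    {
            struct dev_model d1 = { .id = 1, .next = NULL };

            dev_list = &d1;
            return get_dev(1) == &d1 ? 0 : 1;
    }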
@@ -2420,21 +2587,21 @@ static int rbd_sysfs_init(void) { int ret; - rbd_bus_type.bus_attrs = rbd_bus_attrs; - - ret = bus_register(&rbd_bus_type); - if (ret < 0) + ret = device_register(&rbd_root_dev); + if (ret < 0) return ret; - ret = device_register(&rbd_root_dev); + ret = bus_register(&rbd_bus_type); + if (ret < 0) + device_unregister(&rbd_root_dev); return ret; } static void rbd_sysfs_cleanup(void) { - device_unregister(&rbd_root_dev); bus_unregister(&rbd_bus_type); + device_unregister(&rbd_root_dev); } int __init rbd_init(void) @@ -2444,8 +2611,7 @@ int __init rbd_init(void) rc = rbd_sysfs_init(); if (rc) return rc; - spin_lock_init(&node_lock); - pr_info("loaded " DRV_NAME_LONG "\n"); + pr_info("loaded " RBD_DRV_NAME_LONG "\n"); return 0; } diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h index fc6c678aa2c..950708688f1 100644 --- a/drivers/block/rbd_types.h +++ b/drivers/block/rbd_types.h @@ -41,10 +41,6 @@ #define RBD_HEADER_SIGNATURE "RBD" #define RBD_HEADER_VERSION "001.005" -struct rbd_info { - __le64 max_id; -} __attribute__ ((packed)); - struct rbd_image_snap_ondisk { __le64 id; __le64 image_size; diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 48e8fee9f2d..9dcf76a10bb 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -839,10 +839,7 @@ static struct vio_driver vdc_port_driver = { .id_table = vdc_port_match, .probe = vdc_port_probe, .remove = vdc_port_remove, - .driver = { - .name = "vdc_port", - .owner = THIS_MODULE, - } + .name = "vdc_port", }; static int __init vdc_init(void) diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c index e7472f567c9..3fb6ab4c8b4 100644 --- a/drivers/block/sx8.c +++ b/drivers/block/sx8.c @@ -1120,7 +1120,7 @@ static inline void carm_handle_resp(struct carm_host *host, break; case MISC_GET_FW_VER: { struct carm_fw_ver *ver = (struct carm_fw_ver *) - mem + sizeof(struct carm_msg_get_fw_ver); + (mem + sizeof(struct carm_msg_get_fw_ver)); if (!error) { host->fw_ver = le32_to_cpu(ver->version); host->flags |= (ver->features & FL_FW_VER_MASK); diff --git a/drivers/block/ub.c b/drivers/block/ub.c index 7333b9e4441..fcec0225ac7 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c @@ -119,43 +119,6 @@ /* */ - -/* command block wrapper */ -struct bulk_cb_wrap { - __le32 Signature; /* contains 'USBC' */ - u32 Tag; /* unique per command id */ - __le32 DataTransferLength; /* size of data */ - u8 Flags; /* direction in bit 0 */ - u8 Lun; /* LUN */ - u8 Length; /* of of the CDB */ - u8 CDB[UB_MAX_CDB_SIZE]; /* max command */ -}; - -#define US_BULK_CB_WRAP_LEN 31 -#define US_BULK_CB_SIGN 0x43425355 /*spells out USBC */ -#define US_BULK_FLAG_IN 1 -#define US_BULK_FLAG_OUT 0 - -/* command status wrapper */ -struct bulk_cs_wrap { - __le32 Signature; /* should = 'USBS' */ - u32 Tag; /* same as original command */ - __le32 Residue; /* amount not transferred */ - u8 Status; /* see below */ -}; - -#define US_BULK_CS_WRAP_LEN 13 -#define US_BULK_CS_SIGN 0x53425355 /* spells out 'USBS' */ -#define US_BULK_STAT_OK 0 -#define US_BULK_STAT_FAIL 1 -#define US_BULK_STAT_PHASE 2 - -/* bulk-only class specific requests */ -#define US_BULK_RESET_REQUEST 0xff -#define US_BULK_GET_MAX_LUN 0xfe - -/* - */ struct ub_dev; #define UB_MAX_REQ_SG 9 /* cdrecord requires 32KB and maybe a header */ @@ -2477,6 +2440,8 @@ static int __init ub_init(void) int rc; int i; + pr_info("'Low Performance USB Block' driver is deprecated. 
" + "Please switch to usb-storage\n"); for (i = 0; i < UB_QLOCK_NUM; i++) spin_lock_init(&ub_qlockv[i]); diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c deleted file mode 100644 index 9a5b2a2d616..00000000000 --- a/drivers/block/viodasd.c +++ /dev/null @@ -1,809 +0,0 @@ -/* -*- linux-c -*- - * viodasd.c - * Authors: Dave Boutcher <boutcher@us.ibm.com> - * Ryan Arnold <ryanarn@us.ibm.com> - * Colin Devilbiss <devilbis@us.ibm.com> - * Stephen Rothwell - * - * (C) Copyright 2000-2004 IBM Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * This routine provides access to disk space (termed "DASD" in historical - * IBM terms) owned and managed by an OS/400 partition running on the - * same box as this Linux partition. - * - * All disk operations are performed by sending messages back and forth to - * the OS/400 partition. - */ - -#define pr_fmt(fmt) "viod: " fmt - -#include <linux/major.h> -#include <linux/fs.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/blkdev.h> -#include <linux/genhd.h> -#include <linux/hdreg.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/string.h> -#include <linux/mutex.h> -#include <linux/dma-mapping.h> -#include <linux/completion.h> -#include <linux/device.h> -#include <linux/scatterlist.h> - -#include <asm/uaccess.h> -#include <asm/vio.h> -#include <asm/iseries/hv_types.h> -#include <asm/iseries/hv_lp_event.h> -#include <asm/iseries/hv_lp_config.h> -#include <asm/iseries/vio.h> -#include <asm/firmware.h> - -MODULE_DESCRIPTION("iSeries Virtual DASD"); -MODULE_AUTHOR("Dave Boutcher"); -MODULE_LICENSE("GPL"); - -/* - * We only support 7 partitions per physical disk....so with minor - * numbers 0-255 we get a maximum of 32 disks. 
- */ -#define VIOD_GENHD_NAME "iseries/vd" - -#define VIOD_VERS "1.64" - -enum { - PARTITION_SHIFT = 3, - MAX_DISKNO = HVMAXARCHITECTEDVIRTUALDISKS, - MAX_DISK_NAME = FIELD_SIZEOF(struct gendisk, disk_name) -}; - -static DEFINE_MUTEX(viodasd_mutex); -static DEFINE_SPINLOCK(viodasd_spinlock); - -#define VIOMAXREQ 16 - -#define DEVICE_NO(cell) ((struct viodasd_device *)(cell) - &viodasd_devices[0]) - -struct viodasd_waitevent { - struct completion com; - int rc; - u16 sub_result; - int max_disk; /* open */ -}; - -static const struct vio_error_entry viodasd_err_table[] = { - { 0x0201, EINVAL, "Invalid Range" }, - { 0x0202, EINVAL, "Invalid Token" }, - { 0x0203, EIO, "DMA Error" }, - { 0x0204, EIO, "Use Error" }, - { 0x0205, EIO, "Release Error" }, - { 0x0206, EINVAL, "Invalid Disk" }, - { 0x0207, EBUSY, "Can't Lock" }, - { 0x0208, EIO, "Already Locked" }, - { 0x0209, EIO, "Already Unlocked" }, - { 0x020A, EIO, "Invalid Arg" }, - { 0x020B, EIO, "Bad IFS File" }, - { 0x020C, EROFS, "Read Only Device" }, - { 0x02FF, EIO, "Internal Error" }, - { 0x0000, 0, NULL }, -}; - -/* - * Figure out the biggest I/O request (in sectors) we can accept - */ -#define VIODASD_MAXSECTORS (4096 / 512 * VIOMAXBLOCKDMA) - -/* - * Number of disk I/O requests we've sent to OS/400 - */ -static int num_req_outstanding; - -/* - * This is our internal structure for keeping track of disk devices - */ -struct viodasd_device { - u16 cylinders; - u16 tracks; - u16 sectors; - u16 bytes_per_sector; - u64 size; - int read_only; - spinlock_t q_lock; - struct gendisk *disk; - struct device *dev; -} viodasd_devices[MAX_DISKNO]; - -/* - * External open entry point. - */ -static int viodasd_open(struct block_device *bdev, fmode_t mode) -{ - struct viodasd_device *d = bdev->bd_disk->private_data; - HvLpEvent_Rc hvrc; - struct viodasd_waitevent we; - u16 flags = 0; - - if (d->read_only) { - if (mode & FMODE_WRITE) - return -EROFS; - flags = vioblockflags_ro; - } - - init_completion(&we.com); - - /* Send the open event to OS/400 */ - hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, - HvLpEvent_Type_VirtualIo, - viomajorsubtype_blockio | vioblockopen, - HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck, - viopath_sourceinst(viopath_hostLp), - viopath_targetinst(viopath_hostLp), - (u64)(unsigned long)&we, VIOVERSION << 16, - ((u64)DEVICE_NO(d) << 48) | ((u64)flags << 32), - 0, 0, 0); - if (hvrc != 0) { - pr_warning("HV open failed %d\n", (int)hvrc); - return -EIO; - } - - wait_for_completion(&we.com); - - /* Check the return code */ - if (we.rc != 0) { - const struct vio_error_entry *err = - vio_lookup_rc(viodasd_err_table, we.sub_result); - - pr_warning("bad rc opening disk: %d:0x%04x (%s)\n", - (int)we.rc, we.sub_result, err->msg); - return -EIO; - } - - return 0; -} - -static int viodasd_unlocked_open(struct block_device *bdev, fmode_t mode) -{ - int ret; - - mutex_lock(&viodasd_mutex); - ret = viodasd_open(bdev, mode); - mutex_unlock(&viodasd_mutex); - - return ret; -} - - -/* - * External release entry point. - */ -static int viodasd_release(struct gendisk *disk, fmode_t mode) -{ - struct viodasd_device *d = disk->private_data; - HvLpEvent_Rc hvrc; - - mutex_lock(&viodasd_mutex); - /* Send the event to OS/400. 
We DON'T expect a response */ - hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, - HvLpEvent_Type_VirtualIo, - viomajorsubtype_blockio | vioblockclose, - HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck, - viopath_sourceinst(viopath_hostLp), - viopath_targetinst(viopath_hostLp), - 0, VIOVERSION << 16, - ((u64)DEVICE_NO(d) << 48) /* | ((u64)flags << 32) */, - 0, 0, 0); - if (hvrc != 0) - pr_warning("HV close call failed %d\n", (int)hvrc); - - mutex_unlock(&viodasd_mutex); - - return 0; -} - - -/* External ioctl entry point. - */ -static int viodasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) -{ - struct gendisk *disk = bdev->bd_disk; - struct viodasd_device *d = disk->private_data; - - geo->sectors = d->sectors ? d->sectors : 32; - geo->heads = d->tracks ? d->tracks : 64; - geo->cylinders = d->cylinders ? d->cylinders : - get_capacity(disk) / (geo->sectors * geo->heads); - - return 0; -} - -/* - * Our file operations table - */ -static const struct block_device_operations viodasd_fops = { - .owner = THIS_MODULE, - .open = viodasd_unlocked_open, - .release = viodasd_release, - .getgeo = viodasd_getgeo, -}; - -/* - * End a request - */ -static void viodasd_end_request(struct request *req, int error, - int num_sectors) -{ - __blk_end_request(req, error, num_sectors << 9); -} - -/* - * Send an actual I/O request to OS/400 - */ -static int send_request(struct request *req) -{ - u64 start; - int direction; - int nsg; - u16 viocmd; - HvLpEvent_Rc hvrc; - struct vioblocklpevent *bevent; - struct HvLpEvent *hev; - struct scatterlist sg[VIOMAXBLOCKDMA]; - int sgindex; - struct viodasd_device *d; - unsigned long flags; - - start = (u64)blk_rq_pos(req) << 9; - - if (rq_data_dir(req) == READ) { - direction = DMA_FROM_DEVICE; - viocmd = viomajorsubtype_blockio | vioblockread; - } else { - direction = DMA_TO_DEVICE; - viocmd = viomajorsubtype_blockio | vioblockwrite; - } - - d = req->rq_disk->private_data; - - /* Now build the scatter-gather list */ - sg_init_table(sg, VIOMAXBLOCKDMA); - nsg = blk_rq_map_sg(req->q, req, sg); - nsg = dma_map_sg(d->dev, sg, nsg, direction); - - spin_lock_irqsave(&viodasd_spinlock, flags); - num_req_outstanding++; - - /* This optimization handles a single DMA block */ - if (nsg == 1) - hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, - HvLpEvent_Type_VirtualIo, viocmd, - HvLpEvent_AckInd_DoAck, - HvLpEvent_AckType_ImmediateAck, - viopath_sourceinst(viopath_hostLp), - viopath_targetinst(viopath_hostLp), - (u64)(unsigned long)req, VIOVERSION << 16, - ((u64)DEVICE_NO(d) << 48), start, - ((u64)sg_dma_address(&sg[0])) << 32, - sg_dma_len(&sg[0])); - else { - bevent = (struct vioblocklpevent *) - vio_get_event_buffer(viomajorsubtype_blockio); - if (bevent == NULL) { - pr_warning("error allocating disk event buffer\n"); - goto error_ret; - } - - /* - * Now build up the actual request. 
Note that we store - * the pointer to the request in the correlation - * token so we can match the response up later - */ - memset(bevent, 0, sizeof(struct vioblocklpevent)); - hev = &bevent->event; - hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK | - HV_LP_EVENT_INT; - hev->xType = HvLpEvent_Type_VirtualIo; - hev->xSubtype = viocmd; - hev->xSourceLp = HvLpConfig_getLpIndex(); - hev->xTargetLp = viopath_hostLp; - hev->xSizeMinus1 = - offsetof(struct vioblocklpevent, u.rw_data.dma_info) + - (sizeof(bevent->u.rw_data.dma_info[0]) * nsg) - 1; - hev->xSourceInstanceId = viopath_sourceinst(viopath_hostLp); - hev->xTargetInstanceId = viopath_targetinst(viopath_hostLp); - hev->xCorrelationToken = (u64)req; - bevent->version = VIOVERSION; - bevent->disk = DEVICE_NO(d); - bevent->u.rw_data.offset = start; - - /* - * Copy just the dma information from the sg list - * into the request - */ - for (sgindex = 0; sgindex < nsg; sgindex++) { - bevent->u.rw_data.dma_info[sgindex].token = - sg_dma_address(&sg[sgindex]); - bevent->u.rw_data.dma_info[sgindex].len = - sg_dma_len(&sg[sgindex]); - } - - /* Send the request */ - hvrc = HvCallEvent_signalLpEvent(&bevent->event); - vio_free_event_buffer(viomajorsubtype_blockio, bevent); - } - - if (hvrc != HvLpEvent_Rc_Good) { - pr_warning("error sending disk event to OS/400 (rc %d)\n", - (int)hvrc); - goto error_ret; - } - spin_unlock_irqrestore(&viodasd_spinlock, flags); - return 0; - -error_ret: - num_req_outstanding--; - spin_unlock_irqrestore(&viodasd_spinlock, flags); - dma_unmap_sg(d->dev, sg, nsg, direction); - return -1; -} - -/* - * This is the external request processing routine - */ -static void do_viodasd_request(struct request_queue *q) -{ - struct request *req; - - /* - * If we already have the maximum number of requests - * outstanding to OS/400 just bail out. We'll come - * back later. - */ - while (num_req_outstanding < VIOMAXREQ) { - req = blk_fetch_request(q); - if (req == NULL) - return; - /* check that request contains a valid command */ - if (req->cmd_type != REQ_TYPE_FS) { - viodasd_end_request(req, -EIO, blk_rq_sectors(req)); - continue; - } - /* Try sending the request */ - if (send_request(req) != 0) - viodasd_end_request(req, -EIO, blk_rq_sectors(req)); - } -} - -/* - * Probe a single disk and fill in the viodasd_device structure - * for it. - */ -static int probe_disk(struct viodasd_device *d) -{ - HvLpEvent_Rc hvrc; - struct viodasd_waitevent we; - int dev_no = DEVICE_NO(d); - struct gendisk *g; - struct request_queue *q; - u16 flags = 0; - -retry: - init_completion(&we.com); - - /* Send the open event to OS/400 */ - hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, - HvLpEvent_Type_VirtualIo, - viomajorsubtype_blockio | vioblockopen, - HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck, - viopath_sourceinst(viopath_hostLp), - viopath_targetinst(viopath_hostLp), - (u64)(unsigned long)&we, VIOVERSION << 16, - ((u64)dev_no << 48) | ((u64)flags<< 32), - 0, 0, 0); - if (hvrc != 0) { - pr_warning("bad rc on HV open %d\n", (int)hvrc); - return 0; - } - - wait_for_completion(&we.com); - - if (we.rc != 0) { - if (flags != 0) - return 0; - /* try again with read only flag set */ - flags = vioblockflags_ro; - goto retry; - } - if (we.max_disk > (MAX_DISKNO - 1)) { - printk_once(KERN_INFO pr_fmt("Only examining the first %d of %d disks connected\n"), - MAX_DISKNO, we.max_disk + 1); - } - - /* Send the close event to OS/400. 
We DON'T expect a response */ - hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, - HvLpEvent_Type_VirtualIo, - viomajorsubtype_blockio | vioblockclose, - HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck, - viopath_sourceinst(viopath_hostLp), - viopath_targetinst(viopath_hostLp), - 0, VIOVERSION << 16, - ((u64)dev_no << 48) | ((u64)flags << 32), - 0, 0, 0); - if (hvrc != 0) { - pr_warning("bad rc sending event to OS/400 %d\n", (int)hvrc); - return 0; - } - - if (d->dev == NULL) { - /* this is when we reprobe for new disks */ - if (vio_create_viodasd(dev_no) == NULL) { - pr_warning("cannot allocate virtual device for disk %d\n", - dev_no); - return 0; - } - /* - * The vio_create_viodasd will have recursed into this - * routine with d->dev set to the new vio device and - * will finish the setup of the disk below. - */ - return 1; - } - - /* create the request queue for the disk */ - spin_lock_init(&d->q_lock); - q = blk_init_queue(do_viodasd_request, &d->q_lock); - if (q == NULL) { - pr_warning("cannot allocate queue for disk %d\n", dev_no); - return 0; - } - g = alloc_disk(1 << PARTITION_SHIFT); - if (g == NULL) { - pr_warning("cannot allocate disk structure for disk %d\n", - dev_no); - blk_cleanup_queue(q); - return 0; - } - - d->disk = g; - blk_queue_max_segments(q, VIOMAXBLOCKDMA); - blk_queue_max_hw_sectors(q, VIODASD_MAXSECTORS); - g->major = VIODASD_MAJOR; - g->first_minor = dev_no << PARTITION_SHIFT; - if (dev_no >= 26) - snprintf(g->disk_name, sizeof(g->disk_name), - VIOD_GENHD_NAME "%c%c", - 'a' + (dev_no / 26) - 1, 'a' + (dev_no % 26)); - else - snprintf(g->disk_name, sizeof(g->disk_name), - VIOD_GENHD_NAME "%c", 'a' + (dev_no % 26)); - g->fops = &viodasd_fops; - g->queue = q; - g->private_data = d; - g->driverfs_dev = d->dev; - set_capacity(g, d->size >> 9); - - pr_info("disk %d: %lu sectors (%lu MB) CHS=%d/%d/%d sector size %d%s\n", - dev_no, (unsigned long)(d->size >> 9), - (unsigned long)(d->size >> 20), - (int)d->cylinders, (int)d->tracks, - (int)d->sectors, (int)d->bytes_per_sector, - d->read_only ? " (RO)" : ""); - - /* register us in the global list */ - add_disk(g); - return 1; -} - -/* returns the total number of scatterlist elements converted */ -static int block_event_to_scatterlist(const struct vioblocklpevent *bevent, - struct scatterlist *sg, int *total_len) -{ - int i, numsg; - const struct rw_data *rw_data = &bevent->u.rw_data; - static const int offset = - offsetof(struct vioblocklpevent, u.rw_data.dma_info); - static const int element_size = sizeof(rw_data->dma_info[0]); - - numsg = ((bevent->event.xSizeMinus1 + 1) - offset) / element_size; - if (numsg > VIOMAXBLOCKDMA) - numsg = VIOMAXBLOCKDMA; - - *total_len = 0; - sg_init_table(sg, VIOMAXBLOCKDMA); - for (i = 0; (i < numsg) && (rw_data->dma_info[i].len > 0); ++i) { - sg_dma_address(&sg[i]) = rw_data->dma_info[i].token; - sg_dma_len(&sg[i]) = rw_data->dma_info[i].len; - *total_len += rw_data->dma_info[i].len; - } - return i; -} - -/* - * Restart all queues, starting with the one _after_ the disk given, - * thus reducing the chance of starvation of higher numbered disks. 
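Although viodasd is being deleted wholesale here, the comment above records a fairness trick worth noting: after completing a request for disk i, queues are restarted beginning at i+1 and wrapping around, so low-numbered disks cannot monopolize the outstanding-request budget. An illustrative standalone sketch of the rotation:

    #include <stdio.h>

    #define NDISKS 4

    static void run_queue(int i) { printf("run queue %d\n", i); }

    /*
     * Restart every queue, starting with the one after first_index and
     * wrapping, so the same low indexes do not always go first.
     */
    static void restart_from(int first_index)
    {
            int i;

            for (i = first_index + 1; i < NDISKS; i++)
                    run_queue(i);
            for (i = 0; i <= first_index; i++)
                    run_queue(i);
    }

    int main(void)
    {
            restart_from(1);        /* runs 2, 3, 0, 1 */
            return 0;
    }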
- */ -static void viodasd_restart_all_queues_starting_from(int first_index) -{ - int i; - - for (i = first_index + 1; i < MAX_DISKNO; ++i) - if (viodasd_devices[i].disk) - blk_run_queue(viodasd_devices[i].disk->queue); - for (i = 0; i <= first_index; ++i) - if (viodasd_devices[i].disk) - blk_run_queue(viodasd_devices[i].disk->queue); -} - -/* - * For read and write requests, decrement the number of outstanding requests, - * Free the DMA buffers we allocated. - */ -static int viodasd_handle_read_write(struct vioblocklpevent *bevent) -{ - int num_sg, num_sect, pci_direction, total_len; - struct request *req; - struct scatterlist sg[VIOMAXBLOCKDMA]; - struct HvLpEvent *event = &bevent->event; - unsigned long irq_flags; - struct viodasd_device *d; - int error; - spinlock_t *qlock; - - num_sg = block_event_to_scatterlist(bevent, sg, &total_len); - num_sect = total_len >> 9; - if (event->xSubtype == (viomajorsubtype_blockio | vioblockread)) - pci_direction = DMA_FROM_DEVICE; - else - pci_direction = DMA_TO_DEVICE; - req = (struct request *)bevent->event.xCorrelationToken; - d = req->rq_disk->private_data; - - dma_unmap_sg(d->dev, sg, num_sg, pci_direction); - - /* - * Since this is running in interrupt mode, we need to make sure - * we're not stepping on any global I/O operations - */ - spin_lock_irqsave(&viodasd_spinlock, irq_flags); - num_req_outstanding--; - spin_unlock_irqrestore(&viodasd_spinlock, irq_flags); - - error = (event->xRc == HvLpEvent_Rc_Good) ? 0 : -EIO; - if (error) { - const struct vio_error_entry *err; - err = vio_lookup_rc(viodasd_err_table, bevent->sub_result); - pr_warning("read/write error %d:0x%04x (%s)\n", - event->xRc, bevent->sub_result, err->msg); - num_sect = blk_rq_sectors(req); - } - qlock = req->q->queue_lock; - spin_lock_irqsave(qlock, irq_flags); - viodasd_end_request(req, error, num_sect); - spin_unlock_irqrestore(qlock, irq_flags); - - /* Finally, try to get more requests off of this device's queue */ - viodasd_restart_all_queues_starting_from(DEVICE_NO(d)); - - return 0; -} - -/* This routine handles incoming block LP events */ -static void handle_block_event(struct HvLpEvent *event) -{ - struct vioblocklpevent *bevent = (struct vioblocklpevent *)event; - struct viodasd_waitevent *pwe; - - if (event == NULL) - /* Notification that a partition went away! */ - return; - /* First, we should NEVER get an int here...only acks */ - if (hvlpevent_is_int(event)) { - pr_warning("Yikes! got an int in viodasd event handler!\n"); - if (hvlpevent_need_ack(event)) { - event->xRc = HvLpEvent_Rc_InvalidSubtype; - HvCallEvent_ackLpEvent(event); - } - } - - switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) { - case vioblockopen: - /* - * Handle a response to an open request. We get all the - * disk information in the response, so update it. The - * correlation token contains a pointer to a waitevent - * structure that has a completion in it. 
update the - * return code in the waitevent structure and post the - * completion to wake up the guy who sent the request - */ - pwe = (struct viodasd_waitevent *)event->xCorrelationToken; - pwe->rc = event->xRc; - pwe->sub_result = bevent->sub_result; - if (event->xRc == HvLpEvent_Rc_Good) { - const struct open_data *data = &bevent->u.open_data; - struct viodasd_device *device = - &viodasd_devices[bevent->disk]; - device->read_only = - bevent->flags & vioblockflags_ro; - device->size = data->disk_size; - device->cylinders = data->cylinders; - device->tracks = data->tracks; - device->sectors = data->sectors; - device->bytes_per_sector = data->bytes_per_sector; - pwe->max_disk = data->max_disk; - } - complete(&pwe->com); - break; - case vioblockclose: - break; - case vioblockread: - case vioblockwrite: - viodasd_handle_read_write(bevent); - break; - - default: - pr_warning("invalid subtype!"); - if (hvlpevent_need_ack(event)) { - event->xRc = HvLpEvent_Rc_InvalidSubtype; - HvCallEvent_ackLpEvent(event); - } - } -} - -/* - * Get the driver to reprobe for more disks. - */ -static ssize_t probe_disks(struct device_driver *drv, const char *buf, - size_t count) -{ - struct viodasd_device *d; - - for (d = viodasd_devices; d < &viodasd_devices[MAX_DISKNO]; d++) { - if (d->disk == NULL) - probe_disk(d); - } - return count; -} -static DRIVER_ATTR(probe, S_IWUSR, NULL, probe_disks); - -static int viodasd_probe(struct vio_dev *vdev, const struct vio_device_id *id) -{ - struct viodasd_device *d = &viodasd_devices[vdev->unit_address]; - - d->dev = &vdev->dev; - if (!probe_disk(d)) - return -ENODEV; - return 0; -} - -static int viodasd_remove(struct vio_dev *vdev) -{ - struct viodasd_device *d; - - d = &viodasd_devices[vdev->unit_address]; - if (d->disk) { - del_gendisk(d->disk); - blk_cleanup_queue(d->disk->queue); - put_disk(d->disk); - d->disk = NULL; - } - d->dev = NULL; - return 0; -} - -/** - * viodasd_device_table: Used by vio.c to match devices that we - * support. - */ -static struct vio_device_id viodasd_device_table[] __devinitdata = { - { "block", "IBM,iSeries-viodasd" }, - { "", "" } -}; -MODULE_DEVICE_TABLE(vio, viodasd_device_table); - -static struct vio_driver viodasd_driver = { - .id_table = viodasd_device_table, - .probe = viodasd_probe, - .remove = viodasd_remove, - .driver = { - .name = "viodasd", - .owner = THIS_MODULE, - } -}; - -static int need_delete_probe; - -/* - * Initialize the whole device driver. 
Handle module and non-module - * versions - */ -static int __init viodasd_init(void) -{ - int rc; - - if (!firmware_has_feature(FW_FEATURE_ISERIES)) { - rc = -ENODEV; - goto early_fail; - } - - /* Try to open to our host lp */ - if (viopath_hostLp == HvLpIndexInvalid) - vio_set_hostlp(); - - if (viopath_hostLp == HvLpIndexInvalid) { - pr_warning("invalid hosting partition\n"); - rc = -EIO; - goto early_fail; - } - - pr_info("vers " VIOD_VERS ", hosting partition %d\n", viopath_hostLp); - - /* register the block device */ - rc = register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME); - if (rc) { - pr_warning("Unable to get major number %d for %s\n", - VIODASD_MAJOR, VIOD_GENHD_NAME); - goto early_fail; - } - /* Actually open the path to the hosting partition */ - rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio, - VIOMAXREQ + 2); - if (rc) { - pr_warning("error opening path to host partition %d\n", - viopath_hostLp); - goto unregister_blk; - } - - /* Initialize our request handler */ - vio_setHandler(viomajorsubtype_blockio, handle_block_event); - - rc = vio_register_driver(&viodasd_driver); - if (rc) { - pr_warning("vio_register_driver failed\n"); - goto unset_handler; - } - - /* - * If this call fails, it just means that we cannot dynamically - * add virtual disks, but the driver will still work fine for - * all existing disk, so ignore the failure. - */ - if (!driver_create_file(&viodasd_driver.driver, &driver_attr_probe)) - need_delete_probe = 1; - - return 0; - -unset_handler: - vio_clearHandler(viomajorsubtype_blockio); - viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2); -unregister_blk: - unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME); -early_fail: - return rc; -} -module_init(viodasd_init); - -void __exit viodasd_exit(void) -{ - if (need_delete_probe) - driver_remove_file(&viodasd_driver.driver, &driver_attr_probe); - vio_unregister_driver(&viodasd_driver); - vio_clearHandler(viomajorsubtype_blockio); - viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2); - unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME); -} -module_exit(viodasd_exit); diff --git a/drivers/block/xd.c b/drivers/block/xd.c index 51a972704db..ff540520bad 100644 --- a/drivers/block/xd.c +++ b/drivers/block/xd.c @@ -52,7 +52,6 @@ #include <linux/io.h> #include <linux/gfp.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/dma.h> diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 2f22874c0a3..98cbeba8cd5 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1475,6 +1475,9 @@ static int __init xlblk_init(void) if (!xen_domain()) return -ENODEV; + if (xen_hvm_domain() && !xen_platform_pci_unplug) + return -ENODEV; + if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n", XENVBD_MAJOR, DEV_NAME); |
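The xen-blkfront hunk that closes this series makes xlblk_init() bail out in an HVM guest unless the Xen platform PCI device has unplugged the emulated disks; otherwise the paravirtual driver and the emulated IDE/SCSI path would both claim the same backing storage. A toy model of the guard, with stub flags standing in for xen_domain()/xen_hvm_domain()/xen_platform_pci_unplug:

    #include <errno.h>
    #include <stdio.h>

    static int xen_domain_flag = 1;     /* stand-ins for xen_domain() and  */
    static int xen_hvm_flag = 1;        /* xen_hvm_domain() test results   */
    static int platform_pci_unplug;     /* 0: emulated disks still wired   */

    static int xlblk_init_model(void)
    {
            if (!xen_domain_flag)
                    return -ENODEV;
            /*
             * In an HVM guest the same disk is also reachable through the
             * emulated path; only bind the PV driver once the platform
             * PCI device has unplugged the emulated devices.
             */
            if (xen_hvm_flag && !platform_pci_unplug)
                    return -ENODEV;
            return 0;       /* would go on to register_blkdev() etc. */
    }

    int main(void)
    {
            printf("init: %d\n", xlblk_init_model()); /* -19: refuses to load */
            return 0;
    }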