author    David Barksdale <amatus@amatus.name>  2014-08-13 16:29:15 -0500
committer David Barksdale <amatus@amatus.name>  2014-08-13 16:29:15 -0500
commit    205ac4d83fc388c1e2d0bb590a2a36e9a4c2fd78 (patch)
tree      1dbd529848c396058dfc9c8a4f402dcbe3546317 /drivers/md
parent    ace6c6d243016e272050787c14e27a83ecd94a25 (diff)
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/raid0.c |  2
-rw-r--r--  drivers/md/raid5.c | 31
2 files changed, 31 insertions(+), 2 deletions(-)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 122d07af5b5..d3a4ce06015 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -453,7 +453,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
- md_barrier_request(mddev, bio);
+ bio_endio(bio, -EOPNOTSUPP);
return 0;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 0d403ca12ae..1cb830da54d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -96,6 +96,16 @@
#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))
+#ifdef CONFIG_MD_RAID_SKIP_BIO_COPY
+/* Define SKIP_BIO_COPY_ERR to enable a workaround for SKIP_BIO_COPY
+ * malfunctioning when the RAID array becomes degraded.
+ */
+#define SKIP_BIO_COPY_ERR
+#if defined(SKIP_BIO_COPY_ERR)
+static int degraded = 0;
+#endif
+#endif
+
/*
* We maintain a biased count of active stripes in the bottom 16 bits of
* bi_phys_segments, and a count of processed stripes in the upper 16 bits
@@ -956,6 +966,12 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
(unsigned long long)sh->sector);
#ifdef CONFIG_MD_RAID_SKIP_BIO_COPY
+#ifdef SKIP_BIO_COPY_ERR
+ if (degraded) {
+ fswrite = 0;
+ goto not_use_skip_bio;
+ }
+#endif
/* initially assume that the operation is a full-stripe write*/
for (i = disks; i-- ;) {
struct r5dev *dev = &sh->dev[i];
@@ -1018,6 +1034,9 @@ do_copy:
async_tx_issue_pending_all();
}
#endif
+#ifdef SKIP_BIO_COPY_ERR
+not_use_skip_bio:
+#endif
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
@@ -1127,7 +1146,7 @@ ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
* set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
* for the synchronous xor case
*/
- flags = ASYNC_TX_ACK |
+ flags = ASYNC_TX_FENCE | ASYNC_TX_ACK |
(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
atomic_inc(&sh->count);
@@ -1718,6 +1737,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
}
+ degraded = 1;
set_bit(Faulty, &rdev->flags);
printk(KERN_ALERT
"raid5: Disk failure on %s, disabling device.\n"
@@ -3977,6 +3997,12 @@ static int make_request(struct request_queue *q, struct bio * bi)
const int rw = bio_data_dir(bi);
int cpu, remaining;
+#if defined(SKIP_BIO_COPY_ERR)
+ if (mddev->degraded)
+ degraded = 1;
+ else
+ degraded = 0;
+#endif
if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
/* Drain all pending writes. We only really need
* to ensure they have been submitted, but this is
@@ -5235,6 +5261,9 @@ static int stop(mddev_t *mddev)
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
free_conf(conf);
mddev->private = &raid5_attrs_group;
+#if defined(SKIP_BIO_COPY_ERR)
+ degraded = 0;
+#endif
return 0;
}
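
For context, a minimal standalone sketch of the gating pattern this patch introduces: a module-wide degraded flag is raised when a member disk fails and checked before taking the zero-copy fast path, so a degraded array always falls back to the normal copy path. All names, types and the main() driver below are illustrative assumptions, not the kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the module-wide 'degraded' flag added by the patch. */
static bool degraded;

/* Raised when a member device fails (cf. the error() hunk setting degraded = 1). */
static void mark_device_faulty(void)
{
        degraded = true;
}

/* Cleared when the array is stopped (cf. the stop() hunk resetting the flag). */
static void reset_array_state(void)
{
        degraded = false;
}

/* Mirrors the ops_run_biodrain() hunk: skip the zero-copy fast path
 * whenever the array is degraded and fall back to the copy path. */
static void run_biodrain(bool full_stripe_write)
{
        bool use_skip_bio_copy = full_stripe_write;

        if (degraded)
                use_skip_bio_copy = false;  /* workaround: always copy */

        if (use_skip_bio_copy)
                printf("fast path: drain bios without copying\n");
        else
                printf("copy path: copy bio data into the stripe cache\n");
}

int main(void)
{
        run_biodrain(true);   /* healthy array: zero-copy fast path */
        mark_device_faulty();
        run_biodrain(true);   /* degraded array: forced copy path */
        reset_array_state();
        return 0;
}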