path: root/block/blk-map.c
Diffstat (limited to 'block/blk-map.c')
-rw-r--r--  block/blk-map.c | 22
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/block/blk-map.c b/block/blk-map.c
index e663ac2d8e6..f890d4345b0 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -20,7 +20,7 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
rq->biotail->bi_next = bio;
rq->biotail = bio;
- rq->__data_len += bio->bi_size;
+ rq->__data_len += bio->bi_iter.bi_size;
}
return 0;
}
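For context, this hunk reflects the immutable-biovec conversion: the per-bio byte count moved from bio->bi_size into the embedded iterator, so callers read bio->bi_iter.bi_size when accumulating the request length. Below is a minimal standalone sketch of that accumulation pattern; the sketch_* struct and function names are hypothetical stand-ins, not the kernel API, and the first-bio branch is simplified.

/*
 * Standalone illustration (not kernel code): the bio's byte count lives
 * in an embedded iterator, so the request length grows by
 * bio->bi_iter.bi_size as bios are chained onto the request.
 */
#include <stdio.h>

struct sketch_bvec_iter {
	unsigned int bi_size;		/* remaining byte count of the bio */
};

struct sketch_bio {
	struct sketch_bio *bi_next;
	struct sketch_bvec_iter bi_iter;
};

struct sketch_request {
	struct sketch_bio *bio;
	struct sketch_bio *biotail;
	unsigned int data_len;
};

/* Append a bio to the request chain and grow the request's total length. */
static void sketch_append_bio(struct sketch_request *rq, struct sketch_bio *bio)
{
	if (!rq->bio) {
		rq->bio = rq->biotail = bio;
	} else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
	}
	rq->data_len += bio->bi_iter.bi_size;
}

int main(void)
{
	struct sketch_request rq = { 0 };
	struct sketch_bio a = { .bi_iter = { .bi_size = 4096 } };
	struct sketch_bio b = { .bi_iter = { .bi_size = 512 } };

	sketch_append_bio(&rq, &a);
	sketch_append_bio(&rq, &b);
	printf("request length: %u bytes\n", rq.data_len);	/* prints 4608 */
	return 0;
}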
@@ -76,7 +76,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
ret = blk_rq_append_bio(q, rq, bio);
if (!ret)
- return bio->bi_size;
+ return bio->bi_iter.bi_size;
/* if it was bounced we must call the end io function */
bio_endio(bio, 0);
@@ -155,7 +155,6 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
if (!bio_flagged(bio, BIO_USER_MAPPED))
rq->cmd_flags |= REQ_COPY_USER;
- rq->buffer = NULL;
return 0;
unmap_rq:
blk_rq_unmap_user(bio);
@@ -188,7 +187,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
* unmapping.
*/
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
- struct rq_map_data *map_data, struct sg_iovec *iov,
+ struct rq_map_data *map_data, const struct sg_iovec *iov,
int iov_count, unsigned int len, gfp_t gfp_mask)
{
struct bio *bio;
@@ -204,10 +203,11 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
if (!iov[i].iov_len)
return -EINVAL;
- if (uaddr & queue_dma_alignment(q)) {
+ /*
+ * Keep going so we check length of all segments
+ */
+ if (uaddr & queue_dma_alignment(q))
unaligned = 1;
- break;
- }
}
if (unaligned || (q->dma_pad_mask & len) || map_data)
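The loop change above means an unaligned segment no longer breaks out of validation early: every segment's iov_len is still checked for zero length, and the unaligned flag merely forces the copy path selected in the line just above. A hedged standalone sketch of the same pattern over a plain struct iovec array follows; check_iov_segments() and dma_align_mask are illustrative names, not kernel interfaces.

/*
 * Standalone illustration: validate every segment even after spotting
 * an unaligned one, so zero-length segments later in the array are
 * still rejected. dma_align_mask stands in for queue_dma_alignment(q).
 */
#include <errno.h>
#include <sys/uio.h>

static int check_iov_segments(const struct iovec *iov, int iov_count,
			      unsigned long dma_align_mask, int *unaligned)
{
	int i;

	*unaligned = 0;
	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (!iov[i].iov_len)
			return -EINVAL;

		/* keep going so the length of every segment is checked */
		if (uaddr & dma_align_mask)
			*unaligned = 1;
	}
	return 0;
}

int main(void)
{
	char buf[8];
	struct iovec iov[2] = {
		{ .iov_base = buf + 1, .iov_len = 4 },
		{ .iov_base = buf, .iov_len = 0 },	/* zero length: rejected */
	};
	int unaligned;

	/* returns -EINVAL because the second segment is empty */
	return check_iov_segments(iov, 2, 0x3, &unaligned) == -EINVAL ? 0 : 1;
}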
@@ -219,7 +219,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
if (IS_ERR(bio))
return PTR_ERR(bio);
- if (bio->bi_size != len) {
+ if (bio->bi_iter.bi_size != len) {
/*
* Grab an extra reference to this bio, as bio_unmap_user()
* expects to be able to drop it twice as it happens on the
@@ -237,7 +237,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
blk_queue_bounce(q, &bio);
bio_get(bio);
blk_rq_bio_prep(q, rq, bio);
- rq->buffer = NULL;
return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
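The bio_get() retained in this function pairs with the comment in the earlier hunk: when the mapped bio's size does not match the requested length, an extra reference is taken because bio_unmap_user() expects to be able to drop the bio twice, as the diff's own comment notes for the IOCTL path. A purely illustrative refcount sketch of that "take an extra get because teardown puts twice" invariant, using hypothetical sketch_* names rather than the bio API:

#include <assert.h>
#include <stdio.h>

/* Illustrative refcounted object; stands in for a bio's reference count. */
struct sketch_obj {
	int refcount;
};

static void sketch_get(struct sketch_obj *o)
{
	o->refcount++;
}

static void sketch_put(struct sketch_obj *o)
{
	assert(o->refcount > 0);
	if (--o->refcount == 0)
		printf("object freed\n");
}

/* Teardown helper that, like the unmap path described above, puts twice. */
static void sketch_teardown(struct sketch_obj *o)
{
	sketch_put(o);
	sketch_put(o);
}

int main(void)
{
	struct sketch_obj o = { .refcount = 1 };

	/*
	 * The mapping path takes one extra reference up front precisely
	 * because teardown will drop two; without the extra get, the
	 * second put would underflow the count.
	 */
	sketch_get(&o);
	sketch_teardown(&o);	/* frees the object exactly once */
	return 0;
}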
@@ -284,7 +283,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
*
* Description:
* Data will be mapped directly if possible. Otherwise a bounce
- * buffer is used. Can be called multple times to append multple
+ * buffer is used. Can be called multiple times to append multiple
* buffers.
*/
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
@@ -310,7 +309,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
if (IS_ERR(bio))
return PTR_ERR(bio);
- if (rq_data_dir(rq) == WRITE)
+ if (!reading)
bio->bi_rw |= REQ_WRITE;
if (do_copy)
@@ -324,7 +323,6 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
}
blk_queue_bounce(q, &rq->bio);
- rq->buffer = NULL;
return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
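The last functional change in blk_rq_map_kern() replaces the rq_data_dir(rq) == WRITE test with !reading; earlier in that function the local reading is derived from the request's data direction, so the two tests select the same branch and the local simply avoids re-deriving the direction. A small standalone sketch of that compute-once direction flag pattern, with illustrative SKETCH_* names rather than the kernel's READ/WRITE/REQ_WRITE definitions:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative direction and flag values, not the kernel's. */
enum sketch_dir { SKETCH_READ, SKETCH_WRITE };
#define SKETCH_REQ_WRITE	(1u << 0)

/*
 * Mirror of the pattern in the hunk above: compute 'reading' once from
 * the request direction, then use it to decide whether the bio should
 * be marked as a write.
 */
static unsigned int sketch_bio_flags(enum sketch_dir dir)
{
	bool reading = (dir == SKETCH_READ);
	unsigned int bi_rw = 0;

	if (!reading)		/* equivalent to dir == SKETCH_WRITE */
		bi_rw |= SKETCH_REQ_WRITE;

	return bi_rw;
}

int main(void)
{
	printf("read  -> flags 0x%x\n", sketch_bio_flags(SKETCH_READ));
	printf("write -> flags 0x%x\n", sketch_bio_flags(SKETCH_WRITE));
	return 0;
}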