author     Tejun Heo <htejun@gmail.com>          2008-02-19 11:35:38 +0100
committer  Jens Axboe <jens.axboe@oracle.com>    2008-02-19 11:35:38 +0100
commit     40b01b9bbdf51ae543a04744283bf2d56c4a6afa (patch)
tree       0f81015c01834b97fdb5646a63c3fe1a24936d81 /block/blk-map.c
parent     56c819df77f96c3fc0c2a979e12b478403728790 (diff)
block: update bio according to DMA alignment padding
DMA start address and transfer size alignment for PC requests are
achieved using bio_copy_user() instead of bio_map_user().  This
works because bio_copy_user() always uses full pages and block DMA
alignment isn't allowed to go over PAGE_SIZE.

However, the implementation didn't update the last bio of the
request to make this padding visible to lower layers.

This patch makes blk_rq_map_user() extend the last bio such that it
includes the padding area and the size of area pointed to by the
request is properly aligned.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
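The padding arithmetic the patch relies on can be sanity-checked in
isolation.  What follows is a minimal userspace sketch, not kernel code;
the 512-byte alignment mask and the sample transfer length are
illustrative assumptions only.

/*
 * Userspace sketch of the padding calculation used by the patch.
 * queue_dma_alignment() returns a mask such as 511 for 512-byte
 * alignment; the mask and length values here are assumptions.
 */
#include <stdio.h>

int main(void)
{
	unsigned int mask = 511;	/* assumed 512-byte DMA alignment mask */
	unsigned int len = 1000;	/* assumed unaligned transfer size */

	if (len & mask) {
		/* same expression as the patch: bytes to the next boundary */
		unsigned int pad_len = (mask & ~len) + 1;

		printf("len=%u pad_len=%u padded=%u aligned=%d\n",
		       len, pad_len, len + pad_len,
		       ((len + pad_len) & mask) == 0);
	}
	return 0;
}

For len = 1000 and a 511 mask this prints pad_len=24 and a padded size
of 1024, which is aligned to the 512-byte boundary.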
Diffstat (limited to 'block/blk-map.c')
 block/blk-map.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/block/blk-map.c b/block/blk-map.c
index bc5ce60691c..a7cf63ccb5c 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -139,6 +139,23 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		ubuf += ret;
 	}
 
+	/*
+	 * __blk_rq_map_user() copies the buffers if starting address
+	 * or length isn't aligned. As the copied buffer is always
+	 * page aligned, we know that there's enough room for padding.
+	 * Extend the last bio and update rq->data_len accordingly.
+	 *
+	 * On unmap, bio_uncopy_user() will use unmodified
+	 * bio_map_data pointed to by bio->bi_private.
+	 */
+	if (len & queue_dma_alignment(q)) {
+		unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;
+		struct bio *bio = rq->biotail;
+
+		bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
+		bio->bi_size += pad_len;
+	}
+
 	rq->buffer = rq->data = NULL;
 	return 0;
 unmap_rq:
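For readers without the surrounding kernel context, the bookkeeping this
hunk performs can be illustrated with a self-contained sketch.  The
structs below are hypothetical, cut-down stand-ins for the kernel's
struct bio and struct bio_vec, modelling only the fields the patch
touches; the mask and length values are assumptions, not taken from the
patch.

/*
 * Self-contained illustration of the patch's bookkeeping: extend the
 * last segment of the final bio and its total size by the pad length.
 */
#include <stdio.h>

struct fake_bio_vec {
	unsigned int bv_len;			/* bytes in this segment */
};

struct fake_bio {
	struct fake_bio_vec bi_io_vec[4];	/* segment table (stand-in) */
	unsigned short bi_vcnt;			/* segments in use */
	unsigned int bi_size;			/* total bytes in the bio */
};

int main(void)
{
	unsigned int mask = 511;		/* assumed DMA alignment mask */
	unsigned int len = 1000;		/* assumed unaligned request length */
	struct fake_bio bio = {
		.bi_io_vec = { { 512 }, { 488 } },	/* last segment holds the tail */
		.bi_vcnt = 2,
		.bi_size = 1000,
	};

	if (len & mask) {
		unsigned int pad_len = (mask & ~len) + 1;

		/* grow the last segment and the bio's total size, as the patch does */
		bio.bi_io_vec[bio.bi_vcnt - 1].bv_len += pad_len;
		bio.bi_size += pad_len;
	}

	printf("bi_size=%u aligned=%d\n", bio.bi_size, (bio.bi_size & mask) == 0);
	return 0;
}

The padding lands in the already-allocated page of the copied buffer,
which is why the patch only has to adjust the lengths rather than add a
new segment.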