
xen/blkfront: map REQ_FLUSH into a full barrier

Implement a flush as a full barrier, since we have nothing weaker.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Jeremy Fitzhardinge 2010-11-01 14:32:27 -04:00
parent c8ddb2713c
commit c64e38ea17


@@ -245,14 +245,11 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
 }
 
 /*
- * blkif_queue_request
+ * Generate a Xen blkfront IO request from a blk layer request. Reads
+ * and writes are handled as expected. Since we lack a loose flush
+ * request, we map flushes into a full ordered barrier.
  *
- * request block io
- *
- * id: for guest use only.
- * operation: BLKIF_OP_{READ,WRITE,PROBE}
- * buffer: buffer to read/write into. this should be a
- *   virtual address in the guest os.
+ * @req: a request struct
  */
 static int blkif_queue_request(struct request *req)
 {
@@ -289,7 +286,7 @@ static int blkif_queue_request(struct request *req)
 
 	ring_req->operation = rq_data_dir(req) ?
 		BLKIF_OP_WRITE : BLKIF_OP_READ;
-	if (req->cmd_flags & REQ_HARDBARRIER)
+	if (req->cmd_flags & REQ_FLUSH)
 		ring_req->operation = BLKIF_OP_WRITE_BARRIER;
 
 	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
@@ -1069,14 +1066,8 @@ static void blkfront_connect(struct blkfront_info *info)
 	 */
 	info->feature_flush = 0;
 
-	/*
-	 * The driver doesn't properly handled empty flushes, so
-	 * lets disable barrier support for now.
-	 */
-#if 0
-	if (!err && barrier)
-		info->feature_flush = REQ_FLUSH;
-#endif
+	if (!err && barrier)
+		info->feature_flush = REQ_FLUSH;
 
 	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
 	if (err) {
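
For readers unfamiliar with blkfront, the mapping the patch ends up with in blkif_queue_request() can be sketched in isolation: the request direction selects BLKIF_OP_READ or BLKIF_OP_WRITE, and a REQ_FLUSH request is upgraded to BLKIF_OP_WRITE_BARRIER because the blkif protocol at this point offers nothing weaker than a full barrier. The standalone C sketch below only models that decision; the flag values, the pick_blkif_op() helper, and the enum are simplified stand-ins, not the kernel's actual definitions.

/* Standalone sketch of the request mapping this patch establishes.
 * The flags, enum, and helper are hypothetical stand-ins. */
#include <stdio.h>

/* Stand-ins for the relevant blk-layer request flags (made-up values). */
#define SKETCH_REQ_WRITE (1u << 0)
#define SKETCH_REQ_FLUSH (1u << 1)

/* Stand-ins for the blkif ring operations the driver can emit. */
enum sketch_blkif_op {
	SKETCH_BLKIF_OP_READ,
	SKETCH_BLKIF_OP_WRITE,
	SKETCH_BLKIF_OP_WRITE_BARRIER,
};

/* Mirrors the decision in blkif_queue_request() after the patch:
 * direction picks READ/WRITE, and a flush upgrades the operation to
 * a full write barrier, since there is no lighter flush operation. */
static enum sketch_blkif_op pick_blkif_op(unsigned int cmd_flags)
{
	enum sketch_blkif_op op = (cmd_flags & SKETCH_REQ_WRITE) ?
		SKETCH_BLKIF_OP_WRITE : SKETCH_BLKIF_OP_READ;

	if (cmd_flags & SKETCH_REQ_FLUSH)
		op = SKETCH_BLKIF_OP_WRITE_BARRIER;
	return op;
}

int main(void)
{
	printf("plain write -> %d\n", pick_blkif_op(SKETCH_REQ_WRITE));
	printf("flush write -> %d\n",
	       pick_blkif_op(SKETCH_REQ_WRITE | SKETCH_REQ_FLUSH));
	printf("plain read  -> %d\n", pick_blkif_op(0));
	return 0;
}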