block: Finalize conversion of block limits functions

Remove compatibility wrappers and update remaining drivers.
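
For drivers still calling the removed wrappers, the conversion is a one-to-one substitution:

  blk_queue_max_sectors()        -> blk_queue_max_hw_sectors()
  blk_queue_max_phys_segments()  -> blk_queue_max_segments()
  blk_queue_max_hw_segments()    -> blk_queue_max_segments()

As a rough sketch (not taken from this patch; the driver and the limit values are hypothetical), queue setup against the finalized interface looks like:

#include <linux/blkdev.h>

/* Illustrative only: a made-up driver's queue limit setup. */
static void example_setup_limits(struct request_queue *q)
{
	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);	/* replaces blk_queue_max_sectors() */
	blk_queue_max_segments(q, BLK_MAX_SEGMENTS);		/* replaces both old segment limits */
	blk_queue_max_segment_size(q, 65536);			/* unchanged by this series */
}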

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Author:     Martin K. Petersen
AuthorDate: 2010-03-10 00:48:32 -05:00
Committer:  Jens Axboe
Parent:     2cda2728aa
Commit:     ee714f2dd3

 3 files changed, 2 insertions(+), 28 deletions(-)

diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c

@@ -2533,7 +2533,6 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
 	Controller->RequestQueue[n] = RequestQueue;
 	blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit);
 	RequestQueue->queuedata = Controller;
-	blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
 	blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit);
 	blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
 	disk->queue = RequestQueue;

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c

@@ -347,14 +347,13 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	set_capacity(vblk->disk, cap);
 
 	/* We can handle whatever the host told us to handle. */
-	blk_queue_max_phys_segments(q, vblk->sg_elems-2);
-	blk_queue_max_hw_segments(q, vblk->sg_elems-2);
+	blk_queue_max_segments(q, vblk->sg_elems-2);
 
 	/* No need to bounce any requests */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 
 	/* No real sector limit. */
-	blk_queue_max_sectors(q, -1U);
+	blk_queue_max_hw_sectors(q, -1U);
 
 	/* Host can optionally specify maximum segment size and number of
 	 * segments. */
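
A usage note, not part of this commit: once converted, a driver can read the effective limits back through the accessor helpers, since the single segment limit now answers for both of the old phys/hw segment queries. A minimal sketch (the function name is made up):

#include <linux/kernel.h>
#include <linux/blkdev.h>

/* Sketch only: dump the limits a probe routine just configured. */
static void example_dump_limits(struct request_queue *q)
{
	printk(KERN_DEBUG "max_hw_sectors=%u max_segments=%u\n",
	       queue_max_hw_sectors(q),
	       (unsigned int)queue_max_segments(q));
}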

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h

@@ -921,26 +921,7 @@ extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
-
-/* Temporary compatibility wrapper */
-static inline void blk_queue_max_sectors(struct request_queue *q, unsigned int max)
-{
-	blk_queue_max_hw_sectors(q, max);
-}
-
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
-
-static inline void blk_queue_max_phys_segments(struct request_queue *q, unsigned short max)
-{
-	blk_queue_max_segments(q, max);
-}
-
-static inline void blk_queue_max_hw_segments(struct request_queue *q, unsigned short max)
-{
-	blk_queue_max_segments(q, max);
-}
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors);
@@ -1030,11 +1011,6 @@ static inline int sb_issue_discard(struct super_block *sb,
 extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
 
-#define MAX_PHYS_SEGMENTS 128
-#define MAX_HW_SEGMENTS 128
-#define SAFE_MAX_SECTORS 255
-#define MAX_SEGMENT_SIZE 65536
-
 enum blk_default_limits {
 	BLK_MAX_SEGMENTS	= 128,
 	BLK_SAFE_MAX_SECTORS	= 255,