mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-19 02:34:01 +08:00
drbd: announce FLUSH/FUA capability to upper layers
Unconditionally announce FLUSH/FUA to upper layers. If the lower layers on either node do not actually support this, generic_make_request() will deal with it. If this causes performance regressions on your setup, make sure there are no volatile caches involved, and mount with -o nobarrier or an equivalent option. Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com> Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
This commit is contained in:
parent
db141b2f42
commit
a73ff3231d
@ -876,7 +876,11 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
|
||||
unsigned int enr, count = 0;
|
||||
struct lc_element *e;
|
||||
|
||||
if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
|
||||
/* this should be an empty REQ_FLUSH */
|
||||
if (size == 0)
|
||||
return 0;
|
||||
|
||||
if (size < 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
|
||||
dev_err(DEV, "sector: %llus, size: %d\n",
|
||||
(unsigned long long)sector, size);
|
||||
return 0;
|
||||
|
@ -3636,6 +3636,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
|
||||
q->backing_dev_info.congested_data = mdev;
|
||||
|
||||
blk_queue_make_request(q, drbd_make_request);
|
||||
blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
|
||||
/* Setting the max_hw_sectors to an odd value of 8kibyte here
|
||||
This triggers a max_bio_size message upon first attach or connect */
|
||||
blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
|
||||
|
@ -277,6 +277,9 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
|
||||
atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
|
||||
int i;
|
||||
|
||||
if (page == NULL)
|
||||
return;
|
||||
|
||||
if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
|
||||
i = page_chain_free(page);
|
||||
else {
|
||||
@ -316,7 +319,7 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
|
||||
gfp_t gfp_mask) __must_hold(local)
|
||||
{
|
||||
struct drbd_epoch_entry *e;
|
||||
struct page *page;
|
||||
struct page *page = NULL;
|
||||
unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
|
||||
|
||||
if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
|
||||
@ -329,9 +332,11 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
|
||||
if (!page)
|
||||
goto fail;
|
||||
if (data_size) {
|
||||
page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
|
||||
if (!page)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
INIT_HLIST_NODE(&e->collision);
|
||||
e->epoch = NULL;
|
||||
@ -1270,7 +1275,6 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
|
||||
|
||||
data_size -= dgs;
|
||||
|
||||
ERR_IF(data_size == 0) return NULL;
|
||||
ERR_IF(data_size & 0x1ff) return NULL;
|
||||
ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL;
|
||||
|
||||
@ -1291,6 +1295,9 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
|
||||
if (!e)
|
||||
return NULL;
|
||||
|
||||
if (!data_size)
|
||||
return e;
|
||||
|
||||
ds = data_size;
|
||||
page = e->pages;
|
||||
page_chain_for_each(page) {
|
||||
@ -1715,6 +1722,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
|
||||
|
||||
dp_flags = be32_to_cpu(p->dp_flags);
|
||||
rw |= wire_flags_to_bio(mdev, dp_flags);
|
||||
if (e->pages == NULL) {
|
||||
D_ASSERT(e->size == 0);
|
||||
D_ASSERT(dp_flags & DP_FLUSH);
|
||||
}
|
||||
|
||||
if (dp_flags & DP_MAY_SET_IN_SYNC)
|
||||
e->flags |= EE_MAY_SET_IN_SYNC;
|
||||
|
@ -1111,13 +1111,12 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
|
||||
/*
|
||||
* what we "blindly" assume:
|
||||
*/
|
||||
D_ASSERT(bio->bi_size > 0);
|
||||
D_ASSERT((bio->bi_size & 0x1ff) == 0);
|
||||
|
||||
/* to make some things easier, force alignment of requests within the
|
||||
* granularity of our hash tables */
|
||||
s_enr = bio->bi_sector >> HT_SHIFT;
|
||||
e_enr = (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT;
|
||||
e_enr = bio->bi_size ? (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT : s_enr;
|
||||
|
||||
if (likely(s_enr == e_enr)) {
|
||||
do {
|
||||
|
Loading…
Reference in New Issue
Block a user