From 8a8d56176635515d607c520580c73d61f300b409 Mon Sep 17 00:00:00 2001
From: "Yan, Zheng"
Date: Wed, 9 Nov 2016 16:47:54 +0800
Subject: [PATCH 1/3] ceph: use default file splice read callback

The splice read/write implementation changed recently.  When using
generic_file_splice_read(), an iov_iter with type == ITER_PIPE is
passed to the filesystem's read_iter callback.  But ceph_sync_read()
can't serve an ITER_PIPE iov_iter correctly (an ITER_PIPE iov_iter
expects pages from the page cache).  Fixing ceph_sync_read() requires
a big patch, so use the default splice read callback for now.

Signed-off-by: Yan, Zheng
Signed-off-by: Ilya Dryomov
---
 fs/ceph/file.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 18630e800208..f995e3528a33 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1770,7 +1770,6 @@ const struct file_operations ceph_file_fops = {
 	.fsync = ceph_fsync,
 	.lock = ceph_lock,
 	.flock = ceph_flock,
-	.splice_read = generic_file_splice_read,
 	.splice_write = iter_file_splice_write,
 	.unlocked_ioctl = ceph_ioctl,
 	.compat_ioctl = ceph_ioctl,

From 3890dce1d3a8b9fe3bc36de99496792e468cd079 Mon Sep 17 00:00:00 2001
From: "Yan, Zheng"
Date: Wed, 9 Nov 2016 16:42:48 +0800
Subject: [PATCH 2/3] libceph: fix legacy layout decode with pool 0

If your data pool was pool 0, ceph_file_layout_from_legacy()
transformed that to -1 unconditionally, which broke upgrades.  We only
want to do that for a fully zeroed ceph_file_layout, so that it still
maps to a file_layout_t.  If any fields are set, though, we trust
fl_pg_pool to be a valid pool.

Fixes: 7627151ea30bc ("libceph: define new ceph_file_layout structure")
Link: http://tracker.ceph.com/issues/17825
Signed-off-by: Yan, Zheng
Signed-off-by: Ilya Dryomov
---
 net/ceph/ceph_fs.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/net/ceph/ceph_fs.c b/net/ceph/ceph_fs.c
index 7d54e944de5e..dcbe67ff3e2b 100644
--- a/net/ceph/ceph_fs.c
+++ b/net/ceph/ceph_fs.c
@@ -34,7 +34,8 @@ void ceph_file_layout_from_legacy(struct ceph_file_layout *fl,
 	fl->stripe_count = le32_to_cpu(legacy->fl_stripe_count);
 	fl->object_size = le32_to_cpu(legacy->fl_object_size);
 	fl->pool_id = le32_to_cpu(legacy->fl_pg_pool);
-	if (fl->pool_id == 0)
+	if (fl->pool_id == 0 && fl->stripe_unit == 0 &&
+	    fl->stripe_count == 0 && fl->object_size == 0)
 		fl->pool_id = -1;
 }
 EXPORT_SYMBOL(ceph_file_layout_from_legacy);
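For illustration, here is a minimal userspace sketch of the fixed
decode logic.  Everything in it is an assumption made for the example:
the structs are simplified stand-ins for the kernel definitions, and
le32_to_cpu() is stubbed out for a little-endian host.

/* Userspace sketch of the fixed legacy-layout decode; for
 * illustration only, not the kernel code. */
#include <assert.h>
#include <stdint.h>

#define le32_to_cpu(x) (x)	/* assume a little-endian host */

struct ceph_file_layout_legacy {
	uint32_t fl_stripe_unit;
	uint32_t fl_stripe_count;
	uint32_t fl_object_size;
	uint32_t fl_pg_pool;
};

struct ceph_file_layout {
	uint32_t stripe_unit;
	uint32_t stripe_count;
	uint32_t object_size;
	int64_t pool_id;
};

static void from_legacy(struct ceph_file_layout *fl,
			struct ceph_file_layout_legacy *legacy)
{
	fl->stripe_unit = le32_to_cpu(legacy->fl_stripe_unit);
	fl->stripe_count = le32_to_cpu(legacy->fl_stripe_count);
	fl->object_size = le32_to_cpu(legacy->fl_object_size);
	fl->pool_id = le32_to_cpu(legacy->fl_pg_pool);
	/* only a fully zeroed legacy layout means "no layout" */
	if (fl->pool_id == 0 && fl->stripe_unit == 0 &&
	    fl->stripe_count == 0 && fl->object_size == 0)
		fl->pool_id = -1;
}

int main(void)
{
	struct ceph_file_layout fl;

	/* fully zeroed legacy layout -> unset pool */
	struct ceph_file_layout_legacy empty = { 0, 0, 0, 0 };
	from_legacy(&fl, &empty);
	assert(fl.pool_id == -1);

	/* a real layout in pool 0 now survives the decode */
	struct ceph_file_layout_legacy pool0 = { 65536, 1, 65536, 0 };
	from_legacy(&fl, &pool0);
	assert(fl.pool_id == 0);

	return 0;
}

With the old unconditional check, the pool0 case above would also have
been decoded as pool_id == -1, which is what broke upgrades for users
whose data pool was pool 0.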
From 264048afab27d7c27eedf5394714e0b396d787f7 Mon Sep 17 00:00:00 2001
From: Ilya Dryomov
Date: Tue, 8 Nov 2016 15:15:24 +0100
Subject: [PATCH 3/3] libceph: initialize last_linger_id with a large integer

osdc->last_linger_id is a counter for lreq->linger_id, which is used
for watch cookies.  Starting with a large integer should ease the task
of telling apart kernel and userspace clients.

Signed-off-by: Ilya Dryomov
---
 include/linux/ceph/osd_client.h | 2 ++
 net/ceph/osd_client.c           | 1 +
 2 files changed, 3 insertions(+)

diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 96337b15a60d..a8e66344bacc 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -258,6 +258,8 @@ struct ceph_watch_item {
 	struct ceph_entity_addr addr;
 };
 
+#define CEPH_LINGER_ID_START	0xffff000000000000ULL
+
 struct ceph_osd_client {
 	struct ceph_client *client;
 
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index d9bf7a1d0a58..e6ae15bc41b7 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -4094,6 +4094,7 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
 	osd_init(&osdc->homeless_osd);
 	osdc->homeless_osd.o_osdc = osdc;
 	osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
+	osdc->last_linger_id = CEPH_LINGER_ID_START;
 	osdc->linger_requests = RB_ROOT;
 	osdc->map_checks = RB_ROOT;
 	osdc->linger_map_checks = RB_ROOT;
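To show what telling the clients apart might look like, here is a
hypothetical userspace helper, not part of the patch: it assumes that
userspace watch cookies stay below CEPH_LINGER_ID_START, which is what
the large start value is meant to make likely.

/* Hypothetical helper, not from the patch: classify a watch cookie
 * (e.g. one reported by "rados listwatchers") by the new threshold. */
#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CEPH_LINGER_ID_START 0xffff000000000000ULL

static bool cookie_looks_kernel(uint64_t cookie)
{
	/* kernel linger ids now start at CEPH_LINGER_ID_START */
	return cookie >= CEPH_LINGER_ID_START;
}

int main(void)
{
	uint64_t cookies[] = { 1, 0xffff000000000001ULL };

	for (size_t i = 0; i < sizeof(cookies) / sizeof(cookies[0]); i++)
		printf("%#" PRIx64 " -> likely %s client\n", cookies[i],
		       cookie_looks_kernel(cookies[i]) ? "kernel" : "userspace");
	return 0;
}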