Merge tag 'ceph-for-4.17-rc1' of git://github.com/ceph/ceph-client

Pull ceph updates from Ilya Dryomov:
 "The big ticket items are:

   - support for rbd "fancy" striping (myself).

     The striping feature bit is now fully implemented, allowing mapping
     v2 images with non-default striping patterns. This completes
     support for --image-format 2.

   - CephFS quota support (Luis Henriques and Zheng Yan).

     This set is based on the new SnapRealm code in the upcoming v13.y.z
     ("Mimic") release. Quota handling will be rejected on older
     filesystems.

   - memory usage improvements in CephFS (Chengguang Xu).

     Directory specific bits have been split out of ceph_file_info and
     some effort went into improving cap reservation code to avoid OOM
     crashes.

  Also included a bunch of assorted fixes all over the place from
  Chengguang and others"

* tag 'ceph-for-4.17-rc1' of git://github.com/ceph/ceph-client: (67 commits)
  ceph: quota: report root dir quota usage in statfs
  ceph: quota: add counter for snaprealms with quota
  ceph: quota: cache inode pointer in ceph_snap_realm
  ceph: fix root quota realm check
  ceph: don't check quota for snap inode
  ceph: quota: update MDS when max_bytes is approaching
  ceph: quota: support for ceph.quota.max_bytes
  ceph: quota: don't allow cross-quota renames
  ceph: quota: support for ceph.quota.max_files
  ceph: quota: add initial infrastructure to support cephfs quotas
  rbd: remove VLA usage
  rbd: fix spelling mistake: "reregisteration" -> "reregistration"
  ceph: rename function drop_leases() to a more descriptive name
  ceph: fix invalid point dereference for error case in mdsc destroy
  ceph: return proper bool type to caller instead of pointer
  ceph: optimize memory usage
  ceph: optimize mds session register
  libceph, ceph: add __init attribution to init funcitons
  ceph: filter out used flags when printing unused open flags
  ceph: don't wait on writeback when there is no more dirty pages
  ...
362 lines
9.0 KiB
C
/*
 * Ceph cache definitions.
 *
 * Copyright (C) 2013 by Adfin Solutions, Inc. All Rights Reserved.
 * Written by Milosz Tanski (milosz@adfin.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include "super.h"
#include "cache.h"
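
/*
 * Auxiliary data stored alongside each inode cookie.  The check_aux
 * callback below compares it against the live inode to decide whether a
 * cached object has gone stale.
 */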
struct ceph_aux_inode {
	u64		version;
	struct timespec	mtime;
};

struct fscache_netfs ceph_cache_netfs = {
	.name		= "ceph",
	.version	= 0,
};

static DEFINE_MUTEX(ceph_fscache_lock);
static LIST_HEAD(ceph_fscache_list);

struct ceph_fscache_entry {
	struct list_head list;
	struct fscache_cookie *fscache;
	size_t uniq_len;
	/* The following members must be last */
	struct ceph_fsid fsid;
	char uniquifier[0];
};

static const struct fscache_cookie_def ceph_fscache_fsid_object_def = {
	.name		= "CEPH.fsid",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
};
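
/*
 * Register/unregister the "ceph" netfs with FS-Cache; called from module
 * init and exit respectively.
 */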
int __init ceph_fscache_register(void)
{
	return fscache_register_netfs(&ceph_cache_netfs);
}

void ceph_fscache_unregister(void)
{
	fscache_unregister_netfs(&ceph_cache_netfs);
}
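
/*
 * Acquire a per-superblock index cookie keyed on the cluster fsid plus an
 * optional uniquifier from the fsc= mount option.  Registering the same
 * fsid twice without distinct uniquifiers is refused with -EBUSY.
 */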
int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
{
	const struct ceph_fsid *fsid = &fsc->client->fsid;
	const char *fscache_uniq = fsc->mount_options->fscache_uniq;
	size_t uniq_len = fscache_uniq ? strlen(fscache_uniq) : 0;
	struct ceph_fscache_entry *ent;
	int err = 0;

	mutex_lock(&ceph_fscache_lock);
	list_for_each_entry(ent, &ceph_fscache_list, list) {
		if (memcmp(&ent->fsid, fsid, sizeof(*fsid)))
			continue;
		if (ent->uniq_len != uniq_len)
			continue;
		if (uniq_len && memcmp(ent->uniquifier, fscache_uniq, uniq_len))
			continue;

		pr_err("fscache cookie already registered for fsid %pU\n", fsid);
		pr_err(" use fsc=%%s mount option to specify a uniquifier\n");
		err = -EBUSY;
		goto out_unlock;
	}

	ent = kzalloc(sizeof(*ent) + uniq_len, GFP_KERNEL);
	if (!ent) {
		err = -ENOMEM;
		goto out_unlock;
	}

	memcpy(&ent->fsid, fsid, sizeof(*fsid));
	if (uniq_len > 0) {
		memcpy(&ent->uniquifier, fscache_uniq, uniq_len);
		ent->uniq_len = uniq_len;
	}

	fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
					      &ceph_fscache_fsid_object_def,
					      &ent->fsid, sizeof(ent->fsid) + uniq_len,
					      NULL, 0,
					      fsc, 0, true);

	if (fsc->fscache) {
		ent->fscache = fsc->fscache;
		list_add_tail(&ent->list, &ceph_fscache_list);
	} else {
		kfree(ent);
		pr_err("unable to register fscache cookie for fsid %pU\n",
		       fsid);
		/* all other fs ignore this error */
	}
out_unlock:
	mutex_unlock(&ceph_fscache_lock);
	return err;
}
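
/*
 * check_aux callback for inode cookies: the cached object is obsolete if
 * its recorded size, version or mtime no longer match the inode.
 */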
static enum fscache_checkaux ceph_fscache_inode_check_aux(
	void *cookie_netfs_data, const void *data, uint16_t dlen,
	loff_t object_size)
{
	struct ceph_aux_inode aux;
	struct ceph_inode_info* ci = cookie_netfs_data;
	struct inode* inode = &ci->vfs_inode;

	if (dlen != sizeof(aux) ||
	    i_size_read(inode) != object_size)
		return FSCACHE_CHECKAUX_OBSOLETE;

	memset(&aux, 0, sizeof(aux));
	aux.version = ci->i_version;
	aux.mtime = inode->i_mtime;

	if (memcmp(data, &aux, sizeof(aux)) != 0)
		return FSCACHE_CHECKAUX_OBSOLETE;

	dout("ceph inode 0x%p cached okay\n", ci);
	return FSCACHE_CHECKAUX_OKAY;
}

static const struct fscache_cookie_def ceph_fscache_inode_object_def = {
	.name		= "CEPH.inode",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.check_aux	= ceph_fscache_inode_check_aux,
};
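
/*
 * Acquire a data cookie for a regular file.  The cookie is acquired in the
 * disabled state (enable == false) and is only enabled later, from
 * ceph_fscache_file_set_cookie(), when the file is opened without write
 * access.
 */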
void ceph_fscache_register_inode_cookie(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_aux_inode aux;

	/* No caching for filesystem */
	if (!fsc->fscache)
		return;

	/* Only cache for regular files that are read only */
	if (!S_ISREG(inode->i_mode))
		return;

	inode_lock_nested(inode, I_MUTEX_CHILD);
	if (!ci->fscache) {
		memset(&aux, 0, sizeof(aux));
		aux.version = ci->i_version;
		aux.mtime = inode->i_mtime;
		ci->fscache = fscache_acquire_cookie(fsc->fscache,
						     &ceph_fscache_inode_object_def,
						     &ci->i_vino, sizeof(ci->i_vino),
						     &aux, sizeof(aux),
						     ci, i_size_read(inode), false);
	}
	inode_unlock(inode);
}
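
/*
 * Drop the inode cookie: uncache any pages still associated with it and
 * relinquish it back to FS-Cache.
 */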
void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
{
	struct fscache_cookie* cookie;

	if ((cookie = ci->fscache) == NULL)
		return;

	ci->fscache = NULL;

	fscache_uncache_all_inode_pages(cookie, &ci->vfs_inode);
	fscache_relinquish_cookie(cookie, &ci->i_vino, false);
}
static bool ceph_fscache_can_enable(void *data)
{
	struct inode *inode = data;
	return !inode_is_open_for_write(inode);
}
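
/*
 * Called on file open: if the inode is open for write, disable the cookie
 * and toss any pages already cached; otherwise ask FS-Cache to enable it,
 * passing ceph_fscache_can_enable() so the open-for-write check is redone
 * before the cookie is actually enabled.
 */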
void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (!fscache_cookie_valid(ci->fscache))
		return;

	if (inode_is_open_for_write(inode)) {
		dout("fscache_file_set_cookie %p %p disabling cache\n",
		     inode, filp);
		fscache_disable_cookie(ci->fscache, &ci->i_vino, false);
		fscache_uncache_all_inode_pages(ci->fscache, inode);
	} else {
		fscache_enable_cookie(ci->fscache, &ci->i_vino, i_size_read(inode),
				      ceph_fscache_can_enable, inode);
		if (fscache_cookie_enabled(ci->fscache)) {
			dout("fscache_file_set_cookie %p %p enabling cache\n",
			     inode, filp);
		}
	}
}
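
/*
 * Completion callback for async reads from the cache: mark the page
 * up to date on success, then unlock it.
 */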
static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
{
	if (!error)
		SetPageUptodate(page);

	unlock_page(page);
}
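
/*
 * The cache for an inode is only trusted while its fscache generation
 * matches i_rdcache_gen; ceph_fscache_revalidate_cookie() below brings
 * the two back in sync.
 */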
static inline bool cache_valid(struct ceph_inode_info *ci)
{
	return ci->i_fscache_gen == ci->i_rdcache_gen;
}

/* Attempt to read from the fscache.
 *
 * This function is called from the readpage_nounlock context. DO NOT attempt
 * to unlock the page here (or in the callback).
 */
int ceph_readpage_from_fscache(struct inode *inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!cache_valid(ci))
		return -ENOBUFS;

	ret = fscache_read_or_alloc_page(ci->fscache, page,
					 ceph_readpage_from_fscache_complete, NULL,
					 GFP_KERNEL);

	switch (ret) {
	case 0: /* Page found */
		dout("page read submitted\n");
		return 0;
	case -ENOBUFS: /* Pages were not found, and can't be */
	case -ENODATA: /* Pages were not found */
		dout("page/inode not in cache\n");
		return ret;
	default:
		dout("%s: unknown error ret = %i\n", __func__, ret);
		return ret;
	}
}
int ceph_readpages_from_fscache(struct inode *inode,
				struct address_space *mapping,
				struct list_head *pages,
				unsigned *nr_pages)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!cache_valid(ci))
		return -ENOBUFS;

	ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
					  ceph_readpage_from_fscache_complete,
					  NULL, mapping_gfp_mask(mapping));

	switch (ret) {
	case 0: /* All pages found */
		dout("all-page read submitted\n");
		return 0;
	case -ENOBUFS: /* Some pages were not found, and can't be */
	case -ENODATA: /* some pages were not found */
		dout("page/inode not in cache\n");
		return ret;
	default:
		dout("%s: unknown error ret = %i\n", __func__, ret);
		return ret;
	}
}
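
/*
 * Push a freshly read page into the cache; if the write fails, uncache the
 * page so FS-Cache does not keep a stale copy reserved for it.
 */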
void ceph_readpage_to_fscache(struct inode *inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!PageFsCache(page))
		return;

	if (!cache_valid(ci))
		return;

	ret = fscache_write_page(ci->fscache, page, i_size_read(inode),
				 GFP_KERNEL);
	if (ret)
		fscache_uncache_page(ci->fscache, page);
}
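
/*
 * Called when a page is being invalidated: wait for any in-flight write to
 * the cache to finish, then drop the page from the cache.
 */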
void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (!PageFsCache(page))
		return;

	fscache_wait_on_page_write(ci->fscache, page);
	fscache_uncache_page(ci->fscache, page);
}
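
/*
 * Tear down the per-superblock cookie at unmount time and remove the
 * matching entry from ceph_fscache_list.
 */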
void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
{
	if (fscache_cookie_valid(fsc->fscache)) {
		struct ceph_fscache_entry *ent;
		bool found = false;

		mutex_lock(&ceph_fscache_lock);
		list_for_each_entry(ent, &ceph_fscache_list, list) {
			if (ent->fscache == fsc->fscache) {
				list_del(&ent->list);
				kfree(ent);
				found = true;
				break;
			}
		}
		WARN_ON_ONCE(!found);
		mutex_unlock(&ceph_fscache_lock);

		__fscache_relinquish_cookie(fsc->fscache, NULL, false);
	}
	fsc->fscache = NULL;
}
/*
 * caller should hold CEPH_CAP_FILE_{RD,CACHE}
 */
void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
{
	if (cache_valid(ci))
		return;

	/* reuse i_truncate_mutex. There should be no pending
	 * truncate while the caller holds CEPH_CAP_FILE_RD */
	mutex_lock(&ci->i_truncate_mutex);
	if (!cache_valid(ci)) {
		if (fscache_check_consistency(ci->fscache, &ci->i_vino))
			fscache_invalidate(ci->fscache);
		spin_lock(&ci->i_ceph_lock);
		ci->i_fscache_gen = ci->i_rdcache_gen;
		spin_unlock(&ci->i_ceph_lock);
	}
	mutex_unlock(&ci->i_truncate_mutex);
}