
cifsd: add file operations

This adds file operations and a buffer pool for cifsd.
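
A minimal sketch of how the response-buffer helpers below are meant to be
used (illustration only, not part of this patch; the function name is
hypothetical):

static int example_build_response(void **out)
{
        void *buf = ksmbd_alloc_response(4096);

        if (!buf)
                return -ENOMEM;
        /* grow the response; on success the old contents are copied and the old buffer freed */
        buf = ksmbd_realloc_response(buf, 4096, 65536);
        *out = buf;
        return 0;
}

The caller later releases the buffer with ksmbd_free_response().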

Signed-off-by: Namjae Jeon <namjae.jeon@samsung.com>
Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Hyunchul Lee <hyc.lee@gmail.com>
Acked-by: Ronnie Sahlberg <lsahlber@redhat.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
Author:    Namjae Jeon <namjae.jeon@samsung.com>  2021-03-16 10:50:04 +09:00
Committer: Steve French <stfrench@microsoft.com>
parent e2f34481b2
commit f441584858
6 changed files with 3691 additions and 0 deletions

fs/cifsd/buffer_pool.c (new file, 292 lines)

@@ -0,0 +1,292 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2018 Samsung Electronics Co., Ltd.
*/
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rwlock.h>
#include "glob.h"
#include "buffer_pool.h"
#include "connection.h"
#include "mgmt/ksmbd_ida.h"
static struct kmem_cache *filp_cache;
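/*
* A pooled buffer: @sz records the payload size so the buffer can be
* returned to the matching size class, and the payload itself follows
* the header in @buffer.
*/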
struct wm {
struct list_head list;
unsigned int sz;
char buffer[0];
};
struct wm_list {
struct list_head list;
unsigned int sz;
spinlock_t wm_lock;
int avail_wm;
struct list_head idle_wm;
wait_queue_head_t wm_wait;
};
static LIST_HEAD(wm_lists);
static DEFINE_RWLOCK(wm_lists_lock);
void *ksmbd_alloc(size_t size)
{
return kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
}
void ksmbd_free(void *ptr)
{
kvfree(ptr);
}
static struct wm *wm_alloc(size_t sz, gfp_t flags)
{
struct wm *wm;
size_t alloc_sz = sz + sizeof(struct wm);
wm = kvmalloc(alloc_sz, flags);
if (!wm)
return NULL;
wm->sz = sz;
return wm;
}
static int register_wm_size_class(size_t sz)
{
struct wm_list *l, *nl;
nl = kvmalloc(sizeof(struct wm_list), GFP_KERNEL);
if (!nl)
return -ENOMEM;
nl->sz = sz;
spin_lock_init(&nl->wm_lock);
INIT_LIST_HEAD(&nl->idle_wm);
INIT_LIST_HEAD(&nl->list);
init_waitqueue_head(&nl->wm_wait);
nl->avail_wm = 0;
write_lock(&wm_lists_lock);
list_for_each_entry(l, &wm_lists, list) {
if (l->sz == sz) {
write_unlock(&wm_lists_lock);
kvfree(nl);
return 0;
}
}
list_add(&nl->list, &wm_lists);
write_unlock(&wm_lists_lock);
return 0;
}
static struct wm_list *match_wm_list(size_t size)
{
struct wm_list *l, *rl = NULL;
read_lock(&wm_lists_lock);
list_for_each_entry(l, &wm_lists, list) {
if (l->sz == size) {
rl = l;
break;
}
}
read_unlock(&wm_lists_lock);
return rl;
}
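/*
* Find (or lazily register) the size class for @size and take a buffer
* from it: reuse an idle buffer if one is available, allocate a new one
* as long as the class does not already hold more buffers than there are
* online CPUs, and otherwise sleep until another user releases one.
*/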
static struct wm *find_wm(size_t size)
{
struct wm_list *wm_list;
struct wm *wm;
wm_list = match_wm_list(size);
if (!wm_list) {
if (register_wm_size_class(size))
return NULL;
wm_list = match_wm_list(size);
}
if (!wm_list)
return NULL;
while (1) {
spin_lock(&wm_list->wm_lock);
if (!list_empty(&wm_list->idle_wm)) {
wm = list_entry(wm_list->idle_wm.next,
struct wm,
list);
list_del(&wm->list);
spin_unlock(&wm_list->wm_lock);
return wm;
}
if (wm_list->avail_wm > num_online_cpus()) {
spin_unlock(&wm_list->wm_lock);
wait_event(wm_list->wm_wait,
!list_empty(&wm_list->idle_wm));
continue;
}
wm_list->avail_wm++;
spin_unlock(&wm_list->wm_lock);
wm = wm_alloc(size, GFP_KERNEL);
if (!wm) {
spin_lock(&wm_list->wm_lock);
wm_list->avail_wm--;
spin_unlock(&wm_list->wm_lock);
wait_event(wm_list->wm_wait,
!list_empty(&wm_list->idle_wm));
continue;
}
break;
}
return wm;
}
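/*
* Return a buffer to its size class: keep it on the idle list while the
* class is within the per-CPU cap, otherwise drop it and free the memory.
*/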
static void release_wm(struct wm *wm, struct wm_list *wm_list)
{
if (!wm)
return;
spin_lock(&wm_list->wm_lock);
if (wm_list->avail_wm <= num_online_cpus()) {
list_add(&wm->list, &wm_list->idle_wm);
spin_unlock(&wm_list->wm_lock);
wake_up(&wm_list->wm_wait);
return;
}
wm_list->avail_wm--;
spin_unlock(&wm_list->wm_lock);
ksmbd_free(wm);
}
static void wm_list_free(struct wm_list *l)
{
struct wm *wm;
while (!list_empty(&l->idle_wm)) {
wm = list_entry(l->idle_wm.next, struct wm, list);
list_del(&wm->list);
kvfree(wm);
}
kvfree(l);
}
static void wm_lists_destroy(void)
{
struct wm_list *l;
while (!list_empty(&wm_lists)) {
l = list_entry(wm_lists.next, struct wm_list, list);
list_del(&l->list);
wm_list_free(l);
}
}
void ksmbd_free_request(void *addr)
{
kvfree(addr);
}
void *ksmbd_alloc_request(size_t size)
{
return kvmalloc(size, GFP_KERNEL);
}
void ksmbd_free_response(void *buffer)
{
kvfree(buffer);
}
void *ksmbd_alloc_response(size_t size)
{
return kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
}
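/* Hand out the payload of a pooled buffer; pair with ksmbd_release_buffer(). */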
void *ksmbd_find_buffer(size_t size)
{
struct wm *wm;
wm = find_wm(size);
WARN_ON(!wm);
if (wm)
return wm->buffer;
return NULL;
}
void ksmbd_release_buffer(void *buffer)
{
struct wm_list *wm_list;
struct wm *wm;
if (!buffer)
return;
wm = container_of(buffer, struct wm, buffer);
wm_list = match_wm_list(wm->sz);
WARN_ON(!wm_list);
if (wm_list)
release_wm(wm, wm_list);
}
void *ksmbd_realloc_response(void *ptr, size_t old_sz, size_t new_sz)
{
size_t sz = min(old_sz, new_sz);
void *nptr;
nptr = ksmbd_alloc_response(new_sz);
if (!nptr)
return ptr;
memcpy(nptr, ptr, sz);
ksmbd_free_response(ptr);
return nptr;
}
void ksmbd_free_file_struct(void *filp)
{
kmem_cache_free(filp_cache, filp);
}
void *ksmbd_alloc_file_struct(void)
{
return kmem_cache_zalloc(filp_cache, GFP_KERNEL);
}
void ksmbd_destroy_buffer_pools(void)
{
wm_lists_destroy();
ksmbd_work_pool_destroy();
kmem_cache_destroy(filp_cache);
}
int ksmbd_init_buffer_pools(void)
{
if (ksmbd_work_pool_init())
goto out;
filp_cache = kmem_cache_create("ksmbd_file_cache",
sizeof(struct ksmbd_file), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!filp_cache)
goto out;
return 0;
out:
ksmbd_err("failed to allocate memory\n");
ksmbd_destroy_buffer_pools();
return -ENOMEM;
}

fs/cifsd/buffer_pool.h (new file, 28 lines)

@@ -0,0 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2018 Samsung Electronics Co., Ltd.
*/
#ifndef __KSMBD_BUFFER_POOL_H__
#define __KSMBD_BUFFER_POOL_H__
void *ksmbd_find_buffer(size_t size);
void ksmbd_release_buffer(void *buffer);
void *ksmbd_alloc(size_t size);
void ksmbd_free(void *ptr);
void ksmbd_free_request(void *addr);
void *ksmbd_alloc_request(size_t size);
void ksmbd_free_response(void *buffer);
void *ksmbd_alloc_response(size_t size);
void *ksmbd_realloc_response(void *ptr, size_t old_sz, size_t new_sz);
void ksmbd_free_file_struct(void *filp);
void *ksmbd_alloc_file_struct(void);
void ksmbd_destroy_buffer_pools(void);
int ksmbd_init_buffer_pools(void);
#endif /* __KSMBD_BUFFER_POOL_H__ */

fs/cifsd/vfs.c (new file, 1989 lines)

File diff suppressed because it is too large.

fs/cifsd/vfs.h (new file, 314 lines)

@@ -0,0 +1,314 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
* Copyright (C) 2018 Samsung Electronics Co., Ltd.
*/
#ifndef __KSMBD_VFS_H__
#define __KSMBD_VFS_H__
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <uapi/linux/xattr.h>
#include <linux/posix_acl.h>
#include "smbacl.h"
/* STREAM XATTR PREFIX */
#define STREAM_PREFIX "DosStream."
#define STREAM_PREFIX_LEN (sizeof(STREAM_PREFIX) - 1)
#define XATTR_NAME_STREAM (XATTR_USER_PREFIX STREAM_PREFIX)
#define XATTR_NAME_STREAM_LEN (sizeof(XATTR_NAME_STREAM) - 1)
enum {
XATTR_DOSINFO_ATTRIB = 0x00000001,
XATTR_DOSINFO_EA_SIZE = 0x00000002,
XATTR_DOSINFO_SIZE = 0x00000004,
XATTR_DOSINFO_ALLOC_SIZE = 0x00000008,
XATTR_DOSINFO_CREATE_TIME = 0x00000010,
XATTR_DOSINFO_CHANGE_TIME = 0x00000020,
XATTR_DOSINFO_ITIME = 0x00000040
};
struct xattr_dos_attrib {
__u16 version;
__u32 flags;
__u32 attr;
__u32 ea_size;
__u64 size;
__u64 alloc_size;
__u64 create_time;
__u64 change_time;
__u64 itime;
};
/* DOS ATTRIBUTE XATTR PREFIX */
#define DOS_ATTRIBUTE_PREFIX "DOSATTRIB"
#define DOS_ATTRIBUTE_PREFIX_LEN (sizeof(DOS_ATTRIBUTE_PREFIX) - 1)
#define XATTR_NAME_DOS_ATTRIBUTE \
(XATTR_USER_PREFIX DOS_ATTRIBUTE_PREFIX)
#define XATTR_NAME_DOS_ATTRIBUTE_LEN \
(sizeof(XATTR_USER_PREFIX DOS_ATTRIBUTE_PREFIX) - 1)
#define XATTR_SD_HASH_TYPE_SHA256 0x1
#define XATTR_SD_HASH_SIZE 64
#define SMB_ACL_READ 4
#define SMB_ACL_WRITE 2
#define SMB_ACL_EXECUTE 1
enum {
SMB_ACL_TAG_INVALID = 0,
SMB_ACL_USER,
SMB_ACL_USER_OBJ,
SMB_ACL_GROUP,
SMB_ACL_GROUP_OBJ,
SMB_ACL_OTHER,
SMB_ACL_MASK
};
struct xattr_acl_entry {
int type;
uid_t uid;
gid_t gid;
mode_t perm;
};
struct xattr_smb_acl {
int count;
int next;
struct xattr_acl_entry entries[0];
};
struct xattr_ntacl {
__u16 version;
void *sd_buf;
__u32 sd_size;
__u16 hash_type;
__u8 desc[10];
__u16 desc_len;
__u64 current_time;
__u8 hash[XATTR_SD_HASH_SIZE];
__u8 posix_acl_hash[XATTR_SD_HASH_SIZE];
};
/* SECURITY DESCRIPTOR XATTR PREFIX */
#define SD_PREFIX "NTACL"
#define SD_PREFIX_LEN (sizeof(SD_PREFIX) - 1)
#define XATTR_NAME_SD \
(XATTR_SECURITY_PREFIX SD_PREFIX)
#define XATTR_NAME_SD_LEN \
(sizeof(XATTR_SECURITY_PREFIX SD_PREFIX) - 1)
/* CreateOptions */
/* If this flag is set, it must not be a file; valid for directories only */
#define FILE_DIRECTORY_FILE_LE cpu_to_le32(0x00000001)
#define FILE_WRITE_THROUGH_LE cpu_to_le32(0x00000002)
#define FILE_SEQUENTIAL_ONLY_LE cpu_to_le32(0x00000004)
/* Should not buffer on server */
#define FILE_NO_INTERMEDIATE_BUFFERING_LE cpu_to_le32(0x00000008)
/* MBZ */
#define FILE_SYNCHRONOUS_IO_ALERT_LE cpu_to_le32(0x00000010)
/* MBZ */
#define FILE_SYNCHRONOUS_IO_NONALERT_LE cpu_to_le32(0x00000020)
/* Flag must not be set for a directory */
#define FILE_NON_DIRECTORY_FILE_LE cpu_to_le32(0x00000040)
/* Should be zero */
#define CREATE_TREE_CONNECTION cpu_to_le32(0x00000080)
#define FILE_COMPLETE_IF_OPLOCKED_LE cpu_to_le32(0x00000100)
#define FILE_NO_EA_KNOWLEDGE_LE cpu_to_le32(0x00000200)
#define FILE_OPEN_REMOTE_INSTANCE cpu_to_le32(0x00000400)
/*
* Documented as the obsolete "open for recovery" flag; should be zero
* in any case.
*/
#define CREATE_OPEN_FOR_RECOVERY cpu_to_le32(0x00000400)
#define FILE_RANDOM_ACCESS_LE cpu_to_le32(0x00000800)
#define FILE_DELETE_ON_CLOSE_LE cpu_to_le32(0x00001000)
#define FILE_OPEN_BY_FILE_ID_LE cpu_to_le32(0x00002000)
#define FILE_OPEN_FOR_BACKUP_INTENT_LE cpu_to_le32(0x00004000)
#define FILE_NO_COMPRESSION_LE cpu_to_le32(0x00008000)
/* Should be zero */
#define FILE_OPEN_REQUIRING_OPLOCK cpu_to_le32(0x00010000)
#define FILE_DISALLOW_EXCLUSIVE cpu_to_le32(0x00020000)
#define FILE_RESERVE_OPFILTER_LE cpu_to_le32(0x00100000)
#define FILE_OPEN_REPARSE_POINT_LE cpu_to_le32(0x00200000)
#define FILE_OPEN_NO_RECALL_LE cpu_to_le32(0x00400000)
/* Should be zero */
#define FILE_OPEN_FOR_FREE_SPACE_QUERY_LE cpu_to_le32(0x00800000)
#define CREATE_OPTIONS_MASK cpu_to_le32(0x00FFFFFF)
#define CREATE_OPTION_READONLY 0x10000000
/* system; NB: not sent over the wire */
#define CREATE_OPTION_SPECIAL 0x20000000
struct ksmbd_work;
struct ksmbd_file;
struct ksmbd_conn;
struct ksmbd_dir_info {
const char *name;
char *wptr;
char *rptr;
int name_len;
int out_buf_len;
int num_entry;
int data_count;
int last_entry_offset;
bool hide_dot_file;
int flags;
};
struct ksmbd_readdir_data {
struct dir_context ctx;
union {
void *private;
char *dirent;
};
unsigned int used;
unsigned int dirent_count;
unsigned int file_attr;
};
/* ksmbd kstat wrapper to get valid create time when reading dir entry */
struct ksmbd_kstat {
struct kstat *kstat;
unsigned long long create_time;
__le32 file_attributes;
};
struct ksmbd_fs_sector_size {
unsigned short logical_sector_size;
unsigned int physical_sector_size;
unsigned int optimal_io_size;
};
int ksmbd_vfs_inode_permission(struct dentry *dentry, int acc_mode,
bool delete);
int ksmbd_vfs_query_maximal_access(struct dentry *dentry, __le32 *daccess);
int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode);
int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode);
int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp,
size_t count, loff_t *pos);
int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
char *buf, size_t count, loff_t *pos, bool sync, ssize_t *written);
int ksmbd_vfs_fsync(struct ksmbd_work *work, uint64_t fid, uint64_t p_id);
int ksmbd_vfs_remove_file(struct ksmbd_work *work, char *name);
int ksmbd_vfs_link(struct ksmbd_work *work,
const char *oldname, const char *newname);
int ksmbd_vfs_getattr(struct path *path, struct kstat *stat);
int ksmbd_vfs_symlink(const char *name, const char *symname);
int ksmbd_vfs_readlink(struct path *path, char *buf, int lenp);
int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
char *newname);
int ksmbd_vfs_rename_slowpath(struct ksmbd_work *work,
char *oldname, char *newname);
int ksmbd_vfs_truncate(struct ksmbd_work *work, const char *name,
struct ksmbd_file *fp, loff_t size);
struct srv_copychunk;
int ksmbd_vfs_copy_file_ranges(struct ksmbd_work *work,
struct ksmbd_file *src_fp,
struct ksmbd_file *dst_fp,
struct srv_copychunk *chunks,
unsigned int chunk_count,
unsigned int *chunk_count_written,
unsigned int *chunk_size_written,
loff_t *total_size_written);
struct ksmbd_file *ksmbd_vfs_dentry_open(struct ksmbd_work *work,
const struct path *path,
int flags,
__le32 option,
int fexist);
ssize_t ksmbd_vfs_listxattr(struct dentry *dentry, char **list);
ssize_t ksmbd_vfs_getxattr(struct dentry *dentry,
char *xattr_name,
char **xattr_buf);
ssize_t ksmbd_vfs_casexattr_len(struct dentry *dentry,
char *attr_name,
int attr_name_len);
int ksmbd_vfs_setxattr(struct dentry *dentry,
const char *attr_name,
const void *attr_value,
size_t attr_size,
int flags);
int ksmbd_vfs_fsetxattr(const char *filename,
const char *attr_name,
const void *attr_value,
size_t attr_size,
int flags);
int ksmbd_vfs_xattr_stream_name(char *stream_name,
char **xattr_stream_name,
size_t *xattr_stream_name_size,
int s_type);
int ksmbd_vfs_truncate_xattr(struct dentry *dentry, int wo_streams);
int ksmbd_vfs_remove_xattr(struct dentry *dentry, char *attr_name);
void ksmbd_vfs_xattr_free(char *xattr);
int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path,
bool caseless);
int ksmbd_vfs_empty_dir(struct ksmbd_file *fp);
void ksmbd_vfs_set_fadvise(struct file *filp, __le32 option);
int ksmbd_vfs_lock(struct file *filp, int cmd, struct file_lock *flock);
int ksmbd_vfs_readdir(struct file *file, struct ksmbd_readdir_data *rdata);
int ksmbd_vfs_alloc_size(struct ksmbd_work *work,
struct ksmbd_file *fp,
loff_t len);
int ksmbd_vfs_zero_data(struct ksmbd_work *work,
struct ksmbd_file *fp,
loff_t off,
loff_t len);
struct file_allocated_range_buffer;
int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
struct file_allocated_range_buffer *ranges,
int in_count, int *out_count);
int ksmbd_vfs_unlink(struct dentry *dir, struct dentry *dentry);
unsigned short ksmbd_vfs_logical_sector_size(struct inode *inode);
void ksmbd_vfs_smb2_sector_size(struct inode *inode,
struct ksmbd_fs_sector_size *fs_ss);
void *ksmbd_vfs_init_kstat(char **p, struct ksmbd_kstat *ksmbd_kstat);
int ksmbd_vfs_fill_dentry_attrs(struct ksmbd_work *work,
struct dentry *dentry,
struct ksmbd_kstat *ksmbd_kstat);
int ksmbd_vfs_posix_lock_wait(struct file_lock *flock);
int ksmbd_vfs_posix_lock_wait_timeout(struct file_lock *flock, long timeout);
void ksmbd_vfs_posix_lock_unblock(struct file_lock *flock);
int ksmbd_vfs_remove_acl_xattrs(struct dentry *dentry);
int ksmbd_vfs_remove_sd_xattrs(struct dentry *dentry);
int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn, struct dentry *dentry,
struct smb_ntsd *pntsd, int len);
int ksmbd_vfs_get_sd_xattr(struct ksmbd_conn *conn, struct dentry *dentry,
struct smb_ntsd **pntsd);
int ksmbd_vfs_set_dos_attrib_xattr(struct dentry *dentry,
struct xattr_dos_attrib *da);
int ksmbd_vfs_get_dos_attrib_xattr(struct dentry *dentry,
struct xattr_dos_attrib *da);
struct posix_acl *ksmbd_vfs_posix_acl_alloc(int count, gfp_t flags);
struct posix_acl *ksmbd_vfs_get_acl(struct inode *inode, int type);
int ksmbd_vfs_set_posix_acl(struct inode *inode, int type,
struct posix_acl *acl);
int ksmbd_vfs_set_init_posix_acl(struct inode *inode);
int ksmbd_vfs_inherit_posix_acl(struct inode *inode,
struct inode *parent_inode);
#endif /* __KSMBD_VFS_H__ */

fs/cifsd/vfs_cache.c (new file, 855 lines)

@@ -0,0 +1,855 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
* Copyright (C) 2019 Samsung Electronics Co., Ltd.
*/
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
/* @FIXME */
#include "glob.h"
#include "vfs_cache.h"
#include "buffer_pool.h"
#include "oplock.h"
#include "vfs.h"
#include "connection.h"
#include "mgmt/tree_connect.h"
#include "mgmt/user_session.h"
/* @FIXME */
#include "smb_common.h"
#define S_DEL_PENDING 1
#define S_DEL_ON_CLS 2
#define S_DEL_ON_CLS_STREAM 8
static unsigned int inode_hash_mask __read_mostly;
static unsigned int inode_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static DEFINE_RWLOCK(inode_hash_lock);
static struct ksmbd_file_table global_ft;
static atomic_long_t fd_limit;
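/* Module-wide cap on open fds, bounded by the system-wide max_files limit. */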
void ksmbd_set_fd_limit(unsigned long limit)
{
limit = min(limit, get_max_files());
atomic_long_set(&fd_limit, limit);
}
static bool fd_limit_depleted(void)
{
long v = atomic_long_dec_return(&fd_limit);
if (v >= 0)
return false;
atomic_long_inc(&fd_limit);
return true;
}
static void fd_limit_close(void)
{
atomic_long_inc(&fd_limit);
}
/*
* INODE hash
*/
static unsigned long inode_hash(struct super_block *sb, unsigned long hashval)
{
unsigned long tmp;
tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
L1_CACHE_BYTES;
tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> inode_hash_shift);
return tmp & inode_hash_mask;
}
static struct ksmbd_inode *__ksmbd_inode_lookup(struct inode *inode)
{
struct hlist_head *head = inode_hashtable +
inode_hash(inode->i_sb, inode->i_ino);
struct ksmbd_inode *ci = NULL, *ret_ci = NULL;
hlist_for_each_entry(ci, head, m_hash) {
if (ci->m_inode == inode) {
if (atomic_inc_not_zero(&ci->m_count))
ret_ci = ci;
break;
}
}
return ret_ci;
}
static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp)
{
return __ksmbd_inode_lookup(FP_INODE(fp));
}
static struct ksmbd_inode *ksmbd_inode_lookup_by_vfsinode(struct inode *inode)
{
struct ksmbd_inode *ci;
read_lock(&inode_hash_lock);
ci = __ksmbd_inode_lookup(inode);
read_unlock(&inode_hash_lock);
return ci;
}
int ksmbd_query_inode_status(struct inode *inode)
{
struct ksmbd_inode *ci;
int ret = KSMBD_INODE_STATUS_UNKNOWN;
read_lock(&inode_hash_lock);
ci = __ksmbd_inode_lookup(inode);
if (ci) {
ret = KSMBD_INODE_STATUS_OK;
if (ci->m_flags & S_DEL_PENDING)
ret = KSMBD_INODE_STATUS_PENDING_DELETE;
atomic_dec(&ci->m_count);
}
read_unlock(&inode_hash_lock);
return ret;
}
bool ksmbd_inode_pending_delete(struct ksmbd_file *fp)
{
return (fp->f_ci->m_flags & S_DEL_PENDING);
}
void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp)
{
fp->f_ci->m_flags |= S_DEL_PENDING;
}
void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp)
{
fp->f_ci->m_flags &= ~S_DEL_PENDING;
}
void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
int file_info)
{
if (ksmbd_stream_fd(fp)) {
fp->f_ci->m_flags |= S_DEL_ON_CLS_STREAM;
return;
}
fp->f_ci->m_flags |= S_DEL_ON_CLS;
}
static void ksmbd_inode_hash(struct ksmbd_inode *ci)
{
struct hlist_head *b = inode_hashtable +
inode_hash(ci->m_inode->i_sb, ci->m_inode->i_ino);
hlist_add_head(&ci->m_hash, b);
}
static void ksmbd_inode_unhash(struct ksmbd_inode *ci)
{
write_lock(&inode_hash_lock);
hlist_del_init(&ci->m_hash);
write_unlock(&inode_hash_lock);
}
static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
{
ci->m_inode = FP_INODE(fp);
atomic_set(&ci->m_count, 1);
atomic_set(&ci->op_count, 0);
atomic_set(&ci->sop_count, 0);
ci->m_flags = 0;
ci->m_fattr = 0;
INIT_LIST_HEAD(&ci->m_fp_list);
INIT_LIST_HEAD(&ci->m_op_list);
rwlock_init(&ci->m_lock);
return 0;
}
static struct ksmbd_inode *ksmbd_inode_get(struct ksmbd_file *fp)
{
struct ksmbd_inode *ci, *tmpci;
int rc;
read_lock(&inode_hash_lock);
ci = ksmbd_inode_lookup(fp);
read_unlock(&inode_hash_lock);
if (ci)
return ci;
ci = kmalloc(sizeof(struct ksmbd_inode), GFP_KERNEL);
if (!ci)
return NULL;
rc = ksmbd_inode_init(ci, fp);
if (rc) {
ksmbd_err("inode initialized failed\n");
kfree(ci);
return NULL;
}
write_lock(&inode_hash_lock);
tmpci = ksmbd_inode_lookup(fp);
if (!tmpci) {
ksmbd_inode_hash(ci);
} else {
kfree(ci);
ci = tmpci;
}
write_unlock(&inode_hash_lock);
return ci;
}
static void ksmbd_inode_free(struct ksmbd_inode *ci)
{
ksmbd_inode_unhash(ci);
kfree(ci);
}
static void ksmbd_inode_put(struct ksmbd_inode *ci)
{
if (atomic_dec_and_test(&ci->m_count))
ksmbd_inode_free(ci);
}
int __init ksmbd_inode_hash_init(void)
{
unsigned int loop;
unsigned long numentries = 16384;
unsigned long bucketsize = sizeof(struct hlist_head);
unsigned long size;
inode_hash_shift = ilog2(numentries);
inode_hash_mask = (1 << inode_hash_shift) - 1;
size = bucketsize << inode_hash_shift;
/* init master fp hash table */
inode_hashtable = vmalloc(size);
if (!inode_hashtable)
return -ENOMEM;
for (loop = 0; loop < (1U << inode_hash_shift); loop++)
INIT_HLIST_HEAD(&inode_hashtable[loop]);
return 0;
}
void __exit ksmbd_release_inode_hash(void)
{
vfree(inode_hashtable);
}
static void __ksmbd_inode_close(struct ksmbd_file *fp)
{
struct dentry *dir, *dentry;
struct ksmbd_inode *ci = fp->f_ci;
int err;
struct file *filp;
filp = fp->filp;
if (ksmbd_stream_fd(fp) && (ci->m_flags & S_DEL_ON_CLS_STREAM)) {
ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
err = ksmbd_vfs_remove_xattr(filp->f_path.dentry,
fp->stream.name);
if (err)
ksmbd_err("remove xattr failed : %s\n",
fp->stream.name);
}
if (atomic_dec_and_test(&ci->m_count)) {
write_lock(&ci->m_lock);
if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) {
dentry = filp->f_path.dentry;
dir = dentry->d_parent;
ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING);
write_unlock(&ci->m_lock);
ksmbd_vfs_unlink(dir, dentry);
write_lock(&ci->m_lock);
}
write_unlock(&ci->m_lock);
ksmbd_inode_free(ci);
}
}
static void __ksmbd_remove_durable_fd(struct ksmbd_file *fp)
{
if (!HAS_FILE_ID(fp->persistent_id))
return;
write_lock(&global_ft.lock);
idr_remove(global_ft.idr, fp->persistent_id);
write_unlock(&global_ft.lock);
}
static void __ksmbd_remove_fd(struct ksmbd_file_table *ft,
struct ksmbd_file *fp)
{
if (!HAS_FILE_ID(fp->volatile_id))
return;
write_lock(&fp->f_ci->m_lock);
list_del_init(&fp->node);
write_unlock(&fp->f_ci->m_lock);
write_lock(&ft->lock);
idr_remove(ft->idr, fp->volatile_id);
write_unlock(&ft->lock);
}
static void __ksmbd_close_fd(struct ksmbd_file_table *ft,
struct ksmbd_file *fp)
{
struct file *filp;
fd_limit_close();
__ksmbd_remove_durable_fd(fp);
__ksmbd_remove_fd(ft, fp);
close_id_del_oplock(fp);
filp = fp->filp;
__ksmbd_inode_close(fp);
if (!IS_ERR_OR_NULL(filp))
fput(filp);
kfree(fp->filename);
if (ksmbd_stream_fd(fp))
kfree(fp->stream.name);
ksmbd_free_file_struct(fp);
}
static struct ksmbd_file *ksmbd_fp_get(struct ksmbd_file *fp)
{
if (!atomic_inc_not_zero(&fp->refcount))
return NULL;
return fp;
}
static struct ksmbd_file *__ksmbd_lookup_fd(struct ksmbd_file_table *ft,
unsigned int id)
{
bool unclaimed = true;
struct ksmbd_file *fp;
read_lock(&ft->lock);
fp = idr_find(ft->idr, id);
if (fp)
fp = ksmbd_fp_get(fp);
if (fp && fp->f_ci) {
read_lock(&fp->f_ci->m_lock);
unclaimed = list_empty(&fp->node);
read_unlock(&fp->f_ci->m_lock);
}
read_unlock(&ft->lock);
if (fp && unclaimed) {
atomic_dec(&fp->refcount);
return NULL;
}
return fp;
}
static void __put_fd_final(struct ksmbd_work *work,
struct ksmbd_file *fp)
{
__ksmbd_close_fd(&work->sess->file_table, fp);
atomic_dec(&work->conn->stats.open_files_count);
}
static void set_close_state_blocked_works(struct ksmbd_file *fp)
{
struct ksmbd_work *cancel_work, *ctmp;
spin_lock(&fp->f_lock);
list_for_each_entry_safe(cancel_work, ctmp, &fp->blocked_works,
fp_entry) {
list_del(&cancel_work->fp_entry);
cancel_work->state = KSMBD_WORK_CLOSED;
cancel_work->cancel_fn(cancel_work->cancel_argv);
}
spin_unlock(&fp->f_lock);
}
int ksmbd_close_fd(struct ksmbd_work *work, unsigned int id)
{
struct ksmbd_file *fp;
struct ksmbd_file_table *ft;
if (!HAS_FILE_ID(id))
return 0;
ft = &work->sess->file_table;
read_lock(&ft->lock);
fp = idr_find(ft->idr, id);
if (fp) {
set_close_state_blocked_works(fp);
if (!atomic_dec_and_test(&fp->refcount))
fp = NULL;
}
read_unlock(&ft->lock);
if (!fp)
return -EINVAL;
__put_fd_final(work, fp);
return 0;
}
void ksmbd_fd_put(struct ksmbd_work *work,
struct ksmbd_file *fp)
{
if (!fp)
return;
if (!atomic_dec_and_test(&fp->refcount))
return;
__put_fd_final(work, fp);
}
static bool __sanity_check(struct ksmbd_tree_connect *tcon,
struct ksmbd_file *fp)
{
if (!fp)
return false;
if (fp->tcon != tcon)
return false;
return true;
}
struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work,
unsigned int id)
{
return __ksmbd_lookup_fd(&work->sess->file_table, id);
}
struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work,
unsigned int id)
{
struct ksmbd_file *fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
if (__sanity_check(work->tcon, fp))
return fp;
ksmbd_fd_put(work, fp);
return NULL;
}
struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work,
unsigned int id,
unsigned int pid)
{
struct ksmbd_file *fp;
if (!HAS_FILE_ID(id)) {
id = work->compound_fid;
pid = work->compound_pfid;
}
if (!HAS_FILE_ID(id))
return NULL;
fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
if (!__sanity_check(work->tcon, fp)) {
ksmbd_fd_put(work, fp);
return NULL;
}
if (fp->persistent_id != pid) {
ksmbd_fd_put(work, fp);
return NULL;
}
return fp;
}
struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id)
{
return __ksmbd_lookup_fd(&global_ft, id);
}
int ksmbd_close_fd_app_id(struct ksmbd_work *work,
char *app_id)
{
struct ksmbd_file *fp = NULL;
unsigned int id;
read_lock(&global_ft.lock);
idr_for_each_entry(global_ft.idr, fp, id) {
if (!memcmp(fp->app_instance_id,
app_id,
SMB2_CREATE_GUID_SIZE)) {
if (!atomic_dec_and_test(&fp->refcount))
fp = NULL;
break;
}
}
read_unlock(&global_ft.lock);
if (!fp)
return -EINVAL;
__put_fd_final(work, fp);
return 0;
}
struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid)
{
struct ksmbd_file *fp = NULL;
unsigned int id;
read_lock(&global_ft.lock);
idr_for_each_entry(global_ft.idr, fp, id) {
if (!memcmp(fp->create_guid,
cguid,
SMB2_CREATE_GUID_SIZE)) {
fp = ksmbd_fp_get(fp);
break;
}
}
read_unlock(&global_ft.lock);
return fp;
}
struct ksmbd_file *ksmbd_lookup_fd_filename(struct ksmbd_work *work,
char *filename)
{
struct ksmbd_file *fp = NULL;
unsigned int id;
read_lock(&work->sess->file_table.lock);
idr_for_each_entry(work->sess->file_table.idr, fp, id) {
if (!strcmp(fp->filename, filename)) {
fp = ksmbd_fp_get(fp);
break;
}
}
read_unlock(&work->sess->file_table.lock);
return fp;
}
struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode)
{
struct ksmbd_file *lfp;
struct ksmbd_inode *ci;
struct list_head *cur;
ci = ksmbd_inode_lookup_by_vfsinode(inode);
if (!ci)
return NULL;
read_lock(&ci->m_lock);
list_for_each(cur, &ci->m_fp_list) {
lfp = list_entry(cur, struct ksmbd_file, node);
if (inode == FP_INODE(lfp)) {
atomic_dec(&ci->m_count);
read_unlock(&ci->m_lock);
return lfp;
}
}
atomic_dec(&ci->m_count);
read_unlock(&ci->m_lock);
return NULL;
}
#define OPEN_ID_TYPE_VOLATILE_ID (0)
#define OPEN_ID_TYPE_PERSISTENT_ID (1)
static void __open_id_set(struct ksmbd_file *fp, unsigned int id, int type)
{
if (type == OPEN_ID_TYPE_VOLATILE_ID)
fp->volatile_id = id;
if (type == OPEN_ID_TYPE_PERSISTENT_ID)
fp->persistent_id = id;
}
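/*
* Allocate a volatile or persistent id for @fp from the table's IDR.
* Allocation is cyclic so recently freed ids are not reused immediately,
* and a volatile id is also charged against the module-wide fd limit.
*/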
static int __open_id(struct ksmbd_file_table *ft,
struct ksmbd_file *fp,
int type)
{
unsigned int id = 0;
int ret;
if (type == OPEN_ID_TYPE_VOLATILE_ID && fd_limit_depleted()) {
__open_id_set(fp, KSMBD_NO_FID, type);
return -EMFILE;
}
idr_preload(GFP_KERNEL);
write_lock(&ft->lock);
ret = idr_alloc_cyclic(ft->idr, fp, 0, INT_MAX, GFP_NOWAIT);
if (ret >= 0) {
id = ret;
ret = 0;
} else {
id = KSMBD_NO_FID;
fd_limit_close();
}
__open_id_set(fp, id, type);
write_unlock(&ft->lock);
idr_preload_end();
return ret;
}
unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp)
{
__open_id(&global_ft, fp, OPEN_ID_TYPE_PERSISTENT_ID);
return fp->persistent_id;
}
struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work,
struct file *filp)
{
struct ksmbd_file *fp;
int ret;
fp = ksmbd_alloc_file_struct();
if (!fp) {
ksmbd_err("Failed to allocate memory\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&fp->blocked_works);
INIT_LIST_HEAD(&fp->node);
spin_lock_init(&fp->f_lock);
atomic_set(&fp->refcount, 1);
fp->filp = filp;
fp->conn = work->sess->conn;
fp->tcon = work->tcon;
fp->volatile_id = KSMBD_NO_FID;
fp->persistent_id = KSMBD_NO_FID;
fp->f_ci = ksmbd_inode_get(fp);
if (!fp->f_ci) {
ksmbd_free_file_struct(fp);
return ERR_PTR(-ENOMEM);
}
ret = __open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
if (ret) {
ksmbd_inode_put(fp->f_ci);
ksmbd_free_file_struct(fp);
return ERR_PTR(ret);
}
atomic_inc(&work->conn->stats.open_files_count);
return fp;
}
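/*
* A handle can be reclaimed after a disconnect if it is resilient or
* persistent, or if it is durable and holds either a handle-caching
* lease or a batch oplock.
*/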
static inline bool is_reconnectable(struct ksmbd_file *fp)
{
struct oplock_info *opinfo = opinfo_get(fp);
bool reconn = false;
if (!opinfo)
return false;
if (opinfo->op_state != OPLOCK_STATE_NONE) {
opinfo_put(opinfo);
return false;
}
if (fp->is_resilient || fp->is_persistent)
reconn = true;
else if (fp->is_durable && opinfo->is_lease &&
opinfo->o_lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
reconn = true;
else if (fp->is_durable && opinfo->level == SMB2_OPLOCK_LEVEL_BATCH)
reconn = true;
opinfo_put(opinfo);
return reconn;
}
static int
__close_file_table_ids(struct ksmbd_file_table *ft,
struct ksmbd_tree_connect *tcon,
bool (*skip)(struct ksmbd_tree_connect *tcon,
struct ksmbd_file *fp))
{
unsigned int id;
struct ksmbd_file *fp;
int num = 0;
idr_for_each_entry(ft->idr, fp, id) {
if (skip(tcon, fp))
continue;
set_close_state_blocked_works(fp);
if (!atomic_dec_and_test(&fp->refcount))
continue;
__ksmbd_close_fd(ft, fp);
num++;
}
return num;
}
static bool tree_conn_fd_check(struct ksmbd_tree_connect *tcon,
struct ksmbd_file *fp)
{
return fp->tcon != tcon;
}
static bool session_fd_check(struct ksmbd_tree_connect *tcon,
struct ksmbd_file *fp)
{
if (!is_reconnectable(fp))
return false;
fp->conn = NULL;
fp->tcon = NULL;
fp->volatile_id = KSMBD_NO_FID;
return true;
}
void ksmbd_close_tree_conn_fds(struct ksmbd_work *work)
{
int num = __close_file_table_ids(&work->sess->file_table,
work->tcon,
tree_conn_fd_check);
atomic_sub(num, &work->conn->stats.open_files_count);
}
void ksmbd_close_session_fds(struct ksmbd_work *work)
{
int num = __close_file_table_ids(&work->sess->file_table,
work->tcon,
session_fd_check);
atomic_sub(num, &work->conn->stats.open_files_count);
}
int ksmbd_init_global_file_table(void)
{
return ksmbd_init_file_table(&global_ft);
}
void ksmbd_free_global_file_table(void)
{
struct ksmbd_file *fp = NULL;
unsigned int id;
idr_for_each_entry(global_ft.idr, fp, id) {
__ksmbd_remove_durable_fd(fp);
ksmbd_free_file_struct(fp);
}
ksmbd_destroy_file_table(&global_ft);
}
int ksmbd_reopen_durable_fd(struct ksmbd_work *work,
struct ksmbd_file *fp)
{
if (!fp->is_durable || fp->conn || fp->tcon) {
ksmbd_err("Invalid durable fd [%p:%p]\n",
fp->conn, fp->tcon);
return -EBADF;
}
if (HAS_FILE_ID(fp->volatile_id)) {
ksmbd_err("Still in use durable fd: %u\n", fp->volatile_id);
return -EBADF;
}
fp->conn = work->sess->conn;
fp->tcon = work->tcon;
__open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
if (!HAS_FILE_ID(fp->volatile_id)) {
fp->conn = NULL;
fp->tcon = NULL;
return -EBADF;
}
return 0;
}
static void close_fd_list(struct ksmbd_work *work, struct list_head *head)
{
while (!list_empty(head)) {
struct ksmbd_file *fp;
fp = list_first_entry(head, struct ksmbd_file, node);
list_del_init(&fp->node);
__ksmbd_close_fd(&work->sess->file_table, fp);
}
}
int ksmbd_close_inode_fds(struct ksmbd_work *work, struct inode *inode)
{
struct ksmbd_inode *ci;
bool unlinked = true;
struct ksmbd_file *fp, *fptmp;
LIST_HEAD(dispose);
ci = ksmbd_inode_lookup_by_vfsinode(inode);
if (!ci)
return true;
if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING))
unlinked = false;
write_lock(&ci->m_lock);
list_for_each_entry_safe(fp, fptmp, &ci->m_fp_list, node) {
if (fp->conn)
continue;
list_del(&fp->node);
list_add(&fp->node, &dispose);
}
atomic_dec(&ci->m_count);
write_unlock(&ci->m_lock);
close_fd_list(work, &dispose);
return unlinked;
}
int ksmbd_file_table_flush(struct ksmbd_work *work)
{
struct ksmbd_file *fp = NULL;
unsigned int id;
int ret;
read_lock(&work->sess->file_table.lock);
idr_for_each_entry(work->sess->file_table.idr, fp, id) {
ret = ksmbd_vfs_fsync(work, fp->volatile_id, KSMBD_NO_FID);
if (ret)
break;
}
read_unlock(&work->sess->file_table.lock);
return ret;
}
int ksmbd_init_file_table(struct ksmbd_file_table *ft)
{
ft->idr = ksmbd_alloc(sizeof(struct idr));
if (!ft->idr)
return -ENOMEM;
idr_init(ft->idr);
rwlock_init(&ft->lock);
return 0;
}
void ksmbd_destroy_file_table(struct ksmbd_file_table *ft)
{
if (!ft->idr)
return;
__close_file_table_ids(ft, NULL, session_fd_check);
idr_destroy(ft->idr);
ksmbd_free(ft->idr);
ft->idr = NULL;
}

fs/cifsd/vfs_cache.h (new file, 213 lines)

@@ -0,0 +1,213 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2019 Samsung Electronics Co., Ltd.
*/
#ifndef __VFS_CACHE_H__
#define __VFS_CACHE_H__
#include <linux/version.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include "vfs.h"
/* Windows style file permissions for extended response */
#define FILE_GENERIC_ALL 0x1F01FF
#define FILE_GENERIC_READ 0x120089
#define FILE_GENERIC_WRITE 0x120116
#define FILE_GENERIC_EXECUTE 0x1200a0
#define KSMBD_START_FID 0
#define KSMBD_NO_FID (UINT_MAX)
#define SMB2_NO_FID (0xFFFFFFFFFFFFFFFFULL)
#define FP_FILENAME(fp) fp->filp->f_path.dentry->d_name.name
#define FP_INODE(fp) fp->filp->f_path.dentry->d_inode
#define PARENT_INODE(fp) fp->filp->f_path.dentry->d_parent->d_inode
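/*
* True for attribute-only opens whose create disposition does not
* overwrite or supersede the file.
*/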
#define ATTR_FP(fp) (fp->attrib_only && \
(fp->cdoption != FILE_OVERWRITE_IF_LE && \
fp->cdoption != FILE_OVERWRITE_LE && \
fp->cdoption != FILE_SUPERSEDE_LE))
struct ksmbd_conn;
struct ksmbd_session;
struct ksmbd_lock {
struct file_lock *fl;
struct list_head glist;
struct list_head llist;
unsigned int flags;
int cmd;
int zero_len;
unsigned long long start;
unsigned long long end;
};
struct stream {
char *name;
ssize_t size;
};
struct ksmbd_inode {
rwlock_t m_lock;
atomic_t m_count;
atomic_t op_count;
/* opinfo count for streams */
atomic_t sop_count;
struct inode *m_inode;
unsigned int m_flags;
struct hlist_node m_hash;
struct list_head m_fp_list;
struct list_head m_op_list;
struct oplock_info *m_opinfo;
__le32 m_fattr;
};
struct ksmbd_file {
struct file *filp;
char *filename;
unsigned int persistent_id;
unsigned int volatile_id;
spinlock_t f_lock;
struct ksmbd_inode *f_ci;
struct ksmbd_inode *f_parent_ci;
struct oplock_info __rcu *f_opinfo;
struct ksmbd_conn *conn;
struct ksmbd_tree_connect *tcon;
atomic_t refcount;
__le32 daccess;
__le32 saccess;
__le32 coption;
__le32 cdoption;
__u64 create_time;
__u64 itime;
bool is_durable;
bool is_resilient;
bool is_persistent;
bool is_nt_open;
bool attrib_only;
char client_guid[16];
char create_guid[16];
char app_instance_id[16];
struct stream stream;
struct list_head node;
struct list_head blocked_works;
int durable_timeout;
/* for SMB1 */
int pid;
/* conflict lock fail count for SMB1 */
unsigned int cflock_cnt;
/* last lock failure start offset for SMB1 */
unsigned long long llock_fstart;
int dirent_offset;
/* the fields below are valid only while a directory listing is in progress */
struct ksmbd_readdir_data readdir_data;
int dot_dotdot[2];
};
static inline void set_ctx_actor(struct dir_context *ctx,
filldir_t actor)
{
ctx->actor = actor;
}
#define KSMBD_NR_OPEN_DEFAULT BITS_PER_LONG
struct ksmbd_file_table {
rwlock_t lock;
struct idr *idr;
};
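/* A file id is valid only if its low 32 bits are below KSMBD_NO_FID. */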
static inline bool HAS_FILE_ID(unsigned long long req)
{
unsigned int id = (unsigned int)req;
return id < KSMBD_NO_FID;
}
static inline bool ksmbd_stream_fd(struct ksmbd_file *fp)
{
return fp->stream.name != NULL;
}
int ksmbd_init_file_table(struct ksmbd_file_table *ft);
void ksmbd_destroy_file_table(struct ksmbd_file_table *ft);
int ksmbd_close_fd(struct ksmbd_work *work, unsigned int id);
struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work,
unsigned int id);
struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work,
unsigned int id);
struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work,
unsigned int id,
unsigned int pid);
void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp);
int ksmbd_close_fd_app_id(struct ksmbd_work *work, char *app_id);
struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id);
struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid);
struct ksmbd_file *ksmbd_lookup_fd_filename(struct ksmbd_work *work,
char *filename);
struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode);
unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp);
struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work,
struct file *filp);
void ksmbd_close_tree_conn_fds(struct ksmbd_work *work);
void ksmbd_close_session_fds(struct ksmbd_work *work);
int ksmbd_close_inode_fds(struct ksmbd_work *work, struct inode *inode);
int ksmbd_reopen_durable_fd(struct ksmbd_work *work,
struct ksmbd_file *fp);
int ksmbd_init_global_file_table(void);
void ksmbd_free_global_file_table(void);
int ksmbd_file_table_flush(struct ksmbd_work *work);
void ksmbd_set_fd_limit(unsigned long limit);
/*
* INODE hash
*/
int __init ksmbd_inode_hash_init(void);
void __exit ksmbd_release_inode_hash(void);
enum KSMBD_INODE_STATUS {
KSMBD_INODE_STATUS_OK,
KSMBD_INODE_STATUS_UNKNOWN,
KSMBD_INODE_STATUS_PENDING_DELETE,
};
int ksmbd_query_inode_status(struct inode *inode);
bool ksmbd_inode_pending_delete(struct ksmbd_file *fp);
void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp);
void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp);
void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
int file_info);
#endif /* __VFS_CACHE_H__ */