linux/fs/netfs/fscache_main.c
commit a9d47a50cf by David Howells
netfs: Revert "netfs: Switch debug logging to pr_debug()"
Revert commit 163eae0fb0 to get back the
original operation of the debugging macros.

Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20240608151352.22860-2-ukleinek@kernel.org
Link: https://lore.kernel.org/r/1410685.1721333252@warthog.procyon.org.uk
cc: Uwe Kleine-König <ukleinek@kernel.org>
cc: Christian Brauner <brauner@kernel.org>
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
2024-07-24 10:15:37 +02:00

// SPDX-License-Identifier: GPL-2.0-or-later
/* General filesystem local caching manager
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define FSCACHE_DEBUG_LEVEL CACHE
#include <linux/module.h>
#include <linux/init.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/fscache.h>

EXPORT_TRACEPOINT_SYMBOL(fscache_access_cache);
EXPORT_TRACEPOINT_SYMBOL(fscache_access_volume);
EXPORT_TRACEPOINT_SYMBOL(fscache_access);

struct workqueue_struct *fscache_wq;
EXPORT_SYMBOL(fscache_wq);

/*
 * Mixing scores (in bits) for (7,20):
 * Input delta:  1-bit      2-bit
 * 1 round:     330.3     9201.6
 * 2 rounds:   1246.4    25475.4
 * 3 rounds:   1907.1    31295.1
 * 4 rounds:   2042.3    31718.6
 * Perfect:    2048      31744
 *            (32*64)   (32*31/2 * 64)
 */
#define HASH_MIX(x, y, a)	\
	(	x ^= (a),	\
	y ^= x,	x = rol32(x, 7),\
	x += y,	y = rol32(y,20),\
	y *= 9			)

static inline unsigned int fold_hash(unsigned long x, unsigned long y)
{
	/* Use arch-optimized multiply if one exists */
	return __hash_32(y ^ __hash_32(x));
}

/*
 * Generate a hash. This is derived from full_name_hash(), but we want to be
 * sure it is arch independent and that it doesn't change as bits of the
 * computed hash value might appear on disk. The caller must guarantee that
 * the source data is a multiple of four bytes in size.
 */
unsigned int fscache_hash(unsigned int salt, const void *data, size_t len)
{
	const __le32 *p = data;
	unsigned int a, x = 0, y = salt, n = len / sizeof(__le32);

	for (; n; n--) {
		a = le32_to_cpu(*p++);
		HASH_MIX(x, y, a);
	}
	return fold_hash(x, y);
}
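
#if 0	/* Illustration only -- not part of fscache_main.c. */
/*
 * A minimal userspace sketch of the same mixing scheme, useful when checking
 * the on-disk-stable hash from a test harness.  Assumptions, not guarantees
 * made by this file: rol32() is a plain 32-bit rotate, __hash_32() is the
 * GOLDEN_RATIO_32 (0x61c88647) multiply, and every *_sketch name below is
 * invented here and does not exist in the kernel.
 */
#include <stdint.h>
#include <stddef.h>

static uint32_t rol32_sketch(uint32_t x, unsigned int n)
{
	return (x << n) | (x >> (32 - n));
}

static uint32_t hash_32_sketch(uint32_t x)
{
	return x * 0x61c88647u;		/* GOLDEN_RATIO_32 (assumed) */
}

static uint32_t fscache_hash_sketch(uint32_t salt, const void *data, size_t len)
{
	const unsigned char *p = data;
	uint32_t a, x = 0, y = salt;
	size_t n = len / 4;		/* caller supplies a multiple of 4 bytes */

	for (; n; n--, p += 4) {
		/* Read each 32-bit word little-endian, as le32_to_cpu() does */
		a = (uint32_t)p[0] | (uint32_t)p[1] << 8 |
		    (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
		/* One HASH_MIX round with the (7,20) rotation pair */
		x ^= a;
		y ^= x; x = rol32_sketch(x, 7);
		x += y; y = rol32_sketch(y, 20);
		y *= 9;
	}
	/* fold_hash(): collapse x and y with two golden-ratio multiplies */
	return hash_32_sketch(y ^ hash_32_sketch(x));
}
#endif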

/*
 * initialise the fs caching module
 */
int __init fscache_init(void)
{
	int ret = -ENOMEM;

	fscache_wq = alloc_workqueue("fscache", WQ_UNBOUND | WQ_FREEZABLE, 0);
	if (!fscache_wq)
		goto error_wq;

	ret = fscache_proc_init();
	if (ret < 0)
		goto error_proc;

	fscache_cookie_jar = kmem_cache_create("fscache_cookie_jar",
					       sizeof(struct fscache_cookie),
					       0, 0, NULL);
	if (!fscache_cookie_jar) {
		pr_notice("Failed to allocate a cookie jar\n");
		ret = -ENOMEM;
		goto error_cookie_jar;
	}

	pr_notice("FS-Cache loaded\n");
	return 0;

error_cookie_jar:
	fscache_proc_cleanup();
error_proc:
	destroy_workqueue(fscache_wq);
error_wq:
	return ret;
}

/*
 * clean up on module removal
 */
void __exit fscache_exit(void)
{
	_enter("");

	kmem_cache_destroy(fscache_cookie_jar);
	fscache_proc_cleanup();
	destroy_workqueue(fscache_wq);
	pr_notice("FS-Cache unloaded\n");
}
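
#if 0	/* Illustration only -- not part of fscache_main.c. */
/*
 * A hypothetical sketch of how an __init/__exit pair like the above would be
 * wired up if this were a standalone module.  In the tree these functions are
 * not a module entry point themselves; they are non-static so other parts of
 * netfs can call them.  The module_init()/module_exit() hooks and the
 * *_sketch names below are invented purely for illustration.
 */
static int __init fscache_sketch_init(void)
{
	/* Set up the workqueue, /proc entries and cookie jar in order */
	return fscache_init();
}
module_init(fscache_sketch_init);

static void __exit fscache_sketch_exit(void)
{
	/* Tear down in the reverse order of fscache_init() */
	fscache_exit();
}
module_exit(fscache_sketch_exit);
#endif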