// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * eCryptfs: Linux filesystem encryption layer
 * This is where eCryptfs coordinates the symmetric encryption and
 * decryption of the file data as it passes between the lower
 * encrypted file and the upper decrypted file.
 *
 * Copyright (C) 1997-2003 Erez Zadok
 * Copyright (C) 2001-2003 Stony Brook University
 * Copyright (C) 2004-2007 International Business Machines Corp.
 *   Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/xattr.h>
#include <linux/unaligned.h>
#include "ecryptfs_kernel.h"

/*
 * ecryptfs_get_locked_page
 *
 * Get one page from cache or lower f/s, return error otherwise.
 *
 * Returns locked and up-to-date page (if ok), with increased
 * refcnt.
 */
struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index)
{
	struct page *page = read_mapping_page(inode->i_mapping, index, NULL);
	if (!IS_ERR(page))
		lock_page(page);
	return page;
}

/*
 * This is where we encrypt the data and pass the encrypted data to
 * the lower filesystem.  In OpenPGP-compatible mode, we operate on
 * entire underlying packets.
 */
static int ecryptfs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct folio *folio = NULL;
	int error;

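	/*
	 * writeback_iter() hands back each dirty folio in turn;
	 * ecryptfs_encrypt_page() encrypts its contents and writes the
	 * result to the lower file.  On failure the folio is marked
	 * not-uptodate and the error is recorded on the mapping, but
	 * iteration continues with the remaining folios.
	 */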
	while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
		error = ecryptfs_encrypt_page(&folio->page);
		if (error) {
			ecryptfs_printk(KERN_WARNING,
				"Error encrypting folio (index [0x%.16lx])\n",
				folio->index);
			folio_clear_uptodate(folio);
			mapping_set_error(mapping, error);
		}
		folio_unlock(folio);
	}

	return error;
}

static void strip_xattr_flag(char *page_virt,
			     struct ecryptfs_crypt_stat *crypt_stat)
{
	if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
		size_t written;

		crypt_stat->flags &= ~ECRYPTFS_METADATA_IN_XATTR;
		ecryptfs_write_crypt_stat_flags(page_virt, crypt_stat,
						&written);
		crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
	}
}

/*
 *   Header Extent:
 *     Octets 0-7:        Unencrypted file size (big-endian)
 *     Octets 8-15:       eCryptfs special marker
 *     Octets 16-19:      Flags
 *      Octet 16:         File format version number (between 0 and 255)
 *      Octets 17-18:     Reserved
 *      Octet 19:         Bit 1 (lsb): Reserved
 *                        Bit 2: Encrypted?
 *                        Bits 3-8: Reserved
 *     Octets 20-23:      Header extent size (big-endian)
 *     Octets 24-25:      Number of header extents at front of file
 *                        (big-endian)
 *     Octet  26:         Begin RFC 2440 authentication token packet set
 */
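
/*
 * Worked example, assuming the common configuration of 4096-byte
 * extents and 8192 bytes of header metadata: the "view" then begins
 * with two header extents laid out as above, and byte 0 of the
 * decrypted (upper) file contents corresponds to offset 8192 in the
 * encrypted view.
 */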

/**
 * ecryptfs_copy_up_encrypted_with_header
 * @page: Sort of a ``virtual'' representation of the encrypted lower
 *        file. The actual lower file does not have the metadata in
 *        the header. This is locked.
 * @crypt_stat: The eCryptfs inode's cryptographic context
 *
 * The ``view'' is the version of the file that userspace winds up
 * seeing, with the header information inserted.
 */
static int
ecryptfs_copy_up_encrypted_with_header(struct page *page,
				       struct ecryptfs_crypt_stat *crypt_stat)
{
	loff_t extent_num_in_page = 0;
	loff_t num_extents_per_page = (PAGE_SIZE
				       / crypt_stat->extent_size);
	int rc = 0;

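	/*
	 * Walk every extent that this page of the "view" covers.  Extents
	 * that fall within the header area are synthesized from the
	 * crypt_stat metadata; the rest are read straight from the lower
	 * file, shifted back by metadata_size, since the lower file stores
	 * no header when the metadata lives in an xattr.
	 */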
	while (extent_num_in_page < num_extents_per_page) {
		loff_t view_extent_num = ((((loff_t)page->index)
					   * num_extents_per_page)
					  + extent_num_in_page);
		size_t num_header_extents_at_front =
			(crypt_stat->metadata_size / crypt_stat->extent_size);

		if (view_extent_num < num_header_extents_at_front) {
			/* This is a header extent */
			char *page_virt;

			page_virt = kmap_local_page(page);
			memset(page_virt, 0, PAGE_SIZE);
			/* TODO: Support more than one header extent */
			if (view_extent_num == 0) {
				size_t written;

				rc = ecryptfs_read_xattr_region(
					page_virt, page->mapping->host);
				strip_xattr_flag(page_virt + 16, crypt_stat);
				ecryptfs_write_header_metadata(page_virt + 20,
							       crypt_stat,
							       &written);
			}
			kunmap_local(page_virt);
			flush_dcache_page(page);
			if (rc) {
				printk(KERN_ERR "%s: Error reading xattr "
				       "region; rc = [%d]\n", __func__, rc);
				goto out;
			}
		} else {
			/* This is an encrypted data extent */
			loff_t lower_offset =
				((view_extent_num * crypt_stat->extent_size)
				 - crypt_stat->metadata_size);

			rc = ecryptfs_read_lower_page_segment(
				page, (lower_offset >> PAGE_SHIFT),
				(lower_offset & ~PAGE_MASK),
				crypt_stat->extent_size, page->mapping->host);
			if (rc) {
				printk(KERN_ERR "%s: Error attempting to read "
				       "extent at offset [%lld] in the lower "
				       "file; rc = [%d]\n", __func__,
				       lower_offset, rc);
				goto out;
			}
		}
		extent_num_in_page++;
	}
out:
	return rc;
}

/**
 * ecryptfs_read_folio
 * @file: An eCryptfs file
 * @folio: Folio from eCryptfs inode mapping into which to stick the read data
 *
 * Read in a folio, decrypting if necessary.
 *
 * Returns zero on success; non-zero on error.
 */
static int ecryptfs_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	struct ecryptfs_crypt_stat *crypt_stat =
		&ecryptfs_inode_to_private(page->mapping->host)->crypt_stat;
	int rc = 0;

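	/*
	 * Three cases: an unencrypted inode is passed through from the
	 * lower file as-is; a mount with the "encrypted view" option
	 * (e.g. for backup tools) gets the raw lower contents, with the
	 * header synthesized from the xattr when the metadata is stored
	 * there; otherwise the lower data is decrypted into the folio.
	 */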
	if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
		rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
						      PAGE_SIZE,
						      page->mapping->host);
	} else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
		if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
			rc = ecryptfs_copy_up_encrypted_with_header(page,
								    crypt_stat);
			if (rc) {
				printk(KERN_ERR "%s: Error attempting to copy "
				       "the encrypted content from the lower "
				       "file whilst inserting the metadata "
				       "from the xattr into the header; rc = "
				       "[%d]\n", __func__, rc);
				goto out;
			}

		} else {
			rc = ecryptfs_read_lower_page_segment(
				page, page->index, 0, PAGE_SIZE,
				page->mapping->host);
			if (rc) {
				printk(KERN_ERR "Error reading page; rc = "
				       "[%d]\n", rc);
				goto out;
			}
		}
	} else {
		rc = ecryptfs_decrypt_page(page);
		if (rc) {
			ecryptfs_printk(KERN_ERR, "Error decrypting page; "
					"rc = [%d]\n", rc);
			goto out;
		}
	}
out:
	if (rc)
		ClearPageUptodate(page);
	else
		SetPageUptodate(page);
	ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16lx]\n",
			page->index);
	unlock_page(page);
	return rc;
}

/*
 * Called with lower inode mutex held.
 */
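/*
 * Zero the tail of the folio holding EOF, from the end of the
 * written/valid data out to the folio boundary, so that a later
 * whole-page encryption does not pick up stale bytes.
 */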
static int fill_zeros_to_end_of_page(struct folio *folio, unsigned int to)
{
	struct inode *inode = folio->mapping->host;
	int end_byte_in_page;

	if ((i_size_read(inode) / PAGE_SIZE) != folio->index)
		goto out;
	end_byte_in_page = i_size_read(inode) % PAGE_SIZE;
	if (to > end_byte_in_page)
		end_byte_in_page = to;
	folio_zero_segment(folio, end_byte_in_page, PAGE_SIZE);
out:
	return 0;
}

/**
 * ecryptfs_write_begin
 * @file: The eCryptfs file
 * @mapping: The eCryptfs object
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @foliop: Pointer to return the folio
 * @fsdata: Pointer to return fs data (unused)
 *
 * This function must zero any hole we create
 *
 * Returns zero on success; non-zero otherwise
 */
static int ecryptfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len,
			struct folio **foliop, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct folio *folio;
	loff_t prev_page_end_size;
	int rc = 0;

	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
			mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	*foliop = folio;

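	/* prev_page_end_size is the file offset at which this folio starts,
	 * i.e. where the previous folio ended. */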
	prev_page_end_size = ((loff_t)index << PAGE_SHIFT);
	if (!folio_test_uptodate(folio)) {
		struct ecryptfs_crypt_stat *crypt_stat =
			&ecryptfs_inode_to_private(mapping->host)->crypt_stat;

		if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
			rc = ecryptfs_read_lower_page_segment(
				&folio->page, index, 0, PAGE_SIZE, mapping->host);
			if (rc) {
				printk(KERN_ERR "%s: Error attempting to read "
				       "lower page segment; rc = [%d]\n",
				       __func__, rc);
				folio_clear_uptodate(folio);
				goto out;
			} else
				folio_mark_uptodate(folio);
		} else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
			if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
				rc = ecryptfs_copy_up_encrypted_with_header(
					&folio->page, crypt_stat);
				if (rc) {
					printk(KERN_ERR "%s: Error attempting "
					       "to copy the encrypted content "
					       "from the lower file whilst "
					       "inserting the metadata from "
					       "the xattr into the header; rc "
					       "= [%d]\n", __func__, rc);
					folio_clear_uptodate(folio);
					goto out;
				}
				folio_mark_uptodate(folio);
			} else {
				rc = ecryptfs_read_lower_page_segment(
					&folio->page, index, 0, PAGE_SIZE,
					mapping->host);
				if (rc) {
					printk(KERN_ERR "%s: Error reading "
					       "page; rc = [%d]\n",
					       __func__, rc);
					folio_clear_uptodate(folio);
					goto out;
				}
				folio_mark_uptodate(folio);
			}
		} else {
			if (prev_page_end_size
			    >= i_size_read(mapping->host)) {
				folio_zero_range(folio, 0, PAGE_SIZE);
				folio_mark_uptodate(folio);
			} else if (len < PAGE_SIZE) {
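				/*
				 * Only decrypt the existing contents when the
				 * write covers less than the whole folio; a
				 * full-folio write overwrites everything, so
				 * reading and decrypting the old data first
				 * would be wasted work (see
				 * ecryptfs_write_end()).
				 */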
				rc = ecryptfs_decrypt_page(&folio->page);
				if (rc) {
					printk(KERN_ERR "%s: Error decrypting "
					       "page at index [%ld]; "
					       "rc = [%d]\n",
					       __func__, folio->index, rc);
					folio_clear_uptodate(folio);
					goto out;
				}
				folio_mark_uptodate(folio);
			}
		}
	}
	/* If creating a page or more of holes, zero them out via truncate.
	 * Note, this will increase i_size. */
	if (index != 0) {
		if (prev_page_end_size > i_size_read(mapping->host)) {
			rc = ecryptfs_truncate(file->f_path.dentry,
					       prev_page_end_size);
			if (rc) {
				printk(KERN_ERR "%s: Error on attempt to "
				       "truncate to (higher) offset [%lld];"
				       " rc = [%d]\n", __func__,
				       prev_page_end_size, rc);
				goto out;
			}
		}
	}
	/* Writing to a new page, and creating a small hole from start
	 * of page?  Zero it out. */
	if ((i_size_read(mapping->host) == prev_page_end_size)
	    && (pos != 0))
		folio_zero_range(folio, 0, PAGE_SIZE);
out:
	if (unlikely(rc)) {
		folio_unlock(folio);
		folio_put(folio);
	}
	return rc;
}

/*
 * ecryptfs_write_inode_size_to_header
 *
 * Writes the lower file size to the first 8 bytes of the header.
 *
 * Returns zero on success; non-zero on error.
 */
static int ecryptfs_write_inode_size_to_header(struct inode *ecryptfs_inode)
{
	char *file_size_virt;
	int rc;

	file_size_virt = kmalloc(sizeof(u64), GFP_KERNEL);
	if (!file_size_virt) {
		rc = -ENOMEM;
		goto out;
	}
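	/* The first 8 octets of the header hold the unencrypted file size,
	 * big-endian (see the header extent layout above). */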
	put_unaligned_be64(i_size_read(ecryptfs_inode), file_size_virt);
	rc = ecryptfs_write_lower(ecryptfs_inode, file_size_virt, 0,
				  sizeof(u64));
	kfree(file_size_virt);
	if (rc < 0)
		printk(KERN_ERR "%s: Error writing file size to header; "
		       "rc = [%d]\n", __func__, rc);
	else
		rc = 0;
out:
	return rc;
}

struct kmem_cache *ecryptfs_xattr_cache;

static int ecryptfs_write_inode_size_to_xattr(struct inode *ecryptfs_inode)
{
	ssize_t size;
	void *xattr_virt;
	struct dentry *lower_dentry =
		ecryptfs_inode_to_private(ecryptfs_inode)->lower_file->f_path.dentry;
	struct inode *lower_inode = d_inode(lower_dentry);
	int rc;

	if (!(lower_inode->i_opflags & IOP_XATTR)) {
		printk(KERN_WARNING
		       "No support for setting xattr in lower filesystem\n");
		rc = -ENOSYS;
		goto out;
	}
	xattr_virt = kmem_cache_alloc(ecryptfs_xattr_cache, GFP_KERNEL);
	if (!xattr_virt) {
		rc = -ENOMEM;
		goto out;
	}
	inode_lock(lower_inode);
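	/* Read back the existing xattr (if any) so that updating the size
	 * preserves the rest of the stored metadata; if no xattr exists
	 * yet, only the 8-byte size field is written. */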
	size = __vfs_getxattr(lower_dentry, lower_inode, ECRYPTFS_XATTR_NAME,
			      xattr_virt, PAGE_SIZE);
	if (size < 0)
		size = 8;
	put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt);
	rc = __vfs_setxattr(&nop_mnt_idmap, lower_dentry, lower_inode,
			    ECRYPTFS_XATTR_NAME, xattr_virt, size, 0);
	inode_unlock(lower_inode);
	if (rc)
		printk(KERN_ERR "Error whilst attempting to write inode size "
		       "to lower file xattr; rc = [%d]\n", rc);
	kmem_cache_free(ecryptfs_xattr_cache, xattr_virt);
out:
	return rc;
}

int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode)
{
	struct ecryptfs_crypt_stat *crypt_stat;

	crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
	BUG_ON(!(crypt_stat->flags & ECRYPTFS_ENCRYPTED));
	if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
		return ecryptfs_write_inode_size_to_xattr(ecryptfs_inode);
	else
		return ecryptfs_write_inode_size_to_header(ecryptfs_inode);
}

/**
 * ecryptfs_write_end
 * @file: The eCryptfs file object
 * @mapping: The eCryptfs object
 * @pos: The file position
 * @len: The length of the data (unused)
 * @copied: The amount of data copied
 * @folio: The eCryptfs folio
 * @fsdata: The fsdata (unused)
 */
static int ecryptfs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct folio *folio, void *fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + copied;
	struct inode *ecryptfs_inode = mapping->host;
	struct ecryptfs_crypt_stat *crypt_stat =
		&ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
	int rc;

	ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page"
			"(page w/ index = [0x%.16lx], to = [%d])\n", index, to);
	if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
		rc = ecryptfs_write_lower_page_segment(ecryptfs_inode,
						       &folio->page, 0, to);
		if (!rc) {
			rc = copied;
			fsstack_copy_inode_size(ecryptfs_inode,
				ecryptfs_inode_to_lower(ecryptfs_inode));
		}
		goto out;
	}
	if (!folio_test_uptodate(folio)) {
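		/*
		 * The folio can only be !uptodate here if write_begin
		 * skipped the read/decrypt because the caller intended to
		 * overwrite the whole folio.  If the copy came up short,
		 * return 0 so the generic write path retries; otherwise
		 * the folio now holds a full page of new data.
		 */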
		if (copied < PAGE_SIZE) {
			rc = 0;
			goto out;
		}
		folio_mark_uptodate(folio);
	}
	/* Fills in zeros if 'to' goes beyond inode size */
	rc = fill_zeros_to_end_of_page(folio, to);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error attempting to fill "
			"zeros in page with index = [0x%.16lx]\n", index);
		goto out;
	}
	rc = ecryptfs_encrypt_page(&folio->page);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper "
				"index [0x%.16lx])\n", index);
		goto out;
	}
	if (pos + copied > i_size_read(ecryptfs_inode)) {
		i_size_write(ecryptfs_inode, pos + copied);
		ecryptfs_printk(KERN_DEBUG, "Expanded file size to "
			"[0x%.16llx]\n",
			(unsigned long long)i_size_read(ecryptfs_inode));
	}
	rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
	if (rc)
		printk(KERN_ERR "Error writing inode size to metadata; "
		       "rc = [%d]\n", rc);
	else
		rc = copied;
out:
	folio_unlock(folio);
	folio_put(folio);
	return rc;
}

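/*
 * Delegate block mapping to the lower inode; a block number of 0 is
 * returned when the lower filesystem cannot resolve the mapping.
 */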
static sector_t ecryptfs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *lower_inode = ecryptfs_inode_to_lower(mapping->host);
	int ret = bmap(lower_inode, &block);

	if (ret)
		return 0;
	return block;
}

#include <linux/buffer_head.h>

const struct address_space_operations ecryptfs_aops = {
	/*
	 * XXX: This is pretty broken for multiple reasons: ecryptfs does not
	 * actually use buffer_heads, and ecryptfs will crash without
	 * CONFIG_BLOCK.  But it matches the behavior before the default for
	 * address_space_operations without the ->dirty_folio method was
	 * cleaned up, so this is the best we can do without maintainer
	 * feedback.
	 */
#ifdef CONFIG_BLOCK
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
#endif
	.writepages = ecryptfs_writepages,
	.read_folio = ecryptfs_read_folio,
	.write_begin = ecryptfs_write_begin,
	.write_end = ecryptfs_write_end,
	.migrate_folio = filemap_migrate_folio,
	.bmap = ecryptfs_bmap,
};