// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fiemap.h>
#include <linux/fs.h>
#include <linux/nls.h>
#include <linux/vmalloc.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"
#ifdef CONFIG_NTFS3_LZX_XPRESS
#include "lib/lib.h"
#endif

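/*
 * ni_ins_mi
 *
 * Find a subrecord in the rb-tree of loaded subrecords by record number.
 * If it is not found and 'ins' is provided, link 'ins' into the tree instead.
 */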
static struct mft_inode *ni_ins_mi(struct ntfs_inode *ni, struct rb_root *tree,
				   CLST ino, struct rb_node *ins)
{
	struct rb_node **p = &tree->rb_node;
	struct rb_node *pr = NULL;

	while (*p) {
		struct mft_inode *mi;

		pr = *p;
		mi = rb_entry(pr, struct mft_inode, node);
		if (mi->rno > ino)
			p = &pr->rb_left;
		else if (mi->rno < ino)
			p = &pr->rb_right;
		else
			return mi;
	}

	if (!ins)
		return NULL;

	rb_link_node(ins, pr, p);
	rb_insert_color(ins, tree);
	return rb_entry(ins, struct mft_inode, node);
}

/*
 * ni_find_mi
 *
 * finds mft_inode by record number
 */
static struct mft_inode *ni_find_mi(struct ntfs_inode *ni, CLST rno)
{
	return ni_ins_mi(ni, &ni->mi_tree, rno, NULL);
}

/*
 * ni_add_mi
 *
 * adds new mft_inode into ntfs_inode
 */
static void ni_add_mi(struct ntfs_inode *ni, struct mft_inode *mi)
{
	ni_ins_mi(ni, &ni->mi_tree, mi->rno, &mi->node);
}

/*
 * ni_remove_mi
 *
 * removes mft_inode from ntfs_inode
 */
void ni_remove_mi(struct ntfs_inode *ni, struct mft_inode *mi)
{
	rb_erase(&mi->node, &ni->mi_tree);
}

/*
 * ni_std
 *
 * returns pointer into std_info from primary record
 */
struct ATTR_STD_INFO *ni_std(struct ntfs_inode *ni)
{
	const struct ATTRIB *attr;

	attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
	return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO))
		    : NULL;
}

/*
 * ni_std5
 *
 * returns pointer into std_info from primary record
 */
struct ATTR_STD_INFO5 *ni_std5(struct ntfs_inode *ni)
{
	const struct ATTRIB *attr;

	attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);

	return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO5))
		    : NULL;
}

/*
 * ni_clear
 *
 * clears resources allocated by ntfs_inode
 */
void ni_clear(struct ntfs_inode *ni)
{
	struct rb_node *node;

	if (!ni->vfs_inode.i_nlink && is_rec_inuse(ni->mi.mrec))
		ni_delete_all(ni);

	al_destroy(ni);

	for (node = rb_first(&ni->mi_tree); node;) {
		struct rb_node *next = rb_next(node);
		struct mft_inode *mi = rb_entry(node, struct mft_inode, node);

		rb_erase(node, &ni->mi_tree);
		mi_put(mi);
		node = next;
	}

	/* bad inode always has mode == S_IFREG */
	if (ni->ni_flags & NI_FLAG_DIR)
		indx_clear(&ni->dir);
	else {
		run_close(&ni->file.run);
#ifdef CONFIG_NTFS3_LZX_XPRESS
		if (ni->file.offs_page) {
			/* on-demand allocated page for offsets */
			put_page(ni->file.offs_page);
			ni->file.offs_page = NULL;
		}
#endif
	}

	mi_clear(&ni->mi);
}

/*
 * ni_load_mi_ex
 *
 * finds mft_inode by record number.
 */
int ni_load_mi_ex(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi)
{
	int err;
	struct mft_inode *r;

	r = ni_find_mi(ni, rno);
	if (r)
		goto out;

	err = mi_get(ni->mi.sbi, rno, &r);
	if (err)
		return err;

	ni_add_mi(ni, r);

out:
	if (mi)
		*mi = r;
	return 0;
}

/*
 * ni_load_mi
 *
 * loads the mft_inode that corresponds to the given list_entry
 */
int ni_load_mi(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
	       struct mft_inode **mi)
{
	CLST rno;

	if (!le) {
		*mi = &ni->mi;
		return 0;
	}

	rno = ino_get(&le->ref);
	if (rno == ni->mi.rno) {
		*mi = &ni->mi;
		return 0;
	}
	return ni_load_mi_ex(ni, rno, mi);
}

/*
 * ni_find_attr
 *
 * returns attribute and record this attribute belongs to
 */
struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
			    struct ATTR_LIST_ENTRY **le_o, enum ATTR_TYPE type,
			    const __le16 *name, u8 name_len, const CLST *vcn,
			    struct mft_inode **mi)
{
	struct ATTR_LIST_ENTRY *le;
	struct mft_inode *m;

	if (!ni->attr_list.size ||
	    (!name_len && (type == ATTR_LIST || type == ATTR_STD))) {
		if (le_o)
			*le_o = NULL;
		if (mi)
			*mi = &ni->mi;

		/* Look for required attribute in primary record */
		return mi_find_attr(&ni->mi, attr, type, name, name_len, NULL);
	}

	/* first look for list entry of required type */
	le = al_find_ex(ni, le_o ? *le_o : NULL, type, name, name_len, vcn);
	if (!le)
		return NULL;

	if (le_o)
		*le_o = le;

	/* Load record that contains this attribute */
	if (ni_load_mi(ni, le, &m))
		return NULL;

	/* Look for required attribute */
	attr = mi_find_attr(m, NULL, type, name, name_len, &le->id);

	if (!attr)
		goto out;

	if (!attr->non_res) {
		if (vcn && *vcn)
			goto out;
	} else if (!vcn) {
		if (attr->nres.svcn)
			goto out;
	} else if (le64_to_cpu(attr->nres.svcn) > *vcn ||
		   *vcn > le64_to_cpu(attr->nres.evcn)) {
		goto out;
	}

	if (mi)
		*mi = m;
	return attr;

out:
	ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
	return NULL;
}

/*
 * ni_enum_attr_ex
 *
 * enumerates attributes in ntfs_inode
 */
struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
			       struct ATTR_LIST_ENTRY **le,
			       struct mft_inode **mi)
{
	struct mft_inode *mi2;
	struct ATTR_LIST_ENTRY *le2;

	/* Do we have an attribute list? */
	if (!ni->attr_list.size) {
		*le = NULL;
		if (mi)
			*mi = &ni->mi;
		/* Enum attributes in primary record */
		return mi_enum_attr(&ni->mi, attr);
	}

	/* get next list entry */
	le2 = *le = al_enumerate(ni, attr ? *le : NULL);
	if (!le2)
		return NULL;

	/* Load record that contains the required attribute */
	if (ni_load_mi(ni, le2, &mi2))
		return NULL;

	if (mi)
		*mi = mi2;

	/* Find attribute in loaded record */
	return rec_find_attr_le(mi2, le2);
}

/*
 * ni_load_attr
 *
 * loads attribute that contains given vcn
 */
struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
			    const __le16 *name, u8 name_len, CLST vcn,
			    struct mft_inode **pmi)
{
	struct ATTR_LIST_ENTRY *le;
	struct ATTRIB *attr;
	struct mft_inode *mi;
	struct ATTR_LIST_ENTRY *next;

	if (!ni->attr_list.size) {
		if (pmi)
			*pmi = &ni->mi;
		return mi_find_attr(&ni->mi, NULL, type, name, name_len, NULL);
	}

	le = al_find_ex(ni, NULL, type, name, name_len, NULL);
	if (!le)
		return NULL;

	/*
	 * Unfortunately ATTR_LIST_ENTRY contains only the start vcn,
	 * so to find the ATTRIB segment that contains 'vcn' we should
	 * enumerate some entries.
	 */
	if (vcn) {
		for (;; le = next) {
			next = al_find_ex(ni, le, type, name, name_len, NULL);
			if (!next || le64_to_cpu(next->vcn) > vcn)
				break;
		}
	}

	if (ni_load_mi(ni, le, &mi))
		return NULL;

	if (pmi)
		*pmi = mi;

	attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
	if (!attr)
		return NULL;

	if (!attr->non_res)
		return attr;

	if (le64_to_cpu(attr->nres.svcn) <= vcn &&
	    vcn <= le64_to_cpu(attr->nres.evcn))
		return attr;

	return NULL;
}

/*
 * ni_load_all_mi
 *
 * loads all subrecords
 */
int ni_load_all_mi(struct ntfs_inode *ni)
{
	int err;
	struct ATTR_LIST_ENTRY *le;

	if (!ni->attr_list.size)
		return 0;

	le = NULL;

	while ((le = al_enumerate(ni, le))) {
		CLST rno = ino_get(&le->ref);

		if (rno == ni->mi.rno)
			continue;

		err = ni_load_mi_ex(ni, rno, NULL);
		if (err)
			return err;
	}

	return 0;
}

/*
 * ni_add_subrecord
 *
 * allocate + format + attach a new subrecord
 */
bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi)
{
	struct mft_inode *m;

	m = kzalloc(sizeof(struct mft_inode), GFP_NOFS);
	if (!m)
		return false;

	if (mi_format_new(m, ni->mi.sbi, rno, 0, ni->mi.rno == MFT_REC_MFT)) {
		mi_put(m);
		return false;
	}

	mi_get_ref(&ni->mi, &m->mrec->parent_ref);

	ni_add_mi(ni, m);
	*mi = m;
	return true;
}

/*
 * ni_remove_attr
 *
 * removes all attributes for the given type/name/id
 */
int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
		   const __le16 *name, size_t name_len, bool base_only,
		   const __le16 *id)
{
	int err;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	struct mft_inode *mi;
	u32 type_in;
	int diff;

	if (base_only || type == ATTR_LIST || !ni->attr_list.size) {
		attr = mi_find_attr(&ni->mi, NULL, type, name, name_len, id);
		if (!attr)
			return -ENOENT;

		mi_remove_attr(&ni->mi, attr);
		return 0;
	}

	type_in = le32_to_cpu(type);
	le = NULL;

	for (;;) {
		le = al_enumerate(ni, le);
		if (!le)
			return 0;

next_le2:
		diff = le32_to_cpu(le->type) - type_in;
		if (diff < 0)
			continue;

		if (diff > 0)
			return 0;

		if (le->name_len != name_len)
			continue;

		if (name_len &&
		    memcmp(le_name(le), name, name_len * sizeof(short)))
			continue;

		if (id && le->id != *id)
			continue;
		err = ni_load_mi(ni, le, &mi);
		if (err)
			return err;

		al_remove_le(ni, le);

		attr = mi_find_attr(mi, NULL, type, name, name_len, id);
		if (!attr)
			return -ENOENT;

		mi_remove_attr(mi, attr);

		if (PtrOffset(ni->attr_list.le, le) >= ni->attr_list.size)
			return 0;
		goto next_le2;
	}
}

/*
 * ni_ins_new_attr
 *
 * inserts the attribute into the record
 * Returns a not fully constructed attribute or NULL if it is not possible to create one
 */
static struct ATTRIB *ni_ins_new_attr(struct ntfs_inode *ni,
				      struct mft_inode *mi,
				      struct ATTR_LIST_ENTRY *le,
				      enum ATTR_TYPE type, const __le16 *name,
				      u8 name_len, u32 asize, u16 name_off,
				      CLST svcn)
{
	int err;
	struct ATTRIB *attr;
	bool le_added = false;
	struct MFT_REF ref;

	mi_get_ref(mi, &ref);

	if (type != ATTR_LIST && !le && ni->attr_list.size) {
		err = al_add_le(ni, type, name, name_len, svcn, cpu_to_le16(-1),
				&ref, &le);
		if (err) {
			/* no memory or no space */
			return NULL;
		}
		le_added = true;

		/*
		 * al_add_le -> attr_set_size (list) -> ni_expand_list
		 * which moves some attributes out of the primary record.
		 * This means that 'name' may point into moved memory,
		 * so reinit 'name' from 'le'.
		 */
		name = le->name;
	}

	attr = mi_insert_attr(mi, type, name, name_len, asize, name_off);
	if (!attr) {
		if (le_added)
			al_remove_le(ni, le);
		return NULL;
	}

	if (type == ATTR_LIST) {
		/* attr list is not in list entry array */
		goto out;
	}

	if (!le)
		goto out;

	/* Update ATTRIB Id and record reference */
	le->id = attr->id;
	ni->attr_list.dirty = true;
	le->ref = ref;

out:
	return attr;
}

/*
 * Random write access to a sparsed or compressed file may result in
 * not optimized packed runs.
 * Here is the place to optimize them.
 */
static int ni_repack(struct ntfs_inode *ni)
{
	int err = 0;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct mft_inode *mi, *mi_p = NULL;
	struct ATTRIB *attr = NULL, *attr_p;
	struct ATTR_LIST_ENTRY *le = NULL, *le_p;
	CLST alloc = 0;
	u8 cluster_bits = sbi->cluster_bits;
	CLST svcn, evcn = 0, svcn_p, evcn_p, next_svcn;
	u32 roff, rs = sbi->record_size;
	struct runs_tree run;

	run_init(&run);

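	/*
	 * Enumerate all non-resident attribute segments; accumulate their
	 * runs and re-pack adjacent segments into the first record that
	 * holds them (mi_p) whenever they fit.
	 */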
	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi))) {
		if (!attr->non_res)
			continue;

		svcn = le64_to_cpu(attr->nres.svcn);
		if (svcn != le64_to_cpu(le->vcn)) {
			err = -EINVAL;
			break;
		}

		if (!svcn) {
			alloc = le64_to_cpu(attr->nres.alloc_size) >>
				cluster_bits;
			mi_p = NULL;
		} else if (svcn != evcn + 1) {
			err = -EINVAL;
			break;
		}

		evcn = le64_to_cpu(attr->nres.evcn);

		if (svcn > evcn + 1) {
			err = -EINVAL;
			break;
		}

		if (!mi_p) {
			/* do not try if too little free space */
			if (le32_to_cpu(mi->mrec->used) + 8 >= rs)
				continue;

			/* do not try if last attribute segment */
			if (evcn + 1 == alloc)
				continue;
			run_close(&run);
		}

		roff = le16_to_cpu(attr->nres.run_off);
		err = run_unpack(&run, sbi, ni->mi.rno, svcn, evcn, svcn,
				 Add2Ptr(attr, roff),
				 le32_to_cpu(attr->size) - roff);
		if (err < 0)
			break;

		if (!mi_p) {
			mi_p = mi;
			attr_p = attr;
			svcn_p = svcn;
			evcn_p = evcn;
			le_p = le;
			err = 0;
			continue;
		}

		/*
		 * run contains data from two records: mi_p and mi
		 * try to pack in one
		 */
		err = mi_pack_runs(mi_p, attr_p, &run, evcn + 1 - svcn_p);
		if (err)
			break;

		next_svcn = le64_to_cpu(attr_p->nres.evcn) + 1;

		if (next_svcn >= evcn + 1) {
			/* we can remove this attribute segment */
			al_remove_le(ni, le);
			mi_remove_attr(mi, attr);
			le = le_p;
			continue;
		}

		attr->nres.svcn = le->vcn = cpu_to_le64(next_svcn);
		mi->dirty = true;
		ni->attr_list.dirty = true;

		if (evcn + 1 == alloc) {
			err = mi_pack_runs(mi, attr, &run,
					   evcn + 1 - next_svcn);
			if (err)
				break;
			mi_p = NULL;
		} else {
			mi_p = mi;
			attr_p = attr;
			svcn_p = next_svcn;
			evcn_p = evcn;
			le_p = le;
			run_truncate_head(&run, next_svcn);
		}
	}

	if (err) {
		ntfs_inode_warn(&ni->vfs_inode, "repack problem");
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);

		/* Pack loaded but not packed runs */
		if (mi_p)
			mi_pack_runs(mi_p, attr_p, &run, evcn_p + 1 - svcn_p);
	}

	run_close(&run);
	return err;
}

/*
 * ni_try_remove_attr_list
 *
 * Can we remove the attribute list?
 * Check the case when the primary record contains enough space for all attributes.
 */
static int ni_try_remove_attr_list(struct ntfs_inode *ni)
{
	int err = 0;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr, *attr_list, *attr_ins;
	struct ATTR_LIST_ENTRY *le;
	struct mft_inode *mi;
	u32 asize, free;
	struct MFT_REF ref;
	__le16 id;

	if (!ni->attr_list.dirty)
		return 0;

	err = ni_repack(ni);
	if (err)
		return err;

	attr_list = mi_find_attr(&ni->mi, NULL, ATTR_LIST, NULL, 0, NULL);
	if (!attr_list)
		return 0;

	asize = le32_to_cpu(attr_list->size);

	/* free space in primary record without attribute list */
	free = sbi->record_size - le32_to_cpu(ni->mi.mrec->used) + asize;
	mi_get_ref(&ni->mi, &ref);

	le = NULL;
	while ((le = al_enumerate(ni, le))) {
		if (!memcmp(&le->ref, &ref, sizeof(ref)))
			continue;

		if (le->vcn)
			return 0;

		mi = ni_find_mi(ni, ino_get(&le->ref));
		if (!mi)
			return 0;

		attr = mi_find_attr(mi, NULL, le->type, le_name(le),
				    le->name_len, &le->id);
		if (!attr)
			return 0;

		asize = le32_to_cpu(attr->size);
		if (asize > free)
			return 0;

		free -= asize;
	}

	/* It seems that the attribute list can be removed from the primary record */
	mi_remove_attr(&ni->mi, attr_list);

	/*
	 * Repeat the cycle above and move all attributes to the primary record.
	 * It should succeed!
	 */
	le = NULL;
	while ((le = al_enumerate(ni, le))) {
		if (!memcmp(&le->ref, &ref, sizeof(ref)))
			continue;

		mi = ni_find_mi(ni, ino_get(&le->ref));

		attr = mi_find_attr(mi, NULL, le->type, le_name(le),
				    le->name_len, &le->id);
		asize = le32_to_cpu(attr->size);

		/* insert into primary record */
		attr_ins = mi_insert_attr(&ni->mi, le->type, le_name(le),
					  le->name_len, asize,
					  le16_to_cpu(attr->name_off));
		id = attr_ins->id;

		/* copy all except id */
		memcpy(attr_ins, attr, asize);
		attr_ins->id = id;

		/* remove from original record */
		mi_remove_attr(mi, attr);
	}

	run_deallocate(sbi, &ni->attr_list.run, true);
	run_close(&ni->attr_list.run);
	ni->attr_list.size = 0;
	kfree(ni->attr_list.le);
	ni->attr_list.le = NULL;
	ni->attr_list.dirty = false;

	return 0;
}

/*
 * ni_create_attr_list
 *
 * generates an attribute list for this primary record
 */
int ni_create_attr_list(struct ntfs_inode *ni)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	int err;
	u32 lsize;
	struct ATTRIB *attr;
	struct ATTRIB *arr_move[7];
	struct ATTR_LIST_ENTRY *le, *le_b[7];
	struct MFT_REC *rec;
	bool is_mft;
	CLST rno = 0;
	struct mft_inode *mi;
	u32 free_b, nb, to_free, rs;
	u16 sz;

	is_mft = ni->mi.rno == MFT_REC_MFT;
	rec = ni->mi.mrec;
	rs = sbi->record_size;

	/*
	 * Skip estimating the exact memory requirement;
	 * it looks like one record_size is always enough.
	 */
	le = kmalloc(al_aligned(rs), GFP_NOFS);
	if (!le) {
		err = -ENOMEM;
		goto out;
	}

	mi_get_ref(&ni->mi, &le->ref);
	ni->attr_list.le = le;

	attr = NULL;
	nb = 0;
	free_b = 0;
	attr = NULL;

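	/*
	 * Walk the attributes of the primary record and emit one list
	 * entry per attribute; remember up to seven movable attributes in
	 * 'arr_move'/'le_b' as candidates to evict into a child record.
	 */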
	for (; (attr = mi_enum_attr(&ni->mi, attr)); le = Add2Ptr(le, sz)) {
		sz = le_size(attr->name_len);
		le->type = attr->type;
		le->size = cpu_to_le16(sz);
		le->name_len = attr->name_len;
		le->name_off = offsetof(struct ATTR_LIST_ENTRY, name);
		le->vcn = 0;
		if (le != ni->attr_list.le)
			le->ref = ni->attr_list.le->ref;
		le->id = attr->id;

		if (attr->name_len)
			memcpy(le->name, attr_name(attr),
			       sizeof(short) * attr->name_len);
		else if (attr->type == ATTR_STD)
			continue;
		else if (attr->type == ATTR_LIST)
			continue;
		else if (is_mft && attr->type == ATTR_DATA)
			continue;

		if (!nb || nb < ARRAY_SIZE(arr_move)) {
			le_b[nb] = le;
			arr_move[nb++] = attr;
			free_b += le32_to_cpu(attr->size);
		}
	}

	lsize = PtrOffset(ni->attr_list.le, le);
	ni->attr_list.size = lsize;

	to_free = le32_to_cpu(rec->used) + lsize + SIZEOF_RESIDENT;
	if (to_free <= rs) {
		to_free = 0;
	} else {
		to_free -= rs;

		if (to_free > free_b) {
			err = -EINVAL;
			goto out1;
		}
	}

	/* Allocate child mft. */
	err = ntfs_look_free_mft(sbi, &rno, is_mft, ni, &mi);
	if (err)
		goto out1;

	/* Call 'mi_remove_attr' in reverse order to keep pointers 'arr_move' valid */
	while (to_free > 0) {
		struct ATTRIB *b = arr_move[--nb];
		u32 asize = le32_to_cpu(b->size);
		u16 name_off = le16_to_cpu(b->name_off);

		attr = mi_insert_attr(mi, b->type, Add2Ptr(b, name_off),
				      b->name_len, asize, name_off);
		WARN_ON(!attr);

		mi_get_ref(mi, &le_b[nb]->ref);
		le_b[nb]->id = attr->id;

		/* copy all except id */
		memcpy(attr, b, asize);
		attr->id = le_b[nb]->id;

		WARN_ON(!mi_remove_attr(&ni->mi, b));

		if (to_free <= asize)
			break;
		to_free -= asize;
		WARN_ON(!nb);
	}

	attr = mi_insert_attr(&ni->mi, ATTR_LIST, NULL, 0,
			      lsize + SIZEOF_RESIDENT, SIZEOF_RESIDENT);
	WARN_ON(!attr);

	attr->non_res = 0;
	attr->flags = 0;
	attr->res.data_size = cpu_to_le32(lsize);
	attr->res.data_off = SIZEOF_RESIDENT_LE;
	attr->res.flags = 0;
	attr->res.res = 0;

	memcpy(resident_data_ex(attr, lsize), ni->attr_list.le, lsize);

	ni->attr_list.dirty = false;

	mark_inode_dirty(&ni->vfs_inode);
	goto out;

out1:
	kfree(ni->attr_list.le);
	ni->attr_list.le = NULL;
	ni->attr_list.size = 0;

out:
	return err;
}

/*
 * ni_ins_attr_ext
 *
 * This method adds an external attribute to the ntfs_inode.
 */
static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
			   enum ATTR_TYPE type, const __le16 *name, u8 name_len,
			   u32 asize, CLST svcn, u16 name_off, bool force_ext,
			   struct ATTRIB **ins_attr, struct mft_inode **ins_mi)
{
	struct ATTRIB *attr;
	struct mft_inode *mi;
	CLST rno;
	u64 vbo;
	struct rb_node *node;
	int err;
	bool is_mft, is_mft_data;
	struct ntfs_sb_info *sbi = ni->mi.sbi;

	is_mft = ni->mi.rno == MFT_REC_MFT;
	is_mft_data = is_mft && type == ATTR_DATA && !name_len;

	if (asize > sbi->max_bytes_per_attr) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Standard information and attr_list cannot be made external.
	 * The Log File cannot have any external attributes.
	 */
	if (type == ATTR_STD || type == ATTR_LIST ||
	    ni->mi.rno == MFT_REC_LOG) {
		err = -EINVAL;
		goto out;
	}

	/* Create attribute list if it does not exist yet */
	if (!ni->attr_list.size) {
		err = ni_create_attr_list(ni);
		if (err)
			goto out;
	}

	vbo = is_mft_data ? ((u64)svcn << sbi->cluster_bits) : 0;

	if (force_ext)
		goto insert_ext;

	/* Load all subrecords into memory. */
	err = ni_load_all_mi(ni);
	if (err)
		goto out;

	/* Check each of the loaded subrecords */
	for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) {
		mi = rb_entry(node, struct mft_inode, node);

		if (is_mft_data &&
		    (mi_enum_attr(mi, NULL) ||
		     vbo <= ((u64)mi->rno << sbi->record_bits))) {
			/* We can't accept this record 'cause of MFT's bootstrapping */
			continue;
		}
		if (is_mft &&
		    mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, NULL)) {
			/*
			 * This child record already has an ATTR_DATA.
			 * So it can't accept any other records.
			 */
			continue;
		}

		if ((type != ATTR_NAME || name_len) &&
		    mi_find_attr(mi, NULL, type, name, name_len, NULL)) {
			/* Only indexed attributes can share the same record */
			continue;
		}

		/* Try to insert attribute into this subrecord */
		attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize,
				       name_off, svcn);
		if (!attr)
			continue;

		if (ins_attr)
			*ins_attr = attr;
		return 0;
	}

insert_ext:
	/* We have to allocate a new child subrecord */
	err = ntfs_look_free_mft(sbi, &rno, is_mft_data, ni, &mi);
	if (err)
		goto out;

	if (is_mft_data && vbo <= ((u64)rno << sbi->record_bits)) {
		err = -EINVAL;
		goto out1;
	}

	attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize,
			       name_off, svcn);
	if (!attr)
		goto out2;

	if (ins_attr)
		*ins_attr = attr;
	if (ins_mi)
		*ins_mi = mi;

	return 0;

out2:
	ni_remove_mi(ni, mi);
	mi_put(mi);
	err = -EINVAL;

out1:
	ntfs_mark_rec_free(sbi, rno);

out:
	return err;
}

/*
 * ni_insert_attr
 *
 * inserts an attribute into the file.
 *
 * If the primary record has room, it will just insert the attribute.
 * If not, it may make the attribute external.
 * For $MFT::Data it may make room for the attribute by
 * making other attributes external.
 *
 * NOTE:
 * The ATTR_LIST and ATTR_STD cannot be made external.
 * This function does not fill the new attribute fully;
 * it only fills the 'size'/'type'/'id'/'name_len' fields.
 */
static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
			  const __le16 *name, u8 name_len, u32 asize,
			  u16 name_off, CLST svcn, struct ATTRIB **ins_attr,
			  struct mft_inode **ins_mi)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	int err;
	struct ATTRIB *attr, *eattr;
	struct MFT_REC *rec;
	bool is_mft;
	struct ATTR_LIST_ENTRY *le;
	u32 list_reserve, max_free, free, used, t32;
	__le16 id;
	u16 t16;

	is_mft = ni->mi.rno == MFT_REC_MFT;
	rec = ni->mi.mrec;

	list_reserve = SIZEOF_NONRESIDENT + 3 * (1 + 2 * sizeof(u32));
	used = le32_to_cpu(rec->used);
	free = sbi->record_size - used;

	if (is_mft && type != ATTR_LIST) {
		/* Reserve space for the ATTRIB List. */
		if (free < list_reserve)
			free = 0;
		else
			free -= list_reserve;
	}

	if (asize <= free) {
		attr = ni_ins_new_attr(ni, &ni->mi, NULL, type, name, name_len,
				       asize, name_off, svcn);
		if (attr) {
			if (ins_attr)
				*ins_attr = attr;
			if (ins_mi)
				*ins_mi = &ni->mi;
			err = 0;
			goto out;
		}
	}

	if (!is_mft || type != ATTR_DATA || svcn) {
		/* This ATTRIB will be external. */
		err = ni_ins_attr_ext(ni, NULL, type, name, name_len, asize,
				      svcn, name_off, false, ins_attr, ins_mi);
		goto out;
	}

	/*
	 * Here we have: is_mft && type == ATTR_DATA && !svcn
	 *
	 * The first chunk of the $MFT::Data ATTRIB must be the base record.
	 * Evict as many other attributes as possible.
	 */
	max_free = free;

	/* Estimate the result of moving all possible attributes away. */
	attr = NULL;

	while ((attr = mi_enum_attr(&ni->mi, attr))) {
		if (attr->type == ATTR_STD)
			continue;
		if (attr->type == ATTR_LIST)
			continue;
		max_free += le32_to_cpu(attr->size);
	}

	if (max_free < asize + list_reserve) {
		/* Impossible to insert this attribute into primary record */
		err = -EINVAL;
		goto out;
	}

	/* Start real attribute moving */
	attr = NULL;

	for (;;) {
		attr = mi_enum_attr(&ni->mi, attr);
		if (!attr) {
			/* We should never be here 'cause we have already checked this case */
			err = -EINVAL;
			goto out;
		}

		/* Skip attributes that MUST stay in the primary record */
		if (attr->type == ATTR_STD || attr->type == ATTR_LIST)
			continue;

		le = NULL;
		if (ni->attr_list.size) {
			le = al_find_le(ni, NULL, attr);
			if (!le) {
				/* Really this is a serious bug */
				err = -EINVAL;
				goto out;
			}
		}

		t32 = le32_to_cpu(attr->size);
		t16 = le16_to_cpu(attr->name_off);
		err = ni_ins_attr_ext(ni, le, attr->type, Add2Ptr(attr, t16),
				      attr->name_len, t32, attr_svcn(attr), t16,
				      false, &eattr, NULL);
		if (err)
			return err;

		id = eattr->id;
		memcpy(eattr, attr, t32);
		eattr->id = id;

		/* remove attrib from primary record */
		mi_remove_attr(&ni->mi, attr);

		/* attr now points to next attribute */
		if (attr->type == ATTR_END)
			goto out;
	}
	while (asize + list_reserve > sbi->record_size - le32_to_cpu(rec->used))
		;

	attr = ni_ins_new_attr(ni, &ni->mi, NULL, type, name, name_len, asize,
			       name_off, svcn);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	if (ins_attr)
		*ins_attr = attr;
	if (ins_mi)
		*ins_mi = &ni->mi;

out:
	return err;
}

/*
 * ni_expand_mft_list
 *
 * This method splits ATTR_DATA of $MFT
 */
static int ni_expand_mft_list(struct ntfs_inode *ni)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	u32 asize, run_size, done = 0;
	struct ATTRIB *attr;
	struct rb_node *node;
	CLST mft_min, mft_new, svcn, evcn, plen;
	struct mft_inode *mi, *mi_min, *mi_new;
	struct ntfs_sb_info *sbi = ni->mi.sbi;

	/* Find the nearest Mft */
	mft_min = 0;
	mft_new = 0;
	mi_min = NULL;

	for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) {
		mi = rb_entry(node, struct mft_inode, node);

		attr = mi_enum_attr(mi, NULL);

		if (!attr) {
			mft_min = mi->rno;
			mi_min = mi;
			break;
		}
	}

	if (ntfs_look_free_mft(sbi, &mft_new, true, ni, &mi_new)) {
		mft_new = 0;
		// really this is not critical
	} else if (mft_min > mft_new) {
		mft_min = mft_new;
		mi_min = mi_new;
	} else {
		ntfs_mark_rec_free(sbi, mft_new);
		mft_new = 0;
		ni_remove_mi(ni, mi_new);
	}

	attr = mi_find_attr(&ni->mi, NULL, ATTR_DATA, NULL, 0, NULL);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	asize = le32_to_cpu(attr->size);

	evcn = le64_to_cpu(attr->nres.evcn);
	svcn = bytes_to_cluster(sbi, (u64)(mft_min + 1) << sbi->record_bits);
	if (evcn + 1 >= svcn) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Split the primary attribute [0 evcn] in two parts [0 svcn) + [svcn evcn].
	 *
	 * Update the first part of ATTR_DATA in the primary MFT record.
	 */
	err = run_pack(run, 0, svcn, Add2Ptr(attr, SIZEOF_NONRESIDENT),
		       asize - SIZEOF_NONRESIDENT, &plen);
	if (err < 0)
		goto out;

	run_size = ALIGN(err, 8);
	err = 0;

	if (plen < svcn) {
		err = -EINVAL;
		goto out;
	}

	attr->nres.evcn = cpu_to_le64(svcn - 1);
	attr->size = cpu_to_le32(run_size + SIZEOF_NONRESIDENT);
	/* 'done' - how many bytes of the primary MFT record become free */
	done = asize - run_size - SIZEOF_NONRESIDENT;
	le32_sub_cpu(&ni->mi.mrec->used, done);

	/* Estimate the size of the second part: run_buf=NULL */
	err = run_pack(run, svcn, evcn + 1 - svcn, NULL, sbi->record_size,
		       &plen);
	if (err < 0)
		goto out;

	run_size = ALIGN(err, 8);
	err = 0;

	if (plen < evcn + 1 - svcn) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * This function may implicitly call expand attr_list.
	 * Insert the second part of ATTR_DATA in 'mi_min'.
	 */
	attr = ni_ins_new_attr(ni, mi_min, NULL, ATTR_DATA, NULL, 0,
			       SIZEOF_NONRESIDENT + run_size,
			       SIZEOF_NONRESIDENT, svcn);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	attr->non_res = 1;
	attr->name_off = SIZEOF_NONRESIDENT_LE;
	attr->flags = 0;

	run_pack(run, svcn, evcn + 1 - svcn, Add2Ptr(attr, SIZEOF_NONRESIDENT),
		 run_size, &plen);

	attr->nres.svcn = cpu_to_le64(svcn);
	attr->nres.evcn = cpu_to_le64(evcn);
	attr->nres.run_off = cpu_to_le16(SIZEOF_NONRESIDENT);

out:
	if (mft_new) {
		ntfs_mark_rec_free(sbi, mft_new);
		ni_remove_mi(ni, mi_new);
	}

	return !err && !done ? -EOPNOTSUPP : err;
}

/*
 * ni_expand_list
 *
 * This method moves all possible attributes out of the primary record
 */
int ni_expand_list(struct ntfs_inode *ni)
{
	int err = 0;
	u32 asize, done = 0;
	struct ATTRIB *attr, *ins_attr;
	struct ATTR_LIST_ENTRY *le;
	bool is_mft = ni->mi.rno == MFT_REC_MFT;
	struct MFT_REF ref;

	mi_get_ref(&ni->mi, &ref);
	le = NULL;

	while ((le = al_enumerate(ni, le))) {
		if (le->type == ATTR_STD)
			continue;

		if (memcmp(&ref, &le->ref, sizeof(struct MFT_REF)))
			continue;

		if (is_mft && le->type == ATTR_DATA)
			continue;

		/* Find attribute in primary record */
		attr = rec_find_attr_le(&ni->mi, le);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

		asize = le32_to_cpu(attr->size);

		/* Always insert into a new record to avoid collisions (deep recursion) */
		err = ni_ins_attr_ext(ni, le, attr->type, attr_name(attr),
				      attr->name_len, asize, attr_svcn(attr),
				      le16_to_cpu(attr->name_off), true,
				      &ins_attr, NULL);

		if (err)
			goto out;

		memcpy(ins_attr, attr, asize);
		ins_attr->id = le->id;
		mi_remove_attr(&ni->mi, attr);

		done += asize;
		goto out;
	}

	if (!is_mft) {
		err = -EFBIG; /* attr list is too big(?) */
		goto out;
	}

	/* split mft data as much as possible */
	err = ni_expand_mft_list(ni);
	if (err)
		goto out;

out:
	return !err && !done ? -EOPNOTSUPP : err;
}

/*
 * ni_insert_nonresident
 *
 * inserts new nonresident attribute
 */
int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
			  const __le16 *name, u8 name_len,
			  const struct runs_tree *run, CLST svcn, CLST len,
			  __le16 flags, struct ATTRIB **new_attr,
			  struct mft_inode **mi)
{
	int err;
	CLST plen;
	struct ATTRIB *attr;
	bool is_ext =
		(flags & (ATTR_FLAG_SPARSED | ATTR_FLAG_COMPRESSED)) && !svcn;
	u32 name_size = ALIGN(name_len * sizeof(short), 8);
	u32 name_off = is_ext ? SIZEOF_NONRESIDENT_EX : SIZEOF_NONRESIDENT;
	u32 run_off = name_off + name_size;
	u32 run_size, asize;
	struct ntfs_sb_info *sbi = ni->mi.sbi;

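	/* Estimate the packed size of the runs first (a NULL run_buf only measures). */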
	err = run_pack(run, svcn, len, NULL, sbi->max_bytes_per_attr - run_off,
		       &plen);
	if (err < 0)
		goto out;

	run_size = ALIGN(err, 8);

	if (plen < len) {
		err = -EINVAL;
		goto out;
	}

	asize = run_off + run_size;

	if (asize > sbi->max_bytes_per_attr) {
		err = -EINVAL;
		goto out;
	}

	err = ni_insert_attr(ni, type, name, name_len, asize, name_off, svcn,
			     &attr, mi);

	if (err)
		goto out;

	attr->non_res = 1;
	attr->name_off = cpu_to_le16(name_off);
	attr->flags = flags;

	run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size, &plen);

	attr->nres.svcn = cpu_to_le64(svcn);
	attr->nres.evcn = cpu_to_le64((u64)svcn + len - 1);

	err = 0;
	if (new_attr)
		*new_attr = attr;

	*(__le64 *)&attr->nres.run_off = cpu_to_le64(run_off);

	attr->nres.alloc_size =
		svcn ? 0 : cpu_to_le64((u64)len << ni->mi.sbi->cluster_bits);
	attr->nres.data_size = attr->nres.alloc_size;
	attr->nres.valid_size = attr->nres.alloc_size;

	if (is_ext) {
		if (flags & ATTR_FLAG_COMPRESSED)
			attr->nres.c_unit = COMPRESSION_UNIT;
		attr->nres.total_size = attr->nres.alloc_size;
	}

out:
	return err;
}

/*
 * ni_insert_resident
 *
 * inserts new resident attribute
 */
int ni_insert_resident(struct ntfs_inode *ni, u32 data_size,
		       enum ATTR_TYPE type, const __le16 *name, u8 name_len,
		       struct ATTRIB **new_attr, struct mft_inode **mi)
{
	int err;
	u32 name_size = ALIGN(name_len * sizeof(short), 8);
	u32 asize = SIZEOF_RESIDENT + name_size + ALIGN(data_size, 8);
	struct ATTRIB *attr;

	err = ni_insert_attr(ni, type, name, name_len, asize, SIZEOF_RESIDENT,
			     0, &attr, mi);
	if (err)
		return err;

	attr->non_res = 0;
	attr->flags = 0;

	attr->res.data_size = cpu_to_le32(data_size);
	attr->res.data_off = cpu_to_le16(SIZEOF_RESIDENT + name_size);
	if (type == ATTR_NAME)
		attr->res.flags = RESIDENT_FLAG_INDEXED;
	attr->res.res = 0;

	if (new_attr)
		*new_attr = attr;

	return 0;
}

/*
 * ni_remove_attr_le
 *
 * removes attribute from record
 */
int ni_remove_attr_le(struct ntfs_inode *ni, struct ATTRIB *attr,
		      struct ATTR_LIST_ENTRY *le)
{
	int err;
	struct mft_inode *mi;

	err = ni_load_mi(ni, le, &mi);
	if (err)
		return err;

	mi_remove_attr(mi, attr);

	if (le)
		al_remove_le(ni, le);

	return 0;
}

/*
 * ni_delete_all
 *
 * removes all attributes and frees allocated space
 * ntfs_evict_inode->ntfs_clear_inode->ni_delete_all (if no links)
 */
int ni_delete_all(struct ntfs_inode *ni)
{
	int err;
	struct ATTR_LIST_ENTRY *le = NULL;
	struct ATTRIB *attr = NULL;
	struct rb_node *node;
	u16 roff;
	u32 asize;
	CLST svcn, evcn;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	bool nt3 = is_ntfs3(sbi);
	struct MFT_REF ref;

	while ((attr = ni_enum_attr_ex(ni, attr, &le, NULL))) {
		if (!nt3 || attr->name_len) {
			;
		} else if (attr->type == ATTR_REPARSE) {
			mi_get_ref(&ni->mi, &ref);
			ntfs_remove_reparse(sbi, 0, &ref);
		} else if (attr->type == ATTR_ID && !attr->non_res &&
			   le32_to_cpu(attr->res.data_size) >=
				   sizeof(struct GUID)) {
			ntfs_objid_remove(sbi, resident_data(attr));
		}

		if (!attr->non_res)
			continue;

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);

		if (evcn + 1 <= svcn)
			continue;

		asize = le32_to_cpu(attr->size);
		roff = le16_to_cpu(attr->nres.run_off);

		/* RUN_DEALLOCATE means: unpack the runs and deallocate the clusters */
		run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
			      Add2Ptr(attr, roff), asize - roff);
	}

	if (ni->attr_list.size) {
		run_deallocate(ni->mi.sbi, &ni->attr_list.run, true);
		al_destroy(ni);
	}

	/* Free all subrecords */
	for (node = rb_first(&ni->mi_tree); node;) {
		struct rb_node *next = rb_next(node);
		struct mft_inode *mi = rb_entry(node, struct mft_inode, node);

		clear_rec_inuse(mi->mrec);
		mi->dirty = true;
		mi_write(mi, 0);

		ntfs_mark_rec_free(sbi, mi->rno);
		ni_remove_mi(ni, mi);
		mi_put(mi);
		node = next;
	}

	// Free base record
	clear_rec_inuse(ni->mi.mrec);
	ni->mi.dirty = true;
	err = mi_write(&ni->mi, 0);

	ntfs_mark_rec_free(sbi, ni->mi.rno);

	return err;
}

/*
 * ni_fname_name
 *
 * returns file name attribute by its value
 */
struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni,
				     const struct cpu_str *uni,
				     const struct MFT_REF *home_dir,
				     struct ATTR_LIST_ENTRY **le)
{
	struct ATTRIB *attr = NULL;
	struct ATTR_FILE_NAME *fname;

	*le = NULL;

	/* Enumerate all names */
next:
	attr = ni_find_attr(ni, attr, le, ATTR_NAME, NULL, 0, NULL, NULL);
	if (!attr)
		return NULL;

	fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
	if (!fname)
		goto next;

	if (home_dir && memcmp(home_dir, &fname->home, sizeof(*home_dir)))
		goto next;

	if (!uni)
		goto next;

	if (uni->len != fname->name_len)
		goto next;

	if (ntfs_cmp_names_cpu(uni, (struct le_str *)&fname->name_len, NULL,
			       false))
		goto next;

	return fname;
}

/*
 * ni_fname_type
 *
 * returns file name attribute with given type
 */
struct ATTR_FILE_NAME *ni_fname_type(struct ntfs_inode *ni, u8 name_type,
				     struct ATTR_LIST_ENTRY **le)
{
	struct ATTRIB *attr = NULL;
	struct ATTR_FILE_NAME *fname;

	*le = NULL;

	/* Enumerate all names */
	for (;;) {
		attr = ni_find_attr(ni, attr, le, ATTR_NAME, NULL, 0, NULL,
				    NULL);
		if (!attr)
			return NULL;

		fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
		if (fname && name_type == fname->type)
			return fname;
	}
}

/*
|
|
|
|
* Process compressed/sparsed in special way
|
|
|
|
* NOTE: you need to set ni->std_fa = new_fa
|
|
|
|
* after this function to keep internal structures in consistency
|
|
|
|
*/
|
|
|
|
int ni_new_attr_flags(struct ntfs_inode *ni, enum FILE_ATTRIBUTE new_fa)
|
|
|
|
{
|
|
|
|
struct ATTRIB *attr;
|
|
|
|
struct mft_inode *mi;
|
|
|
|
__le16 new_aflags;
|
|
|
|
u32 new_asize;
|
|
|
|
|
|
|
|
attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
|
|
|
|
if (!attr)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
new_aflags = attr->flags;
|
|
|
|
|
|
|
|
if (new_fa & FILE_ATTRIBUTE_SPARSE_FILE)
|
|
|
|
new_aflags |= ATTR_FLAG_SPARSED;
|
|
|
|
else
|
|
|
|
new_aflags &= ~ATTR_FLAG_SPARSED;
|
|
|
|
|
|
|
|
if (new_fa & FILE_ATTRIBUTE_COMPRESSED)
|
|
|
|
new_aflags |= ATTR_FLAG_COMPRESSED;
|
|
|
|
else
|
|
|
|
new_aflags &= ~ATTR_FLAG_COMPRESSED;
|
|
|
|
|
|
|
|
if (new_aflags == attr->flags)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if ((new_aflags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) ==
|
|
|
|
(ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) {
|
|
|
|
ntfs_inode_warn(&ni->vfs_inode,
|
|
|
|
"file can't be sparsed and compressed");
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!attr->non_res)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (attr->nres.data_size) {
|
|
|
|
ntfs_inode_warn(
|
|
|
|
&ni->vfs_inode,
|
|
|
|
"one can change sparsed/compressed only for empty files");
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* resize nonresident empty attribute in-place only*/
|
|
|
|
new_asize = (new_aflags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED))
|
|
|
|
? (SIZEOF_NONRESIDENT_EX + 8)
|
|
|
|
: (SIZEOF_NONRESIDENT + 8);
	if (!mi_resize_attr(mi, attr, new_asize - le32_to_cpu(attr->size)))
		return -EOPNOTSUPP;

	if (new_aflags & ATTR_FLAG_SPARSED) {
		attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
		/* windows uses 16 clusters per frame but supports one cluster per frame too */
		attr->nres.c_unit = 0;
		ni->vfs_inode.i_mapping->a_ops = &ntfs_aops;
	} else if (new_aflags & ATTR_FLAG_COMPRESSED) {
		attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
		/* the only allowed: 16 clusters per frame */
		attr->nres.c_unit = NTFS_LZNT_CUNIT;
		ni->vfs_inode.i_mapping->a_ops = &ntfs_aops_cmpr;
	} else {
		attr->name_off = SIZEOF_NONRESIDENT_LE;
		/* normal files */
		attr->nres.c_unit = 0;
		ni->vfs_inode.i_mapping->a_ops = &ntfs_aops;
	}
	attr->nres.run_off = attr->name_off;
out:
	attr->flags = new_aflags;
	mi->dirty = true;

	return 0;
}

/*
 * ni_parse_reparse
 *
 * buffer is at least 24 bytes
 */
enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
				   void *buffer)
{
	const struct REPARSE_DATA_BUFFER *rp = NULL;
	u8 bits;
	u16 len;
	typeof(rp->CompressReparseBuffer) *cmpr;

	static_assert(sizeof(struct REPARSE_DATA_BUFFER) <= 24);

	/* Try to estimate reparse point */
	if (!attr->non_res) {
		rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER));
	} else if (le64_to_cpu(attr->nres.data_size) >=
		   sizeof(struct REPARSE_DATA_BUFFER)) {
		struct runs_tree run;

		run_init(&run);

		if (!attr_load_runs_vcn(ni, ATTR_REPARSE, NULL, 0, &run, 0) &&
		    !ntfs_read_run_nb(ni->mi.sbi, &run, 0, buffer,
				      sizeof(struct REPARSE_DATA_BUFFER),
				      NULL)) {
			rp = buffer;
		}

		run_close(&run);
	}

	if (!rp)
		return REPARSE_NONE;

	len = le16_to_cpu(rp->ReparseDataLength);
	switch (rp->ReparseTag) {
	case (IO_REPARSE_TAG_MICROSOFT | IO_REPARSE_TAG_SYMBOLIC_LINK):
		break; /* Symbolic link */
	case IO_REPARSE_TAG_MOUNT_POINT:
		break; /* Mount points and junctions */
	case IO_REPARSE_TAG_SYMLINK:
		break;
	case IO_REPARSE_TAG_COMPRESS:
		/*
		 * WOF - Windows Overlay Filter - Used to compress files with
		 * LZX/Xpress.
		 *
		 * Unlike native NTFS file compression, the Windows
		 * Overlay Filter supports only read operations. This means
		 * that it doesn't need to sector-align each compressed chunk,
		 * so the compressed data can be packed more tightly together.
		 * If you open the file for writing, the WOF just decompresses
		 * the entire file, turning it back into a plain file.
		 *
		 * Ntfs3 driver decompresses the entire file only on write or
		 * change size requests.
		 */

		cmpr = &rp->CompressReparseBuffer;
		if (len < sizeof(*cmpr) ||
		    cmpr->WofVersion != WOF_CURRENT_VERSION ||
		    cmpr->WofProvider != WOF_PROVIDER_SYSTEM ||
		    cmpr->ProviderVer != WOF_PROVIDER_CURRENT_VERSION) {
			return REPARSE_NONE;
		}

		switch (cmpr->CompressionFormat) {
		case WOF_COMPRESSION_XPRESS4K:
			bits = 0xc; // 4k
			break;
		case WOF_COMPRESSION_XPRESS8K:
			bits = 0xd; // 8k
			break;
		case WOF_COMPRESSION_XPRESS16K:
			bits = 0xe; // 16k
			break;
		case WOF_COMPRESSION_LZX32K:
			bits = 0xf; // 32k
			break;
		default:
			bits = 0x10; // 64k
			break;
		}
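
		/*
		 * Illustrative note: the stored value is the log2 of the
		 * uncompressed frame size, so e.g. bits = 0xe means frames of
		 * 1u << 0xe = 16K, and the default 0x10 means 64K frames.
		 */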
		ni_set_ext_compress_bits(ni, bits);
		return REPARSE_COMPRESSED;

	case IO_REPARSE_TAG_DEDUP:
		ni->ni_flags |= NI_FLAG_DEDUPLICATED;
		return REPARSE_DEDUPLICATED;

	default:
		if (rp->ReparseTag & IO_REPARSE_TAG_NAME_SURROGATE)
			break;

		return REPARSE_NONE;
	}

	/* Looks like normal symlink */
	return REPARSE_LINK;
}

/*
 * Helper for file_fiemap(). Assumes ni_lock is held.
 * TODO: less aggressive locks
 */
int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
	      __u64 vbo, __u64 len)
{
	int err = 0;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	struct runs_tree *run;
	struct rw_semaphore *run_lock;
	struct ATTRIB *attr;
	CLST vcn = vbo >> cluster_bits;
	CLST lcn, clen;
	u64 valid = ni->i_valid;
	u64 lbo, bytes;
	u64 end, alloc_size;
	size_t idx = -1;
	u32 flags;
	bool ok;

	if (S_ISDIR(ni->vfs_inode.i_mode)) {
		run = &ni->dir.alloc_run;
		attr = ni_find_attr(ni, NULL, NULL, ATTR_ALLOC, I30_NAME,
				    ARRAY_SIZE(I30_NAME), NULL, NULL);
		run_lock = &ni->dir.run_lock;
	} else {
		run = &ni->file.run;
		attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
				    NULL);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		if (is_attr_compressed(attr)) {
			/* unfortunately cp -r incorrectly treats compressed clusters */
			err = -EOPNOTSUPP;
			ntfs_inode_warn(
				&ni->vfs_inode,
				"fiemap is not supported for compressed file (cp -r)");
			goto out;
		}
		run_lock = &ni->file.run_lock;
	}

	if (!attr || !attr->non_res) {
		err = fiemap_fill_next_extent(
			fieinfo, 0, 0,
			attr ? le32_to_cpu(attr->res.data_size) : 0,
			FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_LAST |
				FIEMAP_EXTENT_MERGED);
		goto out;
	}

	end = vbo + len;
	alloc_size = le64_to_cpu(attr->nres.alloc_size);
	if (end > alloc_size)
		end = alloc_size;

	down_read(run_lock);
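
	/*
	 * Walk the run list extent by extent: missing parts of the run are
	 * loaded on demand under the write lock, and each mapped extent is
	 * reported via fiemap_fill_next_extent(). An extent that crosses
	 * i_valid is split so its tail is reported as UNWRITTEN.
	 */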
	while (vbo < end) {
		if (idx == -1) {
			ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
		} else {
			CLST vcn_next = vcn;

			ok = run_get_entry(run, ++idx, &vcn, &lcn, &clen) &&
			     vcn == vcn_next;
			if (!ok)
				vcn = vcn_next;
		}

		if (!ok) {
			up_read(run_lock);
			down_write(run_lock);

			err = attr_load_runs_vcn(ni, attr->type,
						 attr_name(attr),
						 attr->name_len, run, vcn);

			up_write(run_lock);
			down_read(run_lock);

			if (err)
				break;

			ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);

			if (!ok) {
				err = -EINVAL;
				break;
			}
		}

		if (!clen) {
			err = -EINVAL; // ?
			break;
		}

		if (lcn == SPARSE_LCN) {
			vcn += clen;
			vbo = (u64)vcn << cluster_bits;
			continue;
		}

		flags = FIEMAP_EXTENT_MERGED;
		if (S_ISDIR(ni->vfs_inode.i_mode)) {
			;
		} else if (is_attr_compressed(attr)) {
			CLST clst_data;

			err = attr_is_frame_compressed(
				ni, attr, vcn >> attr->nres.c_unit, &clst_data);
			if (err)
				break;
			if (clst_data < NTFS_LZNT_CLUSTERS)
				flags |= FIEMAP_EXTENT_ENCODED;
		} else if (is_attr_encrypted(attr)) {
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
		}

		vbo = (u64)vcn << cluster_bits;
		bytes = (u64)clen << cluster_bits;
		lbo = (u64)lcn << cluster_bits;

		vcn += clen;

		if (vbo + bytes >= end) {
			bytes = end - vbo;
			flags |= FIEMAP_EXTENT_LAST;
		}

		if (vbo + bytes <= valid) {
			;
		} else if (vbo >= valid) {
			flags |= FIEMAP_EXTENT_UNWRITTEN;
		} else {
			/* vbo < valid && valid < vbo + bytes */
			u64 dlen = valid - vbo;

			err = fiemap_fill_next_extent(fieinfo, vbo, lbo, dlen,
						      flags);
			if (err < 0)
				break;
			if (err == 1) {
				err = 0;
				break;
			}

			vbo = valid;
			bytes -= dlen;
			if (!bytes)
				continue;

			lbo += dlen;
			flags |= FIEMAP_EXTENT_UNWRITTEN;
		}

		err = fiemap_fill_next_extent(fieinfo, vbo, lbo, bytes, flags);
		if (err < 0)
			break;
		if (err == 1) {
			err = 0;
			break;
		}

		vbo += bytes;
	}

	up_read(run_lock);

out:
	return err;
}

/*
 * When decompressing, we typically obtain more than one page per reference.
 * We inject the additional pages into the page cache.
 */
int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page)
{
	int err;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct address_space *mapping = page->mapping;
	pgoff_t index = page->index;
	u64 frame_vbo, vbo = (u64)index << PAGE_SHIFT;
	struct page **pages = NULL; /* array of at most 16 pages. stack? */
	u8 frame_bits;
	CLST frame;
	u32 i, idx, frame_size, pages_per_frame;
	gfp_t gfp_mask;
	struct page *pg;

	if (vbo >= ni->vfs_inode.i_size) {
		SetPageUptodate(page);
		err = 0;
		goto out;
	}

	if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
		/* xpress or lzx */
		frame_bits = ni_ext_compress_bits(ni);
	} else {
		/* lznt compression */
		frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
	}
	frame_size = 1u << frame_bits;
	frame = vbo >> frame_bits;
	frame_vbo = (u64)frame << frame_bits;
	idx = (vbo - frame_vbo) >> PAGE_SHIFT;
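
	/*
	 * Illustrative example: with 4K pages and a 64K frame, a page at
	 * vbo 0x5000 maps to frame 0 (frame_vbo 0) and becomes slot
	 * idx = 0x5000 >> PAGE_SHIFT = 5 in the 16-page frame array.
	 */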
	pages_per_frame = frame_size >> PAGE_SHIFT;
	pages = kcalloc(pages_per_frame, sizeof(struct page *), GFP_NOFS);
	if (!pages) {
		err = -ENOMEM;
		goto out;
	}

	pages[idx] = page;
	index = frame_vbo >> PAGE_SHIFT;
	gfp_mask = mapping_gfp_mask(mapping);

	for (i = 0; i < pages_per_frame; i++, index++) {
		if (i == idx)
			continue;

		pg = find_or_create_page(mapping, index, gfp_mask);
		if (!pg) {
			err = -ENOMEM;
			goto out1;
		}
		pages[i] = pg;
	}

	err = ni_read_frame(ni, frame_vbo, pages, pages_per_frame);

out1:
	if (err)
		SetPageError(page);

	for (i = 0; i < pages_per_frame; i++) {
		pg = pages[i];
		if (i == idx)
			continue;
		unlock_page(pg);
		put_page(pg);
	}

out:
	/* At this point, err contains 0 or -EIO depending on the "critical" page */
	kfree(pages);
	unlock_page(page);

	return err;
}

#ifdef CONFIG_NTFS3_LZX_XPRESS
/*
 * decompress lzx/xpress compressed file
 * remove ATTR_DATA::WofCompressedData
 * remove ATTR_REPARSE
 */
int ni_decompress_file(struct ntfs_inode *ni)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct inode *inode = &ni->vfs_inode;
	loff_t i_size = inode->i_size;
	struct address_space *mapping = inode->i_mapping;
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	struct page **pages = NULL;
	struct ATTR_LIST_ENTRY *le;
	struct ATTRIB *attr;
	CLST vcn, cend, lcn, clen, end;
	pgoff_t index;
	u64 vbo;
	u8 frame_bits;
	u32 i, frame_size, pages_per_frame, bytes;
	struct mft_inode *mi;
	int err;

	/* clusters for decompressed data */
	cend = bytes_to_cluster(sbi, i_size);

	if (!i_size)
		goto remove_wof;

	/* check in advance */
	if (cend > wnd_zeroes(&sbi->used.bitmap)) {
		err = -ENOSPC;
		goto out;
	}

	frame_bits = ni_ext_compress_bits(ni);
	frame_size = 1u << frame_bits;
	pages_per_frame = frame_size >> PAGE_SHIFT;
	pages = kcalloc(pages_per_frame, sizeof(struct page *), GFP_NOFS);
	if (!pages) {
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Step 1: decompress data and copy to new allocated clusters
	 */
	index = 0;
	for (vbo = 0; vbo < i_size; vbo += bytes) {
		u32 nr_pages;
		bool new;

		if (vbo + frame_size > i_size) {
			bytes = i_size - vbo;
			nr_pages = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
		} else {
			nr_pages = pages_per_frame;
			bytes = frame_size;
		}

		end = bytes_to_cluster(sbi, vbo + bytes);
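
		/*
		 * Make sure clusters for this frame are allocated before the
		 * decompressed data is written back; attr_data_get_block()
		 * allocates any missing blocks in the plain data run.
		 */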
		for (vcn = vbo >> sbi->cluster_bits; vcn < end; vcn += clen) {
			err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
						  &clen, &new);
			if (err)
				goto out;
		}

		for (i = 0; i < pages_per_frame; i++, index++) {
			struct page *pg;

			pg = find_or_create_page(mapping, index, gfp_mask);
			if (!pg) {
				while (i--) {
					unlock_page(pages[i]);
					put_page(pages[i]);
				}
				err = -ENOMEM;
				goto out;
			}
			pages[i] = pg;
		}

		err = ni_read_frame(ni, vbo, pages, pages_per_frame);

		if (!err) {
			down_read(&ni->file.run_lock);
			err = ntfs_bio_pages(sbi, &ni->file.run, pages,
					     nr_pages, vbo, bytes,
					     REQ_OP_WRITE);
			up_read(&ni->file.run_lock);
		}

		for (i = 0; i < pages_per_frame; i++) {
			unlock_page(pages[i]);
			put_page(pages[i]);
		}

		if (err)
			goto out;

		cond_resched();
	}

remove_wof:
	/*
	 * Step 2: deallocate attributes ATTR_DATA::WofCompressedData and ATTR_REPARSE
	 */
	attr = NULL;
	le = NULL;
	while ((attr = ni_enum_attr_ex(ni, attr, &le, NULL))) {
		CLST svcn, evcn;
		u32 asize, roff;

		if (attr->type == ATTR_REPARSE) {
			struct MFT_REF ref;

			mi_get_ref(&ni->mi, &ref);
			ntfs_remove_reparse(sbi, 0, &ref);
		}

		if (!attr->non_res)
			continue;

		if (attr->type != ATTR_REPARSE &&
		    (attr->type != ATTR_DATA ||
		     attr->name_len != ARRAY_SIZE(WOF_NAME) ||
		     memcmp(attr_name(attr), WOF_NAME, sizeof(WOF_NAME))))
			continue;

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);

		if (evcn + 1 <= svcn)
			continue;

		asize = le32_to_cpu(attr->size);
		roff = le16_to_cpu(attr->nres.run_off);

		/* run==1 means unpack and deallocate */
		run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
			      Add2Ptr(attr, roff), asize - roff);
	}

	/*
	 * Step 3: remove attribute ATTR_DATA::WofCompressedData
	 */
	err = ni_remove_attr(ni, ATTR_DATA, WOF_NAME, ARRAY_SIZE(WOF_NAME),
			     false, NULL);
	if (err)
		goto out;

	/*
	 * Step 4: remove ATTR_REPARSE
	 */
	err = ni_remove_attr(ni, ATTR_REPARSE, NULL, 0, false, NULL);
	if (err)
		goto out;

	/*
	 * Step 5: remove sparse flag from data attribute
	 */
	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	if (attr->non_res && is_attr_sparsed(attr)) {
		/* sparsed attribute header is 8 bytes bigger than normal */
		struct MFT_REC *rec = mi->mrec;
		u32 used = le32_to_cpu(rec->used);
		u32 asize = le32_to_cpu(attr->size);
		u16 roff = le16_to_cpu(attr->nres.run_off);
		char *rbuf = Add2Ptr(attr, roff);

		memmove(rbuf - 8, rbuf, used - PtrOffset(rec, rbuf));
		attr->size = cpu_to_le32(asize - 8);
		attr->flags &= ~ATTR_FLAG_SPARSED;
		attr->nres.run_off = cpu_to_le16(roff - 8);
		attr->nres.c_unit = 0;
		rec->used = cpu_to_le32(used - 8);
		mi->dirty = true;
		ni->std_fa &= ~(FILE_ATTRIBUTE_SPARSE_FILE |
				FILE_ATTRIBUTE_REPARSE_POINT);

		mark_inode_dirty(inode);
	}

	/* clear cached flag */
	ni->ni_flags &= ~NI_FLAG_COMPRESSED_MASK;
	if (ni->file.offs_page) {
		put_page(ni->file.offs_page);
		ni->file.offs_page = NULL;
	}
	mapping->a_ops = &ntfs_aops;

out:
	kfree(pages);
	if (err) {
		make_bad_inode(inode);
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
	}

	return err;
}

/* external compression lzx/xpress */
static int decompress_lzx_xpress(struct ntfs_sb_info *sbi, const char *cmpr,
				 size_t cmpr_size, void *unc, size_t unc_size,
				 u32 frame_size)
{
	int err;
	void *ctx;

	if (cmpr_size == unc_size) {
		/* frame not compressed */
		memcpy(unc, cmpr, unc_size);
		return 0;
	}
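
	/*
	 * Note: per the frame sizes accepted in ni_read_frame(), only the
	 * 32K (0x8000) frame format is LZX; 4K/8K/16K frames take the
	 * XPRESS path below.
	 */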
	err = 0;
	if (frame_size == 0x8000) {
		mutex_lock(&sbi->compress.mtx_lzx);
		/* LZX: frame compressed */
		ctx = sbi->compress.lzx;
		if (!ctx) {
			/* Lazy initialize lzx decompress context */
			ctx = lzx_allocate_decompressor();
			if (!ctx) {
				err = -ENOMEM;
				goto out1;
			}

			sbi->compress.lzx = ctx;
		}

		if (lzx_decompress(ctx, cmpr, cmpr_size, unc, unc_size)) {
			/* treat all errors as "invalid argument" */
			err = -EINVAL;
		}
out1:
		mutex_unlock(&sbi->compress.mtx_lzx);
	} else {
		/* XPRESS: frame compressed */
		mutex_lock(&sbi->compress.mtx_xpress);
		ctx = sbi->compress.xpress;
		if (!ctx) {
			/* Lazy initialize xpress decompress context */
			ctx = xpress_allocate_decompressor();
			if (!ctx) {
				err = -ENOMEM;
				goto out2;
			}

			sbi->compress.xpress = ctx;
		}

		if (xpress_decompress(ctx, cmpr, cmpr_size, unc, unc_size)) {
			/* treat all errors as "invalid argument" */
			err = -EINVAL;
		}
out2:
		mutex_unlock(&sbi->compress.mtx_xpress);
	}
	return err;
}
#endif

/*
 * ni_read_frame
 *
 * pages - array of locked pages
 */
int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
		  u32 pages_per_frame)
{
	int err;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	char *frame_ondisk = NULL;
	char *frame_mem = NULL;
	struct page **pages_disk = NULL;
	struct ATTR_LIST_ENTRY *le = NULL;
	struct runs_tree *run = &ni->file.run;
	u64 valid_size = ni->i_valid;
	u64 vbo_disk;
	size_t unc_size;
	u32 frame_size, i, npages_disk, ondisk_size;
	struct page *pg;
	struct ATTRIB *attr;
	CLST frame, clst_data;

	/*
	 * To simplify decompress algorithm do vmap for source and target pages
	 */
	for (i = 0; i < pages_per_frame; i++)
		kmap(pages[i]);

	frame_size = pages_per_frame << PAGE_SHIFT;
	frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL);
	if (!frame_mem) {
		err = -ENOMEM;
		goto out;
	}

	attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, NULL);
	if (!attr) {
		err = -ENOENT;
		goto out1;
	}

	if (!attr->non_res) {
		u32 data_size = le32_to_cpu(attr->res.data_size);

		memset(frame_mem, 0, frame_size);
		if (frame_vbo < data_size) {
			ondisk_size = data_size - frame_vbo;
			memcpy(frame_mem, resident_data(attr) + frame_vbo,
			       min(ondisk_size, frame_size));
		}
		err = 0;
		goto out1;
	}

	if (frame_vbo >= valid_size) {
		memset(frame_mem, 0, frame_size);
		err = 0;
		goto out1;
	}

	if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
#ifndef CONFIG_NTFS3_LZX_XPRESS
		err = -EOPNOTSUPP;
		goto out1;
#else
		u32 frame_bits = ni_ext_compress_bits(ni);
		u64 frame64 = frame_vbo >> frame_bits;
		u64 frames, vbo_data;

		if (frame_size != (1u << frame_bits)) {
			err = -EINVAL;
			goto out1;
		}
		switch (frame_size) {
		case 0x1000:
		case 0x2000:
		case 0x4000:
		case 0x8000:
			break;
		default:
			/* unknown compression */
			err = -EOPNOTSUPP;
			goto out1;
		}

		attr = ni_find_attr(ni, attr, &le, ATTR_DATA, WOF_NAME,
				    ARRAY_SIZE(WOF_NAME), NULL, NULL);
		if (!attr) {
			ntfs_inode_err(
				&ni->vfs_inode,
				"external compressed file should contain data attribute \"WofCompressedData\"");
			err = -EINVAL;
			goto out1;
		}

		if (!attr->non_res) {
			run = NULL;
		} else {
			run = run_alloc();
			if (!run) {
				err = -ENOMEM;
				goto out1;
			}
		}

		frames = (ni->vfs_inode.i_size - 1) >> frame_bits;

		err = attr_wof_frame_info(ni, attr, run, frame64, frames,
					  frame_bits, &ondisk_size, &vbo_data);
		if (err)
			goto out2;

		if (frame64 == frames) {
			unc_size = 1 + ((ni->vfs_inode.i_size - 1) &
					(frame_size - 1));
			ondisk_size = attr_size(attr) - vbo_data;
		} else {
			unc_size = frame_size;
		}
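
		/*
		 * Illustrative example: for a 100000-byte file with 32K
		 * frames, the last frame (frame64 == frames == 3) holds only
		 * ((100000 - 1) & 32767) + 1 = 1696 bytes, while every other
		 * frame decompresses to the full 32K.
		 */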

		if (ondisk_size > frame_size) {
			err = -EINVAL;
			goto out2;
		}

		if (!attr->non_res) {
			if (vbo_data + ondisk_size >
			    le32_to_cpu(attr->res.data_size)) {
				err = -EINVAL;
				goto out1;
			}

			err = decompress_lzx_xpress(
				sbi, Add2Ptr(resident_data(attr), vbo_data),
				ondisk_size, frame_mem, unc_size, frame_size);
			goto out1;
		}
		vbo_disk = vbo_data;
		/* load all runs to read [vbo_disk-vbo_to) */
		err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
					   ARRAY_SIZE(WOF_NAME), run, vbo_disk,
					   vbo_data + ondisk_size);
		if (err)
			goto out2;
		npages_disk = (ondisk_size + (vbo_disk & (PAGE_SIZE - 1)) +
			       PAGE_SIZE - 1) >>
			      PAGE_SHIFT;
#endif
	} else if (is_attr_compressed(attr)) {
		/* lznt compression */
		if (sbi->cluster_size > NTFS_LZNT_MAX_CLUSTER) {
			err = -EOPNOTSUPP;
			goto out1;
		}

		if (attr->nres.c_unit != NTFS_LZNT_CUNIT) {
			err = -EOPNOTSUPP;
			goto out1;
		}

		down_write(&ni->file.run_lock);
		run_truncate_around(run, le64_to_cpu(attr->nres.svcn));
		frame = frame_vbo >> (cluster_bits + NTFS_LZNT_CUNIT);
		err = attr_is_frame_compressed(ni, attr, frame, &clst_data);
		up_write(&ni->file.run_lock);
		if (err)
			goto out1;

		if (!clst_data) {
			memset(frame_mem, 0, frame_size);
			goto out1;
		}

		frame_size = sbi->cluster_size << NTFS_LZNT_CUNIT;
		ondisk_size = clst_data << cluster_bits;
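
		/*
		 * An LZNT frame covers 16 clusters; if the frame still
		 * occupies at least NTFS_LZNT_CLUSTERS clusters on disk it is
		 * stored uncompressed and is read back directly below.
		 */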
		if (clst_data >= NTFS_LZNT_CLUSTERS) {
			/* frame is not compressed */
			down_read(&ni->file.run_lock);
			err = ntfs_bio_pages(sbi, run, pages, pages_per_frame,
					     frame_vbo, ondisk_size,
					     REQ_OP_READ);
			up_read(&ni->file.run_lock);
			goto out1;
		}
		vbo_disk = frame_vbo;
		npages_disk = (ondisk_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else {
		__builtin_unreachable();
		err = -EINVAL;
		goto out1;
	}

	pages_disk = kzalloc(npages_disk * sizeof(struct page *), GFP_NOFS);
	if (!pages_disk) {
		err = -ENOMEM;
		goto out2;
	}

	for (i = 0; i < npages_disk; i++) {
		pg = alloc_page(GFP_KERNEL);
		if (!pg) {
			err = -ENOMEM;
			goto out3;
		}
		pages_disk[i] = pg;
		lock_page(pg);
		kmap(pg);
	}

	/* read 'ondisk_size' bytes from disk */
	down_read(&ni->file.run_lock);
	err = ntfs_bio_pages(sbi, run, pages_disk, npages_disk, vbo_disk,
			     ondisk_size, REQ_OP_READ);
	up_read(&ni->file.run_lock);
	if (err)
		goto out3;

	/*
	 * To simplify decompress algorithm do vmap for source and target pages
	 */
	frame_ondisk = vmap(pages_disk, npages_disk, VM_MAP, PAGE_KERNEL_RO);
	if (!frame_ondisk) {
		err = -ENOMEM;
		goto out3;
	}

	/* decompress: frame_ondisk -> frame_mem */
#ifdef CONFIG_NTFS3_LZX_XPRESS
	if (run != &ni->file.run) {
		/* LZX or XPRESS */
		err = decompress_lzx_xpress(
			sbi, frame_ondisk + (vbo_disk & (PAGE_SIZE - 1)),
			ondisk_size, frame_mem, unc_size, frame_size);
	} else
#endif
	{
		/* LZNT - native ntfs compression */
		unc_size = decompress_lznt(frame_ondisk, ondisk_size, frame_mem,
					   frame_size);
		if ((ssize_t)unc_size < 0)
			err = unc_size;
		else if (!unc_size || unc_size > frame_size)
			err = -EINVAL;
	}
	if (!err && valid_size < frame_vbo + frame_size) {
		size_t ok = valid_size - frame_vbo;

		memset(frame_mem + ok, 0, frame_size - ok);
	}

	vunmap(frame_ondisk);

out3:
	for (i = 0; i < npages_disk; i++) {
		pg = pages_disk[i];
		if (pg) {
			kunmap(pg);
			unlock_page(pg);
			put_page(pg);
		}
	}
	kfree(pages_disk);

out2:
#ifdef CONFIG_NTFS3_LZX_XPRESS
	if (run != &ni->file.run)
		run_free(run);
#endif
out1:
	vunmap(frame_mem);
out:
	for (i = 0; i < pages_per_frame; i++) {
		pg = pages[i];
		kunmap(pg);
		ClearPageError(pg);
		SetPageUptodate(pg);
	}

	return err;
}

/*
 * ni_write_frame
 *
 * pages - array of locked pages
 */
int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
		   u32 pages_per_frame)
{
	int err;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
	u32 frame_size = sbi->cluster_size << NTFS_LZNT_CUNIT;
	u64 frame_vbo = (u64)pages[0]->index << PAGE_SHIFT;
	CLST frame = frame_vbo >> frame_bits;
	char *frame_ondisk = NULL;
	struct page **pages_disk = NULL;
	struct ATTR_LIST_ENTRY *le = NULL;
	char *frame_mem;
	struct ATTRIB *attr;
	struct mft_inode *mi;
	u32 i;
	struct page *pg;
	size_t compr_size, ondisk_size;
	struct lznt *lznt;

	attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, &mi);
	if (!attr) {
		err = -ENOENT;
		goto out;
	}

	if (WARN_ON(!is_attr_compressed(attr))) {
		err = -EINVAL;
		goto out;
	}

	if (sbi->cluster_size > NTFS_LZNT_MAX_CLUSTER) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (!attr->non_res) {
		down_write(&ni->file.run_lock);
		err = attr_make_nonresident(ni, attr, le, mi,
					    le32_to_cpu(attr->res.data_size),
					    &ni->file.run, &attr, pages[0]);
		up_write(&ni->file.run_lock);
		if (err)
			goto out;
	}

	if (attr->nres.c_unit != NTFS_LZNT_CUNIT) {
		err = -EOPNOTSUPP;
		goto out;
	}

	pages_disk = kcalloc(pages_per_frame, sizeof(struct page *), GFP_NOFS);
	if (!pages_disk) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < pages_per_frame; i++) {
		pg = alloc_page(GFP_KERNEL);
		if (!pg) {
			err = -ENOMEM;
			goto out1;
		}
		pages_disk[i] = pg;
		lock_page(pg);
		kmap(pg);
	}

	/*
	 * To simplify compress algorithm do vmap for source and target pages
	 */
	frame_ondisk = vmap(pages_disk, pages_per_frame, VM_MAP, PAGE_KERNEL);
	if (!frame_ondisk) {
		err = -ENOMEM;
		goto out1;
	}

	for (i = 0; i < pages_per_frame; i++)
		kmap(pages[i]);

	/* map in-memory frame for read-only */
	frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL_RO);
	if (!frame_mem) {
		err = -ENOMEM;
		goto out2;
	}

	mutex_lock(&sbi->compress.mtx_lznt);
	lznt = NULL;
	if (!sbi->compress.lznt) {
		/*
		 * lznt implements two levels of compression:
		 * 0 - standard compression
		 * 1 - best compression, requires a lot of cpu
		 * use mount option?
		 */
		lznt = get_lznt_ctx(0);
		if (!lznt) {
			mutex_unlock(&sbi->compress.mtx_lznt);
			err = -ENOMEM;
			goto out3;
		}

		sbi->compress.lznt = lznt;
		lznt = NULL;
	}

	/* compress: frame_mem -> frame_ondisk */
	compr_size = compress_lznt(frame_mem, frame_size, frame_ondisk,
				   frame_size, sbi->compress.lznt);
	mutex_unlock(&sbi->compress.mtx_lznt);
	kfree(lznt);

	if (compr_size + sbi->cluster_size > frame_size) {
		/* frame is not compressed */
		compr_size = frame_size;
		ondisk_size = frame_size;
	} else if (compr_size) {
		/* frame is compressed */
		ondisk_size = ntfs_up_cluster(sbi, compr_size);
		memset(frame_ondisk + compr_size, 0, ondisk_size - compr_size);
	} else {
		/* frame is sparsed */
		ondisk_size = 0;
	}
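
	/*
	 * Three outcomes: the frame did not shrink by at least one cluster
	 * (it is stored raw, a full frame on disk), it compressed (the size
	 * is rounded up to whole clusters and the tail zero-padded), or
	 * compress_lznt() reported an empty result (the frame stays sparse
	 * and nothing is written). attr_allocate_frame() below resizes the
	 * frame's cluster allocation to match.
	 */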
	down_write(&ni->file.run_lock);
	run_truncate_around(&ni->file.run, le64_to_cpu(attr->nres.svcn));
	err = attr_allocate_frame(ni, frame, compr_size, ni->i_valid);
	up_write(&ni->file.run_lock);
	if (err)
		goto out2;

	if (!ondisk_size)
		goto out2;

	down_read(&ni->file.run_lock);
	err = ntfs_bio_pages(sbi, &ni->file.run,
			     ondisk_size < frame_size ? pages_disk : pages,
			     pages_per_frame, frame_vbo, ondisk_size,
			     REQ_OP_WRITE);
	up_read(&ni->file.run_lock);

out3:
	vunmap(frame_mem);

out2:
	for (i = 0; i < pages_per_frame; i++)
		kunmap(pages[i]);

	vunmap(frame_ondisk);
out1:
	for (i = 0; i < pages_per_frame; i++) {
		pg = pages_disk[i];
		if (pg) {
			kunmap(pg);
			unlock_page(pg);
			put_page(pg);
		}
	}
	kfree(pages_disk);
out:
	return err;
}

/*
 * update duplicate info of ATTR_FILE_NAME in MFT and in parent directories
 */
static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
			     int sync)
{
	struct ATTRIB *attr;
	struct mft_inode *mi;
	struct ATTR_LIST_ENTRY *le = NULL;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct super_block *sb = sbi->sb;
	bool re_dirty = false;
	bool active = sb->s_flags & SB_ACTIVE;
	bool upd_parent = ni->ni_flags & NI_FLAG_UPDATE_PARENT;

	if (ni->mi.mrec->flags & RECORD_FLAG_DIR) {
		dup->fa |= FILE_ATTRIBUTE_DIRECTORY;
		attr = NULL;
		dup->alloc_size = 0;
		dup->data_size = 0;
	} else {
		dup->fa &= ~FILE_ATTRIBUTE_DIRECTORY;

		attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL,
				    &mi);
		if (!attr) {
			dup->alloc_size = dup->data_size = 0;
		} else if (!attr->non_res) {
			u32 data_size = le32_to_cpu(attr->res.data_size);

			dup->alloc_size = cpu_to_le64(ALIGN(data_size, 8));
			dup->data_size = cpu_to_le64(data_size);
		} else {
			u64 new_valid = ni->i_valid;
			u64 data_size = le64_to_cpu(attr->nres.data_size);
			__le64 valid_le;

			dup->alloc_size = is_attr_ext(attr)
						  ? attr->nres.total_size
						  : attr->nres.alloc_size;
			dup->data_size = attr->nres.data_size;

			if (new_valid > data_size)
				new_valid = data_size;

			valid_le = cpu_to_le64(new_valid);
			if (valid_le != attr->nres.valid_size) {
				attr->nres.valid_size = valid_le;
				mi->dirty = true;
			}
		}
	}

	/* TODO: fill reparse info */
	dup->reparse = 0;
	dup->ea_size = 0;

	if (ni->ni_flags & NI_FLAG_EA) {
		attr = ni_find_attr(ni, attr, &le, ATTR_EA_INFO, NULL, 0, NULL,
				    NULL);
		if (attr) {
			const struct EA_INFO *info;

			info = resident_data_ex(attr, sizeof(struct EA_INFO));
			dup->ea_size = info->size_pack;
		}
	}

	attr = NULL;
	le = NULL;
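
	/*
	 * Every hard link has its own $FILE_NAME attribute, so walk all of
	 * them and refresh the duplicated info in each parent directory
	 * index that can be locked right now; the rest are retried later
	 * via re_dirty.
	 */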
	while ((attr = ni_find_attr(ni, attr, &le, ATTR_NAME, NULL, 0, NULL,
				    &mi))) {
		struct inode *dir;
		struct ATTR_FILE_NAME *fname;

		fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
		if (!fname)
			continue;

		if (memcmp(&fname->dup, dup, sizeof(fname->dup))) {
			memcpy(&fname->dup, dup, sizeof(fname->dup));
			mi->dirty = true;
		} else if (!upd_parent) {
			continue;
		}

		if (!active)
			continue; /* avoid __wait_on_freeing_inode(inode); */

		/* ntfs_iget5 may sleep */
		dir = ntfs_iget5(sb, &fname->home, NULL);
		if (IS_ERR(dir)) {
			ntfs_inode_warn(
				&ni->vfs_inode,
				"failed to open parent directory r=%lx to update",
				(long)ino_get(&fname->home));
			continue;
		}

		if (!is_bad_inode(dir)) {
			struct ntfs_inode *dir_ni = ntfs_i(dir);

			if (!ni_trylock(dir_ni)) {
				re_dirty = true;
			} else {
				indx_update_dup(dir_ni, sbi, fname, dup, sync);
				ni_unlock(dir_ni);
			}
		}
		iput(dir);
	}

	return re_dirty;
}

/*
 * ni_write_inode
 *
 * write mft base record and all subrecords to disk
 */
int ni_write_inode(struct inode *inode, int sync, const char *hint)
{
	int err = 0, err2;
	struct ntfs_inode *ni = ntfs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	bool re_dirty = false;
	struct ATTR_STD_INFO *std;
	struct rb_node *node, *next;
	struct NTFS_DUP_INFO dup;

	if (is_bad_inode(inode) || sb_rdonly(sb))
		return 0;

	if (!ni_trylock(ni)) {
		/* 'ni' is under modification, skip for now */
		mark_inode_dirty_sync(inode);
		return 0;
	}

	if (is_rec_inuse(ni->mi.mrec) &&
	    !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING) && inode->i_nlink) {
		bool modified = false;

		/* update times in standard attribute */
		std = ni_std(ni);
		if (!std) {
			err = -EINVAL;
			goto out;
		}

		/* Update the access times if they have changed. */
		dup.m_time = kernel2nt(&inode->i_mtime);
		if (std->m_time != dup.m_time) {
			std->m_time = dup.m_time;
			modified = true;
		}

		dup.c_time = kernel2nt(&inode->i_ctime);
		if (std->c_time != dup.c_time) {
			std->c_time = dup.c_time;
			modified = true;
		}

		dup.a_time = kernel2nt(&inode->i_atime);
		if (std->a_time != dup.a_time) {
			std->a_time = dup.a_time;
			modified = true;
		}

		dup.fa = ni->std_fa;
		if (std->fa != dup.fa) {
			std->fa = dup.fa;
			modified = true;
		}

		if (modified)
			ni->mi.dirty = true;

		if (!ntfs_is_meta_file(sbi, inode->i_ino) &&
		    (modified || (ni->ni_flags & NI_FLAG_UPDATE_PARENT))) {
			dup.cr_time = std->cr_time;
			/* Not critical if this function fails */
			re_dirty = ni_update_parent(ni, &dup, sync);

			if (re_dirty)
				ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
			else
				ni->ni_flags &= ~NI_FLAG_UPDATE_PARENT;
		}

		/* update attribute list */
		if (ni->attr_list.size && ni->attr_list.dirty) {
			if (inode->i_ino != MFT_REC_MFT || sync) {
				err = ni_try_remove_attr_list(ni);
				if (err)
					goto out;
			}

			err = al_update(ni);
			if (err)
				goto out;
		}
	}

	for (node = rb_first(&ni->mi_tree); node; node = next) {
		struct mft_inode *mi = rb_entry(node, struct mft_inode, node);
		bool is_empty;

		next = rb_next(node);

		if (!mi->dirty)
			continue;

		is_empty = !mi_enum_attr(mi, NULL);

		if (is_empty)
			clear_rec_inuse(mi->mrec);

		err2 = mi_write(mi, sync);
		if (!err && err2)
			err = err2;

		if (is_empty) {
			ntfs_mark_rec_free(sbi, mi->rno);
			rb_erase(node, &ni->mi_tree);
			mi_put(mi);
		}
	}

	if (ni->mi.dirty) {
		err2 = mi_write(&ni->mi, sync);
		if (!err && err2)
			err = err2;
	}
out:
	ni_unlock(ni);

	if (err) {
		ntfs_err(sb, "%s r=%lx failed, %d.", hint, inode->i_ino, err);
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		return err;
	}

	if (re_dirty && (sb->s_flags & SB_ACTIVE))
		mark_inode_dirty_sync(inode);

	return 0;
}