Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-12-29 15:43:59 +08:00
Commit 0b61f8a407
Remove the verbose license text from XFS files and replace them with
SPDX tags. This does not change the license of any of the code, merely
refers to the common, up-to-date license files in LICENSES/

This change was mostly scripted. fs/xfs/Makefile and
fs/xfs/libxfs/xfs_fs.h were modified by hand, the rest were detected and
modified by the following command:

for f in `git grep -l "GNU General" fs/xfs/` ; do
	echo $f
	cat $f | awk -f hdr.awk > $f.new
	mv -f $f.new $f
done

And the hdr.awk script that did the modification (including detecting
the difference between GPL-2.0 and GPL-2.0+ licenses) is as follows:

$ cat hdr.awk
BEGIN {
	hdr = 1.0
	tag = "GPL-2.0"
	str = ""
}

/^ \* This program is free software/ {
	hdr = 2.0;
	next
}

/any later version./ {
	tag = "GPL-2.0+"
	next
}

/^ \*\// {
	if (hdr > 0.0) {
		print "// SPDX-License-Identifier: " tag
		print str
		print $0
		str=""
		hdr = 0.0
		next
	}
	print $0
	next
}

/^ \* / {
	if (hdr > 1.0)
		next

	if (hdr > 0.0) {
		if (str != "")
			str = str "\n"
		str = str $0
		next
	}
	print $0
	next
}

/^ \*/ {
	if (hdr > 0.0)
		next
	print $0
	next
}

// {
	if (hdr > 0.0) {
		if (str != "")
			str = str "\n"
		str = str $0
		next
	}
	print $0
}

END { }
$

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
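For illustration, this is the kind of rewrite hdr.awk performs. A typical
pre-conversion XFS header of the form below (the GPL boilerplate wording
here is representative, not quoted from a specific file):

/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 * [...]
 */

becomes the SPDX form that now opens the file shown below:

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */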
128 lines · 3.0 KiB · C
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * General memory allocation interfaces
 */

typedef unsigned __bitwise xfs_km_flags_t;
#define KM_SLEEP	((__force xfs_km_flags_t)0x0001u)
#define KM_NOSLEEP	((__force xfs_km_flags_t)0x0002u)
#define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)

/*
 * We use a special process flag to avoid recursive callbacks into
 * the filesystem during transactions. We will also issue our own
 * warnings, so we explicitly skip any generic ones (silly of us).
 */
static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
        gfp_t lflags;

        BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_ZERO));

        if (flags & KM_NOSLEEP) {
                lflags = GFP_ATOMIC | __GFP_NOWARN;
        } else {
                lflags = GFP_KERNEL | __GFP_NOWARN;
                if (flags & KM_NOFS)
                        lflags &= ~__GFP_FS;
        }

        /*
         * Default page/slab allocator behavior is to retry for ever
         * for small allocations. We can override this behavior by using
         * __GFP_RETRY_MAYFAIL which will tell the allocator to retry as long
         * as it is feasible but rather fail than retry forever for all
         * request sizes.
         */
        if (flags & KM_MAYFAIL)
                lflags |= __GFP_RETRY_MAYFAIL;

        if (flags & KM_ZERO)
                lflags |= __GFP_ZERO;

        return lflags;
}
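/*
 * Worked example (illustrative, not part of the original header):
 * kmem_flags_convert(KM_NOFS | KM_MAYFAIL) yields
 * (GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL) & ~__GFP_FS,
 * i.e. a sleeping allocation that avoids recursing into filesystem
 * reclaim, suppresses generic allocation warnings, and may fail.
 */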

extern void *kmem_alloc(size_t, xfs_km_flags_t);
extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
extern void *kmem_realloc(const void *, size_t, xfs_km_flags_t);
static inline void kmem_free(const void *ptr)
{
        kvfree(ptr);
}


static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
        return kmem_alloc(size, flags | KM_ZERO);
}

static inline void *
kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
{
        return kmem_alloc_large(size, flags | KM_ZERO);
}

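/*
 * Illustrative usage sketch (not part of the original header); the
 * structure name xfs_example is hypothetical. KM_MAYFAIL makes the
 * NULL check meaningful; without it small allocations retry until
 * they succeed (see the comment in kmem_flags_convert() above).
 * kmem_free() uses kvfree(), so it works for both kmalloc- and
 * vmalloc-backed allocations.
 *
 *	struct xfs_example	*ep;
 *
 *	ep = kmem_zalloc(sizeof(*ep), KM_SLEEP | KM_MAYFAIL);
 *	if (!ep)
 *		return -ENOMEM;
 *	...
 *	kmem_free(ep);
 */
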
/*
 * Zone interfaces
 */

#define KM_ZONE_HWALIGN	SLAB_HWCACHE_ALIGN
#define KM_ZONE_RECLAIM	SLAB_RECLAIM_ACCOUNT
#define KM_ZONE_SPREAD	SLAB_MEM_SPREAD
#define KM_ZONE_ACCOUNT	SLAB_ACCOUNT

#define kmem_zone	kmem_cache
#define kmem_zone_t	struct kmem_cache

static inline kmem_zone_t *
kmem_zone_init(int size, char *zone_name)
{
        return kmem_cache_create(zone_name, size, 0, 0, NULL);
}

static inline kmem_zone_t *
kmem_zone_init_flags(int size, char *zone_name, slab_flags_t flags,
                     void (*construct)(void *))
{
        return kmem_cache_create(zone_name, size, 0, flags, construct);
}

static inline void
kmem_zone_free(kmem_zone_t *zone, void *ptr)
{
        kmem_cache_free(zone, ptr);
}

static inline void
kmem_zone_destroy(kmem_zone_t *zone)
{
        kmem_cache_destroy(zone);
}

extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);

static inline void *
kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
{
        return kmem_zone_alloc(zone, flags | KM_ZERO);
}

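/*
 * Illustrative zone usage (not part of the original header); the zone
 * and structure names are hypothetical:
 *
 *	kmem_zone_t		*xfs_example_zone;
 *	struct xfs_example	*ep;
 *
 *	xfs_example_zone = kmem_zone_init(sizeof(struct xfs_example),
 *					  "xfs_example");
 *	ep = kmem_zone_zalloc(xfs_example_zone, KM_SLEEP);
 *	...
 *	kmem_zone_free(xfs_example_zone, ep);
 *	kmem_zone_destroy(xfs_example_zone);
 */
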
#endif /* __XFS_SUPPORT_KMEM_H__ */