btrfs-progs: kerncompat: add helpers for ctree.c sync

Add the helpers and stubbed-out functions needed to sync ctree.c from the
kernel into btrfs-progs.  These are various utilities the kernel
provides, the relocation- and qgroup-related stubs, and a trace point we
have in ctree.c.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
Josef Bacik 2023-08-23 10:32:51 -04:00 committed by David Sterba
parent 8c2e4578cd
commit a7c33f90b4
2 changed files with 82 additions and 0 deletions

View File

@ -201,6 +201,10 @@ typedef struct spinlock_struct {
unsigned long lock;
} spinlock_t;
/*
 * Minimal stand-in for the kernel's rw_semaphore.  No real locking is
 * performed here; the counter only mirrors the lock/unlock calls so the
 * kernel API can be used unchanged.
 */
struct rw_semaphore {
	long lock;
};
#define mutex_init(m) \
do { \
(m)->lock = 1; \
@ -243,6 +247,27 @@ static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
spin_unlock(lock);
}
/* Initialize a compat rw_semaphore: reset its hold counter to zero. */
static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->lock = 0L;
}
/*
 * Try to take the semaphore for reading.  There is nothing to contend
 * with here, so the attempt always succeeds and the counter is bumped.
 */
static inline bool down_read_trylock(struct rw_semaphore *sem)
{
	sem->lock += 1;
	return true;
}
/* Acquire the semaphore for reading: just record one more holder. */
static inline void down_read(struct rw_semaphore *sem)
{
	sem->lock += 1;
}
/* Release a read hold taken by down_read()/down_read_trylock(). */
static inline void up_read(struct rw_semaphore *sem)
{
	sem->lock -= 1;
}
/* Scheduling and preemption control have no meaning in userspace: no-ops. */
#define cond_resched() do { } while (0)
#define preempt_enable() do { } while (0)
#define preempt_disable() do { } while (0)
@ -400,6 +425,11 @@ static inline void *kmem_cache_alloc(struct kmem_cache *cache, gfp_t mask)
return malloc(cache->size);
}
/*
 * Allocate one zero-filled object of the cache's object size.  The gfp
 * mask is ignored in userspace.  Returns NULL on allocation failure.
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t mask)
{
	void *obj = calloc(1, cache->size);

	return obj;
}
static inline void kmem_cache_free(struct kmem_cache *cache, void *ptr)
{
free(ptr);
@ -704,6 +734,10 @@ static inline bool sb_rdonly(struct super_block *sb)
/* Branch-prediction and RCU pointer helpers degrade to plain expressions. */
#define unlikely(cond) (cond)
#define rcu_dereference(c) (c)
#define rcu_assign_pointer(p, v) do { (p) = (v); } while (0)
static inline void atomic_set(atomic_t *a, int val)
{
*a = val;
@ -724,6 +758,15 @@ static inline void atomic_dec(atomic_t *a)
(*a)--;
}
/*
 * Increment @a unless it is currently zero.  Returns true when the
 * increment happened, false when the counter was already zero.
 */
static inline bool atomic_inc_not_zero(atomic_t *a)
{
	if (*a == 0)
		return false;
	atomic_inc(a);
	return true;
}
static inline struct workqueue_struct *alloc_workqueue(const char *name,
unsigned long flags,
int max_active, ...)
@ -766,6 +809,10 @@ static inline void lockdep_set_class(spinlock_t *lock, struct lock_class_key *lc
{
}
/* Lockdep is a kernel-only debugging facility; nothing to assert here. */
static inline void lockdep_assert_held_read(struct rw_semaphore *sem)
{
}
static inline bool cond_resched_lock(spinlock_t *lock)
{
return false;
@ -800,11 +847,26 @@ static inline void schedule(void)
{
}
/* RCU read-side critical sections are a no-op in userspace. */
static inline void rcu_read_lock(void)
{
}
/* Matching no-op for rcu_read_lock(). */
static inline void rcu_read_unlock(void)
{
}
/* No concurrent readers to wait for in userspace; nothing to do. */
static inline void synchronize_rcu(void)
{
}
/*
* Temporary definitions while syncing.
*/
struct btrfs_inode;
struct extent_state;
struct extent_buffer;
struct btrfs_root;
struct btrfs_trans_handle;
static inline void btrfs_merge_delalloc_extent(struct btrfs_inode *inode,
struct extent_state *state,
@ -830,4 +892,18 @@ static inline void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
{
}
/*
 * Stub of the kernel's relocation hook called when a block is COWed.
 * Always reports success; presumably relocation is not needed while
 * syncing ctree.c — TODO confirm once the sync completes.
 */
static inline int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct extent_buffer *buf,
					struct extent_buffer *cow)
{
	return 0;
}
/* Stub of the kernel's qgroup subtree-tracing hook; intentionally empty. */
static inline void btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct extent_buffer *buf)
{
}
#endif

View File

@ -57,4 +57,10 @@ static inline void trace_btrfs_convert_extent_bit(struct extent_io_tree *tree,
{
}
/* Userspace replacement for the kernel's btrfs_cow_block tracepoint: no-op. */
static inline void trace_btrfs_cow_block(struct btrfs_root *root,
					struct extent_buffer *buf,
					struct extent_buffer *cow)
{
}
#endif /* __PROGS_TRACE_H__ */