/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "dm.h"

#define DM_RESERVED_MAX_IOS 1024

struct dm_kobject_holder {
        struct kobject kobj;
        struct completion completion;
};

/*
 * DM core internal structure that is used directly by dm.c and dm-rq.c
 * DM targets must _not_ dereference a mapped_device to directly access its members!
 */
struct mapped_device {
        struct mutex suspend_lock;

        /*
         * The current mapping (struct dm_table *).
         * Use dm_get_live_table{_fast} or take suspend_lock for
         * dereference.
         */
        void __rcu *map;
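        /*
         * Illustrative sketch (not part of this header): core code is
         * expected to pair the dm_get_live_table() / dm_put_live_table()
         * helpers when dereferencing ->map outside of suspend_lock, along
         * these lines:
         *
         *      int srcu_idx;
         *      struct dm_table *map = dm_get_live_table(md, &srcu_idx);
         *
         *      if (map) {
         *              ... inspect or iterate the live table ...
         *      }
         *      dm_put_live_table(md, srcu_idx);
         */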

        struct list_head table_devices;
        struct mutex table_devices_lock;

        unsigned long flags;

        struct request_queue *queue;
        int numa_node_id;

        enum dm_queue_mode type;
        /* Protect queue and type against concurrent access. */
        struct mutex type_lock;

        atomic_t holders;
        atomic_t open_count;

        struct dm_target *immutable_target;
        struct target_type *immutable_target_type;

        struct gendisk *disk;
        struct dax_device *dax_dev;
        char name[16];

        void *interface_ptr;

        /*
         * A list of ios that arrived while we were suspended.
         */
        atomic_t pending[2];
        wait_queue_head_t wait;
        struct work_struct work;
        spinlock_t deferred_lock;
        struct bio_list deferred;
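        /*
         * Illustrative sketch (an assumption about dm.c, nothing defined
         * here): bios parked on ->deferred while suspended are typically
         * drained from the ->work handler roughly as follows:
         *
         *      spin_lock_irq(&md->deferred_lock);
         *      bio = bio_list_pop(&md->deferred);
         *      spin_unlock_irq(&md->deferred_lock);
         *      if (bio)
         *              ... resubmit the bio ...
         */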

        /*
         * Event handling.
         */
        wait_queue_head_t eventq;
        atomic_t event_nr;
        atomic_t uevent_seq;
        struct list_head uevent_list;
        spinlock_t uevent_lock; /* Protect access to uevent_list */

        /* the number of internal suspends */
        unsigned internal_suspend_count;

        /*
         * Processing queue (flush)
         */
        struct workqueue_struct *wq;

        /*
         * io objects are allocated from here.
         */
        mempool_t *io_pool;

        struct bio_set *bs;

        /*
         * freeze/thaw support requires holding onto a super block
         */
        struct super_block *frozen_sb;

        /* forced geometry settings */
        struct hd_geometry geometry;

        struct block_device *bdev;

        /* kobject and completion */
        struct dm_kobject_holder kobj_holder;

        /* zero-length flush that will be cloned and submitted to targets */
        struct bio flush_bio;

        struct dm_stats stats;

        struct kthread_worker kworker;
        struct task_struct *kworker_task;

        /* for request-based merge heuristic in dm_request_fn() */
        unsigned seq_rq_merge_deadline_usecs;
        int last_rq_rw;
        sector_t last_rq_pos;
        ktime_t last_rq_start_time;

        /* for blk-mq request-based DM support */
        struct blk_mq_tag_set *tag_set;
        bool use_blk_mq:1;
        bool init_tio_pdu:1;

        struct srcu_struct io_barrier;
};

void dm_init_md_queue(struct mapped_device *md);
void dm_init_normal_md_queue(struct mapped_device *md);
int md_in_flight(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);
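/*
 * Illustrative sketch (an assumption about dm.c, not defined in this header):
 * md_in_flight() presumably reports the number of I/Os still in flight by
 * summing the two ->pending counters, e.g.:
 *
 *      return atomic_read(&md->pending[READ]) +
 *             atomic_read(&md->pending[WRITE]);
 *
 * Suspend paths wait on ->wait until this count drops to zero.
 */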

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
        return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
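/*
 * Illustrative sketch (an assumption about the sysfs code, not part of this
 * header): the holder's completion is meant to be signalled from a kobject
 * release callback, so teardown can wait for sysfs readers to go away:
 *
 *      static void dm_kobject_release(struct kobject *kobj)
 *      {
 *              complete(dm_get_completion_from_kobject(kobj));
 *      }
 */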

unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
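/*
 * Illustrative sketch (an assumption about how dm.c wraps this helper; the
 * parameter and macro names below are from dm.c, not this header): a caller
 * passes the writable module parameter, its default, and an upper bound such
 * as DM_RESERVED_MAX_IOS, e.g.:
 *
 *      unsigned dm_get_reserved_bio_based_ios(void)
 *      {
 *              return __dm_get_module_param(&reserved_bio_based_ios,
 *                                           RESERVED_BIO_BASED_IOS,
 *                                           DM_RESERVED_MAX_IOS);
 *      }
 */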

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
        return !maxlen || strlen(result) + 1 >= maxlen;
}
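/*
 * Illustrative sketch (usage assumption; some_count is just a placeholder):
 * a message/status handler that appends into the caller-supplied buffer,
 * e.g. via DMEMIT(), can use this helper to detect truncated output:
 *
 *      DMEMIT("%u entries", some_count);
 *      if (dm_message_test_buffer_overflow(result, maxlen))
 *              return 1;
 */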

extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);
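/*
 * Illustrative sketch (an assumption about dm.c): issuing a global event
 * presumably just bumps the counter and wakes any poll/select waiters:
 *
 *      atomic_inc(&dm_global_event_nr);
 *      wake_up(&dm_global_eventq);
 */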
#endif