mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-28 23:23:55 +08:00
40889e8d9f
Pull Ceph update from Sage Weil: "There are a few different groups of commits here. The largest is Alex's ongoing work to enable the coming RBD features (cloning, striping). There is some cleanup in libceph that goes along with it. Cyril and David have fixed some problems with NFS reexport (leaking dentries and page locks), and there is a batch of patches from Yan fixing problems with the fs client when running against a clustered MDS. There are a few bug fixes mixed in for good measure, many of which will be going to the stable trees once they're upstream. My apologies for the late pull. There is still a gremlin in the rbd map/unmap code and I was hoping to include the fix for that as well, but we haven't been able to confirm the fix is correct yet; I'll send that in a separate pull once it's nailed down." * 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client: (68 commits) rbd: get rid of rbd_{get,put}_dev() libceph: register request before unregister linger libceph: don't use rb_init_node() in ceph_osdc_alloc_request() libceph: init event->node in ceph_osdc_create_event() libceph: init osd->o_node in create_osd() libceph: report connection fault with warning libceph: socket can close in any connection state rbd: don't use ENOTSUPP rbd: remove linger unconditionally rbd: get rid of RBD_MAX_SEG_NAME_LEN libceph: avoid using freed osd in __kick_osd_requests() ceph: don't reference req after put rbd: do not allow remove of mounted-on image libceph: Unlock unprocessed pages in start_read() error path ceph: call handle_cap_grant() for cap import message ceph: Fix __ceph_do_pending_vmtruncate ceph: Don't add dirty inode to dirty list if caps is in migration ceph: Fix infinite loop in __wake_requests ceph: Don't update i_max_size when handling non-auth cap bdi_register: add __printf verification, fix arg mismatch ...
130 lines
3.9 KiB
C
130 lines
3.9 KiB
C
#ifndef _FS_CEPH_OSDMAP_H
|
|
#define _FS_CEPH_OSDMAP_H
|
|
|
|
#include <linux/rbtree.h>
|
|
#include <linux/ceph/types.h>
|
|
#include <linux/ceph/ceph_fs.h>
|
|
#include <linux/crush/crush.h>
|
|
|
|
/*
|
|
* The osd map describes the current membership of the osd cluster and
|
|
* specifies the mapping of objects to placement groups and placement
|
|
* groups to (sets of) osds. That is, it completely specifies the
|
|
* (desired) distribution of all data objects in the system at some
|
|
* point in time.
|
|
*
|
|
* Each map version is identified by an epoch, which increases monotonically.
|
|
*
|
|
* The map can be updated either via an incremental map (diff) describing
|
|
* the change between two successive epochs, or as a fully encoded map.
|
|
*/
|
|
/*
 * Per-pool information, kept in the ceph_osdmap->pg_pools rbtree and
 * keyed by pool id.
 */
struct ceph_pg_pool_info {
	struct rb_node node;	/* links into ceph_osdmap->pg_pools */
	int id;			/* pool id (rbtree key) */
	struct ceph_pg_pool v;	/* decoded on-wire pool description */
	/*
	 * Bit masks derived from the pg/pgp/lpg/lpgp counts in @v —
	 * presumably (next-power-of-two - 1) masks used when mapping
	 * objects to placement groups; TODO(review): confirm against
	 * the osdmap decode code.
	 */
	int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask;
	char *name;		/* pool name; see ceph_pg_pool_name_by_id() */
};
|
|
|
|
/*
 * An explicit placement-group -> osd-list mapping entry, kept in the
 * ceph_osdmap->pg_temp rbtree (presumably keyed by @pgid — confirm
 * against the tree insert/lookup code).
 */
struct ceph_pg_mapping {
	struct rb_node node;	/* links into ceph_osdmap->pg_temp */
	struct ceph_pg pgid;	/* placement group this entry maps */
	int len;		/* number of valid entries in osds[] */
	int osds[];		/* flexible array of osd ids */
};
|
|
|
|
/*
 * In-memory representation of one decoded osd map epoch; built by
 * osdmap_decode()/osdmap_apply_incremental() and freed by
 * ceph_osdmap_destroy().
 */
struct ceph_osdmap {
	struct ceph_fsid fsid;	/* cluster id this map belongs to */
	u32 epoch;		/* map version; increases monotonically */
	u32 mkfs_epoch;		/* NOTE(review): presumably the epoch at
				 * cluster creation — confirm */
	struct ceph_timespec created, modified;

	u32 flags;         /* CEPH_OSDMAP_* */

	u32 max_osd;       /* size of osd_state, _offload, _addr arrays */
	u8 *osd_state;     /* CEPH_OSD_* */
	u32 *osd_weight;   /* 0 = failed, 0x10000 = 100% normal */
	struct ceph_entity_addr *osd_addr;	/* network address per osd id */

	struct rb_root pg_temp;		/* struct ceph_pg_mapping entries:
					 * explicit pg -> osd overrides */
	struct rb_root pg_pools;	/* struct ceph_pg_pool_info by id */
	u32 pool_max;			/* presumably the highest pool id in
					 * the map — confirm in decode code */

	/* the CRUSH map specifies the mapping of placement groups to
	 * the list of osds that store+replicate them. */
	struct crush_map *crush;
};
|
|
|
|
/*
 * file layout helpers
 *
 * Accessors for the little-endian on-wire fields of a struct
 * ceph_file_layout; each converts to host byte order and returns the
 * value as a signed 32-bit quantity.
 */
#define ceph_file_layout_su(l) ((__s32)le32_to_cpu((l).fl_stripe_unit))
#define ceph_file_layout_stripe_count(l) \
	((__s32)le32_to_cpu((l).fl_stripe_count))
#define ceph_file_layout_object_size(l) ((__s32)le32_to_cpu((l).fl_object_size))
#define ceph_file_layout_cas_hash(l) ((__s32)le32_to_cpu((l).fl_cas_hash))
#define ceph_file_layout_object_su(l) \
	((__s32)le32_to_cpu((l).fl_object_stripe_unit))
#define ceph_file_layout_pg_pool(l) \
	((__s32)le32_to_cpu((l).fl_pg_pool))
|
|
|
|
static inline unsigned ceph_file_layout_stripe_width(struct ceph_file_layout *l)
|
|
{
|
|
return le32_to_cpu(l->fl_stripe_unit) *
|
|
le32_to_cpu(l->fl_stripe_count);
|
|
}
|
|
|
|
/* "period" == bytes before i start on a new set of objects */
|
|
static inline unsigned ceph_file_layout_period(struct ceph_file_layout *l)
|
|
{
|
|
return le32_to_cpu(l->fl_object_size) *
|
|
le32_to_cpu(l->fl_stripe_count);
|
|
}
|
|
|
|
|
|
static inline int ceph_osd_is_up(struct ceph_osdmap *map, int osd)
|
|
{
|
|
return (osd < map->max_osd) && (map->osd_state[osd] & CEPH_OSD_UP);
|
|
}
|
|
|
|
static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag)
|
|
{
|
|
return map && (map->flags & flag);
|
|
}
|
|
|
|
/* render an osd's CEPH_OSD_* state flags as text into str[len]; returns str */
extern char *ceph_osdmap_state_str(char *str, int len, int state);
|
|
|
|
static inline struct ceph_entity_addr *ceph_osd_addr(struct ceph_osdmap *map,
|
|
int osd)
|
|
{
|
|
if (osd >= map->max_osd)
|
|
return NULL;
|
|
return &map->osd_addr[osd];
|
|
}
|
|
|
|
/* decode a full osdmap from the buffer at *p (bounded by end) */
extern struct ceph_osdmap *osdmap_decode(void **p, void *end);
/*
 * apply an incremental (diff) update on top of @map; returns the
 * resulting map.  NOTE(review): whether the old map is consumed or
 * must still be destroyed by the caller is not visible here — confirm
 * against the implementation before relying on it.
 */
extern struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					    struct ceph_osdmap *map,
					    struct ceph_messenger *msgr);
/* free a map and everything hanging off it */
extern void ceph_osdmap_destroy(struct ceph_osdmap *map);

/* calculate mapping of a file extent to an object */
extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
					 u64 off, u64 *plen,
					 u64 *bno, u64 *oxoff, u64 *oxlen);

/* calculate mapping of object to a placement group */
extern int ceph_calc_object_layout(struct ceph_object_layout *ol,
				   const char *oid,
				   struct ceph_file_layout *fl,
				   struct ceph_osdmap *osdmap);
/*
 * fill @acting with the osds currently acting for @pgid; presumably
 * returns the number of entries written or a negative errno — confirm
 */
extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
			       int *acting);
/* return the primary osd for @pgid */
extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap,
				struct ceph_pg pgid);

/* pool name <-> id lookups against map->pg_pools */
extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id);
extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
|
|
|
|
#endif
|