/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko
 */

#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/types.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <net/net_namespace.h>

#define DEVLINK_REGISTERED XA_MARK_1
#define DEVLINK_UNREGISTERING XA_MARK_2

#define DEVLINK_RELOAD_STATS_ARRAY_SIZE \
	(__DEVLINK_RELOAD_LIMIT_MAX * __DEVLINK_RELOAD_ACTION_MAX)

struct devlink_dev_stats {
	u32 reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
	u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
};

struct devlink {
	u32 index;
	struct xarray ports;
	struct list_head rate_list;
	struct list_head sb_list;
	struct list_head dpipe_table_list;
	struct list_head resource_list;
	struct list_head param_list;
	struct list_head region_list;
	struct list_head reporter_list;
	struct mutex reporters_lock; /* protects reporter_list */
	struct devlink_dpipe_headers *dpipe_headers;
	struct list_head trap_list;
	struct list_head trap_group_list;
	struct list_head trap_policer_list;
	struct list_head linecard_list;
	struct mutex linecards_lock; /* protects linecard_list */
	const struct devlink_ops *ops;
	u64 features;
	struct xarray snapshot_ids;
	struct devlink_dev_stats stats;
	struct device *dev;
	possible_net_t _net;
	/* Serializes access to devlink instance specific objects such as
	 * port, sb, dpipe, resource, params, region, traps and more.
	 */
	struct mutex lock;
	struct lock_class_key lock_key;
	u8 reload_failed:1;
	refcount_t refcount;
	struct completion comp;
	struct rcu_head rcu;
	struct notifier_block netdevice_nb;
	char priv[] __aligned(NETDEV_ALIGN);
};

extern struct xarray devlinks;
extern struct genl_family devlink_nl_family;

/* devlink instances are open to access from user space after the
 * devlink_register() call. Such a logical barrier allows us to have certain
 * expectations related to locking.
 *
 * Before *_register() - we are in the initialization stage and no parallel
 * access to the devlink instance is possible. All drivers perform that phase
 * while implicitly holding device_lock.
 *
 * After *_register() - user space and the driver can access the devlink
 * instance at the same time.
 */
#define ASSERT_DEVLINK_REGISTERED(d)					\
	WARN_ON_ONCE(!xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED))
#define ASSERT_DEVLINK_NOT_REGISTERED(d)				\
	WARN_ON_ONCE(xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED))

/* Iterate over devlink pointers to which a reference could be taken.
 * devlink_put() needs to be called on each iterated devlink pointer
 * in the loop body in order to release the reference.
 */
#define devlinks_xa_for_each_registered_get(net, index, devlink)	\
	for (index = 0,							\
	     devlink = devlinks_xa_find_get_first(net, &index);	\
	     devlink; devlink = devlinks_xa_find_get_next(net, &index))
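/* Usage sketch: a minimal example of how the iterator above is meant to be
 * used, per the comment preceding it. The function name
 * example_visit_registered() and the per-instance work are placeholders;
 * only the devlink_put() requirement comes from that comment. Compiled out
 * with #if 0, illustration only.
 */
#if 0
static void example_visit_registered(struct net *net)
{
	struct devlink *devlink;
	unsigned long index;

	devlinks_xa_for_each_registered_get(net, index, devlink) {
		/* ... inspect or act on the registered instance ... */
		devlink_put(devlink); /* release the reference taken by the iterator */
	}
}
#endif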
struct devlink *
devlinks_xa_find_get(struct net *net, unsigned long *indexp,
		     void * (*xa_find_fn)(struct xarray *, unsigned long *,
					  unsigned long, xa_mark_t));
struct devlink *
devlinks_xa_find_get_first(struct net *net, unsigned long *indexp);
struct devlink *
devlinks_xa_find_get_next(struct net *net, unsigned long *indexp);

/* Netlink */
#define DEVLINK_NL_FLAG_NEED_PORT		BIT(0)
#define DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT	BIT(1)
#define DEVLINK_NL_FLAG_NEED_RATE		BIT(2)
#define DEVLINK_NL_FLAG_NEED_RATE_NODE		BIT(3)
#define DEVLINK_NL_FLAG_NEED_LINECARD		BIT(4)

enum devlink_multicast_groups {
	DEVLINK_MCGRP_CONFIG,
};

/* state held across netlink dumps */
struct devlink_nl_dump_state {
	unsigned long instance;
	int idx;
	union {
		/* DEVLINK_CMD_REGION_READ */
		struct {
			u64 start_offset;
		};
		/* DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET */
		struct {
			u64 dump_ts;
		};
	};
};

/* Iterate over registered devlink instances for a devlink dump.
 * devlink_put() needs to be called on each iterated devlink pointer
 * in the loop body in order to release the reference.
 * Note: this is NOT a generic iterator, it makes assumptions about the use
 * of @state and can only be used once per dumpit implementation.
 */
#define devlink_dump_for_each_instance_get(msg, state, devlink)	\
	for (; (devlink = devlinks_xa_find_get(sock_net(msg->sk),	\
					       &state->instance, xa_find)); \
	     state->instance++)

extern const struct genl_small_ops devlink_nl_ops[56];

struct devlink *devlink_get_from_attrs(struct net *net, struct nlattr **attrs);

void devlink_notify_unregister(struct devlink *devlink);
void devlink_notify_register(struct devlink *devlink);

static inline struct devlink_nl_dump_state *
devlink_dump_state(struct netlink_callback *cb)
{
	NL_ASSERT_DUMP_CTX_FITS(struct devlink_nl_dump_state);

	return (struct devlink_nl_dump_state *)cb->ctx;
}

/* Ports */
int devlink_port_netdevice_event(struct notifier_block *nb,
				 unsigned long event, void *ptr);

struct devlink_port *
devlink_port_get_from_info(struct devlink *devlink, struct genl_info *info);

/* Reload */
bool devlink_reload_actions_valid(const struct devlink_ops *ops);
int devlink_reload(struct devlink *devlink, struct net *dest_net,
		   enum devlink_reload_action action,
		   enum devlink_reload_limit limit,
		   u32 *actions_performed, struct netlink_ext_ack *extack);

static inline bool devlink_reload_supported(const struct devlink_ops *ops)
{
	return ops->reload_down && ops->reload_up;
}

/* Line cards */
struct devlink_linecard;

struct devlink_linecard *
devlink_linecard_get_from_info(struct devlink *devlink, struct genl_info *info);
void devlink_linecard_put(struct devlink_linecard *linecard);

/* Rates */
struct devlink_rate *
devlink_rate_get_from_info(struct devlink *devlink, struct genl_info *info);
struct devlink_rate *
devlink_rate_node_get_from_info(struct devlink *devlink,
				struct genl_info *info);
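/* Usage sketch: a minimal dumpit skeleton wired to devlink_dump_state() and
 * devlink_dump_for_each_instance_get() declared above. example_nl_cmd_dumpit()
 * and example_fill() are placeholder names; taking devl_lock()/devl_unlock()
 * around the fill is an assumption about how per-instance objects are
 * protected, and only the devlink_put() requirement comes from the iterator's
 * comment. Compiled out with #if 0, illustration only.
 */
#if 0
static int example_nl_cmd_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct devlink_nl_dump_state *state = devlink_dump_state(cb);
	struct devlink *devlink;
	int err = 0;

	devlink_dump_for_each_instance_get(msg, state, devlink) {
		devl_lock(devlink);
		err = example_fill(msg, devlink, state); /* placeholder fill helper */
		devl_unlock(devlink);
		devlink_put(devlink);
		if (err)
			break; /* state->instance is not advanced, so the next
				* dump call resumes from this instance
				*/
	}

	return err;
}
#endif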