/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _CORE_PRIV_H
#define _CORE_PRIV_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/cgroup_rdma.h>

#include <rdma/ib_verbs.h>
#include <rdma/opa_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/restrack.h>
#include "mad_priv.h"

/* Total number of ports combined across all struct ib_device instances */
#define RDMA_MAX_PORTS 1024

struct pkey_index_qp_list {
	struct list_head pkey_index_list;
	u16 pkey_index;
	/* Lock to hold while iterating the qp_list. */
	spinlock_t qp_list_lock;
	struct list_head qp_list;
};
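/*
 * Illustrative sketch (not part of the header): qp_list above is meant to be
 * walked only while qp_list_lock is held.  Assuming a hypothetical entry type
 * linked into the list via a "qp_list" member, a walker could look like:
 *
 *	struct example_qp_entry *entry;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&pkey->qp_list_lock, flags);
 *	list_for_each_entry(entry, &pkey->qp_list, qp_list)
 *		do_something(entry);
 *	spin_unlock_irqrestore(&pkey->qp_list_lock, flags);
 *
 * "struct example_qp_entry" and "do_something()" are placeholders, not real
 * symbols in this tree.
 */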

#if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS)
int cma_configfs_init(void);
void cma_configfs_exit(void);
#else
static inline int cma_configfs_init(void)
{
	return 0;
}

static inline void cma_configfs_exit(void)
{
}
#endif
struct cma_device;
void cma_ref_dev(struct cma_device *cma_dev);
void cma_deref_dev(struct cma_device *cma_dev);
typedef bool (*cma_device_filter)(struct ib_device *, void *);
struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
					     void *cookie);
int cma_get_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port);
int cma_set_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port,
			     enum ib_gid_type default_gid_type);
int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port);
int cma_set_default_roce_tos(struct cma_device *cma_dev, unsigned int port,
			     u8 default_roce_tos);
struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev);

int ib_device_register_sysfs(struct ib_device *device,
			     int (*port_callback)(struct ib_device *,
						  u8, struct kobject *));
void ib_device_unregister_sysfs(struct ib_device *device);

void ib_cache_setup(void);
void ib_cache_cleanup(void);

typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
				     struct net_device *idev, void *cookie);

typedef int (*roce_netdev_filter)(struct ib_device *device, u8 port,
				  struct net_device *idev, void *cookie);

void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie);
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie);
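/*
 * Illustrative sketch (not part of the header): the enumeration helpers above
 * take a filter/callback pair matching the typedefs.  A caller might pair
 * them roughly like this (the function names are placeholders, not real
 * symbols in this tree):
 *
 *	static int match_port_one(struct ib_device *device, u8 port,
 *				  struct net_device *idev, void *cookie)
 *	{
 *		return port == 1;
 *	}
 *
 *	static void count_netdev(struct ib_device *device, u8 port,
 *				 struct net_device *idev, void *cookie)
 *	{
 *		(*(int *)cookie)++;
 *	}
 *
 *	int n = 0;
 *	ib_enum_all_roce_netdevs(match_port_one, NULL, count_netdev, &n);
 */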

typedef int (*nldev_callback)(struct ib_device *device,
			      struct sk_buff *skb,
			      struct netlink_callback *cb,
			      unsigned int idx);

int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
		     struct netlink_callback *cb);

enum ib_cache_gid_default_mode {
	IB_CACHE_GID_DEFAULT_MODE_SET,
	IB_CACHE_GID_DEFAULT_MODE_DELETE
};

int ib_cache_gid_parse_type_str(const char *buf);

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type);

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode);

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr);

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr);

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev);

int roce_gid_mgmt_init(void);
void roce_gid_mgmt_cleanup(void);

unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);

int ib_cache_setup_one(struct ib_device *device);
void ib_cache_cleanup_one(struct ib_device *device);
void ib_cache_release_one(struct ib_device *device);

#ifdef CONFIG_CGROUP_RDMA
int ib_device_register_rdmacg(struct ib_device *device);
void ib_device_unregister_rdmacg(struct ib_device *device);

int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
			 struct ib_device *device,
			 enum rdmacg_resource_type resource_index);

void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
			struct ib_device *device,
			enum rdmacg_resource_type resource_index);
#else
static inline int ib_device_register_rdmacg(struct ib_device *device)
{ return 0; }

static inline void ib_device_unregister_rdmacg(struct ib_device *device)
{ }

static inline int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
				       struct ib_device *device,
				       enum rdmacg_resource_type resource_index)
{ return 0; }

static inline void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
				      struct ib_device *device,
				      enum rdmacg_resource_type resource_index)
{ }
#endif
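/*
 * Illustrative sketch (not part of the header): ib_rdmacg_try_charge() and
 * ib_rdmacg_uncharge() are expected to be used as a pair around the lifetime
 * of a charged resource, roughly:
 *
 *	ret = ib_rdmacg_try_charge(&cg_obj, device, RDMACG_RESOURCE_HCA_OBJECT);
 *	if (ret)
 *		return ret;
 *	...create and use the object...
 *	ib_rdmacg_uncharge(&cg_obj, device, RDMACG_RESOURCE_HCA_OBJECT);
 *
 * When CONFIG_CGROUP_RDMA is disabled, the stubs above make both calls no-ops,
 * so callers do not need their own #ifdefs.
 */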

static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
					 struct net_device *upper)
{
	return netdev_has_upper_dev_all_rcu(dev, upper);
}

int addr_init(void);
void addr_cleanup(void);

int ib_mad_init(void);
void ib_mad_cleanup(void);

int ib_sa_init(void);
void ib_sa_cleanup(void);

int rdma_nl_init(void);
void rdma_nl_exit(void);

int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack);
int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack);
int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack);

int ib_get_cached_subnet_prefix(struct ib_device *device,
				u8 port_num,
				u64 *sn_pfx);

#ifdef CONFIG_SECURITY_INFINIBAND
void ib_security_destroy_port_pkey_list(struct ib_device *device);

void ib_security_cache_change(struct ib_device *device,
			      u8 port_num,
			      u64 subnet_prefix);

int ib_security_modify_qp(struct ib_qp *qp,
			  struct ib_qp_attr *qp_attr,
			  int qp_attr_mask,
			  struct ib_udata *udata);

int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev);
void ib_destroy_qp_security_begin(struct ib_qp_security *sec);
void ib_destroy_qp_security_abort(struct ib_qp_security *sec);
void ib_destroy_qp_security_end(struct ib_qp_security *sec);
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev);
void ib_close_shared_qp_security(struct ib_qp_security *sec);
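/*
 * Illustrative sketch (not part of the header): the destroy helpers above are
 * intended to bracket QP teardown in three phases, roughly:
 *
 *	ib_destroy_qp_security_begin(sec);
 *	ret = <device-specific QP destroy>;
 *	if (ret)
 *		ib_destroy_qp_security_abort(sec);
 *	else
 *		ib_destroy_qp_security_end(sec);
 *
 * begin() runs before the QP is torn down, abort() rolls back if teardown
 * fails, and end() completes the work on success; exact call sites may differ.
 */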
int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
				enum ib_qp_type qp_type);
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent);
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index);
#else
static inline void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
}

static inline void ib_security_cache_change(struct ib_device *device,
					    u8 port_num,
					    u64 subnet_prefix)
{
}

static inline int ib_security_modify_qp(struct ib_qp *qp,
					struct ib_qp_attr *qp_attr,
					int qp_attr_mask,
					struct ib_udata *udata)
{
	return qp->device->modify_qp(qp->real_qp,
				     qp_attr,
				     qp_attr_mask,
				     udata);
}

static inline int ib_create_qp_security(struct ib_qp *qp,
					struct ib_device *dev)
{
	return 0;
}

static inline void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
}

static inline void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
}

static inline void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
}

static inline int ib_open_shared_qp_security(struct ib_qp *qp,
					     struct ib_device *dev)
{
	return 0;
}

static inline void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
}

static inline int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
					      enum ib_qp_type qp_type)
{
	return 0;
}

static inline void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
}

static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
					  u16 pkey_index)
{
	return 0;
}
#endif

struct ib_device *ib_device_get_by_index(u32 ifindex);
/* RDMA device netlink */
void nldev_init(void);
void nldev_exit(void);

static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
					  struct ib_pd *pd,
					  struct ib_qp_init_attr *attr,
					  struct ib_udata *udata,
					  struct ib_uobject *uobj)
{
	struct ib_qp *qp;

	if (!dev->create_qp)
		return ERR_PTR(-EOPNOTSUPP);

	qp = dev->create_qp(pd, attr, udata);
	if (IS_ERR(qp))
		return qp;

	qp->device = dev;
	qp->pd = pd;
	qp->uobject = uobj;
	/*
	 * We don't track XRC QPs for now, because they don't have a PD
	 * and, more importantly, they are created internally by the driver;
	 * see mlx5 create_dev_resources() as an example.
	 */
	if (attr->qp_type < IB_QPT_XRC_INI) {
		qp->res.type = RDMA_RESTRACK_QP;
		rdma_restrack_add(&qp->res);
	} else
		qp->res.valid = false;

	return qp;
}
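/*
 * Illustrative sketch (not part of the header): a caller of _ib_create_qp()
 * is expected to check the returned pointer with IS_ERR(), roughly:
 *
 *	qp = _ib_create_qp(device, pd, &attr, NULL, NULL);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 *
 * NULL udata/uobj arguments indicate a kernel-internal QP; QPs created on
 * behalf of user space pass their ib_udata and ib_uobject instead.
 */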
#endif /* _CORE_PRIV_H */