blob: f6ff0b61e08a966254625ad13fd3b97e99329cf7 [file] [log] [blame]
Murali Karicheri0e7623b2019-04-05 13:31:34 -04001// SPDX-License-Identifier: GPL-2.0
Arvid Brodin70ebe4a2014-07-04 23:34:38 +02002/* Copyright 2011-2014 Autronica Fire and Security AS
Arvid Brodinf4214362013-10-30 21:10:47 +01003 *
Arvid Brodinf4214362013-10-30 21:10:47 +01004 * Author(s):
Arvid Brodin70ebe4a2014-07-04 23:34:38 +02005 * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
Arvid Brodinf4214362013-10-30 21:10:47 +01006 *
Murali Karicheri8f4c0e02020-07-22 10:40:16 -04007 * Routines for handling Netlink messages for HSR and PRP.
Arvid Brodinf4214362013-10-30 21:10:47 +01008 */
9
10#include "hsr_netlink.h"
11#include <linux/kernel.h>
12#include <net/rtnetlink.h>
13#include <net/genetlink.h>
14#include "hsr_main.h"
15#include "hsr_device.h"
16#include "hsr_framereg.h"
17
/* Netlink attribute policy for the IFLA_HSR_* attributes accepted by
 * hsr_newlink() (RTM_NEWLINK for link kind "hsr").
 */
static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
	[IFLA_HSR_SLAVE1]		= { .type = NLA_U32 },	/* ifindex of slave A */
	[IFLA_HSR_SLAVE2]		= { .type = NLA_U32 },	/* ifindex of slave B */
	[IFLA_HSR_MULTICAST_SPEC]	= { .type = NLA_U8 },	/* last byte of supervision addr */
	[IFLA_HSR_VERSION]		= { .type = NLA_U8 },	/* HSR version (0 or 1) */
	[IFLA_HSR_SUPERVISION_ADDR]	= { .len = ETH_ALEN },
	[IFLA_HSR_SEQ_NR]		= { .type = NLA_U16 },
	[IFLA_HSR_PROTOCOL]		= { .type = NLA_U8 },	/* HSR_PROTOCOL_HSR / _PRP */
	[IFLA_HSR_INTERLINK]		= { .type = NLA_U32 },	/* ifindex of interlink port */
};
28
Arvid Brodinf4214362013-10-30 21:10:47 +010029/* Here, it seems a netdevice has already been allocated for us, and the
30 * hsr_dev_setup routine has been executed. Nice!
31 */
32static int hsr_newlink(struct net *src_net, struct net_device *dev,
Matthias Schiffer7a3f4a12017-06-25 23:55:59 +020033 struct nlattr *tb[], struct nlattr *data[],
34 struct netlink_ext_ack *extack)
Arvid Brodinf4214362013-10-30 21:10:47 +010035{
Murali Karicheri8f4c0e02020-07-22 10:40:16 -040036 enum hsr_version proto_version;
37 unsigned char multicast_spec;
38 u8 proto = HSR_PROTOCOL_HSR;
Arvid Brodinf4214362013-10-30 21:10:47 +010039
Lukasz Majewski5055ccc2024-04-23 14:49:04 +020040 struct net_device *link[2], *interlink = NULL;
Arvid Brodina718dcc2014-07-04 23:42:00 +020041 if (!data) {
Taehee Yoo13eeb5f2020-02-28 18:01:35 +000042 NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
Arvid Brodina718dcc2014-07-04 23:42:00 +020043 return -EINVAL;
44 }
Arvid Brodinf4214362013-10-30 21:10:47 +010045 if (!data[IFLA_HSR_SLAVE1]) {
Taehee Yoo13eeb5f2020-02-28 18:01:35 +000046 NL_SET_ERR_MSG_MOD(extack, "Slave1 device not specified");
Arvid Brodinf4214362013-10-30 21:10:47 +010047 return -EINVAL;
48 }
Murali Karicherid595b852019-04-05 13:31:23 -040049 link[0] = __dev_get_by_index(src_net,
50 nla_get_u32(data[IFLA_HSR_SLAVE1]));
Taehee Yoo13eeb5f2020-02-28 18:01:35 +000051 if (!link[0]) {
52 NL_SET_ERR_MSG_MOD(extack, "Slave1 does not exist");
53 return -EINVAL;
54 }
Arvid Brodinf4214362013-10-30 21:10:47 +010055 if (!data[IFLA_HSR_SLAVE2]) {
Taehee Yoo13eeb5f2020-02-28 18:01:35 +000056 NL_SET_ERR_MSG_MOD(extack, "Slave2 device not specified");
Arvid Brodinf4214362013-10-30 21:10:47 +010057 return -EINVAL;
58 }
Murali Karicherid595b852019-04-05 13:31:23 -040059 link[1] = __dev_get_by_index(src_net,
60 nla_get_u32(data[IFLA_HSR_SLAVE2]));
Taehee Yoo13eeb5f2020-02-28 18:01:35 +000061 if (!link[1]) {
62 NL_SET_ERR_MSG_MOD(extack, "Slave2 does not exist");
Arvid Brodinf4214362013-10-30 21:10:47 +010063 return -EINVAL;
Taehee Yoo13eeb5f2020-02-28 18:01:35 +000064 }
65
66 if (link[0] == link[1]) {
67 NL_SET_ERR_MSG_MOD(extack, "Slave1 and Slave2 are same");
68 return -EINVAL;
69 }
Arvid Brodinf4214362013-10-30 21:10:47 +010070
Lukasz Majewski5055ccc2024-04-23 14:49:04 +020071 if (data[IFLA_HSR_INTERLINK])
72 interlink = __dev_get_by_index(src_net,
73 nla_get_u32(data[IFLA_HSR_INTERLINK]));
74
75 if (interlink && interlink == link[0]) {
76 NL_SET_ERR_MSG_MOD(extack, "Interlink and Slave1 are the same");
77 return -EINVAL;
78 }
79
80 if (interlink && interlink == link[1]) {
81 NL_SET_ERR_MSG_MOD(extack, "Interlink and Slave2 are the same");
82 return -EINVAL;
83 }
84
Arvid Brodinf4214362013-10-30 21:10:47 +010085 if (!data[IFLA_HSR_MULTICAST_SPEC])
86 multicast_spec = 0;
87 else
88 multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);
89
Murali Karicheri8f4c0e02020-07-22 10:40:16 -040090 if (data[IFLA_HSR_PROTOCOL])
91 proto = nla_get_u8(data[IFLA_HSR_PROTOCOL]);
92
93 if (proto >= HSR_PROTOCOL_MAX) {
Ye Binb87f9fe2020-09-09 17:38:21 +080094 NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol");
Murali Karicheri8f4c0e02020-07-22 10:40:16 -040095 return -EINVAL;
96 }
97
Taehee Yoo4faab8c2020-04-07 13:23:21 +000098 if (!data[IFLA_HSR_VERSION]) {
Murali Karicheri8f4c0e02020-07-22 10:40:16 -040099 proto_version = HSR_V0;
Taehee Yoo4faab8c2020-04-07 13:23:21 +0000100 } else {
Murali Karicheri8f4c0e02020-07-22 10:40:16 -0400101 if (proto == HSR_PROTOCOL_PRP) {
Ye Binb87f9fe2020-09-09 17:38:21 +0800102 NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported");
Murali Karicheri8f4c0e02020-07-22 10:40:16 -0400103 return -EINVAL;
104 }
105
106 proto_version = nla_get_u8(data[IFLA_HSR_VERSION]);
107 if (proto_version > HSR_V1) {
Taehee Yoo4faab8c2020-04-07 13:23:21 +0000108 NL_SET_ERR_MSG_MOD(extack,
Ye Binb87f9fe2020-09-09 17:38:21 +0800109 "Only HSR version 0/1 supported");
Taehee Yoo4faab8c2020-04-07 13:23:21 +0000110 return -EINVAL;
111 }
112 }
Peter Heiseee1c2792016-04-13 13:52:22 +0200113
Lukasz Majewski5055ccc2024-04-23 14:49:04 +0200114 if (proto == HSR_PROTOCOL_PRP) {
Murali Karicheri8f4c0e02020-07-22 10:40:16 -0400115 proto_version = PRP_V1;
Lukasz Majewski5055ccc2024-04-23 14:49:04 +0200116 if (interlink) {
117 NL_SET_ERR_MSG_MOD(extack,
118 "Interlink only works with HSR");
119 return -EINVAL;
120 }
121 }
Murali Karicheri8f4c0e02020-07-22 10:40:16 -0400122
Lukasz Majewski5055ccc2024-04-23 14:49:04 +0200123 return hsr_dev_finalize(dev, link, interlink, multicast_spec,
124 proto_version, extack);
Arvid Brodinf4214362013-10-30 21:10:47 +0100125}
126
Taehee Yoode0083c2020-06-21 13:46:25 +0000127static void hsr_dellink(struct net_device *dev, struct list_head *head)
128{
129 struct hsr_priv *hsr = netdev_priv(dev);
130
131 del_timer_sync(&hsr->prune_timer);
Lukasz Majewski5055ccc2024-04-23 14:49:04 +0200132 del_timer_sync(&hsr->prune_proxy_timer);
Taehee Yoode0083c2020-06-21 13:46:25 +0000133 del_timer_sync(&hsr->announce_timer);
Lukasz Majewski5f703ce2024-06-10 15:39:14 +0200134 timer_delete_sync(&hsr->announce_proxy_timer);
Taehee Yoode0083c2020-06-21 13:46:25 +0000135
136 hsr_debugfs_term(hsr);
137 hsr_del_ports(hsr);
138
139 hsr_del_self_node(hsr);
Sebastian Andrzej Siewiore0127642022-11-29 17:48:08 +0100140 hsr_del_nodes(&hsr->node_db);
Lukasz Majewski5055ccc2024-04-23 14:49:04 +0200141 hsr_del_nodes(&hsr->proxy_node_db);
Taehee Yoode0083c2020-06-21 13:46:25 +0000142
143 unregister_netdevice_queue(dev, head);
144}
145
Arvid Brodin98bf8362013-11-29 23:38:16 +0100146static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
147{
Taehee Yoo81390d02020-02-28 18:01:56 +0000148 struct hsr_priv *hsr = netdev_priv(dev);
Murali Karicheri8f4c0e02020-07-22 10:40:16 -0400149 u8 proto = HSR_PROTOCOL_HSR;
Arvid Brodinc5a75912014-07-04 23:38:05 +0200150 struct hsr_port *port;
Arvid Brodin98bf8362013-11-29 23:38:16 +0100151
Arvid Brodinc5a75912014-07-04 23:38:05 +0200152 port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
Taehee Yoo81390d02020-02-28 18:01:56 +0000153 if (port) {
154 if (nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex))
155 goto nla_put_failure;
156 }
Arvid Brodin51f3c602014-07-04 23:37:27 +0200157
Arvid Brodinc5a75912014-07-04 23:38:05 +0200158 port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
Taehee Yoo81390d02020-02-28 18:01:56 +0000159 if (port) {
160 if (nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex))
161 goto nla_put_failure;
162 }
Arvid Brodin98bf8362013-11-29 23:38:16 +0100163
164 if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
Arvid Brodin70ebe4a2014-07-04 23:34:38 +0200165 hsr->sup_multicast_addr) ||
166 nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
Arvid Brodin98bf8362013-11-29 23:38:16 +0100167 goto nla_put_failure;
Murali Karicheri8f4c0e02020-07-22 10:40:16 -0400168 if (hsr->prot_version == PRP_V1)
169 proto = HSR_PROTOCOL_PRP;
170 if (nla_put_u8(skb, IFLA_HSR_PROTOCOL, proto))
171 goto nla_put_failure;
Arvid Brodin98bf8362013-11-29 23:38:16 +0100172
173 return 0;
174
175nla_put_failure:
176 return -EMSGSIZE;
177}
178
/* rtnetlink link operations for "ip link add ... type hsr". */
static struct rtnl_link_ops hsr_link_ops __read_mostly = {
	.kind		= "hsr",
	.maxtype	= IFLA_HSR_MAX,
	.policy		= hsr_policy,
	.priv_size	= sizeof(struct hsr_priv),
	.setup		= hsr_dev_setup,
	.newlink	= hsr_newlink,
	.dellink	= hsr_dellink,
	.fill_info	= hsr_fill_info,
};
189
/* attribute policy */
/* Generic-netlink attribute policy for the HSR_A_* attributes used by the
 * "HSR" genl family (node status / node list queries and notifications).
 */
static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
	[HSR_A_NODE_ADDR] = { .len = ETH_ALEN },
	[HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },
	[HSR_A_IFINDEX] = { .type = NLA_U32 },
	[HSR_A_IF1_AGE] = { .type = NLA_U32 },
	[HSR_A_IF2_AGE] = { .type = NLA_U32 },
	[HSR_A_IF1_SEQ] = { .type = NLA_U16 },
	[HSR_A_IF2_SEQ] = { .type = NLA_U16 },
};
200
/* Forward declaration; the full family definition follows the op handlers
 * below, but the notification helpers need the symbol earlier.
 */
static struct genl_family hsr_genl_family;

/* Multicast group on which unsolicited events (ring error, node down) are
 * broadcast to userspace listeners.
 */
static const struct genl_multicast_group hsr_mcgrps[] = {
	{ .name = "hsr-network", },
};
206
Arvid Brodinf4214362013-10-30 21:10:47 +0100207/* This is called if for some node with MAC address addr, we only get frames
208 * over one of the slave interfaces. This would indicate an open network ring
209 * (i.e. a link has failed somewhere).
210 */
Arvid Brodin70ebe4a2014-07-04 23:34:38 +0200211void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
Arvid Brodinc5a75912014-07-04 23:38:05 +0200212 struct hsr_port *port)
Arvid Brodinf4214362013-10-30 21:10:47 +0100213{
214 struct sk_buff *skb;
215 void *msg_head;
Arvid Brodinc5a75912014-07-04 23:38:05 +0200216 struct hsr_port *master;
Arvid Brodinf4214362013-10-30 21:10:47 +0100217 int res;
Arvid Brodinf4214362013-10-30 21:10:47 +0100218
219 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
220 if (!skb)
221 goto fail;
222
Murali Karicherid595b852019-04-05 13:31:23 -0400223 msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
224 HSR_C_RING_ERROR);
Arvid Brodinf4214362013-10-30 21:10:47 +0100225 if (!msg_head)
226 goto nla_put_failure;
227
228 res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
229 if (res < 0)
230 goto nla_put_failure;
231
Arvid Brodinc5a75912014-07-04 23:38:05 +0200232 res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
Arvid Brodinf4214362013-10-30 21:10:47 +0100233 if (res < 0)
234 goto nla_put_failure;
235
236 genlmsg_end(skb, msg_head);
Johannes Berg2a94fe42013-11-19 15:19:39 +0100237 genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);
Arvid Brodinf4214362013-10-30 21:10:47 +0100238
239 return;
240
241nla_put_failure:
242 kfree_skb(skb);
243
244fail:
Arvid Brodinc5a75912014-07-04 23:38:05 +0200245 rcu_read_lock();
246 master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
247 netdev_warn(master->dev, "Could not send HSR ring error message\n");
248 rcu_read_unlock();
Arvid Brodinf4214362013-10-30 21:10:47 +0100249}
250
251/* This is called when we haven't heard from the node with MAC address addr for
252 * some time (just before the node is removed from the node table/list).
253 */
Arvid Brodin70ebe4a2014-07-04 23:34:38 +0200254void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
Arvid Brodinf4214362013-10-30 21:10:47 +0100255{
256 struct sk_buff *skb;
257 void *msg_head;
Arvid Brodinc5a75912014-07-04 23:38:05 +0200258 struct hsr_port *master;
Arvid Brodinf4214362013-10-30 21:10:47 +0100259 int res;
260
261 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
262 if (!skb)
263 goto fail;
264
265 msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
266 if (!msg_head)
267 goto nla_put_failure;
268
Arvid Brodinf4214362013-10-30 21:10:47 +0100269 res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
270 if (res < 0)
271 goto nla_put_failure;
272
273 genlmsg_end(skb, msg_head);
Johannes Berg2a94fe42013-11-19 15:19:39 +0100274 genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);
Arvid Brodinf4214362013-10-30 21:10:47 +0100275
276 return;
277
278nla_put_failure:
279 kfree_skb(skb);
280
281fail:
Arvid Brodinc5a75912014-07-04 23:38:05 +0200282 rcu_read_lock();
283 master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
284 netdev_warn(master->dev, "Could not send HSR node down\n");
285 rcu_read_unlock();
Arvid Brodinf4214362013-10-30 21:10:47 +0100286}
287
/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
 * about the status of a specific node in the network, defined by its MAC
 * address.
 *
 * Input: hsr ifindex, node mac address
 * Output: hsr ifindex, node mac address (copied from request),
 * age of latest frame from node over slave 1, slave 2 [ms]
 *
 * Returns 0 even on invalid requests (the error is reported to the sender
 * via netlink_ack); a negative errno is returned only for internal failures
 * (allocation, attribute put, node lookup).
 */
static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	struct hsr_port *port;
	unsigned char hsr_node_addr_b[ETH_ALEN];
	int hsr_node_if1_age;
	u16 hsr_node_if1_seq;
	int hsr_node_if2_age;
	u16 hsr_node_if2_seq;
	int addr_b_ifindex;
	int res;

	if (!info)
		goto invalid;

	/* Both the device ifindex and the node MAC are mandatory. */
	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;
	na = info->attrs[HSR_A_NODE_ADDR];
	if (!na)
		goto invalid;

	/* RCU protects the device lookup and the node-table walk below;
	 * held until just before the reply is sent (or an error path).
	 */
	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

	/* Send reply */
	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_STATUS);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	/* Fetch the per-slave age/sequence data and the node's MacAddressB
	 * for the requested MAC from the node table.
	 */
	hsr = netdev_priv(hsr_dev);
	res = hsr_get_node_data(hsr,
				(unsigned char *)
				nla_data(info->attrs[HSR_A_NODE_ADDR]),
				hsr_node_addr_b,
				&addr_b_ifindex,
				&hsr_node_if1_age,
				&hsr_node_if1_seq,
				&hsr_node_if2_age,
				&hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;

	/* Echo the requested node address back to the caller. */
	res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
		      nla_data(info->attrs[HSR_A_NODE_ADDR]));
	if (res < 0)
		goto nla_put_failure;

	/* MacAddressB info is optional: only present when known (>-1). */
	if (addr_b_ifindex > -1) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
			      hsr_node_addr_b);
		if (res < 0)
			goto nla_put_failure;

		res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX,
				  addr_b_ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
	if (res < 0)
		goto nla_put_failure;
	/* Slave ifindex attributes are skipped when the port is absent. */
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}
429
/* Get a list of MacAddressA of all nodes known to this node (including self).
 *
 * If all addresses do not fit in one genl message, the reply is split into
 * several messages via the restart path below; follow-up messages omit the
 * HSR_A_IFINDEX attribute. Returns 0 even for invalid requests (error is
 * reported via netlink_ack); negative errno only for internal failures.
 */
static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
{
	unsigned char addr[ETH_ALEN];
	struct net_device *hsr_dev;
	struct sk_buff *skb_out;
	struct hsr_priv *hsr;
	bool restart = false;
	struct nlattr *na;
	void *pos = NULL;	/* opaque iterator into the node table */
	void *msg_head;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;

	/* RCU protects the device lookup and the node-table iteration. */
	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

restart:
	/* Send reply */
	skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_LIST);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	/* Only the first message of a (possibly split) reply carries the
	 * device ifindex.
	 */
	if (!restart) {
		res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	hsr = netdev_priv(hsr_dev);

	/* On restart, pos still points at the node that did not fit. */
	if (!pos)
		pos = hsr_get_next_node(hsr, NULL, addr);
	while (pos) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
		if (res < 0) {
			/* Message full: flush what we have and start a new
			 * one for the remaining nodes.
			 */
			if (res == -EMSGSIZE) {
				genlmsg_end(skb_out, msg_head);
				genlmsg_unicast(genl_info_net(info), skb_out,
						info->snd_portid);
				restart = true;
				goto restart;
			}
			goto nla_put_failure;
		}
		pos = hsr_get_next_node(hsr, pos, addr);
	}
	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	nlmsg_free(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}
520
/* Commands exposed by the "HSR" generic-netlink family. Both are classic
 * doit-style requests (no dump support); strict validation is disabled for
 * backward compatibility with existing userspace.
 */
static const struct genl_small_ops hsr_ops[] = {
	{
		.cmd = HSR_C_GET_NODE_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_status,
		.dumpit = NULL,
	},
	{
		.cmd = HSR_C_GET_NODE_LIST,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_list,
		.dumpit = NULL,
	},
};
537
/* The "HSR" generic-netlink family: node-table query commands plus the
 * "hsr-network" multicast group for unsolicited events. netnsok allows use
 * from non-initial network namespaces.
 */
static struct genl_family hsr_genl_family __ro_after_init = {
	.hdrsize = 0,
	.name = "HSR",
	.version = 1,
	.maxattr = HSR_A_MAX,
	.policy = hsr_genl_policy,
	.netnsok = true,
	.module = THIS_MODULE,
	.small_ops = hsr_ops,
	.n_small_ops = ARRAY_SIZE(hsr_ops),
	.resv_start_op = HSR_C_SET_NODE_LIST + 1,
	.mcgrps = hsr_mcgrps,
	.n_mcgrps = ARRAY_SIZE(hsr_mcgrps),
};
552
Arvid Brodinf4214362013-10-30 21:10:47 +0100553int __init hsr_netlink_init(void)
554{
555 int rc;
556
557 rc = rtnl_link_register(&hsr_link_ops);
558 if (rc)
559 goto fail_rtnl_link_register;
560
Johannes Berg489111e2016-10-24 14:40:03 +0200561 rc = genl_register_family(&hsr_genl_family);
Arvid Brodinf4214362013-10-30 21:10:47 +0100562 if (rc)
563 goto fail_genl_register_family;
564
Taehee Yooc6c4ccd2019-12-22 11:26:27 +0000565 hsr_debugfs_create_root();
Arvid Brodinf4214362013-10-30 21:10:47 +0100566 return 0;
567
Arvid Brodinf4214362013-10-30 21:10:47 +0100568fail_genl_register_family:
569 rtnl_link_unregister(&hsr_link_ops);
570fail_rtnl_link_register:
571
572 return rc;
573}
574
575void __exit hsr_netlink_exit(void)
576{
Arvid Brodinf4214362013-10-30 21:10:47 +0100577 genl_unregister_family(&hsr_genl_family);
Arvid Brodinf4214362013-10-30 21:10:47 +0100578 rtnl_link_unregister(&hsr_link_ops);
579}
580
581MODULE_ALIAS_RTNL_LINK("hsr");