// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications Inc.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 */
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/qrtr.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <net/sock.h>

#include "qrtr.h"

#define QRTR_PROTO_VER_1 1
#define QRTR_PROTO_VER_2 3

/* auto-bind range */
#define QRTR_MIN_EPH_SOCKET 0x4000
#define QRTR_MAX_EPH_SOCKET 0x7fff
#define QRTR_EPH_PORT_RANGE \
		XA_LIMIT(QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET)

/**
 * struct qrtr_hdr_v1 - (I|R)PCrouter packet header version 1
 * @version: protocol version
 * @type: packet type; one of QRTR_TYPE_*
 * @src_node_id: source node
 * @src_port_id: source port
 * @confirm_rx: boolean; whether a resume-tx packet should be sent in reply
 * @size: length of packet, excluding this header
 * @dst_node_id: destination node
 * @dst_port_id: destination port
 */
struct qrtr_hdr_v1 {
	__le32 version;
	__le32 type;
	__le32 src_node_id;
	__le32 src_port_id;
	__le32 confirm_rx;
	__le32 size;
	__le32 dst_node_id;
	__le32 dst_port_id;
} __packed;

/**
 * struct qrtr_hdr_v2 - (I|R)PCrouter packet header later versions
 * @version: protocol version
 * @type: packet type; one of QRTR_TYPE_*
 * @flags: bitmask of QRTR_FLAGS_*
 * @optlen: length of optional header data
 * @size: length of packet, excluding this header and optlen
 * @src_node_id: source node
 * @src_port_id: source port
 * @dst_node_id: destination node
 * @dst_port_id: destination port
 */
struct qrtr_hdr_v2 {
	u8 version;
	u8 type;
	u8 flags;
	u8 optlen;
	__le32 size;
	__le16 src_node_id;
	__le16 src_port_id;
	__le16 dst_node_id;
	__le16 dst_port_id;
};

#define QRTR_FLAGS_CONFIRM_RX	BIT(0)

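/* Per-packet routing metadata, carried in sk_buff::cb (so it must fit in
 * the 48-byte cb area); filled in from the v1/v2 wire header on receive.
 */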
struct qrtr_cb {
	u32 src_node;
	u32 src_port;
	u32 dst_node;
	u32 dst_port;

	u8 type;
	u8 confirm_rx;
};

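/* Worst-case header size, used to reserve skb headroom for either version */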
#define QRTR_HDR_MAX_SIZE max_t(size_t, sizeof(struct qrtr_hdr_v1), \
				sizeof(struct qrtr_hdr_v2))

struct qrtr_sock {
	/* WARNING: sk must be the first member */
	struct sock sk;
	struct sockaddr_qrtr us;
	struct sockaddr_qrtr peer;
};

static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
{
	BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
	return container_of(sk, struct qrtr_sock, sk);
}

static unsigned int qrtr_local_nid = 1;

/* for node ids */
static RADIX_TREE(qrtr_nodes, GFP_ATOMIC);
static DEFINE_SPINLOCK(qrtr_nodes_lock);
/* broadcast list */
static LIST_HEAD(qrtr_all_nodes);
/* lock for qrtr_all_nodes and node reference */
static DEFINE_MUTEX(qrtr_node_lock);

/* local port allocation management */
static DEFINE_XARRAY_ALLOC(qrtr_ports);

/**
 * struct qrtr_node - endpoint node
 * @ep_lock: lock for endpoint management and callbacks
 * @ep: endpoint
 * @ref: reference count for node
 * @nid: node id
 * @qrtr_tx_flow: tree of qrtr_tx_flow, keyed by node << 32 | port
 * @qrtr_tx_lock: lock for qrtr_tx_flow inserts
 * @rx_queue: receive queue
 * @item: list item for broadcast list
 */
struct qrtr_node {
	struct mutex ep_lock;
	struct qrtr_endpoint *ep;
	struct kref ref;
	unsigned int nid;

	struct radix_tree_root qrtr_tx_flow;
	struct mutex qrtr_tx_lock; /* for qrtr_tx_flow */

	struct sk_buff_head rx_queue;
	struct list_head item;
};

/**
 * struct qrtr_tx_flow - tx flow control
 * @resume_tx: waiters for a resume tx from the remote
 * @pending: number of outstanding data messages since the last resume-tx
 * @tx_failed: indicates that a message with confirm_rx flag was lost
 */
struct qrtr_tx_flow {
	struct wait_queue_head resume_tx;
	int pending;
	int tx_failed;
};

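/* Flow-control watermarks: each outgoing data message increments the flow's
 * "pending" count. When pending reaches the low watermark the message is sent
 * with confirm_rx set, asking the remote for a QRTR_TYPE_RESUME_TX that
 * resets the count; once pending reaches the high watermark, senders block
 * in qrtr_tx_wait() until that resume-tx arrives.
 */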
#define QRTR_TX_FLOW_HIGH 10
#define QRTR_TX_FLOW_LOW 5

static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to);
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to);
static struct qrtr_sock *qrtr_port_lookup(int port);
static void qrtr_port_put(struct qrtr_sock *ipc);

/* Release node resources and free the node.
 *
 * Do not call directly, use qrtr_node_release. To be used with
 * kref_put_mutex. As such, the node mutex is expected to be locked on call.
 */
static void __qrtr_node_release(struct kref *kref)
{
	struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
	struct radix_tree_iter iter;
	struct qrtr_tx_flow *flow;
	unsigned long flags;
	void __rcu **slot;

	spin_lock_irqsave(&qrtr_nodes_lock, flags);
	/* If the node is a bridge for other nodes, there are possibly
	 * multiple entries pointing to our released node, delete them all.
	 */
	radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
		if (*slot == node)
			radix_tree_iter_delete(&qrtr_nodes, &iter, slot);
	}
	spin_unlock_irqrestore(&qrtr_nodes_lock, flags);

	list_del(&node->item);
	mutex_unlock(&qrtr_node_lock);

	skb_queue_purge(&node->rx_queue);

	/* Free tx flow counters */
	radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
		flow = *slot;
		radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot);
		kfree(flow);
	}
	kfree(node);
}

/* Increment reference to node. */
static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
{
	if (node)
		kref_get(&node->ref);
	return node;
}

/* Decrement reference to node and release as necessary. */
static void qrtr_node_release(struct qrtr_node *node)
{
	if (!node)
		return;
	kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
}

/**
 * qrtr_tx_resume() - reset flow control counter
 * @node: qrtr_node that the QRTR_TYPE_RESUME_TX packet arrived on
 * @skb: resume_tx packet
 */
static void qrtr_tx_resume(struct qrtr_node *node, struct sk_buff *skb)
{
	struct qrtr_ctrl_pkt *pkt = (struct qrtr_ctrl_pkt *)skb->data;
	u64 remote_node = le32_to_cpu(pkt->client.node);
	u32 remote_port = le32_to_cpu(pkt->client.port);
	struct qrtr_tx_flow *flow;
	unsigned long key;

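	/* Flow entries are keyed by the 64-bit (node << 32 | port) tuple */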
	key = remote_node << 32 | remote_port;

	rcu_read_lock();
	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
	rcu_read_unlock();
	if (flow) {
		spin_lock(&flow->resume_tx.lock);
		flow->pending = 0;
		spin_unlock(&flow->resume_tx.lock);
		wake_up_interruptible_all(&flow->resume_tx);
	}

	consume_skb(skb);
}

/**
 * qrtr_tx_wait() - flow control for outgoing packets
 * @node: qrtr_node that the packet is to be sent to
 * @dest_node: node id of the destination
 * @dest_port: port number of the destination
 * @type: type of message
 *
 * The flow control scheme is based around the low and high "watermarks". When
 * the low watermark is passed the confirm_rx flag is set on the outgoing
 * message, which will trigger the remote to send a control message of the type
 * QRTR_TYPE_RESUME_TX to reset the counter. If the high watermark is hit
 * further transmission should be paused.
 *
 * Return: 1 if confirm_rx should be set, 0 otherwise, or a negative errno on
 * failure
 */
static int qrtr_tx_wait(struct qrtr_node *node, int dest_node, int dest_port,
			int type)
{
	unsigned long key = (u64)dest_node << 32 | dest_port;
	struct qrtr_tx_flow *flow;
	int confirm_rx = 0;
	int ret;

	/* Never set confirm_rx on non-data packets */
	if (type != QRTR_TYPE_DATA)
		return 0;

	mutex_lock(&node->qrtr_tx_lock);
	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
		if (flow) {
			init_waitqueue_head(&flow->resume_tx);
			if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) {
				kfree(flow);
				flow = NULL;
			}
		}
	}
	mutex_unlock(&node->qrtr_tx_lock);

	/* Set confirm_rx if we were unable to find and allocate a flow */
	if (!flow)
		return 1;

	spin_lock_irq(&flow->resume_tx.lock);
	ret = wait_event_interruptible_locked_irq(flow->resume_tx,
						  flow->pending < QRTR_TX_FLOW_HIGH ||
						  flow->tx_failed ||
						  !node->ep);
	if (ret < 0) {
		confirm_rx = ret;
	} else if (!node->ep) {
		confirm_rx = -EPIPE;
	} else if (flow->tx_failed) {
		flow->tx_failed = 0;
		confirm_rx = 1;
	} else {
		flow->pending++;
		confirm_rx = flow->pending == QRTR_TX_FLOW_LOW;
	}
	spin_unlock_irq(&flow->resume_tx.lock);

	return confirm_rx;
}

/**
 * qrtr_tx_flow_failed() - flag that tx of confirm_rx flagged messages failed
 * @node: qrtr_node that the packet is to be sent to
 * @dest_node: node id of the destination
 * @dest_port: port number of the destination
 *
 * Signal that the transmission of a message with confirm_rx flag failed. The
 * flow's "pending" counter will keep incrementing towards QRTR_TX_FLOW_HIGH,
 * at which point transmission would stall forever waiting for the resume TX
 * message associated with the dropped confirm_rx message.
 * Work around this by marking the flow as having a failed transmission,
 * causing the next transmission attempt to be sent with confirm_rx set.
 */
static void qrtr_tx_flow_failed(struct qrtr_node *node, int dest_node,
				int dest_port)
{
	unsigned long key = (u64)dest_node << 32 | dest_port;
	struct qrtr_tx_flow *flow;

	rcu_read_lock();
	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
	rcu_read_unlock();
	if (flow) {
		spin_lock_irq(&flow->resume_tx.lock);
		flow->tx_failed = 1;
		spin_unlock_irq(&flow->resume_tx.lock);
	}
}

/* Pass an outgoing packet socket buffer to the endpoint driver. */
static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			     int type, struct sockaddr_qrtr *from,
			     struct sockaddr_qrtr *to)
{
	struct qrtr_hdr_v1 *hdr;
	size_t len = skb->len;
	int rc, confirm_rx;

	confirm_rx = qrtr_tx_wait(node, to->sq_node, to->sq_port, type);
	if (confirm_rx < 0) {
		kfree_skb(skb);
		return confirm_rx;
	}

	hdr = skb_push(skb, sizeof(*hdr));
	hdr->version = cpu_to_le32(QRTR_PROTO_VER_1);
	hdr->type = cpu_to_le32(type);
	hdr->src_node_id = cpu_to_le32(from->sq_node);
	hdr->src_port_id = cpu_to_le32(from->sq_port);
	if (to->sq_port == QRTR_PORT_CTRL) {
		hdr->dst_node_id = cpu_to_le32(node->nid);
		hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
	} else {
		hdr->dst_node_id = cpu_to_le32(to->sq_node);
		hdr->dst_port_id = cpu_to_le32(to->sq_port);
	}

	hdr->size = cpu_to_le32(len);
	hdr->confirm_rx = !!confirm_rx;

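	/* The wire format requires 4-byte aligned payloads, pad with zeroes */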
	rc = skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));

	if (!rc) {
		mutex_lock(&node->ep_lock);
		rc = -ENODEV;
		if (node->ep)
			rc = node->ep->xmit(node->ep, skb);
		else
			kfree_skb(skb);
		mutex_unlock(&node->ep_lock);
	}
	/* Need to ensure that a subsequent message carries the otherwise lost
	 * confirm_rx flag if we dropped this one
	 */
	if (rc && confirm_rx)
		qrtr_tx_flow_failed(node, to->sq_node, to->sq_port);

	return rc;
}

/* Lookup node by id.
 *
 * callers must release with qrtr_node_release()
 */
static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
{
	struct qrtr_node *node;
	unsigned long flags;

	spin_lock_irqsave(&qrtr_nodes_lock, flags);
	node = radix_tree_lookup(&qrtr_nodes, nid);
	node = qrtr_node_acquire(node);
	spin_unlock_irqrestore(&qrtr_nodes_lock, flags);

	return node;
}

/* Assign node id to node.
 *
 * This is mostly useful for automatic node id assignment, based on
 * the source id in the incoming packet.
 */
static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
{
	unsigned long flags;

	if (nid == QRTR_EP_NID_AUTO)
		return;

	spin_lock_irqsave(&qrtr_nodes_lock, flags);
	radix_tree_insert(&qrtr_nodes, nid, node);
	if (node->nid == QRTR_EP_NID_AUTO)
		node->nid = nid;
	spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
}

/**
 * qrtr_endpoint_post() - post incoming data
 * @ep: endpoint handle
 * @data: data pointer
 * @len: size of data in bytes
 *
 * Return: 0 on success; negative error code on failure
 */
int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
{
	struct qrtr_node *node = ep->node;
	const struct qrtr_hdr_v1 *v1;
	const struct qrtr_hdr_v2 *v2;
	struct qrtr_sock *ipc;
	struct sk_buff *skb;
	struct qrtr_cb *cb;
	size_t size;
	unsigned int ver;
	size_t hdrlen;

	if (len == 0 || len & 3)
		return -EINVAL;

	skb = __netdev_alloc_skb(NULL, len, GFP_ATOMIC | __GFP_NOWARN);
	if (!skb)
		return -ENOMEM;

	cb = (struct qrtr_cb *)skb->cb;

	/* Version field in v1 is little endian, so this works for both cases */
	ver = *(u8 *)data;

	switch (ver) {
	case QRTR_PROTO_VER_1:
		if (len < sizeof(*v1))
			goto err;
		v1 = data;
		hdrlen = sizeof(*v1);

		cb->type = le32_to_cpu(v1->type);
		cb->src_node = le32_to_cpu(v1->src_node_id);
		cb->src_port = le32_to_cpu(v1->src_port_id);
		cb->confirm_rx = !!v1->confirm_rx;
		cb->dst_node = le32_to_cpu(v1->dst_node_id);
		cb->dst_port = le32_to_cpu(v1->dst_port_id);

		size = le32_to_cpu(v1->size);
		break;
	case QRTR_PROTO_VER_2:
		if (len < sizeof(*v2))
			goto err;
		v2 = data;
		hdrlen = sizeof(*v2) + v2->optlen;

		cb->type = v2->type;
		cb->confirm_rx = !!(v2->flags & QRTR_FLAGS_CONFIRM_RX);
		cb->src_node = le16_to_cpu(v2->src_node_id);
		cb->src_port = le16_to_cpu(v2->src_port_id);
		cb->dst_node = le16_to_cpu(v2->dst_node_id);
		cb->dst_port = le16_to_cpu(v2->dst_port_id);

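		/* v2 carries 16-bit port ids; widen a truncated control-port
		 * value back to the full 32-bit QRTR_PORT_CTRL
		 */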
		if (cb->src_port == (u16)QRTR_PORT_CTRL)
			cb->src_port = QRTR_PORT_CTRL;
		if (cb->dst_port == (u16)QRTR_PORT_CTRL)
			cb->dst_port = QRTR_PORT_CTRL;

		size = le32_to_cpu(v2->size);
		break;
	default:
		pr_err("qrtr: Invalid version %d\n", ver);
		goto err;
	}

	if (!size || len != ALIGN(size, 4) + hdrlen)
		goto err;

	if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
	    cb->type != QRTR_TYPE_RESUME_TX)
		goto err;

	skb_put_data(skb, data + hdrlen, size);

	qrtr_node_assign(node, cb->src_node);

	if (cb->type == QRTR_TYPE_NEW_SERVER) {
		/* Remote node endpoint can bridge other distant nodes */
		const struct qrtr_ctrl_pkt *pkt;

		if (size < sizeof(*pkt))
			goto err;

		pkt = data + hdrlen;
		qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
	}

	if (cb->type == QRTR_TYPE_RESUME_TX) {
		qrtr_tx_resume(node, skb);
	} else {
		ipc = qrtr_port_lookup(cb->dst_port);
		if (!ipc)
			goto err;

		if (sock_queue_rcv_skb(&ipc->sk, skb)) {
			qrtr_port_put(ipc);
			goto err;
		}

		qrtr_port_put(ipc);
	}

	return 0;

err:
	kfree_skb(skb);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_post);

/**
 * qrtr_alloc_ctrl_packet() - allocate control packet skb
 * @pkt: reference to qrtr_ctrl_pkt pointer
 * @flags: the type of memory to allocate
 *
 * Returns newly allocated sk_buff, or NULL on failure
 *
 * This function allocates a sk_buff large enough to carry a qrtr_ctrl_pkt and
 * on success returns a reference to the control packet in @pkt.
 */
static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt,
					      gfp_t flags)
{
	const int pkt_len = sizeof(struct qrtr_ctrl_pkt);
	struct sk_buff *skb;

	skb = alloc_skb(QRTR_HDR_MAX_SIZE + pkt_len, flags);
	if (!skb)
		return NULL;

	skb_reserve(skb, QRTR_HDR_MAX_SIZE);
	*pkt = skb_put_zero(skb, pkt_len);

	return skb;
}

/**
 * qrtr_endpoint_register() - register a new endpoint
 * @ep: endpoint to register
 * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
 * Return: 0 on success; negative error code on failure
 *
 * The specified endpoint must have the xmit function pointer set on call.
 */
int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
{
	struct qrtr_node *node;

	if (!ep || !ep->xmit)
		return -EINVAL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	kref_init(&node->ref);
	mutex_init(&node->ep_lock);
	skb_queue_head_init(&node->rx_queue);
	node->nid = QRTR_EP_NID_AUTO;
	node->ep = ep;

	INIT_RADIX_TREE(&node->qrtr_tx_flow, GFP_KERNEL);
	mutex_init(&node->qrtr_tx_lock);

	qrtr_node_assign(node, nid);

	mutex_lock(&qrtr_node_lock);
	list_add(&node->item, &qrtr_all_nodes);
	mutex_unlock(&qrtr_node_lock);
	ep->node = node;

	return 0;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_register);

/**
 * qrtr_endpoint_unregister - unregister endpoint
 * @ep: endpoint to unregister
 */
void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
{
	struct qrtr_node *node = ep->node;
	struct sockaddr_qrtr src = {AF_QIPCRTR, node->nid, QRTR_PORT_CTRL};
	struct sockaddr_qrtr dst = {AF_QIPCRTR, qrtr_local_nid, QRTR_PORT_CTRL};
	struct radix_tree_iter iter;
	struct qrtr_ctrl_pkt *pkt;
	struct qrtr_tx_flow *flow;
	struct sk_buff *skb;
	unsigned long flags;
	void __rcu **slot;

	mutex_lock(&node->ep_lock);
	node->ep = NULL;
	mutex_unlock(&node->ep_lock);

	/* Notify the local controller about the event */
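	/* A bridge node may be registered under several node ids; send a
	 * BYE on behalf of each of them.
	 */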
	spin_lock_irqsave(&qrtr_nodes_lock, flags);
	radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
		if (*slot != node)
			continue;
		src.sq_node = iter.index;
		skb = qrtr_alloc_ctrl_packet(&pkt, GFP_ATOMIC);
		if (skb) {
			pkt->cmd = cpu_to_le32(QRTR_TYPE_BYE);
			qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst);
		}
	}
	spin_unlock_irqrestore(&qrtr_nodes_lock, flags);

	/* Wake up any transmitters waiting for resume-tx from the node */
	mutex_lock(&node->qrtr_tx_lock);
	radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
		flow = *slot;
		wake_up_interruptible_all(&flow->resume_tx);
	}
	mutex_unlock(&node->qrtr_tx_lock);

	qrtr_node_release(node);
	ep->node = NULL;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister);

/* Lookup socket by port.
 *
 * Callers must release with qrtr_port_put()
 */
static struct qrtr_sock *qrtr_port_lookup(int port)
{
	struct qrtr_sock *ipc;

	if (port == QRTR_PORT_CTRL)
		port = 0;

	rcu_read_lock();
	ipc = xa_load(&qrtr_ports, port);
	if (ipc)
		sock_hold(&ipc->sk);
	rcu_read_unlock();

	return ipc;
}

/* Release acquired socket. */
static void qrtr_port_put(struct qrtr_sock *ipc)
{
	sock_put(&ipc->sk);
}

/* Remove port assignment. */
static void qrtr_port_remove(struct qrtr_sock *ipc)
{
	struct qrtr_ctrl_pkt *pkt;
	struct sk_buff *skb;
	int port = ipc->us.sq_port;
	struct sockaddr_qrtr to;

	to.sq_family = AF_QIPCRTR;
	to.sq_node = QRTR_NODE_BCAST;
	to.sq_port = QRTR_PORT_CTRL;

	skb = qrtr_alloc_ctrl_packet(&pkt, GFP_KERNEL);
	if (skb) {
		pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT);
		pkt->client.node = cpu_to_le32(ipc->us.sq_node);
		pkt->client.port = cpu_to_le32(ipc->us.sq_port);

		skb_set_owner_w(skb, &ipc->sk);
		qrtr_bcast_enqueue(NULL, skb, QRTR_TYPE_DEL_CLIENT, &ipc->us,
				   &to);
	}

	if (port == QRTR_PORT_CTRL)
		port = 0;

	__sock_put(&ipc->sk);

	xa_erase(&qrtr_ports, port);

	/* Ensure that if qrtr_port_lookup() did enter the RCU read section we
	 * wait for it to finish incrementing the refcount
	 */
	synchronize_rcu();
}

/* Assign port number to socket.
 *
 * Specify port in the integer pointed to by port, and it will be adjusted
 * on return as necessary.
 *
 * Port may be:
 *   0: Assign an ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
 *   <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN
 *   >=QRTR_MIN_EPH_SOCKET: Specified; available to all
 */
static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
{
	int rc;

	if (!*port) {
		rc = xa_alloc(&qrtr_ports, port, ipc, QRTR_EPH_PORT_RANGE,
			      GFP_KERNEL);
	} else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
		rc = -EACCES;
	} else if (*port == QRTR_PORT_CTRL) {
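		/* The control port lives at index 0 in qrtr_ports */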
		rc = xa_insert(&qrtr_ports, 0, ipc, GFP_KERNEL);
	} else {
		rc = xa_insert(&qrtr_ports, *port, ipc, GFP_KERNEL);
	}

	if (rc == -EBUSY)
		return -EADDRINUSE;
	else if (rc < 0)
		return rc;

	sock_hold(&ipc->sk);

	return 0;
}

/* Reset all non-control ports */
static void qrtr_reset_ports(void)
{
	struct qrtr_sock *ipc;
	unsigned long index;

	rcu_read_lock();
	xa_for_each_start(&qrtr_ports, index, ipc, 1) {
		sock_hold(&ipc->sk);
		ipc->sk.sk_err = ENETRESET;
		sk_error_report(&ipc->sk);
		sock_put(&ipc->sk);
	}
	rcu_read_unlock();
}

/* Bind socket to address.
 *
 * Socket should be locked upon call.
 */
static int __qrtr_bind(struct socket *sock,
		       const struct sockaddr_qrtr *addr, int zapped)
{
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int port;
	int rc;

	/* rebinding ok */
	if (!zapped && addr->sq_port == ipc->us.sq_port)
		return 0;

	port = addr->sq_port;
	rc = qrtr_port_assign(ipc, &port);
	if (rc)
		return rc;

	/* unbind previous, if any */
	if (!zapped)
		qrtr_port_remove(ipc);
	ipc->us.sq_port = port;

	sock_reset_flag(sk, SOCK_ZAPPED);

	/* Notify all open ports about the new controller */
	if (port == QRTR_PORT_CTRL)
		qrtr_reset_ports();

	return 0;
}

/* Auto bind to an ephemeral port. */
static int qrtr_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct sockaddr_qrtr addr;

	if (!sock_flag(sk, SOCK_ZAPPED))
		return 0;

	addr.sq_family = AF_QIPCRTR;
	addr.sq_node = qrtr_local_nid;
	addr.sq_port = 0;

	return __qrtr_bind(sock, &addr, 1);
}

/* Bind socket to specified sockaddr. */
static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int rc;

	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
		return -EINVAL;

	if (addr->sq_node != ipc->us.sq_node)
		return -EINVAL;

	lock_sock(sk);
	rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
	release_sock(sk);

	return rc;
}

/* Queue packet to local peer socket. */
static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to)
{
	struct qrtr_sock *ipc;
	struct qrtr_cb *cb;

	ipc = qrtr_port_lookup(to->sq_port);
	if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
		if (ipc)
			qrtr_port_put(ipc);
		kfree_skb(skb);
		return -ENODEV;
	}

	cb = (struct qrtr_cb *)skb->cb;
	cb->src_node = from->sq_node;
	cb->src_port = from->sq_port;

	if (sock_queue_rcv_skb(&ipc->sk, skb)) {
		qrtr_port_put(ipc);
		kfree_skb(skb);
		return -ENOSPC;
	}

	qrtr_port_put(ipc);

	return 0;
}

/* Queue packet for broadcast. */
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to)
{
	struct sk_buff *skbn;

	mutex_lock(&qrtr_node_lock);
	list_for_each_entry(node, &qrtr_all_nodes, item) {
		skbn = skb_clone(skb, GFP_KERNEL);
		if (!skbn)
			break;
		skb_set_owner_w(skbn, skb->sk);
		qrtr_node_enqueue(node, skbn, type, from, to);
	}
	mutex_unlock(&qrtr_node_lock);

	qrtr_local_enqueue(NULL, skb, type, from, to);

	return 0;
}

static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *, int,
			  struct sockaddr_qrtr *, struct sockaddr_qrtr *);
	__le32 qrtr_type = cpu_to_le32(QRTR_TYPE_DATA);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct qrtr_node *node;
	struct sk_buff *skb;
	size_t plen;
	u32 type;
	int rc;

	if (msg->msg_flags & ~(MSG_DONTWAIT))
		return -EINVAL;

	if (len > 65535)
		return -EMSGSIZE;

	lock_sock(sk);

	if (addr) {
		if (msg->msg_namelen < sizeof(*addr)) {
			release_sock(sk);
			return -EINVAL;
		}

		if (addr->sq_family != AF_QIPCRTR) {
			release_sock(sk);
			return -EINVAL;
		}

		rc = qrtr_autobind(sock);
		if (rc) {
			release_sock(sk);
			return rc;
		}
	} else if (sk->sk_state == TCP_ESTABLISHED) {
		addr = &ipc->peer;
	} else {
		release_sock(sk);
		return -ENOTCONN;
	}

	node = NULL;
	if (addr->sq_node == QRTR_NODE_BCAST) {
		if (addr->sq_port != QRTR_PORT_CTRL &&
		    qrtr_local_nid != QRTR_NODE_BCAST) {
			release_sock(sk);
			return -ENOTCONN;
		}
		enqueue_fn = qrtr_bcast_enqueue;
	} else if (addr->sq_node == ipc->us.sq_node) {
		enqueue_fn = qrtr_local_enqueue;
	} else {
		node = qrtr_node_lookup(addr->sq_node);
		if (!node) {
			release_sock(sk);
			return -ECONNRESET;
		}
		enqueue_fn = qrtr_node_enqueue;
	}

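	/* Round the payload up to the 4-byte multiple used on the wire */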
	plen = (len + 3) & ~3;
	skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_MAX_SIZE,
				  msg->msg_flags & MSG_DONTWAIT, &rc);
	if (!skb) {
		rc = -ENOMEM;
		goto out_node;
	}

	skb_reserve(skb, QRTR_HDR_MAX_SIZE);

	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (rc) {
		kfree_skb(skb);
		goto out_node;
	}

	if (ipc->us.sq_port == QRTR_PORT_CTRL) {
		if (len < 4) {
			rc = -EINVAL;
			kfree_skb(skb);
			goto out_node;
		}

		/* control messages already require the type as 'command' */
		skb_copy_bits(skb, 0, &qrtr_type, 4);
	}

	type = le32_to_cpu(qrtr_type);
	rc = enqueue_fn(node, skb, type, &ipc->us, addr);
	if (rc >= 0)
		rc = len;

out_node:
	qrtr_node_release(node);
	release_sock(sk);

	return rc;
}

static int qrtr_send_resume_tx(struct qrtr_cb *cb)
{
	struct sockaddr_qrtr remote = { AF_QIPCRTR, cb->src_node, cb->src_port };
	struct sockaddr_qrtr local = { AF_QIPCRTR, cb->dst_node, cb->dst_port };
	struct qrtr_ctrl_pkt *pkt;
	struct qrtr_node *node;
	struct sk_buff *skb;
	int ret;

	node = qrtr_node_lookup(remote.sq_node);
	if (!node)
		return -EINVAL;

	skb = qrtr_alloc_ctrl_packet(&pkt, GFP_KERNEL);
	if (!skb) {
		qrtr_node_release(node);
		return -ENOMEM;
	}

	pkt->cmd = cpu_to_le32(QRTR_TYPE_RESUME_TX);
	pkt->client.node = cpu_to_le32(cb->dst_node);
	pkt->client.port = cpu_to_le32(cb->dst_port);

	ret = qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX, &local, &remote);

	qrtr_node_release(node);

	return ret;
}

static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t size, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	struct qrtr_cb *cb;
	int copied, rc;

	lock_sock(sk);

	if (sock_flag(sk, SOCK_ZAPPED)) {
		release_sock(sk);
		return -EADDRNOTAVAIL;
	}

	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
				flags & MSG_DONTWAIT, &rc);
	if (!skb) {
		release_sock(sk);
		return rc;
	}
	cb = (struct qrtr_cb *)skb->cb;

	copied = skb->len;
	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	rc = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (rc < 0)
		goto out;
	rc = copied;

	if (addr) {
		/* There is an anonymous 2-byte hole after sq_family,
		 * make sure to clear it.
		 */
		memset(addr, 0, sizeof(*addr));

		addr->sq_family = AF_QIPCRTR;
		addr->sq_node = cb->src_node;
		addr->sq_port = cb->src_port;
		msg->msg_namelen = sizeof(*addr);
	}

out:
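	/* Ack the confirm_rx request so the remote may resume transmission */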
	if (cb->confirm_rx)
		qrtr_send_resume_tx(cb);

	skb_free_datagram(sk, skb);
	release_sock(sk);

	return rc;
}

static int qrtr_connect(struct socket *sock, struct sockaddr *saddr,
			int len, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int rc;

	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
		return -EINVAL;

	lock_sock(sk);

	sk->sk_state = TCP_CLOSE;
	sock->state = SS_UNCONNECTED;

	rc = qrtr_autobind(sock);
	if (rc) {
		release_sock(sk);
		return rc;
	}

	ipc->peer = *addr;
	sock->state = SS_CONNECTED;
	sk->sk_state = TCP_ESTABLISHED;

	release_sock(sk);

	return 0;
}

static int qrtr_getname(struct socket *sock, struct sockaddr *saddr,
			int peer)
{
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sockaddr_qrtr qaddr;
	struct sock *sk = sock->sk;

	lock_sock(sk);
	if (peer) {
		if (sk->sk_state != TCP_ESTABLISHED) {
			release_sock(sk);
			return -ENOTCONN;
		}

		qaddr = ipc->peer;
	} else {
		qaddr = ipc->us;
	}
	release_sock(sk);

	qaddr.sq_family = AF_QIPCRTR;

	memcpy(saddr, &qaddr, sizeof(qaddr));

	return sizeof(qaddr);
}

static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct sockaddr_qrtr *sq;
	struct sk_buff *skb;
	struct ifreq ifr;
	long len = 0;
	int rc = 0;

	lock_sock(sk);

	switch (cmd) {
	case TIOCOUTQ:
		len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (len < 0)
			len = 0;
		rc = put_user(len, (int __user *)argp);
		break;
	case TIOCINQ:
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			len = skb->len;
		rc = put_user(len, (int __user *)argp);
		break;
	case SIOCGIFADDR:
		if (get_user_ifreq(&ifr, NULL, argp)) {
			rc = -EFAULT;
			break;
		}

		sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
		*sq = ipc->us;
		if (put_user_ifreq(&ifr, argp)) {
			rc = -EFAULT;
			break;
		}
		break;
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
		rc = -EINVAL;
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}

	release_sock(sk);

	return rc;
}

static int qrtr_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct qrtr_sock *ipc;

	if (!sk)
		return 0;

	lock_sock(sk);

	ipc = qrtr_sk(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);

	sock_set_flag(sk, SOCK_DEAD);
	sock_orphan(sk);
	sock->sk = NULL;

	if (!sock_flag(sk, SOCK_ZAPPED))
		qrtr_port_remove(ipc);

	skb_queue_purge(&sk->sk_receive_queue);

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static const struct proto_ops qrtr_proto_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_QIPCRTR,
	.bind		= qrtr_bind,
	.connect	= qrtr_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.listen		= sock_no_listen,
	.sendmsg	= qrtr_sendmsg,
	.recvmsg	= qrtr_recvmsg,
	.getname	= qrtr_getname,
	.ioctl		= qrtr_ioctl,
	.gettstamp	= sock_gettstamp,
	.poll		= datagram_poll,
	.shutdown	= sock_no_shutdown,
	.release	= qrtr_release,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

static struct proto qrtr_proto = {
	.name		= "QIPCRTR",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct qrtr_sock),
};

static int qrtr_create(struct net *net, struct socket *sock,
		       int protocol, int kern)
{
	struct qrtr_sock *ipc;
	struct sock *sk;

	if (sock->type != SOCK_DGRAM)
		return -EPROTOTYPE;

	sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_set_flag(sk, SOCK_ZAPPED);

	sock_init_data(sock, sk);
	sock->ops = &qrtr_proto_ops;

	ipc = qrtr_sk(sk);
	ipc->us.sq_family = AF_QIPCRTR;
	ipc->us.sq_node = qrtr_local_nid;
	ipc->us.sq_port = 0;

	return 0;
}

static const struct net_proto_family qrtr_family = {
	.owner	= THIS_MODULE,
	.family	= AF_QIPCRTR,
	.create	= qrtr_create,
};

static int __init qrtr_proto_init(void)
{
	int rc;

	rc = proto_register(&qrtr_proto, 1);
	if (rc)
		return rc;

	rc = sock_register(&qrtr_family);
	if (rc)
		goto err_proto;

	rc = qrtr_ns_init();
	if (rc)
		goto err_sock;

	return 0;

err_sock:
	sock_unregister(qrtr_family.family);
err_proto:
	proto_unregister(&qrtr_proto);
	return rc;
}
postcore_initcall(qrtr_proto_init);

static void __exit qrtr_proto_fini(void)
{
	qrtr_ns_remove();
	sock_unregister(qrtr_family.family);
	proto_unregister(&qrtr_proto);
}
module_exit(qrtr_proto_fini);

MODULE_DESCRIPTION("Qualcomm IPC-router driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_NETPROTO(PF_QIPCRTR);