// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>
#include <linux/wait.h>
#include <linux/rpmsg.h>
#include <linux/rpmsg/qcom_smd.h>

#include "rpmsg_internal.h"

/*
 * The Qualcomm Shared Memory communication solution provides point-to-point
 * channels for clients to send and receive streaming or packet based data.
 *
 * Each channel consists of a control item (channel info) and a ring buffer
 * pair. The channel info carries information related to channel state, flow
 * control and the offsets within the ring buffer.
 *
 * All allocated channels are listed in an allocation table, identifying the
 * pair of items by name, type and remote processor.
 *
 * Upon creating a new channel the remote processor allocates channel info and
 * ring buffer items from the smem heap and populates the allocation table. An
 * interrupt is sent to the other end of the channel and a scan for new
 * channels should be done. A channel never goes away, it will only change
 * state.
 *
 * The remote processor signals its intent to bring up the communication
 * channel by setting the state of its end of the channel to "opening" and
 * sends out an interrupt. We detect this change and register a smd device to
 * consume the channel. Upon finding a consumer we finish the handshake and the
 * channel is up.
 *
 * Upon closing a channel, the remote processor will update the state of its
 * end of the channel and signal us, we will then unregister any attached
 * device and close our end of the channel.
 *
 * Devices attached to a channel can use the qcom_smd_send function to push
 * data to the channel, this is done by copying the data into the tx ring
 * buffer, updating the pointers in the channel info and signaling the remote
 * processor.
 *
 * The remote processor does the equivalent when it transfers data and upon
 * receiving the interrupt we check the channel info for new data and deliver
 * it to the attached device. If the device is not ready to receive the data
 * we leave it in the ring buffer for now.
 */
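
/*
 * For orientation, a client of this transport is an ordinary rpmsg driver.
 * A minimal sketch is shown below; the driver and payload names are
 * hypothetical and not part of this file, and binding happens through an
 * id table or a matching qcom,smd-channels child node (see
 * qcom_smd_match_channel() below):
 *
 *	static int my_smd_probe(struct rpmsg_device *rpdev)
 *	{
 *		static char ping[] = "ping";
 *
 *		return rpmsg_send(rpdev->ept, ping, sizeof(ping));
 *	}
 *
 *	static struct rpmsg_driver my_smd_driver = {
 *		.probe = my_smd_probe,
 *		.drv = { .name = "my_smd_client" },
 *	};
 *	module_rpmsg_driver(my_smd_driver);
 */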

struct smd_channel_info;
struct smd_channel_info_pair;
struct smd_channel_info_word;
struct smd_channel_info_word_pair;

static const struct rpmsg_endpoint_ops qcom_smd_endpoint_ops;

#define SMD_ALLOC_TBL_COUNT	2
#define SMD_ALLOC_TBL_SIZE	64

/*
 * This lists the various smem heap items relevant for the allocation table and
 * smd channel entries.
 */
static const struct {
	unsigned alloc_tbl_id;
	unsigned info_base_id;
	unsigned fifo_base_id;
} smem_items[SMD_ALLOC_TBL_COUNT] = {
	{
		.alloc_tbl_id = 13,
		.info_base_id = 14,
		.fifo_base_id = 338
	},
	{
		.alloc_tbl_id = 266,
		.info_base_id = 138,
		.fifo_base_id = 202,
	},
};
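
/*
 * Worked example of how these bases combine with a channel id (see
 * qcom_channel_scan_worker() below): a channel with cid 5 in the first
 * allocation table is backed by smem item 14 + 5 = 19 for its channel info
 * pair and item 338 + 5 = 343 for its fifo pair.
 */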

/**
 * struct qcom_smd_edge - representing a remote processor
 * @dev: device associated with this edge
 * @name: name of this edge
 * @of_node: of_node handle for information related to this edge
 * @edge_id: identifier of this edge
 * @remote_pid: identifier of remote processor
 * @irq: interrupt for signals on this edge
 * @ipc_regmap: regmap handle holding the outgoing ipc register
 * @ipc_offset: offset within @ipc_regmap of the register for ipc
 * @ipc_bit: bit in the register at @ipc_offset of @ipc_regmap
 * @mbox_client: mailbox client handle
 * @mbox_chan: apcs ipc mailbox channel handle
 * @channels: list of all channels detected on this edge
 * @channels_lock: guard for modifications of @channels
 * @allocated: array of bitmaps representing already allocated channels
 * @smem_available: last available amount of smem triggering a channel scan
 * @new_channel_event: wait queue for new channel events
 * @scan_work: work item for discovering new channels
 * @state_work: work item for edge state changes
 */
struct qcom_smd_edge {
	struct device dev;

	const char *name;

	struct device_node *of_node;
	unsigned edge_id;
	unsigned remote_pid;

	int irq;

	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	struct list_head channels;
	spinlock_t channels_lock;

	DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE);

	unsigned smem_available;

	wait_queue_head_t new_channel_event;

	struct work_struct scan_work;
	struct work_struct state_work;
};

/*
 * SMD channel states.
 */
enum smd_channel_state {
	SMD_CHANNEL_CLOSED,
	SMD_CHANNEL_OPENING,
	SMD_CHANNEL_OPENED,
	SMD_CHANNEL_FLUSHING,
	SMD_CHANNEL_CLOSING,
	SMD_CHANNEL_RESET,
	SMD_CHANNEL_RESET_OPENING
};

struct qcom_smd_device {
	struct rpmsg_device rpdev;

	struct qcom_smd_edge *edge;
};

struct qcom_smd_endpoint {
	struct rpmsg_endpoint ept;

	struct qcom_smd_channel *qsch;
};

#define to_smd_device(r)	container_of(r, struct qcom_smd_device, rpdev)
#define to_smd_edge(d)		container_of(d, struct qcom_smd_edge, dev)
#define to_smd_endpoint(e)	container_of(e, struct qcom_smd_endpoint, ept)

/**
 * struct qcom_smd_channel - smd channel struct
 * @edge: qcom_smd_edge this channel is living on
 * @qsept: reference to an associated smd endpoint
 * @registered: flag to indicate if the channel is registered
 * @name: name of the channel
 * @state: local state of the channel
 * @remote_state: remote state of the channel
 * @state_change_event: state change event
 * @info: byte aligned outgoing/incoming channel info
 * @info_word: word aligned outgoing/incoming channel info
 * @tx_lock: lock to make writes to the channel mutually exclusive
 * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR
 * @tx_fifo: pointer to the outgoing ring buffer
 * @rx_fifo: pointer to the incoming ring buffer
 * @fifo_size: size of each ring buffer
 * @bounce_buffer: bounce buffer for reading wrapped packets
 * @recv_lock: guard for rx info modifications and cb pointer
 * @pkt_size: size of the currently handled packet
 * @drvdata: driver private data
 * @list: list entry for @channels in qcom_smd_edge
 */
struct qcom_smd_channel {
	struct qcom_smd_edge *edge;

	struct qcom_smd_endpoint *qsept;
	bool registered;

	char *name;
	enum smd_channel_state state;
	enum smd_channel_state remote_state;
	wait_queue_head_t state_change_event;

	struct smd_channel_info_pair *info;
	struct smd_channel_info_word_pair *info_word;

	spinlock_t tx_lock;
	wait_queue_head_t fblockread_event;

	void *tx_fifo;
	void *rx_fifo;
	int fifo_size;

	void *bounce_buffer;

	spinlock_t recv_lock;

	int pkt_size;

	void *drvdata;

	struct list_head list;
};

/*
 * Format of the smd_info smem items, for byte aligned channels.
 */
struct smd_channel_info {
	__le32 state;
	u8 fDSR;
	u8 fCTS;
	u8 fCD;
	u8 fRI;
	u8 fHEAD;
	u8 fTAIL;
	u8 fSTATE;
	u8 fBLOCKREADINTR;
	__le32 tail;
	__le32 head;
};

struct smd_channel_info_pair {
	struct smd_channel_info tx;
	struct smd_channel_info rx;
};

/*
 * Format of the smd_info smem items, for word aligned channels.
 */
struct smd_channel_info_word {
	__le32 state;
	__le32 fDSR;
	__le32 fCTS;
	__le32 fCD;
	__le32 fRI;
	__le32 fHEAD;
	__le32 fTAIL;
	__le32 fSTATE;
	__le32 fBLOCKREADINTR;
	__le32 tail;
	__le32 head;
};

struct smd_channel_info_word_pair {
	struct smd_channel_info_word tx;
	struct smd_channel_info_word rx;
};
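
/*
 * These two layouts are told apart by item size alone (see
 * qcom_smd_create_channel()): a byte aligned info struct is
 * 4 + 8 * 1 + 4 + 4 = 20 bytes, giving a 40 byte tx/rx pair, while a word
 * aligned info struct is 11 * 4 = 44 bytes, giving an 88 byte pair.
 */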

#define GET_RX_CHANNEL_FLAG(channel, param) \
	({ \
		BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \
		channel->info_word ? \
			le32_to_cpu(channel->info_word->rx.param) : \
			channel->info->rx.param; \
	})

#define GET_RX_CHANNEL_INFO(channel, param) \
	({ \
		BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \
		le32_to_cpu(channel->info_word ? \
			channel->info_word->rx.param : \
			channel->info->rx.param); \
	})

#define SET_RX_CHANNEL_FLAG(channel, param, value) \
	({ \
		BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \
		if (channel->info_word) \
			channel->info_word->rx.param = cpu_to_le32(value); \
		else \
			channel->info->rx.param = value; \
	})

#define SET_RX_CHANNEL_INFO(channel, param, value) \
	({ \
		BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \
		if (channel->info_word) \
			channel->info_word->rx.param = cpu_to_le32(value); \
		else \
			channel->info->rx.param = cpu_to_le32(value); \
	})

#define GET_TX_CHANNEL_FLAG(channel, param) \
	({ \
		BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \
		channel->info_word ? \
			le32_to_cpu(channel->info_word->tx.param) : \
			channel->info->tx.param; \
	})

#define GET_TX_CHANNEL_INFO(channel, param) \
	({ \
		BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \
		le32_to_cpu(channel->info_word ? \
			channel->info_word->tx.param : \
			channel->info->tx.param); \
	})

#define SET_TX_CHANNEL_FLAG(channel, param, value) \
	({ \
		BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \
		if (channel->info_word) \
			channel->info_word->tx.param = cpu_to_le32(value); \
		else \
			channel->info->tx.param = value; \
	})

#define SET_TX_CHANNEL_INFO(channel, param, value) \
	({ \
		BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \
		if (channel->info_word) \
			channel->info_word->tx.param = cpu_to_le32(value); \
		else \
			channel->info->tx.param = cpu_to_le32(value); \
	})

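/*
 * As an example of the accessors above, GET_RX_CHANNEL_INFO(channel, tail)
 * reads channel->info_word->rx.tail (with endian conversion) on a word
 * aligned channel and channel->info->rx.tail otherwise, while the
 * BUILD_BUG_ON()s ensure the FLAG accessors are only used on the u8 flag
 * fields and the INFO accessors only on the u32 fields.
 */
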
/**
 * struct qcom_smd_alloc_entry - channel allocation entry
 * @name: channel name
 * @cid: channel index
 * @flags: channel flags and edge id
 * @ref_count: reference count of the channel
 */
struct qcom_smd_alloc_entry {
	u8 name[20];
	__le32 cid;
	__le32 flags;
	__le32 ref_count;
} __packed;

#define SMD_CHANNEL_FLAGS_EDGE_MASK	0xff
#define SMD_CHANNEL_FLAGS_STREAM	BIT(8)
#define SMD_CHANNEL_FLAGS_PACKET	BIT(9)

/*
 * Each smd packet contains a 20 byte header, with the first 4 being the length
 * of the packet.
 */
#define SMD_PACKET_HEADER_LEN	20
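
/*
 * A packet on the wire therefore looks as follows, cf. the hdr[5] array in
 * __qcom_smd_send() below:
 *
 *	offset  0: __le32 payload length
 *	offset  4: four further __le32 header words, written as zero by this
 *	           driver
 *	offset 20: payload
 */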

/*
 * Signal the remote processor associated with 'channel'.
 */
static void qcom_smd_signal_channel(struct qcom_smd_channel *channel)
{
	struct qcom_smd_edge *edge = channel->edge;

	if (edge->mbox_chan) {
		/*
		 * We can ignore a failing mbox_send_message() as the only
		 * possible cause is that the FIFO in the framework is full of
		 * other writes to the same bit.
		 */
		mbox_send_message(edge->mbox_chan, NULL);
		mbox_client_txdone(edge->mbox_chan, 0);
	} else {
		regmap_write(edge->ipc_regmap, edge->ipc_offset, BIT(edge->ipc_bit));
	}
}

/*
 * Initialize the tx channel info
 */
static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
{
	SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
	SET_TX_CHANNEL_FLAG(channel, fDSR, 0);
	SET_TX_CHANNEL_FLAG(channel, fCTS, 0);
	SET_TX_CHANNEL_FLAG(channel, fCD, 0);
	SET_TX_CHANNEL_FLAG(channel, fRI, 0);
	SET_TX_CHANNEL_FLAG(channel, fHEAD, 0);
	SET_TX_CHANNEL_FLAG(channel, fTAIL, 0);
	SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);
	SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
	SET_TX_CHANNEL_INFO(channel, head, 0);
	SET_RX_CHANNEL_INFO(channel, tail, 0);

	qcom_smd_signal_channel(channel);

	channel->state = SMD_CHANNEL_CLOSED;
	channel->pkt_size = 0;
}

/*
 * Set the callback for a channel, with appropriate locking
 */
static void qcom_smd_channel_set_callback(struct qcom_smd_channel *channel,
					  rpmsg_rx_cb_t cb)
{
	struct rpmsg_endpoint *ept = &channel->qsept->ept;
	unsigned long flags;

	spin_lock_irqsave(&channel->recv_lock, flags);
	ept->cb = cb;
	spin_unlock_irqrestore(&channel->recv_lock, flags);
}

/*
 * Calculate the amount of data available in the rx fifo
 */
static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel)
{
	unsigned head;
	unsigned tail;

	head = GET_RX_CHANNEL_INFO(channel, head);
	tail = GET_RX_CHANNEL_INFO(channel, tail);

	return (head - tail) & (channel->fifo_size - 1);
}
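
/*
 * The index arithmetic above relies on fifo_size being a power of two.
 * Worked example: with fifo_size = 0x1000, head = 0x4 and tail = 0xff0 the
 * fifo has wrapped and (0x4 - 0xff0) & 0xfff = 0x14, i.e. 20 bytes are
 * available to read.
 */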

/*
 * Set tx channel state and inform the remote processor
 */
static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
				       int state)
{
	struct qcom_smd_edge *edge = channel->edge;
	bool is_open = state == SMD_CHANNEL_OPENED;

	if (channel->state == state)
		return;

	dev_dbg(&edge->dev, "set_state(%s, %d)\n", channel->name, state);

	SET_TX_CHANNEL_FLAG(channel, fDSR, is_open);
	SET_TX_CHANNEL_FLAG(channel, fCTS, is_open);
	SET_TX_CHANNEL_FLAG(channel, fCD, is_open);

	SET_TX_CHANNEL_INFO(channel, state, state);
	SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);

	channel->state = state;
	qcom_smd_signal_channel(channel);
}

/*
 * Copy count bytes of data using 32bit accesses, if that's required.
 */
static void smd_copy_to_fifo(void __iomem *dst,
			     const void *src,
			     size_t count,
			     bool word_aligned)
{
	if (word_aligned) {
		__iowrite32_copy(dst, src, count / sizeof(u32));
	} else {
		memcpy_toio(dst, src, count);
	}
}

/*
 * Copy count bytes of data using 32bit accesses, if that is required.
 */
static void smd_copy_from_fifo(void *dst,
			       const void __iomem *src,
			       size_t count,
			       bool word_aligned)
{
	if (word_aligned) {
		__ioread32_copy(dst, src, count / sizeof(u32));
	} else {
		memcpy_fromio(dst, src, count);
	}
}

/*
 * Read count bytes of data from the rx fifo into buf, but don't advance the
 * tail.
 */
static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel,
				    void *buf, size_t count)
{
	bool word_aligned;
	unsigned tail;
	size_t len;

	word_aligned = channel->info_word;
	tail = GET_RX_CHANNEL_INFO(channel, tail);

	len = min_t(size_t, count, channel->fifo_size - tail);
	if (len) {
		smd_copy_from_fifo(buf,
				   channel->rx_fifo + tail,
				   len,
				   word_aligned);
	}

	if (len != count) {
		smd_copy_from_fifo(buf + len,
				   channel->rx_fifo,
				   count - len,
				   word_aligned);
	}

	return count;
}

/*
 * Advance the rx tail by count bytes.
 */
static void qcom_smd_channel_advance(struct qcom_smd_channel *channel,
				     size_t count)
{
	unsigned tail;

	tail = GET_RX_CHANNEL_INFO(channel, tail);
	tail += count;
	tail &= (channel->fifo_size - 1);
	SET_RX_CHANNEL_INFO(channel, tail, tail);
}

/*
 * Read out a single packet from the rx fifo and deliver it to the device
 */
static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel)
{
	struct rpmsg_endpoint *ept = &channel->qsept->ept;
	unsigned tail;
	size_t len;
	void *ptr;
	int ret;

	tail = GET_RX_CHANNEL_INFO(channel, tail);

	/* Use bounce buffer if the data wraps */
	if (tail + channel->pkt_size >= channel->fifo_size) {
		ptr = channel->bounce_buffer;
		len = qcom_smd_channel_peek(channel, ptr, channel->pkt_size);
	} else {
		ptr = channel->rx_fifo + tail;
		len = channel->pkt_size;
	}

	ret = ept->cb(ept->rpdev, ptr, len, ept->priv, RPMSG_ADDR_ANY);
	if (ret < 0)
		return ret;

	/* Only forward the tail if the client consumed the data */
	qcom_smd_channel_advance(channel, len);

	channel->pkt_size = 0;

	return 0;
}

/*
 * Per channel interrupt handling
 */
static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
{
	bool need_state_scan = false;
	int remote_state;
	__le32 pktlen;
	int avail;
	int ret;

	/* Handle state changes */
	remote_state = GET_RX_CHANNEL_INFO(channel, state);
	if (remote_state != channel->remote_state) {
		channel->remote_state = remote_state;
		need_state_scan = true;

		wake_up_interruptible_all(&channel->state_change_event);
	}
	/* Indicate that we have seen any state change */
	SET_RX_CHANNEL_FLAG(channel, fSTATE, 0);

	/* Signal waiting qcom_smd_send() about the interrupt */
	if (!GET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR))
		wake_up_interruptible_all(&channel->fblockread_event);

	/* Don't consume any data until we've opened the channel */
	if (channel->state != SMD_CHANNEL_OPENED)
		goto out;

	/* Indicate that we've seen the new data */
	SET_RX_CHANNEL_FLAG(channel, fHEAD, 0);

	/* Consume data */
	for (;;) {
		avail = qcom_smd_channel_get_rx_avail(channel);

		if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) {
			qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen));
			qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN);
			channel->pkt_size = le32_to_cpu(pktlen);
		} else if (channel->pkt_size && avail >= channel->pkt_size) {
			ret = qcom_smd_channel_recv_single(channel);
			if (ret)
				break;
		} else {
			break;
		}
	}

	/* Indicate that we have seen and updated tail */
	SET_RX_CHANNEL_FLAG(channel, fTAIL, 1);

	/* Signal the remote that we've consumed the data (if requested) */
	if (!GET_RX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) {
		/* Ensure ordering of channel info updates */
		wmb();

		qcom_smd_signal_channel(channel);
	}

out:
	return need_state_scan;
}

/*
 * The edge interrupts are triggered by the remote processor on state changes,
 * channel info updates or when new channels are created.
 */
static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
{
	struct qcom_smd_edge *edge = data;
	struct qcom_smd_channel *channel;
	unsigned available;
	bool kick_scanner = false;
	bool kick_state = false;

	/*
	 * Handle state changes or data on each of the channels on this edge
	 */
	spin_lock(&edge->channels_lock);
	list_for_each_entry(channel, &edge->channels, list) {
		spin_lock(&channel->recv_lock);
		kick_state |= qcom_smd_channel_intr(channel);
		spin_unlock(&channel->recv_lock);
	}
	spin_unlock(&edge->channels_lock);

	/*
	 * Creating a new channel requires allocating an smem entry, so we only
	 * have to scan if the amount of available space in smem has changed
	 * since the last scan.
	 */
	available = qcom_smem_get_free_space(edge->remote_pid);
	if (available != edge->smem_available) {
		edge->smem_available = available;
		kick_scanner = true;
	}

	if (kick_scanner)
		schedule_work(&edge->scan_work);
	if (kick_state)
		schedule_work(&edge->state_work);

	return IRQ_HANDLED;
}

/*
 * Calculate how much space is available in the tx fifo.
 */
static size_t qcom_smd_get_tx_avail(struct qcom_smd_channel *channel)
{
	unsigned head;
	unsigned tail;
	unsigned mask = channel->fifo_size - 1;

	head = GET_TX_CHANNEL_INFO(channel, head);
	tail = GET_TX_CHANNEL_INFO(channel, tail);

	return mask - ((head - tail) & mask);
}
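
/*
 * Note that this reports one byte less than the raw fifo space: head == tail
 * means "empty", so a completely full fifo would be indistinguishable from an
 * empty one, and one slot is therefore always kept free (an empty fifo
 * reports mask - 0 = fifo_size - 1 writable bytes).
 */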

/*
 * Write count bytes of data into channel, possibly wrapping in the ring buffer
 */
static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
			       const void *data,
			       size_t count)
{
	bool word_aligned;
	unsigned head;
	size_t len;

	word_aligned = channel->info_word;
	head = GET_TX_CHANNEL_INFO(channel, head);

	len = min_t(size_t, count, channel->fifo_size - head);
	if (len) {
		smd_copy_to_fifo(channel->tx_fifo + head,
				 data,
				 len,
				 word_aligned);
	}

	if (len != count) {
		smd_copy_to_fifo(channel->tx_fifo,
				 data + len,
				 count - len,
				 word_aligned);
	}

	head += count;
	head &= (channel->fifo_size - 1);
	SET_TX_CHANNEL_INFO(channel, head, head);

	return count;
}

/**
 * __qcom_smd_send - write data to smd channel
 * @channel: channel handle
 * @data: buffer of data to write
 * @len: number of bytes to write
 * @wait: flag to indicate if write can wait
 *
 * This is a blocking write of len bytes into the channel's tx ring buffer and
 * signals the remote end. It will sleep until there is enough space available
 * in the tx buffer, utilizing the fBLOCKREADINTR signaling mechanism to avoid
 * polling. If @wait is false the write fails with -EAGAIN instead of
 * sleeping.
 */
static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data,
			   int len, bool wait)
{
	__le32 hdr[5] = { cpu_to_le32(len), };
	int tlen = sizeof(hdr) + len;
	unsigned long flags;
	int ret = 0;

	/* Word aligned channels only accept word size aligned data */
	if (channel->info_word && len % 4)
		return -EINVAL;

	/* Reject packets that are too big */
	if (tlen >= channel->fifo_size)
		return -EINVAL;

	/* Highlight the fact that if we enter the loop below we might sleep */
	if (wait)
		might_sleep();

	spin_lock_irqsave(&channel->tx_lock, flags);

	while (qcom_smd_get_tx_avail(channel) < tlen &&
	       channel->state == SMD_CHANNEL_OPENED) {
		if (!wait) {
			ret = -EAGAIN;
			goto out_unlock;
		}

		SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 0);

		/* Wait without holding the tx_lock */
		spin_unlock_irqrestore(&channel->tx_lock, flags);

		ret = wait_event_interruptible(channel->fblockread_event,
				qcom_smd_get_tx_avail(channel) >= tlen ||
				channel->state != SMD_CHANNEL_OPENED);
		if (ret)
			return ret;

		spin_lock_irqsave(&channel->tx_lock, flags);

		SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
	}

	/* Fail if the channel was closed */
	if (channel->state != SMD_CHANNEL_OPENED) {
		ret = -EPIPE;
		goto out_unlock;
	}

	SET_TX_CHANNEL_FLAG(channel, fTAIL, 0);

	qcom_smd_write_fifo(channel, hdr, sizeof(hdr));
	qcom_smd_write_fifo(channel, data, len);

	SET_TX_CHANNEL_FLAG(channel, fHEAD, 1);

	/* Ensure ordering of channel info updates */
	wmb();

	qcom_smd_signal_channel(channel);

out_unlock:
	spin_unlock_irqrestore(&channel->tx_lock, flags);

	return ret;
}

/*
 * Helper for opening a channel
 */
static int qcom_smd_channel_open(struct qcom_smd_channel *channel,
				 rpmsg_rx_cb_t cb)
{
	struct qcom_smd_edge *edge = channel->edge;
	size_t bb_size;
	int ret;

	/*
	 * Packets are maximum 4k, but reduce if the fifo is smaller
	 */
	bb_size = min(channel->fifo_size, SZ_4K);
	channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL);
	if (!channel->bounce_buffer)
		return -ENOMEM;

	qcom_smd_channel_set_callback(channel, cb);
	qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING);

	/* Wait for remote to enter opening or opened */
	ret = wait_event_interruptible_timeout(channel->state_change_event,
			channel->remote_state == SMD_CHANNEL_OPENING ||
			channel->remote_state == SMD_CHANNEL_OPENED,
			HZ);
	if (!ret) {
		dev_err(&edge->dev, "remote side did not enter opening state\n");
		goto out_close_timeout;
	}

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED);

	/* Wait for remote to enter opened */
	ret = wait_event_interruptible_timeout(channel->state_change_event,
			channel->remote_state == SMD_CHANNEL_OPENED,
			HZ);
	if (!ret) {
		dev_err(&edge->dev, "remote side did not enter open state\n");
		goto out_close_timeout;
	}

	return 0;

out_close_timeout:
	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
	return -ETIMEDOUT;
}

/*
 * Helper for closing and resetting a channel
 */
static void qcom_smd_channel_close(struct qcom_smd_channel *channel)
{
	qcom_smd_channel_set_callback(channel, NULL);

	kfree(channel->bounce_buffer);
	channel->bounce_buffer = NULL;

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
	qcom_smd_channel_reset(channel);
}

static struct qcom_smd_channel *
qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name)
{
	struct qcom_smd_channel *channel;
	struct qcom_smd_channel *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edge->channels_lock, flags);
	list_for_each_entry(channel, &edge->channels, list) {
		if (!strcmp(channel->name, name)) {
			ret = channel;
			break;
		}
	}
	spin_unlock_irqrestore(&edge->channels_lock, flags);

	return ret;
}

static void __ept_release(struct kref *kref)
{
	struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
						  refcount);
	kfree(to_smd_endpoint(ept));
}

static struct rpmsg_endpoint *qcom_smd_create_ept(struct rpmsg_device *rpdev,
						  rpmsg_rx_cb_t cb, void *priv,
						  struct rpmsg_channel_info chinfo)
{
	struct qcom_smd_endpoint *qsept;
	struct qcom_smd_channel *channel;
	struct qcom_smd_device *qsdev = to_smd_device(rpdev);
	struct qcom_smd_edge *edge = qsdev->edge;
	struct rpmsg_endpoint *ept;
	const char *name = chinfo.name;
	int ret;

	/* Wait up to HZ for the channel to appear */
	ret = wait_event_interruptible_timeout(edge->new_channel_event,
			(channel = qcom_smd_find_channel(edge, name)) != NULL,
			HZ);
	if (!ret)
		return NULL;

	if (channel->state != SMD_CHANNEL_CLOSED) {
		dev_err(&rpdev->dev, "channel %s is busy\n", channel->name);
		return NULL;
	}

	qsept = kzalloc(sizeof(*qsept), GFP_KERNEL);
	if (!qsept)
		return NULL;

	ept = &qsept->ept;

	kref_init(&ept->refcount);

	ept->rpdev = rpdev;
	ept->cb = cb;
	ept->priv = priv;
	ept->ops = &qcom_smd_endpoint_ops;

	channel->qsept = qsept;
	qsept->qsch = channel;

	ret = qcom_smd_channel_open(channel, cb);
	if (ret)
		goto free_ept;

	return ept;

free_ept:
	channel->qsept = NULL;
	kref_put(&ept->refcount, __ept_release);
	return NULL;
}

static void qcom_smd_destroy_ept(struct rpmsg_endpoint *ept)
{
	struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);
	struct qcom_smd_channel *ch = qsept->qsch;

	qcom_smd_channel_close(ch);
	ch->qsept = NULL;
	kref_put(&ept->refcount, __ept_release);
}

static int qcom_smd_send(struct rpmsg_endpoint *ept, void *data, int len)
{
	struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);

	return __qcom_smd_send(qsept->qsch, data, len, true);
}

static int qcom_smd_trysend(struct rpmsg_endpoint *ept, void *data, int len)
{
	struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);

	return __qcom_smd_send(qsept->qsch, data, len, false);
}

static int qcom_smd_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
{
	struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);

	return __qcom_smd_send(qsept->qsch, data, len, true);
}

static int qcom_smd_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
{
	struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);

	return __qcom_smd_send(qsept->qsch, data, len, false);
}

static __poll_t qcom_smd_poll(struct rpmsg_endpoint *ept,
			      struct file *filp, poll_table *wait)
{
	struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);
	struct qcom_smd_channel *channel = qsept->qsch;
	__poll_t mask = 0;

	poll_wait(filp, &channel->fblockread_event, wait);

	if (qcom_smd_get_tx_avail(channel) > 20)
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

/*
 * Finds the device_node for the smd child interested in this channel.
 */
static struct device_node *qcom_smd_match_channel(struct device_node *edge_node,
						  const char *channel)
{
	struct device_node *child;
	const char *name;
	const char *key;
	int ret;

	for_each_available_child_of_node(edge_node, child) {
		key = "qcom,smd-channels";
		ret = of_property_read_string(child, key, &name);
		if (ret)
			continue;

		if (strcmp(name, channel) == 0)
			return child;
	}

	return NULL;
}
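
/*
 * A matching child node in the device tree looks roughly like the sketch
 * below; the node name, compatible and channel name are hypothetical:
 *
 *	edge {
 *		...
 *		client-node {
 *			compatible = "example,smd-client";
 *			qcom,smd-channels = "example_channel";
 *		};
 *	};
 */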

static int qcom_smd_announce_create(struct rpmsg_device *rpdev)
{
	struct qcom_smd_endpoint *qept = to_smd_endpoint(rpdev->ept);
	struct qcom_smd_channel *channel = qept->qsch;
	unsigned long flags;
	bool kick_state;

	spin_lock_irqsave(&channel->recv_lock, flags);
	kick_state = qcom_smd_channel_intr(channel);
	spin_unlock_irqrestore(&channel->recv_lock, flags);

	if (kick_state)
		schedule_work(&channel->edge->state_work);

	return 0;
}

static const struct rpmsg_device_ops qcom_smd_device_ops = {
	.create_ept = qcom_smd_create_ept,
	.announce_create = qcom_smd_announce_create,
};

static const struct rpmsg_endpoint_ops qcom_smd_endpoint_ops = {
	.destroy_ept = qcom_smd_destroy_ept,
	.send = qcom_smd_send,
	.sendto = qcom_smd_sendto,
	.trysend = qcom_smd_trysend,
	.trysendto = qcom_smd_trysendto,
	.poll = qcom_smd_poll,
};

static void qcom_smd_release_device(struct device *dev)
{
	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
	struct qcom_smd_device *qsdev = to_smd_device(rpdev);

	kfree(qsdev);
}

/*
 * Create a smd client device for channel that is being opened.
 */
static int qcom_smd_create_device(struct qcom_smd_channel *channel)
{
	struct qcom_smd_device *qsdev;
	struct rpmsg_device *rpdev;
	struct qcom_smd_edge *edge = channel->edge;

	dev_dbg(&edge->dev, "registering '%s'\n", channel->name);

	qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
	if (!qsdev)
		return -ENOMEM;

	/* Link qsdev to our SMD edge */
	qsdev->edge = edge;

	/* Assign callbacks for rpmsg_device */
	qsdev->rpdev.ops = &qcom_smd_device_ops;

	/* Assign public information to the rpmsg_device */
	rpdev = &qsdev->rpdev;
	strncpy(rpdev->id.name, channel->name, RPMSG_NAME_SIZE);
	rpdev->src = RPMSG_ADDR_ANY;
	rpdev->dst = RPMSG_ADDR_ANY;

	rpdev->dev.of_node = qcom_smd_match_channel(edge->of_node, channel->name);
	rpdev->dev.parent = &edge->dev;
	rpdev->dev.release = qcom_smd_release_device;

	return rpmsg_register_device(rpdev);
}

static int qcom_smd_create_chrdev(struct qcom_smd_edge *edge)
{
	struct qcom_smd_device *qsdev;

	qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
	if (!qsdev)
		return -ENOMEM;

	qsdev->edge = edge;
	qsdev->rpdev.ops = &qcom_smd_device_ops;
	qsdev->rpdev.dev.parent = &edge->dev;
	qsdev->rpdev.dev.release = qcom_smd_release_device;

	return rpmsg_chrdev_register_device(&qsdev->rpdev);
}

/*
 * Allocate the qcom_smd_channel object for a newly found smd channel,
 * retrieving and validating the smem items involved.
 */
static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *edge,
							unsigned smem_info_item,
							unsigned smem_fifo_item,
							char *name)
{
	struct qcom_smd_channel *channel;
	size_t fifo_size;
	size_t info_size;
	void *fifo_base;
	void *info;
	int ret;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	channel->edge = edge;
	channel->name = kstrdup(name, GFP_KERNEL);
	if (!channel->name) {
		ret = -ENOMEM;
		goto free_channel;
	}

	spin_lock_init(&channel->tx_lock);
	spin_lock_init(&channel->recv_lock);
	init_waitqueue_head(&channel->fblockread_event);
	init_waitqueue_head(&channel->state_change_event);

	info = qcom_smem_get(edge->remote_pid, smem_info_item, &info_size);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto free_name_and_channel;
	}

	/*
	 * Use the size of the item to figure out which channel info struct to
	 * use.
	 */
	if (info_size == 2 * sizeof(struct smd_channel_info_word)) {
		channel->info_word = info;
	} else if (info_size == 2 * sizeof(struct smd_channel_info)) {
		channel->info = info;
	} else {
		dev_err(&edge->dev,
			"channel info of size %zu not supported\n", info_size);
		ret = -EINVAL;
		goto free_name_and_channel;
	}

	fifo_base = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_size);
	if (IS_ERR(fifo_base)) {
		ret = PTR_ERR(fifo_base);
		goto free_name_and_channel;
	}

	/* The channel consists of a rx and tx fifo of equal size */
	fifo_size /= 2;

	dev_dbg(&edge->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
		name, info_size, fifo_size);

	channel->tx_fifo = fifo_base;
	channel->rx_fifo = fifo_base + fifo_size;
	channel->fifo_size = fifo_size;

	qcom_smd_channel_reset(channel);

	return channel;

free_name_and_channel:
	kfree(channel->name);
free_channel:
	kfree(channel);

	return ERR_PTR(ret);
}

/*
 * Scans the allocation table for any newly allocated channels, calls
 * qcom_smd_create_channel() to create representations of these and add them
 * to the edge's list of channels.
 */
static void qcom_channel_scan_worker(struct work_struct *work)
{
	struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, scan_work);
	struct qcom_smd_alloc_entry *alloc_tbl;
	struct qcom_smd_alloc_entry *entry;
	struct qcom_smd_channel *channel;
	unsigned long flags;
	unsigned fifo_id;
	unsigned info_id;
	int tbl;
	int i;
	u32 eflags, cid;

	for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) {
		alloc_tbl = qcom_smem_get(edge->remote_pid,
					  smem_items[tbl].alloc_tbl_id, NULL);
		if (IS_ERR(alloc_tbl))
			continue;

		for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) {
			entry = &alloc_tbl[i];
			eflags = le32_to_cpu(entry->flags);
			if (test_bit(i, edge->allocated[tbl]))
				continue;

			if (entry->ref_count == 0)
				continue;

			if (!entry->name[0])
				continue;

			if (!(eflags & SMD_CHANNEL_FLAGS_PACKET))
				continue;

			if ((eflags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id)
				continue;

			cid = le32_to_cpu(entry->cid);
			info_id = smem_items[tbl].info_base_id + cid;
			fifo_id = smem_items[tbl].fifo_base_id + cid;

			channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name);
			if (IS_ERR(channel))
				continue;

			spin_lock_irqsave(&edge->channels_lock, flags);
			list_add(&channel->list, &edge->channels);
			spin_unlock_irqrestore(&edge->channels_lock, flags);

			dev_dbg(&edge->dev, "new channel found: '%s'\n", channel->name);
			set_bit(i, edge->allocated[tbl]);

			wake_up_interruptible_all(&edge->new_channel_event);
		}
	}

	schedule_work(&edge->state_work);
}

/*
 * This per edge worker scans smem for any new channels and registers these. It
 * then scans all registered channels for state changes that should be handled
 * by creating or destroying smd client devices for the registered channels.
 *
 * LOCKING: edge->channels_lock only needs to cover the list operations, as the
 * worker is killed before any channels are deallocated
 */
static void qcom_channel_state_worker(struct work_struct *work)
{
	struct qcom_smd_channel *channel;
	struct qcom_smd_edge *edge = container_of(work,
						  struct qcom_smd_edge,
						  state_work);
	struct rpmsg_channel_info chinfo;
	unsigned remote_state;
	unsigned long flags;

	/*
	 * Register a device for any closed channel where the remote processor
	 * is showing interest in opening the channel.
	 */
	spin_lock_irqsave(&edge->channels_lock, flags);
	list_for_each_entry(channel, &edge->channels, list) {
		if (channel->state != SMD_CHANNEL_CLOSED)
			continue;

		remote_state = GET_RX_CHANNEL_INFO(channel, state);
		if (remote_state != SMD_CHANNEL_OPENING &&
		    remote_state != SMD_CHANNEL_OPENED)
			continue;

		if (channel->registered)
			continue;

		spin_unlock_irqrestore(&edge->channels_lock, flags);
		qcom_smd_create_device(channel);
		spin_lock_irqsave(&edge->channels_lock, flags);

		channel->registered = true;
	}

	/*
	 * Unregister the device for any channel that is opened where the
	 * remote processor is closing the channel.
	 */
	list_for_each_entry(channel, &edge->channels, list) {
		if (channel->state != SMD_CHANNEL_OPENING &&
		    channel->state != SMD_CHANNEL_OPENED)
			continue;

		remote_state = GET_RX_CHANNEL_INFO(channel, state);
		if (remote_state == SMD_CHANNEL_OPENING ||
		    remote_state == SMD_CHANNEL_OPENED)
			continue;

		spin_unlock_irqrestore(&edge->channels_lock, flags);

		strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
		chinfo.src = RPMSG_ADDR_ANY;
		chinfo.dst = RPMSG_ADDR_ANY;
		rpmsg_unregister_device(&edge->dev, &chinfo);
		channel->registered = false;
		spin_lock_irqsave(&edge->channels_lock, flags);
	}
	spin_unlock_irqrestore(&edge->channels_lock, flags);
}

/*
 * Parses an of_node describing an edge.
 */
static int qcom_smd_parse_edge(struct device *dev,
			       struct device_node *node,
			       struct qcom_smd_edge *edge)
{
	struct device_node *syscon_np;
	const char *key;
	int irq;
	int ret;

	INIT_LIST_HEAD(&edge->channels);
	spin_lock_init(&edge->channels_lock);

	INIT_WORK(&edge->scan_work, qcom_channel_scan_worker);
	INIT_WORK(&edge->state_work, qcom_channel_state_worker);

	edge->of_node = of_node_get(node);

	key = "qcom,smd-edge";
	ret = of_property_read_u32(node, key, &edge->edge_id);
	if (ret) {
		dev_err(dev, "edge missing %s property\n", key);
		goto put_node;
	}

	edge->remote_pid = QCOM_SMEM_HOST_ANY;
	key = "qcom,remote-pid";
	of_property_read_u32(node, key, &edge->remote_pid);

	edge->mbox_client.dev = dev;
	edge->mbox_client.knows_txdone = true;
	edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0);
	if (IS_ERR(edge->mbox_chan)) {
		if (PTR_ERR(edge->mbox_chan) != -ENODEV) {
			ret = PTR_ERR(edge->mbox_chan);
			goto put_node;
		}

		edge->mbox_chan = NULL;

		syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
		if (!syscon_np) {
			dev_err(dev, "no qcom,ipc node\n");
			ret = -ENODEV;
			goto put_node;
		}

		edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
		if (IS_ERR(edge->ipc_regmap)) {
			ret = PTR_ERR(edge->ipc_regmap);
			goto put_node;
		}

		key = "qcom,ipc";
		ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
		if (ret < 0) {
			dev_err(dev, "no offset in %s\n", key);
			goto put_node;
		}

		ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
		if (ret < 0) {
			dev_err(dev, "no bit in %s\n", key);
			goto put_node;
		}
	}

	ret = of_property_read_string(node, "label", &edge->name);
	if (ret < 0)
		edge->name = node->name;

	irq = irq_of_parse_and_map(node, 0);
	if (!irq) {
		dev_err(dev, "required smd interrupt missing\n");
		ret = -EINVAL;
		goto put_node;
	}

	ret = devm_request_irq(dev, irq,
			       qcom_smd_edge_intr, IRQF_TRIGGER_RISING,
			       node->name, edge);
	if (ret) {
		dev_err(dev, "failed to request smd irq\n");
		goto put_node;
	}

	edge->irq = irq;

	return 0;

put_node:
	of_node_put(node);
	edge->of_node = NULL;

	return ret;
}
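
/*
 * Putting the properties above together, an edge node is expected to look
 * roughly like the sketch below; the values are made up, and either an
 * mboxes reference or the qcom,ipc triplet provides the outgoing doorbell:
 *
 *	smd-edge {
 *		interrupts = <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>;
 *		qcom,ipc = <&apcs 8 12>;
 *		qcom,smd-edge = <0>;
 *		qcom,remote-pid = <1>;
 *		label = "modem";
 *	};
 */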

/*
 * Release function for an edge.
 * Reset the state of each associated channel and free the edge context.
 */
static void qcom_smd_edge_release(struct device *dev)
{
	struct qcom_smd_channel *channel, *tmp;
	struct qcom_smd_edge *edge = to_smd_edge(dev);

	list_for_each_entry_safe(channel, tmp, &edge->channels, list) {
		list_del(&channel->list);
		kfree(channel->name);
		kfree(channel);
	}

	kfree(edge);
}

static ssize_t rpmsg_name_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct qcom_smd_edge *edge = to_smd_edge(dev);

	return sprintf(buf, "%s\n", edge->name);
}
static DEVICE_ATTR_RO(rpmsg_name);

static struct attribute *qcom_smd_edge_attrs[] = {
	&dev_attr_rpmsg_name.attr,
	NULL
};
ATTRIBUTE_GROUPS(qcom_smd_edge);

/**
 * qcom_smd_register_edge() - register an edge based on a device_node
 * @parent: parent device for the edge
 * @node: device_node describing the edge
 *
 * Returns an edge reference, or negative ERR_PTR() on failure.
 */
struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
					     struct device_node *node)
{
	struct qcom_smd_edge *edge;
	int ret;

	edge = kzalloc(sizeof(*edge), GFP_KERNEL);
	if (!edge)
		return ERR_PTR(-ENOMEM);

	init_waitqueue_head(&edge->new_channel_event);

	edge->dev.parent = parent;
	edge->dev.release = qcom_smd_edge_release;
	edge->dev.of_node = node;
	edge->dev.groups = qcom_smd_edge_groups;
	dev_set_name(&edge->dev, "%s:%pOFn", dev_name(parent), node);
	ret = device_register(&edge->dev);
	if (ret) {
		pr_err("failed to register smd edge\n");
		put_device(&edge->dev);
		return ERR_PTR(ret);
	}

	ret = qcom_smd_parse_edge(&edge->dev, node, edge);
	if (ret) {
		dev_err(&edge->dev, "failed to parse smd edge\n");
		goto unregister_dev;
	}

	ret = qcom_smd_create_chrdev(edge);
	if (ret) {
		dev_err(&edge->dev, "failed to register chrdev for edge\n");
		goto unregister_dev;
	}

	schedule_work(&edge->scan_work);

	return edge;

unregister_dev:
	if (!IS_ERR_OR_NULL(edge->mbox_chan))
		mbox_free_channel(edge->mbox_chan);

	device_unregister(&edge->dev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(qcom_smd_register_edge);
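
/*
 * A typical caller of the above is a remoteproc driver bringing up its edge;
 * a minimal sketch, with dev and smd_node assumed to exist in the caller:
 *
 *	edge = qcom_smd_register_edge(dev, smd_node);
 *	if (IS_ERR(edge))
 *		return PTR_ERR(edge);
 *	...
 *	qcom_smd_unregister_edge(edge);
 */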

static int qcom_smd_remove_device(struct device *dev, void *data)
{
	device_unregister(dev);

	return 0;
}

/**
 * qcom_smd_unregister_edge() - release an edge and its children
 * @edge: edge reference acquired from qcom_smd_register_edge
 */
int qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
{
	int ret;

	disable_irq(edge->irq);
	cancel_work_sync(&edge->scan_work);
	cancel_work_sync(&edge->state_work);

	ret = device_for_each_child(&edge->dev, NULL, qcom_smd_remove_device);
	if (ret)
		dev_warn(&edge->dev, "can't remove smd device: %d\n", ret);

	mbox_free_channel(edge->mbox_chan);
	device_unregister(&edge->dev);

	return 0;
}
EXPORT_SYMBOL(qcom_smd_unregister_edge);

static int qcom_smd_probe(struct platform_device *pdev)
{
	struct device_node *node;
	void *p;

	/* Wait for smem */
	p = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL);
	if (PTR_ERR(p) == -EPROBE_DEFER)
		return PTR_ERR(p);

	for_each_available_child_of_node(pdev->dev.of_node, node)
		qcom_smd_register_edge(&pdev->dev, node);

	return 0;
}

static int qcom_smd_remove_edge(struct device *dev, void *data)
{
	struct qcom_smd_edge *edge = to_smd_edge(dev);

	return qcom_smd_unregister_edge(edge);
}

/*
 * Shut down all smd clients by making sure that each edge stops processing
 * events and scanning for new channels, then call destroy on the devices.
 */
static int qcom_smd_remove(struct platform_device *pdev)
{
	int ret;

	ret = device_for_each_child(&pdev->dev, NULL, qcom_smd_remove_edge);
	if (ret)
		dev_warn(&pdev->dev, "can't remove smd device: %d\n", ret);

	return ret;
}

static const struct of_device_id qcom_smd_of_match[] = {
	{ .compatible = "qcom,smd" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smd_of_match);

static struct platform_driver qcom_smd_driver = {
	.probe = qcom_smd_probe,
	.remove = qcom_smd_remove,
	.driver = {
		.name = "qcom-smd",
		.of_match_table = qcom_smd_of_match,
	},
};

static int __init qcom_smd_init(void)
{
	return platform_driver_register(&qcom_smd_driver);
}
subsys_initcall(qcom_smd_init);

static void __exit qcom_smd_exit(void)
{
	platform_driver_unregister(&qcom_smd_driver);
}
module_exit(qcom_smd_exit);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Driver");
MODULE_LICENSE("GPL v2");