blob: 069d1c8fde73e70c04d5e1690c5bc79ba509f2e9 [file] [log] [blame]
Johan Hedberg7dec65c2012-07-16 16:12:02 +03001/*
2 *
3 * Bluetooth HCI Three-wire UART driver
4 *
5 * Copyright (C) 2012 Intel Corporation
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
Jeremy Cline4eb3cbc2018-08-02 16:57:19 +020024#include <linux/acpi.h>
Johan Hedberg7dec65c2012-07-16 16:12:02 +030025#include <linux/errno.h>
Hans de Goede4c791482018-08-02 16:57:21 +020026#include <linux/gpio/consumer.h>
Jeremy Cline4eb3cbc2018-08-02 16:57:19 +020027#include <linux/kernel.h>
28#include <linux/mod_devicetable.h>
Hans de Goedece945552018-08-02 16:57:18 +020029#include <linux/serdev.h>
Johan Hedberg7dec65c2012-07-16 16:12:02 +030030#include <linux/skbuff.h>
31
32#include <net/bluetooth/bluetooth.h>
33#include <net/bluetooth/hci_core.h>
34
Jeremy Clineb825d7c2018-08-02 16:57:20 +020035#include "btrtl.h"
Johan Hedberg7dec65c2012-07-16 16:12:02 +030036#include "hci_uart.h"
37
Johan Hedbergc0a1b732012-07-16 16:12:06 +030038#define HCI_3WIRE_ACK_PKT 0
39#define HCI_3WIRE_LINK_PKT 15
40
Johan Hedbergafdc9442012-07-16 16:12:18 +030041/* Sliding window size */
42#define H5_TX_WIN_MAX 4
Johan Hedberg3f27e952012-07-16 16:12:04 +030043
44#define H5_ACK_TIMEOUT msecs_to_jiffies(250)
Johan Hedberg40f10222012-07-16 16:12:09 +030045#define H5_SYNC_TIMEOUT msecs_to_jiffies(100)
Johan Hedberg3f27e952012-07-16 16:12:04 +030046
Johan Hedbergbc1f35b2012-07-16 16:12:05 +030047/*
48 * Maximum Three-wire packet:
49 * 4 byte header + max value for 12-bit length + 2 bytes for CRC
50 */
51#define H5_MAX_LEN (4 + 0xfff + 2)
52
Johan Hedberg01977c02012-07-16 16:12:07 +030053/* Convenience macros for reading Three-wire header values */
54#define H5_HDR_SEQ(hdr) ((hdr)[0] & 0x07)
55#define H5_HDR_ACK(hdr) (((hdr)[0] >> 3) & 0x07)
56#define H5_HDR_CRC(hdr) (((hdr)[0] >> 6) & 0x01)
57#define H5_HDR_RELIABLE(hdr) (((hdr)[0] >> 7) & 0x01)
58#define H5_HDR_PKT_TYPE(hdr) ((hdr)[1] & 0x0f)
Andrei Emeltchenko4223f362015-11-19 11:29:11 +020059#define H5_HDR_LEN(hdr) ((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))
Johan Hedberg01977c02012-07-16 16:12:07 +030060
Johan Hedbergbc1f35b2012-07-16 16:12:05 +030061#define SLIP_DELIMITER 0xc0
62#define SLIP_ESC 0xdb
63#define SLIP_ESC_DELIM 0xdc
64#define SLIP_ESC_ESC 0xdd
65
/* H5 state flags (bit numbers in struct h5::flags) */
enum {
	H5_RX_ESC,	/* SLIP escape mode */
	H5_TX_ACK_REQ,	/* Pending ack to send */
};
71
/* Per-device state for the Three-wire UART (H5) protocol. */
struct h5 {
	/* Must be the first member, hci_serdev.c expects this. */
	struct hci_uart serdev_hu;

	struct sk_buff_head unack;	/* Unack'ed packets queue */
	struct sk_buff_head rel;	/* Reliable packets queue */
	struct sk_buff_head unrel;	/* Unreliable packets queue */

	unsigned long flags;		/* H5_RX_ESC / H5_TX_ACK_REQ bits */

	struct sk_buff *rx_skb;		/* Receive buffer */
	size_t rx_pending;		/* Expecting more bytes */
	u8 rx_ack;			/* Last ack number received */

	/* Current RX state handler; returns bytes consumed (see h5_recv) */
	int (*rx_func)(struct hci_uart *hu, u8 c);

	struct timer_list timer;	/* Retransmission timer */
	struct hci_uart *hu;		/* Parent HCI UART */

	u8 tx_seq;			/* Next seq number to send */
	u8 tx_ack;			/* Next ack number to send */
	u8 tx_win;			/* Sliding window size */

	/* Link establishment: sync -> config -> active */
	enum {
		H5_UNINITIALIZED,
		H5_INITIALIZED,
		H5_ACTIVE,
	} state;

	/* Low-power handshake state of the peer */
	enum {
		H5_AWAKE,
		H5_SLEEPING,
		H5_WAKING_UP,
	} sleep;

	const struct h5_vnd *vnd;	/* Optional vendor hooks (ACPI match) */
	const char *id;			/* ACPI ID string when probed via ACPI */

	struct gpio_desc *enable_gpio;
	struct gpio_desc *device_wake_gpio;
};
113
/* Vendor-specific hooks, selected by ACPI match in h5_serdev_probe().
 * All callbacks are optional; callers check for NULL before invoking.
 */
struct h5_vnd {
	int (*setup)(struct h5 *h5);
	void (*open)(struct h5 *h5);
	void (*close)(struct h5 *h5);
	int (*suspend)(struct h5 *h5);
	int (*resume)(struct h5 *h5);
	const struct acpi_gpio_mapping *acpi_gpio_map;
};
122
Johan Hedbergbc1f35b2012-07-16 16:12:05 +0300123static void h5_reset_rx(struct h5 *h5);
124
Johan Hedberg40f10222012-07-16 16:12:09 +0300125static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
126{
127 struct h5 *h5 = hu->priv;
128 struct sk_buff *nskb;
129
130 nskb = alloc_skb(3, GFP_ATOMIC);
131 if (!nskb)
132 return;
133
Marcel Holtmann618e8bc2015-11-05 07:33:56 +0100134 hci_skb_pkt_type(nskb) = HCI_3WIRE_LINK_PKT;
Johan Hedberg40f10222012-07-16 16:12:09 +0300135
Johannes Berg59ae1d12017-06-16 14:29:20 +0200136 skb_put_data(nskb, data, len);
Johan Hedberg40f10222012-07-16 16:12:09 +0300137
138 skb_queue_tail(&h5->unrel, nskb);
139}
140
Johan Hedbergafdc9442012-07-16 16:12:18 +0300141static u8 h5_cfg_field(struct h5 *h5)
142{
Johan Hedbergafdc9442012-07-16 16:12:18 +0300143 /* Sliding window size (first 3 bits) */
Andrei Emeltchenko742c5952015-11-26 16:49:34 +0200144 return h5->tx_win & 0x07;
Johan Hedbergafdc9442012-07-16 16:12:18 +0300145}
146
/* Retransmission/link timer callback.
 *
 * Before the link is active it re-sends the appropriate link
 * establishment message (sync or config request) and re-arms itself.
 * Once active it either notes that a wakeup attempt timed out, or moves
 * every unacked packet back to the head of the reliable queue for
 * retransmission. Always kicks the TX path at the end.
 */
static void h5_timed_event(struct timer_list *t)
{
	const unsigned char sync_req[] = { 0x01, 0x7e };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	struct h5 *h5 = from_timer(h5, t, timer);
	struct hci_uart *hu = h5->hu;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("%s", hu->hdev->name);

	/* Still waiting for the peer's sync response */
	if (h5->state == H5_UNINITIALIZED)
		h5_link_control(hu, sync_req, sizeof(sync_req));

	/* Synced but not yet configured */
	if (h5->state == H5_INITIALIZED) {
		conf_req[2] = h5_cfg_field(h5);
		h5_link_control(hu, conf_req, sizeof(conf_req));
	}

	if (h5->state != H5_ACTIVE) {
		mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
		goto wakeup;
	}

	/* Wakeup request timed out (timer armed in h5_dequeue);
	 * fall back to sleeping so the next dequeue retries. */
	if (h5->sleep != H5_AWAKE) {
		h5->sleep = H5_SLEEPING;
		goto wakeup;
	}

	BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	/* Requeue from newest to oldest so the reliable queue keeps its
	 * original order, rewinding tx_seq (mod 8) for each packet. */
	while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
		h5->tx_seq = (h5->tx_seq - 1) & 0x07;
		skb_queue_head(&h5->rel, skb);
	}

	spin_unlock_irqrestore(&h5->unack.lock, flags);

wakeup:
	hci_uart_tx_wakeup(hu);
}
190
/* The controller re-started link establishment while we were active,
 * i.e. it has reset. Drop all queued traffic, restart the local state
 * machine and ask the HCI core to reset the device.
 */
static void h5_peer_reset(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	BT_ERR("Peer device has reset");

	h5->state = H5_UNINITIALIZED;

	del_timer(&h5->timer);

	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);
	skb_queue_purge(&h5->unack);

	/* Sequence numbers restart from zero with the new link */
	h5->tx_seq = 0;
	h5->tx_ack = 0;

	/* Send reset request to upper stack */
	hci_reset_dev(hu->hdev);
}
211
/* hci_uart proto .open callback.
 *
 * Initializes protocol state and starts link establishment by queueing
 * an initial sync request and arming the sync timer. When running on
 * top of serdev the struct h5 was already allocated by
 * h5_serdev_probe() and is fetched from drvdata; otherwise it is
 * allocated here and freed by h5_close().
 *
 * Returns 0 on success or -ENOMEM.
 */
static int h5_open(struct hci_uart *hu)
{
	struct h5 *h5;
	const unsigned char sync[] = { 0x01, 0x7e };

	BT_DBG("hu %p", hu);

	if (hu->serdev) {
		h5 = serdev_device_get_drvdata(hu->serdev);
	} else {
		h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
		if (!h5)
			return -ENOMEM;
	}

	hu->priv = h5;
	h5->hu = hu;

	skb_queue_head_init(&h5->unack);
	skb_queue_head_init(&h5->rel);
	skb_queue_head_init(&h5->unrel);

	h5_reset_rx(h5);

	timer_setup(&h5->timer, h5_timed_event, 0);

	h5->tx_win = H5_TX_WIN_MAX;

	if (h5->vnd && h5->vnd->open)
		h5->vnd->open(h5);

	/* Hold off hci_uart init completion until the link is active */
	set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);

	/* Send initial sync request */
	h5_link_control(hu, sync, sizeof(sync));
	mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);

	return 0;
}
251
252static int h5_close(struct hci_uart *hu)
253{
Johan Hedberg7d664fb2012-07-16 16:12:03 +0300254 struct h5 *h5 = hu->priv;
255
Michael Knudsenc327cdd2014-02-18 09:48:08 +0100256 del_timer_sync(&h5->timer);
257
Johan Hedberg7d664fb2012-07-16 16:12:03 +0300258 skb_queue_purge(&h5->unack);
259 skb_queue_purge(&h5->rel);
260 skb_queue_purge(&h5->unrel);
261
Jeremy Cline4eb3cbc2018-08-02 16:57:19 +0200262 if (h5->vnd && h5->vnd->close)
263 h5->vnd->close(h5);
264
Hans de Goedece945552018-08-02 16:57:18 +0200265 if (!hu->serdev)
266 kfree(h5);
Johan Hedberg7d664fb2012-07-16 16:12:03 +0300267
268 return 0;
Johan Hedberg7dec65c2012-07-16 16:12:02 +0300269}
270
Jeremy Cline4eb3cbc2018-08-02 16:57:19 +0200271static int h5_setup(struct hci_uart *hu)
272{
273 struct h5 *h5 = hu->priv;
274
275 if (h5->vnd && h5->vnd->setup)
276 return h5->vnd->setup(h5);
277
278 return 0;
279}
280
/* Drop packets from the unack queue that the peer has now acknowledged.
 *
 * Walks backwards (mod 8) from tx_seq to find how far back the last
 * received ack (rx_ack) reaches; the packets at the head of the queue
 * up to that point are acknowledged and freed. Stops the retransmission
 * timer once the queue is empty.
 */
static void h5_pkt_cull(struct h5 *h5)
{
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	int i, to_remove;
	u8 seq;

	spin_lock_irqsave(&h5->unack.lock, flags);

	to_remove = skb_queue_len(&h5->unack);
	if (to_remove == 0)
		goto unlock;

	seq = h5->tx_seq;

	/* Count down from the next seq to send until we meet rx_ack;
	 * what remains in to_remove is the number of acked packets. */
	while (to_remove > 0) {
		if (h5->rx_ack == seq)
			break;

		to_remove--;
		seq = (seq - 1) & 0x07;
	}

	/* Loop exhausted the queue without matching rx_ack */
	if (seq != h5->rx_ack)
		BT_ERR("Controller acked invalid packet");

	/* Free the oldest to_remove packets (queue head first) */
	i = 0;
	skb_queue_walk_safe(&h5->unack, skb, tmp) {
		if (i++ >= to_remove)
			break;

		__skb_unlink(skb, &h5->unack);
		kfree_skb(skb);
	}

	if (skb_queue_empty(&h5->unack))
		del_timer(&h5->timer);

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);
}
322
/* Process a received three-wire link control packet.
 *
 * Drives both the link establishment state machine
 * (sync req/rsp -> config req/rsp -> H5_ACTIVE, with peer-reset
 * detection when a sync arrives while active) and the low-power
 * handshake (sleep/wakeup/woken messages). The two-byte message codes
 * below come from the three-wire transport specification.
 */
static void h5_handle_internal_rx(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char sync_req[] = { 0x01, 0x7e };
	const unsigned char sync_rsp[] = { 0x02, 0x7d };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	const unsigned char conf_rsp[] = { 0x04, 0x7b };
	const unsigned char wakeup_req[] = { 0x05, 0xfa };
	const unsigned char woken_req[] = { 0x06, 0xf9 };
	const unsigned char sleep_req[] = { 0x07, 0x78 };
	const unsigned char *hdr = h5->rx_skb->data;
	const unsigned char *data = &h5->rx_skb->data[4];

	BT_DBG("%s", hu->hdev->name);

	if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
		return;

	/* All link control messages carry at least two payload bytes */
	if (H5_HDR_LEN(hdr) < 2)
		return;

	conf_req[2] = h5_cfg_field(h5);

	if (memcmp(data, sync_req, 2) == 0) {
		/* Sync request while active means the peer has reset */
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5_link_control(hu, sync_rsp, 2);
	} else if (memcmp(data, sync_rsp, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5->state = H5_INITIALIZED;
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_req, 2) == 0) {
		h5_link_control(hu, conf_rsp, 2);
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_rsp, 2) == 0) {
		/* Optional third byte carries the peer's sliding window */
		if (H5_HDR_LEN(hdr) > 2)
			h5->tx_win = (data[2] & 0x07);
		BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
		h5->state = H5_ACTIVE;
		hci_uart_init_ready(hu);
		return;
	} else if (memcmp(data, sleep_req, 2) == 0) {
		BT_DBG("Peer went to sleep");
		h5->sleep = H5_SLEEPING;
		return;
	} else if (memcmp(data, woken_req, 2) == 0) {
		BT_DBG("Peer woke up");
		h5->sleep = H5_AWAKE;
	} else if (memcmp(data, wakeup_req, 2) == 0) {
		BT_DBG("Peer requested wakeup");
		h5_link_control(hu, woken_req, 2);
		h5->sleep = H5_AWAKE;
	} else {
		BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
		return;
	}

	/* A response may have been queued above; kick the TX path */
	hci_uart_tx_wakeup(hu);
}
383
/* A complete packet (header + payload [+ CRC]) has been collected in
 * rx_skb.
 *
 * Reliable packets advance tx_ack and request that an ack be sent.
 * The packet's ack field is used to cull our unack queue. HCI
 * event/ACL/SCO packets are handed to the HCI core (which takes
 * ownership of rx_skb); anything else is treated as an internal
 * three-wire packet. RX state is reset for the next packet either way.
 */
static void h5_complete_rx_pkt(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_RELIABLE(hdr)) {
		h5->tx_ack = (h5->tx_ack + 1) % 8;
		set_bit(H5_TX_ACK_REQ, &h5->flags);
		hci_uart_tx_wakeup(hu);
	}

	h5->rx_ack = H5_HDR_ACK(hdr);

	h5_pkt_cull(h5);

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
		hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr);

		/* Remove Three-wire header */
		skb_pull(h5->rx_skb, 4);

		/* hci_recv_frame() consumes the skb; clear our reference
		 * so h5_reset_rx() below does not free it again. */
		hci_recv_frame(hu->hdev, h5->rx_skb);
		h5->rx_skb = NULL;

		break;

	default:
		h5_handle_internal_rx(hu);
		break;
	}

	h5_reset_rx(h5);
}
420
/* RX state: the two CRC bytes have been collected into rx_skb.
 * The received CRC is not verified, only skipped; finish the packet.
 * Returns 0 (no extra input bytes consumed).
 */
static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
	h5_complete_rx_pkt(hu);
	return 0;
}
427
428static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
429{
430 struct h5 *h5 = hu->priv;
431 const unsigned char *hdr = h5->rx_skb->data;
432
Johan Hedberg43eb12d2012-07-16 16:12:08 +0300433 if (H5_HDR_CRC(hdr)) {
Johan Hedbergbc1f35b2012-07-16 16:12:05 +0300434 h5->rx_func = h5_rx_crc;
435 h5->rx_pending = 2;
436 } else {
437 h5_complete_rx_pkt(hu);
Johan Hedbergbc1f35b2012-07-16 16:12:05 +0300438 }
439
440 return 0;
441}
442
/* RX state: the 4-byte three-wire header has been collected.
 *
 * Validates the additive header checksum, the sequence number of
 * reliable packets, and that only link packets arrive before the link
 * is active. On success, switches to payload collection with the
 * length taken from the header. Always returns 0 (validation errors
 * reset the RX state machine instead of failing the read).
 */
static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	/* All four header bytes must sum to 0xff (mod 256) */
	if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
		BT_ERR("Invalid header checksum");
		h5_reset_rx(h5);
		return 0;
	}

	/* Reliable packets must carry the sequence number we expect */
	if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
		BT_ERR("Out-of-order packet arrived (%u != %u)",
		       H5_HDR_SEQ(hdr), h5->tx_ack);
		h5_reset_rx(h5);
		return 0;
	}

	if (h5->state != H5_ACTIVE &&
	    H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
		BT_ERR("Non-link packet received in non-active state");
		h5_reset_rx(h5);
		return 0;
	}

	h5->rx_func = h5_rx_payload;
	h5->rx_pending = H5_HDR_LEN(hdr);

	return 0;
}
478
/* RX state: first byte after a packet delimiter.
 *
 * Repeated delimiters between packets are swallowed (return 1). The
 * first non-delimiter byte starts a new packet: allocate the receive
 * skb and switch to header collection. Returning 0 leaves the current
 * byte unconsumed so h5_recv() feeds it into the header via the
 * rx_pending path. Returns -ENOMEM if the skb cannot be allocated.
 */
static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		return 1;

	h5->rx_func = h5_rx_3wire_hdr;
	h5->rx_pending = 4;

	h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
	if (!h5->rx_skb) {
		BT_ERR("Can't allocate mem for new packet");
		h5_reset_rx(h5);
		return -ENOMEM;
	}

	h5->rx_skb->dev = (void *)hu->hdev;

	return 0;
}
500
/* RX state: hunting for a SLIP delimiter marking a packet boundary.
 * Consumes one byte per call (always returns 1); once a delimiter is
 * seen, the next byte is handled by h5_rx_pkt_start().
 */
static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		h5->rx_func = h5_rx_pkt_start;

	return 1;
}
510
/* SLIP-decode one received byte and append the result to rx_skb,
 * decrementing rx_pending.
 *
 * Escape state is carried across calls in the H5_RX_ESC flag: an ESC
 * byte only sets the flag, and the following byte selects the decoded
 * value. An invalid escape sequence aborts the whole packet.
 */
static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
{
	const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
	const u8 *byte = &c;

	/* First byte of an escape sequence: remember and wait for next */
	if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
		set_bit(H5_RX_ESC, &h5->flags);
		return;
	}

	if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
		switch (c) {
		case SLIP_ESC_DELIM:
			byte = &delim;
			break;
		case SLIP_ESC_ESC:
			byte = &esc;
			break;
		default:
			BT_ERR("Invalid esc byte 0x%02hhx", c);
			h5_reset_rx(h5);
			return;
		}
	}

	skb_put_data(h5->rx_skb, byte, 1);
	h5->rx_pending--;

	BT_DBG("unsliped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
}
541
542static void h5_reset_rx(struct h5 *h5)
543{
544 if (h5->rx_skb) {
545 kfree_skb(h5->rx_skb);
546 h5->rx_skb = NULL;
547 }
548
549 h5->rx_func = h5_rx_delimiter;
550 h5->rx_pending = 0;
Johan Hedberge0482102012-07-16 16:12:19 +0300551 clear_bit(H5_RX_ESC, &h5->flags);
Johan Hedbergbc1f35b2012-07-16 16:12:05 +0300552}
553
/* hci_uart proto .recv callback: feed raw UART bytes into the RX state
 * machine.
 *
 * While rx_pending is non-zero the incoming bytes belong to the
 * current packet and are SLIP-decoded into rx_skb (a bare delimiter in
 * the middle of a packet is an error and resets RX). Otherwise the
 * current rx_func state handler examines the byte and reports how many
 * input bytes it consumed (0 means "re-feed this byte via the
 * rx_pending path"). Returns 0, or a negative error from rx_func.
 */
static int h5_recv(struct hci_uart *hu, const void *data, int count)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *ptr = data;

	BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
	       count);

	while (count > 0) {
		int processed;

		if (h5->rx_pending > 0) {
			if (*ptr == SLIP_DELIMITER) {
				BT_ERR("Too short H5 packet");
				/* Delimiter is left in the stream and
				 * re-read by h5_rx_delimiter next. */
				h5_reset_rx(h5);
				continue;
			}

			h5_unslip_one_byte(h5, *ptr);

			ptr++; count--;
			continue;
		}

		processed = h5->rx_func(hu, *ptr);
		if (processed < 0)
			return processed;

		ptr += processed;
		count -= processed;
	}

	return 0;
}
588
/* hci_uart proto .enqueue callback: queue an outgoing HCI packet.
 *
 * ACL data and commands go to the reliable queue, SCO data to the
 * unreliable queue. Oversized packets (payload length is a 12-bit
 * header field) and packets submitted before the link is active are
 * dropped. Always returns 0; ownership of skb is taken in all paths.
 */
static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	struct h5 *h5 = hu->priv;

	if (skb->len > 0xfff) {
		BT_ERR("Packet too long (%u bytes)", skb->len);
		kfree_skb(skb);
		return 0;
	}

	if (h5->state != H5_ACTIVE) {
		BT_ERR("Ignoring HCI data in non-active state");
		kfree_skb(skb);
		return 0;
	}

	switch (hci_skb_pkt_type(skb)) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
		skb_queue_tail(&h5->rel, skb);
		break;

	case HCI_SCODATA_PKT:
		skb_queue_tail(&h5->unrel, skb);
		break;

	default:
		BT_ERR("Unknown packet type %u", hci_skb_pkt_type(skb));
		kfree_skb(skb);
		break;
	}

	return 0;
}
623
/* Append a SLIP packet delimiter (0xc0) to the outgoing skb */
static void h5_slip_delim(struct sk_buff *skb)
{
	const char delim = SLIP_DELIMITER;

	skb_put_data(skb, &delim, 1);
}
630
631static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
632{
633 const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
634 const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };
635
636 switch (c) {
637 case SLIP_DELIMITER:
Johannes Berg59ae1d12017-06-16 14:29:20 +0200638 skb_put_data(skb, &esc_delim, 2);
Johan Hedbergc0a1b732012-07-16 16:12:06 +0300639 break;
640 case SLIP_ESC:
Johannes Berg59ae1d12017-06-16 14:29:20 +0200641 skb_put_data(skb, &esc_esc, 2);
Johan Hedbergc0a1b732012-07-16 16:12:06 +0300642 break;
643 default:
Johannes Berg59ae1d12017-06-16 14:29:20 +0200644 skb_put_data(skb, &c, 1);
Johan Hedbergc0a1b732012-07-16 16:12:06 +0300645 }
646}
647
Johan Hedbergc826ed02012-07-16 16:12:17 +0300648static bool valid_packet_type(u8 type)
649{
650 switch (type) {
651 case HCI_ACLDATA_PKT:
652 case HCI_COMMAND_PKT:
653 case HCI_SCODATA_PKT:
654 case HCI_3WIRE_LINK_PKT:
655 case HCI_3WIRE_ACK_PKT:
656 return true;
657 default:
658 return false;
659 }
660}
661
/* Build a fully SLIP-framed three-wire packet around the payload.
 *
 * Allocates a new skb sized for the worst-case SLIP expansion, writes
 * the 4-byte header (seq, ack, type, 12-bit length, additive
 * checksum) followed by the escaped payload, all between two
 * delimiters. Sending any packet carries the current ack, so the
 * pending-ack flag is cleared here; reliable types additionally
 * consume a TX sequence number. Returns NULL on invalid type or
 * allocation failure.
 */
static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
				      const u8 *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;
	u8 hdr[4];
	int i;

	if (!valid_packet_type(pkt_type)) {
		BT_ERR("Unknown packet type %u", pkt_type);
		return NULL;
	}

	/*
	 * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
	 * (because bytes 0xc0 and 0xdb are escaped, worst case is when
	 * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
	 * delimiters at start and end).
	 */
	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	hci_skb_pkt_type(nskb) = pkt_type;

	h5_slip_delim(nskb);

	/* Ack field; sending it satisfies any pending ack request */
	hdr[0] = h5->tx_ack << 3;
	clear_bit(H5_TX_ACK_REQ, &h5->flags);

	/* Reliable packet? */
	if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
		hdr[0] |= 1 << 7;
		hdr[0] |= h5->tx_seq;
		h5->tx_seq = (h5->tx_seq + 1) % 8;
	}

	/* 12-bit length split across bytes 1 and 2; byte 3 makes all
	 * four header bytes sum to 0xff (checked on RX). */
	hdr[1] = pkt_type | ((len & 0x0f) << 4);
	hdr[2] = len >> 4;
	hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

	BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	for (i = 0; i < 4; i++)
		h5_slip_one_byte(nskb, hdr[i]);

	for (i = 0; i < len; i++)
		h5_slip_one_byte(nskb, data[i]);

	h5_slip_delim(nskb);

	return nskb;
}
718
/* hci_uart proto .dequeue callback: produce the next frame to transmit.
 *
 * Priority order:
 *   1. If the peer is not awake, send at most one wakeup request and
 *      arm a short timer to retry (see h5_timed_event).
 *   2. Unreliable queue.
 *   3. Reliable queue, while the unack window has room; a sent packet
 *      moves to the unack queue and arms the ack timeout.
 *   4. A standalone ack packet if one is pending.
 * Returns NULL when there is nothing to send.
 */
static struct sk_buff *h5_dequeue(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	unsigned long flags;
	struct sk_buff *skb, *nskb;

	if (h5->sleep != H5_AWAKE) {
		const unsigned char wakeup_req[] = { 0x05, 0xfa };

		/* A wakeup request is already in flight */
		if (h5->sleep == H5_WAKING_UP)
			return NULL;

		h5->sleep = H5_WAKING_UP;
		BT_DBG("Sending wakeup request");

		mod_timer(&h5->timer, jiffies + HZ / 100);
		return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
	}

	skb = skb_dequeue(&h5->unrel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			kfree_skb(skb);
			return nskb;
		}

		/* Framing alloc failed: put the packet back for later */
		skb_queue_head(&h5->unrel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	/* Sliding window full: wait for acks before sending more */
	if (h5->unack.qlen >= h5->tx_win)
		goto unlock;

	skb = skb_dequeue(&h5->rel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			/* Keep the original until it is acked */
			__skb_queue_tail(&h5->unack, skb);
			mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
			spin_unlock_irqrestore(&h5->unack.lock, flags);
			return nskb;
		}

		skb_queue_head(&h5->rel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);

	if (test_bit(H5_TX_ACK_REQ, &h5->flags))
		return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);

	return NULL;
}
779
/* hci_uart proto .flush callback: nothing to flush for H5 */
static int h5_flush(struct hci_uart *hu)
{
	BT_DBG("hu %p", hu);
	return 0;
}
785
/* Three-wire UART (H5) protocol operations registered with hci_uart */
static const struct hci_uart_proto h5p = {
	.id		= HCI_UART_3WIRE,
	.name		= "Three-wire (H5)",
	.open		= h5_open,
	.close		= h5_close,
	.setup		= h5_setup,
	.recv		= h5_recv,
	.enqueue	= h5_enqueue,
	.dequeue	= h5_dequeue,
	.flush		= h5_flush,
};
797
/* serdev probe: allocate per-device state, pick up ACPI-matched vendor
 * hooks plus their GPIO mapping, request the optional enable and
 * device-wake GPIOs, and register the HCI UART device.
 */
static int h5_serdev_probe(struct serdev_device *serdev)
{
	const struct acpi_device_id *match;
	struct device *dev = &serdev->dev;
	struct h5 *h5;

	h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL);
	if (!h5)
		return -ENOMEM;

	set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags);

	h5->hu = &h5->serdev_hu;
	h5->serdev_hu.serdev = serdev;
	serdev_device_set_drvdata(serdev, h5);

	if (has_acpi_companion(dev)) {
		match = acpi_match_device(dev->driver->acpi_match_table, dev);
		if (!match)
			return -ENODEV;

		/* Vendor hooks and ID string come from the match table */
		h5->vnd = (const struct h5_vnd *)match->driver_data;
		h5->id  = (char *)match->id;

		if (h5->vnd->acpi_gpio_map)
			devm_acpi_dev_add_driver_gpios(dev,
					h5->vnd->acpi_gpio_map);
	}

	/* Both GPIOs are optional; NULL descriptors are valid */
	h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(h5->enable_gpio))
		return PTR_ERR(h5->enable_gpio);

	h5->device_wake_gpio = devm_gpiod_get_optional(dev, "device-wake",
						       GPIOD_OUT_LOW);
	if (IS_ERR(h5->device_wake_gpio))
		return PTR_ERR(h5->device_wake_gpio);

	return hci_uart_register_device(&h5->serdev_hu, &h5p);
}
838
839static void h5_serdev_remove(struct serdev_device *serdev)
840{
841 struct h5 *h5 = serdev_device_get_drvdata(serdev);
842
843 hci_uart_unregister_device(&h5->serdev_hu);
844}
845
Hans de Goede28a75e42018-10-30 14:17:22 +0100846static int __maybe_unused h5_serdev_suspend(struct device *dev)
847{
848 struct h5 *h5 = dev_get_drvdata(dev);
849 int ret = 0;
850
851 if (h5->vnd && h5->vnd->suspend)
852 ret = h5->vnd->suspend(h5);
853
854 return ret;
855}
856
857static int __maybe_unused h5_serdev_resume(struct device *dev)
858{
859 struct h5 *h5 = dev_get_drvdata(dev);
860 int ret = 0;
861
862 if (h5->vnd && h5->vnd->resume)
863 ret = h5->vnd->resume(h5);
864
865 return ret;
866}
867
Marcel Holtmannb9763cd2018-08-09 10:33:07 +0200868#ifdef CONFIG_BT_HCIUART_RTL
Jeremy Clineb825d7c2018-08-02 16:57:20 +0200869static int h5_btrtl_setup(struct h5 *h5)
870{
871 struct btrtl_device_info *btrtl_dev;
872 struct sk_buff *skb;
873 __le32 baudrate_data;
874 u32 device_baudrate;
875 unsigned int controller_baudrate;
876 bool flow_control;
877 int err;
878
879 btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id);
880 if (IS_ERR(btrtl_dev))
881 return PTR_ERR(btrtl_dev);
882
883 err = btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev,
884 &controller_baudrate, &device_baudrate,
885 &flow_control);
886 if (err)
887 goto out_free;
888
889 baudrate_data = cpu_to_le32(device_baudrate);
890 skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data),
891 &baudrate_data, HCI_INIT_TIMEOUT);
892 if (IS_ERR(skb)) {
893 rtl_dev_err(h5->hu->hdev, "set baud rate command failed\n");
894 err = PTR_ERR(skb);
895 goto out_free;
896 } else {
897 kfree_skb(skb);
898 }
899 /* Give the device some time to set up the new baudrate. */
900 usleep_range(10000, 20000);
901
902 serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate);
903 serdev_device_set_flow_control(h5->hu->serdev, flow_control);
904
905 err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev);
906 /* Give the device some time before the hci-core sends it a reset */
907 usleep_range(10000, 20000);
908
909out_free:
910 btrtl_free(btrtl_dev);
911
912 return err;
913}
914
/* Vendor open hook: program the fixed boot-time UART parameters and power
 * the controller on via its GPIOs. Setting a NULL optional GPIO descriptor
 * is a no-op, so this is safe when the GPIOs were not found in probe.
 */
static void h5_btrtl_open(struct h5 *h5)
{
	/* Devices always start with these fixed parameters */
	serdev_device_set_flow_control(h5->hu->serdev, false);
	serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN);
	serdev_device_set_baudrate(h5->hu->serdev, 115200);

	/* The controller needs up to 500ms to wakeup */
	gpiod_set_value_cansleep(h5->enable_gpio, 1);
	gpiod_set_value_cansleep(h5->device_wake_gpio, 1);
	msleep(500);
}
927
Hans de Goede4c791482018-08-02 16:57:21 +0200928static void h5_btrtl_close(struct h5 *h5)
929{
930 gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
931 gpiod_set_value_cansleep(h5->enable_gpio, 0);
932}
933
Hans de Goede85890862018-10-30 14:17:23 +0100934/* Suspend/resume support. On many devices the RTL BT device loses power during
935 * suspend/resume, causing it to lose its firmware and all state. So we simply
936 * turn it off on suspend and reprobe on resume. This mirrors how RTL devices
937 * are handled in the USB driver, where the USB_QUIRK_RESET_RESUME is used which
938 * also causes a reprobe on resume.
939 */
940static int h5_btrtl_suspend(struct h5 *h5)
941{
942 serdev_device_set_flow_control(h5->hu->serdev, false);
943 gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
944 gpiod_set_value_cansleep(h5->enable_gpio, 0);
945 return 0;
946}
947
/* Context for the deferred reprobe scheduled from h5_btrtl_resume(). */
struct h5_btrtl_reprobe {
	struct device *dev;	/* reference held until the work has run */
	struct work_struct work;
};
952
953static void h5_btrtl_reprobe_worker(struct work_struct *work)
954{
955 struct h5_btrtl_reprobe *reprobe =
956 container_of(work, struct h5_btrtl_reprobe, work);
957 int ret;
958
959 ret = device_reprobe(reprobe->dev);
960 if (ret && ret != -EPROBE_DEFER)
961 dev_err(reprobe->dev, "Reprobe error %d\n", ret);
962
963 put_device(reprobe->dev);
964 kfree(reprobe);
965 module_put(THIS_MODULE);
966}
967
968static int h5_btrtl_resume(struct h5 *h5)
969{
970 struct h5_btrtl_reprobe *reprobe;
971
972 reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL);
973 if (!reprobe)
974 return -ENOMEM;
975
976 __module_get(THIS_MODULE);
977
978 INIT_WORK(&reprobe->work, h5_btrtl_reprobe_worker);
979 reprobe->dev = get_device(&h5->hu->serdev->dev);
980 queue_work(system_long_wq, &reprobe->work);
981 return 0;
982}
983
Hans de Goede4c791482018-08-02 16:57:21 +0200984static const struct acpi_gpio_params btrtl_device_wake_gpios = { 0, 0, false };
985static const struct acpi_gpio_params btrtl_enable_gpios = { 1, 0, false };
986static const struct acpi_gpio_params btrtl_host_wake_gpios = { 2, 0, false };
987static const struct acpi_gpio_mapping acpi_btrtl_gpios[] = {
988 { "device-wake-gpios", &btrtl_device_wake_gpios, 1 },
989 { "enable-gpios", &btrtl_enable_gpios, 1 },
990 { "host-wake-gpios", &btrtl_host_wake_gpios, 1 },
991 {},
992};
993
Jeremy Clineb825d7c2018-08-02 16:57:20 +0200994static struct h5_vnd rtl_vnd = {
995 .setup = h5_btrtl_setup,
996 .open = h5_btrtl_open,
Hans de Goede4c791482018-08-02 16:57:21 +0200997 .close = h5_btrtl_close,
Hans de Goede85890862018-10-30 14:17:23 +0100998 .suspend = h5_btrtl_suspend,
999 .resume = h5_btrtl_resume,
Hans de Goede4c791482018-08-02 16:57:21 +02001000 .acpi_gpio_map = acpi_btrtl_gpios,
Jeremy Clineb825d7c2018-08-02 16:57:20 +02001001};
Marcel Holtmannb9763cd2018-08-09 10:33:07 +02001002#endif
Jeremy Clineb825d7c2018-08-02 16:57:20 +02001003
#ifdef CONFIG_ACPI
/* ACPI IDs handled by this driver; driver_data points at the vendor hooks. */
static const struct acpi_device_id h5_acpi_match[] = {
#ifdef CONFIG_BT_HCIUART_RTL
	{ "OBDA8723", (kernel_ulong_t)&rtl_vnd },
#endif
	{ },
};
MODULE_DEVICE_TABLE(acpi, h5_acpi_match);
#endif
1013
Hans de Goede28a75e42018-10-30 14:17:22 +01001014static const struct dev_pm_ops h5_serdev_pm_ops = {
1015 SET_SYSTEM_SLEEP_PM_OPS(h5_serdev_suspend, h5_serdev_resume)
1016};
1017
Hans de Goedece945552018-08-02 16:57:18 +02001018static struct serdev_device_driver h5_serdev_driver = {
1019 .probe = h5_serdev_probe,
1020 .remove = h5_serdev_remove,
1021 .driver = {
1022 .name = "hci_uart_h5",
Jeremy Clineb825d7c2018-08-02 16:57:20 +02001023 .acpi_match_table = ACPI_PTR(h5_acpi_match),
Hans de Goede28a75e42018-10-30 14:17:22 +01001024 .pm = &h5_serdev_pm_ops,
Hans de Goedece945552018-08-02 16:57:18 +02001025 },
1026};
1027
Johan Hedberg7dec65c2012-07-16 16:12:02 +03001028int __init h5_init(void)
1029{
Hans de Goedece945552018-08-02 16:57:18 +02001030 serdev_device_driver_register(&h5_serdev_driver);
Marcel Holtmann01009ee2015-04-04 22:27:35 -07001031 return hci_uart_register_proto(&h5p);
Johan Hedberg7dec65c2012-07-16 16:12:02 +03001032}
1033
/* Module exit: unregister the serdev driver and the line-discipline proto. */
int __exit h5_deinit(void)
{
	serdev_device_driver_unregister(&h5_serdev_driver);
	return hci_uart_unregister_proto(&h5p);
}